├── .gitignore
├── .idea
│   ├── VirtualMicrogridSegmentation.iml
│   ├── encodings.xml
│   ├── misc.xml
│   ├── modules.xml
│   └── vcs.xml
├── Functioning_environment_test.ipynb
├── Interacting_with_pp_network.ipynb
├── LICENSE
├── POC_6bus.ipynb
├── README.md
├── Siobhan_scratch_testing.ipynb
├── requirements.txt
├── scripts
│   ├── run_ddpg.py
│   └── run_pg.py
├── testing.ipynb
└── virtual_microgrids
    ├── __init__.py
    ├── agents
    │   ├── __init__.py
    │   ├── actor_network.py
    │   └── critic_network.py
    ├── algorithms
    │   ├── __init__.py
    │   ├── ddpg.py
    │   └── pg.py
    ├── configs
    │   ├── __init__.py
    │   ├── config.py
    │   ├── config_base.py
    │   ├── six_bus_mvp1.py
    │   ├── six_bus_mvp2.py
    │   ├── six_bus_mvp3.py
    │   ├── six_bus_poc.py
    │   └── standard_lv_network.py
    ├── powerflow
    │   ├── __init__.py
    │   ├── network_generation.py
    │   └── pp_network.py
    └── utils
        ├── __init__.py
        ├── general.py
        ├── graph.py
        ├── linear_schedule.py
        ├── log_schedule.py
        ├── orstein_uhlenbeck_action_noise.py
        └── replay_buffer.py

/.gitignore:
--------------------------------------------------------------------------------
# Tensorboard Results
results/

# Created by https://www.gitignore.io/api/python,pycharm,sublimetext,jupyternotebook
# Edit at https://www.gitignore.io/?templates=python,pycharm,sublimetext,jupyternotebook

### JupyterNotebook ###
.ipynb_checkpoints
*/.ipynb_checkpoints/*

# Remove previous ipynb_checkpoints
#   git rm -r .ipynb_checkpoints/
#

### PyCharm ###
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839

# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf

# Generated files
.idea/**/contentModel.xml

# Sensitive or high-churn files
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml

# Gradle
.idea/**/gradle.xml
.idea/**/libraries

# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/modules.xml
# .idea/*.iml
# .idea/modules

# CMake
cmake-build-*/

# Mongo Explorer plugin
.idea/**/mongoSettings.xml

# File-based project format
*.iws

# IntelliJ
out/

# mpeltonen/sbt-idea plugin
.idea_modules/

# JIRA plugin
atlassian-ide-plugin.xml

# Cursive Clojure plugin
.idea/replstate.xml

# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties

# Editor-based Rest Client
.idea/httpRequests

# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser

### PyCharm Patch ###
# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721

# *.iml
# modules.xml
# .idea/misc.xml
# *.ipr

# Sonarlint plugin
.idea/sonarlint

### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

### Python Patch ###
.venv/

### SublimeText ###
# Cache files for Sublime Text
*.tmlanguage.cache
*.tmPreferences.cache
*.stTheme.cache

# Workspace files are user-specific
*.sublime-workspace

# Project files should be checked into the repository, unless a significant
# proportion of contributors will probably not be using Sublime Text
# *.sublime-project

# SFTP configuration file
sftp-config.json

# Package control specific files
Package Control.last-run
Package Control.ca-list
Package Control.ca-bundle
Package Control.system-ca-bundle
Package Control.cache/
Package Control.ca-certs/
Package Control.merged-ca-bundle
Package Control.user-ca-bundle
oscrypto-ca-bundle.crt
bh_unicode_properties.cache

# Sublime-github package stores a github token in this file
# https://packagecontrol.io/packages/sublime-github
GitHub.sublime-settings

### macOS ###
# General
.DS_Store
.AppleDouble
.LSOverride

# Icon must end with two \r
Icon

# Thumbnails
._*

# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent

# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk

# End of https://www.gitignore.io/api/python,pycharm,sublimetext,jupyternotebook

--------------------------------------------------------------------------------
/.idea/VirtualMicrogridSegmentation.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/encodings.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/Interacting_with_pp_network.ipynb:
--------------------------------------------------------------------------------
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# How to work with pp_network.py\n",
    "\n",
    "*pp_network.py* has a class called *NetModel* that handles the whole network side of our simulation. The network state object includes a pandapower object called *net*, as well as methods to implement actions, calculate the reward, and run simulations on *net*.\n",
    "\n",
    "Another pandapower note: net.line has an 'in_service' attribute, so it should be easy for us to simulate a downed line.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import pandapower as pp\n",
    "import pandapower.networks\n",
    "import pandapower.plotting\n",
    "import pp_network\n",
    "from pp_network import *"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Initialize network\n",
    "\n",
    "Options to initialize: 'case5', 'case9', 'case14', 'rural_1', 'rural_2', 'village_1', 'village_2', 'suburb_1', 'iceland', etc. Basically, pick anything from https://pandapower.readthedocs.io/en/v1.6.0/networks.html"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "network_model = NetModel(network_name='rural_1', zero_out_gen_shunt_storage=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Number of load buses: 14\nWhich buses are they on: [ 3  8  9 10 11 19 20 21 22 23 24 25  7 13]\n"
     ]
    }
   ],
   "source": [
    "print('Number of load buses: ',network_model.net.load.shape[0])\n",
    "print('Which buses are they on: ',network_model.net.load.bus.values)"
   ]
  },
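  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick sketch (ours, not executed in this notebook): swapping in a different test network should only require changing the name passed to the constructor above. We rely only on the constructor arguments already shown; the variable name *small_model* is hypothetical.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hypothetical example: wrap a small IEEE case instead of the rural LV feeder.\n",
    "# 'case5' is one of the option names listed in the markdown above.\n",
    "small_model = NetModel(network_name='case5', zero_out_gen_shunt_storage=True)\n",
    "\n",
    "# The underlying pandapower tables are reachable the same way as before.\n",
    "print(small_model.net.bus.shape[0], 'buses')\n",
    "print(small_model.net.load.shape[0], 'load buses')"
   ]
  },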
\n", 75 | "\n", 88 | "\n", 89 | " \n", 90 | " \n", 91 | " \n", 92 | " \n", 93 | " \n", 94 | " \n", 95 | " \n", 96 | " \n", 97 | " \n", 98 | " \n", 99 | " \n", 100 | " \n", 101 | " \n", 102 | " \n", 103 | " \n", 104 | " \n", 105 | " \n", 106 | " \n", 107 | " \n", 108 | " \n", 109 | " \n", 110 | " \n", 111 | " \n", 112 | " \n", 113 | " \n", 114 | " \n", 115 | " \n", 116 | " \n", 117 | " \n", 118 | " \n", 119 | " \n", 120 | " \n", 121 | " \n", 122 | " \n", 123 | " \n", 124 | " \n", 125 | " \n", 126 | " \n", 127 | " \n", 128 | " \n", 129 | " \n", 130 | " \n", 131 | " \n", 132 | " \n", 133 | " \n", 134 | " \n", 135 | " \n", 136 | " \n", 137 | " \n", 138 | " \n", 139 | " \n", 140 | " \n", 141 | " \n", 142 | " \n", 143 | " \n", 144 | " \n", 145 | " \n", 146 | " \n", 147 | " \n", 148 | " \n", 149 | " \n", 150 | " \n", 151 | " \n", 152 | " \n", 153 | " \n", 154 | " \n", 155 | " \n", 156 | " \n", 157 | " \n", 158 | " \n", 159 | "
namebusp_kwq_kvarsn_kvascalingin_servicetype
0None800NaN1.0TrueNone
1None900NaN1.0TrueNone
2None1100NaN1.0TrueNone
3None2200NaN1.0TrueNone
4None2300NaN1.0TrueNone
\n", 160 | "
" 161 | ], 162 | "text/plain": [ 163 | "
\n", 164 | "\n", 177 | "\n", 178 | " \n", 179 | " \n", 180 | " \n", 181 | " \n", 182 | " \n", 183 | " \n", 184 | " \n", 185 | " \n", 186 | " \n", 187 | " \n", 188 | " \n", 189 | " \n", 190 | " \n", 191 | " \n", 192 | " \n", 193 | " \n", 194 | " \n", 195 | " \n", 196 | " \n", 197 | " \n", 198 | " \n", 199 | " \n", 200 | " \n", 201 | " \n", 202 | " \n", 203 | " \n", 204 | " \n", 205 | " \n", 206 | " \n", 207 | " \n", 208 | " \n", 209 | " \n", 210 | " \n", 211 | " \n", 212 | " \n", 213 | " \n", 214 | " \n", 215 | " \n", 216 | " \n", 217 | " \n", 218 | " \n", 219 | " \n", 220 | " \n", 221 | " \n", 222 | " \n", 223 | " \n", 224 | " \n", 225 | " \n", 226 | " \n", 227 | " \n", 228 | " \n", 229 | " \n", 230 | " \n", 231 | " \n", 232 | " \n", 233 | " \n", 234 | " \n", 235 | " \n", 236 | " \n", 237 | " \n", 238 | " \n", 239 | " \n", 240 | " \n", 241 | " \n", 242 | " \n", 243 | " \n", 244 | " \n", 245 | " \n", 246 | " \n", 247 | " \n", 248 | "
namebusp_kwq_kvarsn_kvascalingin_servicetype
0None800NaN1.0TrueNone
1None900NaN1.0TrueNone
2None1100NaN1.0TrueNone
3None2200NaN1.0TrueNone
4None2300NaN1.0TrueNone
\n", 249 | "
" 250 | ] 251 | }, 252 | "execution_count": 6, 253 | "metadata": {}, 254 | "output_type": "execute_result" 255 | } 256 | ], 257 | "source": [ 258 | "network_model.net.sgen" 259 | ] 260 | }, 261 | { 262 | "cell_type": "markdown", 263 | "metadata": {}, 264 | "source": [ 265 | "### Look at powerflows to begin with: " 266 | ] 267 | }, 268 | { 269 | "cell_type": "code", 270 | "execution_count": 7, 271 | "metadata": {}, 272 | "outputs": [ 273 | { 274 | "name": "stderr", 275 | "output_type": "stream", 276 | "text": [ 277 | "numba cannot be imported and numba functions are disabled.\nProbably the execution is slow.\nPlease install numba to gain a massive speedup.\n(or if you prefer slow execution, set the flag numba=False to avoid this warning!)\n\n" 278 | ] 279 | } 280 | ], 281 | "source": [ 282 | "network_model.run_powerflow()" 283 | ] 284 | }, 285 | { 286 | "cell_type": "code", 287 | "execution_count": 8, 288 | "metadata": { 289 | "scrolled": true 290 | }, 291 | "outputs": [ 292 | { 293 | "data": { 294 | "text/html": [ 295 | "
\n", 296 | "\n", 309 | "\n", 310 | " \n", 311 | " \n", 312 | " \n", 313 | " \n", 314 | " \n", 315 | " \n", 316 | " \n", 317 | " \n", 318 | " \n", 319 | " \n", 320 | " \n", 321 | " \n", 322 | " \n", 323 | " \n", 324 | " \n", 325 | " \n", 326 | " \n", 327 | " \n", 328 | " \n", 329 | " \n", 330 | " \n", 331 | " \n", 332 | " \n", 333 | " \n", 334 | " \n", 335 | " \n", 336 | " \n", 337 | " \n", 338 | " \n", 339 | " \n", 340 | " \n", 341 | " \n", 342 | " \n", 343 | " \n", 344 | " \n", 345 | " \n", 346 | " \n", 347 | " \n", 348 | " \n", 349 | " \n", 350 | " \n", 351 | " \n", 352 | " \n", 353 | " \n", 354 | " \n", 355 | " \n", 356 | " \n", 357 | " \n", 358 | " \n", 359 | " \n", 360 | " \n", 361 | " \n", 362 | " \n", 363 | " \n", 364 | " \n", 365 | " \n", 366 | " \n", 367 | " \n", 368 | " \n", 369 | " \n", 370 | " \n", 371 | " \n", 372 | " \n", 373 | " \n", 374 | " \n", 375 | " \n", 376 | " \n", 377 | " \n", 378 | " \n", 379 | " \n", 380 | " \n", 381 | " \n", 382 | " \n", 383 | " \n", 384 | " \n", 385 | " \n", 386 | " \n", 387 | " \n", 388 | " \n", 389 | " \n", 390 | " \n", 391 | " \n", 392 | " \n", 393 | " \n", 394 | " \n", 395 | " \n", 396 | " \n", 397 | " \n", 398 | " \n", 399 | " \n", 400 | " \n", 401 | " \n", 402 | " \n", 403 | " \n", 404 | " \n", 405 | " \n", 406 | " \n", 407 | " \n", 408 | " \n", 409 | " \n", 410 | " \n", 411 | " \n", 412 | " \n", 413 | " \n", 414 | " \n", 415 | " \n", 416 | " \n", 417 | " \n", 418 | " \n", 419 | " \n", 420 | " \n", 421 | " \n", 422 | " \n", 423 | " \n", 424 | " \n", 425 | " \n", 426 | " \n", 427 | " \n", 428 | " \n", 429 | " \n", 430 | " \n", 431 | " \n", 432 | " \n", 433 | " \n", 434 | " \n", 435 | " \n", 436 | " \n", 437 | " \n", 438 | " \n", 439 | " \n", 440 | " \n", 441 | " \n", 442 | " \n", 443 | " \n", 444 | " \n", 445 | " \n", 446 | " \n", 447 | " \n", 448 | " \n", 449 | " \n", 450 | " \n", 451 | " \n", 452 | " \n", 453 | " \n", 454 | " \n", 455 | " \n", 456 | " \n", 457 | " \n", 458 | " \n", 459 | " \n", 460 | " \n", 461 | " \n", 462 | " \n", 463 | " \n", 464 | " \n", 465 | " \n", 466 | " \n", 467 | " \n", 468 | " \n", 469 | " \n", 470 | " \n", 471 | " \n", 472 | " \n", 473 | " \n", 474 | " \n", 475 | " \n", 476 | " \n", 477 | " \n", 478 | " \n", 479 | " \n", 480 | " \n", 481 | " \n", 482 | " \n", 483 | " \n", 484 | " \n", 485 | " \n", 486 | " \n", 487 | " \n", 488 | " \n", 489 | " \n", 490 | " \n", 491 | " \n", 492 | " \n", 493 | " \n", 494 | " \n", 495 | " \n", 496 | " \n", 497 | " \n", 498 | " \n", 499 | " \n", 500 | " \n", 501 | " \n", 502 | " \n", 503 | "
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "       vm_pu  va_degree      p_kw    q_kvar\n",
       "0   1.000000   0.000000 -78.80322 -1.780376\n",
       "1   0.995039  -1.090849   0.00000  0.000000\n",
       "2   0.993303  -1.129337   0.00000  0.000000\n",
       "3   0.992705  -1.133796   5.10000  0.000000\n",
       "4   0.990056  -1.200407   0.00000  0.000000\n",
       "5   0.985967  -1.291037   0.00000  0.000000\n",
       "6   0.982775  -1.362257   0.00000  0.000000\n",
       "7   0.980482  -1.413681   7.90000  0.000000\n",
       "8   0.989456  -1.204895   5.10000  0.000000\n",
       "9   0.985365  -1.295563   5.10000  0.000000\n",
       "10  0.982171  -1.366812   5.10000  0.000000\n",
       "11  0.979876  -1.418257   5.10000  0.000000\n",
       "12  0.991120  -1.177043   0.00000  0.000000\n",
       "13  0.987658  -1.253721   7.90000  0.000000\n",
       "14  0.985361  -1.304816   0.00000  0.000000\n",
       "15  0.983523  -1.345863   0.00000  0.000000\n",
       "16  0.982144  -1.376749   0.00000  0.000000\n",
       "17  0.981149  -1.397388   0.00000  0.000000\n",
       "18  0.980651  -1.407722   0.00000  0.000000\n",
       "19  0.990521  -1.181522   5.10000  0.000000\n",
       "20  0.987057  -1.258232   5.10000  0.000000\n",
       "21  0.984759  -1.309347   5.10000  0.000000\n",
       "22  0.982919  -1.350411   5.10000  0.000000\n",
       "23  0.981539  -1.381310   5.10000  0.000000\n",
       "24  0.980543  -1.401958   5.10000  0.000000\n",
       "25  0.980046  -1.412297   5.10000  0.000000"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "network_model.net.res_bus"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "    p_from_kw  q_from_kvar    p_to_kw      q_to_kvar     pl_kw   ql_kvar  i_from_ka   i_to_ka      i_ka  loading_percent\n",
       "0    5.111992     0.000155  -5.103071  -9.521132e-05  0.008921  0.000060   0.007415  0.007415  0.007415         2.746411\n",
       "1    5.103071     0.000095  -5.100000  -2.283533e-12  0.003071  0.000095   0.007415  0.007415  0.007415         5.222050\n",
       "2   28.641596     0.120211 -28.498339  -6.683071e-02  0.143257  0.053380   0.041547  0.041547  0.041547        15.387799\n",
       "3   23.395247     0.066731 -23.298701  -3.130114e-02  0.096546  0.035430   0.034107  0.034107  0.034107        12.632382\n",
       "4   18.195584     0.031196 -18.136699  -1.023829e-02  0.058885  0.020957   0.026637  0.026637  0.026637         9.865516\n",
       "5   13.033562     0.010128 -13.003152  -1.134231e-04  0.030410  0.010015   0.019142  0.019142  0.019142         7.089650\n",
       "6    5.103091     0.000100  -5.100000  -5.377196e-12  0.003091  0.000100   0.007440  0.007440  0.007440         5.239195\n",
       "7    5.103117     0.000106  -5.100000  -1.019192e-11  0.003117  0.000106   0.007471  0.007471  0.007471         5.260949\n",
       "8    5.103137     0.000110  -5.100000  -1.612224e-11  0.003137  0.000110   0.007495  0.007495  0.007495         5.278058\n",
       "9    5.103152     0.000113  -5.100000  -2.119764e-11  0.003152  0.000113   0.007512  0.007512  0.007512         5.290419\n",
       "10  44.062897     0.163912 -43.889547  -9.811871e-02  0.173350  0.065793   0.063917  0.063917  0.063917        23.672899\n",
       "11  38.786462     0.098020 -38.651079  -4.682349e-02  0.135383  0.051197   0.056485  0.056485  0.056485        20.920441\n",
       "12  25.647973     0.046720 -25.588359  -2.466014e-02  0.059614  0.022060   0.037482  0.037482  0.037482        13.882340\n",
       "13  20.485238     0.024554 -20.447031  -1.072323e-02  0.038207  0.013830   0.030007  0.030007  0.030007        11.113770\n",
       "14  15.343898     0.010614 -15.322383  -3.200644e-03  0.021516  0.007414   0.022518  0.022518  0.022518         8.340016\n",
       "15  10.219241     0.003090 -10.208889  -2.781422e-04  0.010353  0.002811   0.015018  0.015018  0.015018         6.205941\n",
       "16   5.105741     0.000166  -5.103151  -1.131805e-04  0.002590  0.000052   0.007511  0.007511  0.007511         3.103759\n",
       "17   5.103085     0.000098  -5.100000  -3.835731e-12  0.003085  0.000098   0.007432  0.007432  0.007432         5.233563\n",
       "18   5.103106     0.000103  -5.100000  -7.074921e-12  0.003106  0.000103   0.007458  0.007458  0.007458         5.251930\n",
       "19   5.103121     0.000106  -5.100000  -1.006943e-11  0.003121  0.000106   0.007475  0.007475  0.007475         5.264187\n",
       "20   5.103133     0.000109  -5.100000  -1.314385e-11  0.003133  0.000109   0.007489  0.007489  0.007489         5.274039\n",
       "21   5.103142     0.000111  -5.100000  -1.544812e-11  0.003142  0.000111   0.007500  0.007500  0.007500         5.281455\n",
       "22   5.103148     0.000112  -5.100000  -1.815659e-11  0.003148  0.000112   0.007507  0.007507  0.007507         5.286818\n",
       "23   5.103151     0.000113  -5.100000  -1.904379e-11  0.003151  0.000113   0.007511  0.007511  0.007511         5.289504"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "network_model.net.res_line"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "   p_kw  q_kvar\n",
       "0   0.0     0.0\n",
       "1   0.0     0.0\n",
       "2   0.0     0.0\n",
       "3   0.0     0.0\n",
       "4   0.0     0.0"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "network_model.net.res_sgen  # we set these to zero as an option in the instantiation"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Fake test data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "num_times = 24\n",
    "p_load_data = np.random.randn(network_model.net.load.shape[0],num_times)\n",
    "q_load_data = np.random.randn(network_model.net.load.shape[0],num_times)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Add generators"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Add static generators at bus 7 and at bus 10 and initialize their real power production as 1 kW\n",
    "network_model.add_sgen(7,1.0)\n",
    "network_model.add_sgen(10,1.0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "   name  bus  p_kw  q_kvar  sn_kva  scaling in_service  type\n",
       "0  None    8     0       0     NaN      1.0       True  None\n",
       "1  None    9     0       0     NaN      1.0       True  None\n",
       "2  None   11     0       0     NaN      1.0       True  None\n",
       "3  None   22     0       0     NaN      1.0       True  None\n",
       "4  None   23     0       0     NaN      1.0       True  None\n",
       "5  None    7     1       0     NaN      1.0       True  None\n",
       "6  None   10     1       0     NaN      1.0       True  None"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "network_model.net.sgen"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Add battery"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Initialize at bus 3 with power flow 1.0 (charging), capacity 10 kWh, initial SOC 0%\n",
    "network_model.add_battery(3,1.0,10,0.0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# How many batteries?\n",
    "network_model.net.storage.shape[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "   name  bus  p_kw  q_kvar  sn_kva  soc_percent  min_e_kwh  max_e_kwh  scaling in_service  type\n",
       "0  None    3   1.0     0.0     NaN          0.0        0.0       10.0      1.0       True  None"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "network_model.net.storage"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Change the battery power"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "   name  bus  p_kw  q_kvar  sn_kva  soc_percent  min_e_kwh  max_e_kwh  scaling in_service  type\n",
       "0  None    3   2.0     0.0     NaN          0.2        0.0       10.0      1.0       True  None"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "network_model.update_batteries(battery_powers=2*np.ones((network_model.net.storage.shape[0],)), dt=1)\n",
    "network_model.net.storage"
   ]
  },
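  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Worked arithmetic for the state of charge above (our check, inferring the bookkeeping from the numbers): charging at 2 kW for dt = 1 hour adds 2 kWh to a 10 kWh battery, i.e. 2/10 = 0.2 of capacity. That matches the 0.2 shown in *soc_percent*, which therefore appears to be stored as a 0-1 fraction despite its name.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reproduce the SOC update by hand: energy added divided by capacity.\n",
    "p_kw, dt_h, cap_kwh = 2.0, 1.0, 10.0\n",
    "soc_fraction = p_kw * dt_h / cap_kwh\n",
    "soc_fraction  # 0.2, matching net.storage.soc_percent above"
   ]
  },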
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Update loads and run a powerflow"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "network_model.update_loads(p_load_data[:,0],q_load_data[:,0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "numba cannot be imported and numba functions are disabled.\nProbably the execution is slow.\nPlease install numba to gain a massive speedup.\n(or if you prefer slow execution, set the flag numba=False to avoid this warning!)\n\n"
     ]
    }
   ],
   "source": [
    "network_model.run_powerflow()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "    p_from_kw  q_from_kvar   p_to_kw  q_to_kvar     pl_kw   ql_kvar  i_from_ka   i_to_ka      i_ka  loading_percent\n",
       "0    1.671724    -1.272237 -1.670228   1.269417  0.001496 -0.002820   0.003038  0.003035  0.003038         1.125305\n",
       "1    1.670228    -1.269417 -1.669713   1.269179  0.000515 -0.000238   0.003035  0.003035  0.003035         2.137570\n",
       "2    3.145229     3.323651 -3.141593  -3.323989  0.003636 -0.000338   0.006618  0.006620  0.006620         2.451827\n",
       "3    2.373383     4.171482 -2.369377  -4.171675  0.004006 -0.000193   0.006947  0.006949  0.006949         2.573639\n",
       "4    2.226374     3.503736 -2.223372  -3.504314  0.003001 -0.000577   0.006013  0.006015  0.006015         2.227702\n",
       "5    1.417947     3.190509 -1.415821  -3.191421  0.002126 -0.000912   0.005060  0.005062  0.005062         1.874961\n",
       "6    0.768210    -0.847493 -0.768057   0.847208  0.000153 -0.000285   0.001656  0.001655  0.001656         1.165920\n",
       "7    0.143003     0.667939 -0.142948  -0.668236  0.000055 -0.000297   0.000989  0.000990  0.000990         0.697041\n",
       "8    0.805425     0.313805 -0.805338  -0.314097  0.000088 -0.000292   0.001253  0.001253  0.001253         0.882347\n",
       "9   -0.083678     1.460222  0.083929  -1.460493  0.000251 -0.000271   0.002121  0.002121  0.002121         1.493797\n",
       "10   1.462515     4.596197 -1.460450  -4.596291  0.002065 -0.000094   0.006976  0.006977  0.006977         2.584099\n",
       "11   1.013926     5.484886 -1.011162  -5.484711  0.002763  0.000175   0.008069  0.008071  0.008071         2.989149\n",
       "12   0.953250     4.212345 -0.951592  -4.212594  0.001658 -0.000250   0.006250  0.006251  0.006251         2.315218\n",
       "13   1.376985     4.307779 -1.375167  -4.307967  0.001819 -0.000187   0.006546  0.006547  0.006547         2.424908\n",
       "14   1.371745     3.743916 -1.370331  -3.744258  0.001414 -0.000342   0.005773  0.005774  0.005774         2.138564\n",
       "15   1.007038     2.879960 -1.006142  -2.880538  0.000896 -0.000578   0.004418  0.004420  0.004420         1.826269\n",
       "16   0.898548     1.092111 -0.898355  -1.092939  0.000193 -0.000827   0.002048  0.002049  0.002049         0.846900\n",
       "17   0.446524    -0.888595 -0.446408   0.888305  0.000116 -0.000290   0.001439  0.001438  0.001439         1.013178\n",
       "18  -0.262505     1.798970  0.262892  -1.799224  0.000387 -0.000255   0.002631  0.002631  0.002631         1.853029\n",
       "19  -0.425393    -0.095185  0.425415   0.094883  0.000022 -0.000302   0.000631  0.000631  0.000631         0.444334\n",
       "20   0.003422     0.564051 -0.003385  -0.564350  0.000037 -0.000299   0.000817  0.000817  0.000817         0.575424\n",
       "21   0.363292     0.864298 -0.363189  -0.864589  0.000103 -0.000291   0.001358  0.001358  0.001358         0.956442\n",
       "22   0.107594     1.788426 -0.107218  -1.788682  0.000376 -0.000255   0.002595  0.002596  0.002596         1.827891\n",
       "23   0.898355     1.092939 -0.898121  -1.093212  0.000235 -0.000274   0.002049  0.002050  0.002050         1.443548"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "network_model.net.res_line"
   ]
  },
\n", 2861 | "
" 2862 | ] 2863 | }, 2864 | "execution_count": 20, 2865 | "metadata": {}, 2866 | "output_type": "execute_result" 2867 | } 2868 | ], 2869 | "source": [ 2870 | "network_model.net.res_line" 2871 | ] 2872 | }, 2873 | { 2874 | "cell_type": "markdown", 2875 | "metadata": {}, 2876 | "source": [ 2877 | "# Calculate the Reward" 2878 | ] 2879 | }, 2880 | { 2881 | "cell_type": "code", 2882 | "execution_count": 21, 2883 | "metadata": {}, 2884 | "outputs": [ 2885 | { 2886 | "data": { 2887 | "text/plain": [ 2888 | "0.0" 2889 | ] 2890 | }, 2891 | "execution_count": 21, 2892 | "metadata": {}, 2893 | "output_type": "execute_result" 2894 | } 2895 | ], 2896 | "source": [ 2897 | "network_model.calculate_reward()\n", 2898 | "network_model.reward_val" 2899 | ] 2900 | }, 2901 | { 2902 | "cell_type": "markdown", 2903 | "metadata": {}, 2904 | "source": [ 2905 | "# Key Documentation: \n", 2906 | "\n", 2907 | "https://pandapower.readthedocs.io/en/v1.6.0/elements.html\n" 2908 | ] 2909 | }, 2910 | { 2911 | "cell_type": "code", 2912 | "execution_count": null, 2913 | "metadata": {}, 2914 | "outputs": [], 2915 | "source": [] 2916 | } 2917 | ], 2918 | "metadata": { 2919 | "kernelspec": { 2920 | "display_name": "Python 3", 2921 | "language": "python", 2922 | "name": "python3" 2923 | }, 2924 | "language_info": { 2925 | "codemirror_mode": { 2926 | "name": "ipython", 2927 | "version": 3 2928 | }, 2929 | "file_extension": ".py", 2930 | "mimetype": "text/x-python", 2931 | "name": "python", 2932 | "nbconvert_exporter": "python", 2933 | "pygments_lexer": "ipython3", 2934 | "version": "3.7.0" 2935 | } 2936 | }, 2937 | "nbformat": 4, 2938 | "nbformat_minor": 2 2939 | } 2940 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 2-Clause License 2 | 3 | Copyright (c) 2019, Bennet Meyers 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 17 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 20 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 23 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 24 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Virtual Microgrid Segmentation 2 | Stanford CS234 Final Project, Winter 2019 3 | 4 | Instructor: Prof. 
Emma Brunskill 5 | 6 | Class website: http://web.stanford.edu/class/cs234/index.html 7 | 8 | Project team: Bennet Meyers and Siobhan Powell 9 | 10 | Contact the authors: bennetm or siobhan.powell at stanford dot edu 11 | 12 | ## Overview 13 | Recent work has shown that microgrids can increase both grid flexibility and grid resiliency to unanticipated outages 14 | caused by events such as cyber attacks or extreme weather. A subclass of microgrids, known as “virtual 15 | islands”, arises when sections of a grid operate in isolation, with no powerflow between them and the larger grid, 16 | despite remaining physically connected. If a grid can partition into virtual islands in anticipation of an incoming 17 | resiliency event, customers in those islands will be less likely to experience outages. 18 | 19 | The goal of this project is to train a deep reinforcement learning (RL) agent to create and maintain as many small virtual 20 | islands as possible by operating a grid's storage resources. The agent is rewarded for separating nodes from the external 21 | grid connection and for splitting the grid graph into as many segments as possible. 22 | 23 | As our environment is deterministic, we implement PG (policy gradient) and DDPG (deep deterministic policy gradient) algorithms to train the agent, and 24 | apply them to a small test network. We find that DDPG performs best: it can successfully maintain microgrids even when 25 | the loads are time varying and change between episodes. 26 | 27 | ## The DDPG algorithm 28 | 29 | The DDPG algorithm was introduced by Lillicrap et al. in "Continuous control with deep reinforcement learning", available on 30 | arXiv at https://arxiv.org/abs/1509.02971. 31 | 32 | This algorithm builds on the DPG deterministic actor-critic approach proposed by Silver et al. in "Deterministic 33 | Policy Gradient Algorithms", available at http://proceedings.mlr.press/v32/silver14.pdf. DDPG combines this approach with the 34 | successes of deep learning from DQN. It is model-free, off-policy, and has been shown to learn complex continuous control 35 | tasks in high dimensions quite well. 36 | 37 | Standard stochastic PG takes an expectation over the distribution of actions to compute the gradient step. 38 | DDPG instead moves the policy in the direction of the gradient of Q, removing the need for an integral over the action space 39 | and making it much more efficient at learning in our environment. 40 | 41 | DDPG builds a critic network to estimate the state-action value function, Q(s,a), and an actor network that 42 | learns a behaviour from the critic's estimate. The algorithm learns a deterministic policy but follows a stochastic behaviour 43 | policy, formed by adding noise to the chosen actions, to properly explore the solution space. The tuning and scheduling of this exploration 44 | noise term is crucial to the success of the algorithm. 45 | 46 | To help with convergence and stability, the algorithm is implemented with experience replay and with semi-stationary target 47 | networks. For more information on the theory and the algorithm applied, please refer to the papers. 48 | 
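The update can be summarized in a few lines. The sketch below is illustrative only — the names `critic_target` and `soft_update` are ours, not part of the codebase; the actual TensorFlow implementation lives in `virtual_microgrids/algorithms/ddpg.py` and `virtual_microgrids/agents/`:

```python
import numpy as np

gamma, tau = 0.9, 0.001  # discount and soft-update rate, as in configs/config_base.py

def critic_target(r, done, q_target_next):
    """y_i = r_i + gamma * Q'(s_{i+1}, mu'(s_{i+1})); no bootstrapping on terminal steps."""
    y = np.array(r, dtype=float)
    y[~done] += gamma * q_target_next[~done]
    return y

def soft_update(online, target):
    """Semi-stationary target networks: target <- tau * online + (1 - tau) * target."""
    return [tau * w + (1.0 - tau) * w_t for w, w_t in zip(online, target)]

# Toy usage: two transitions, the second terminal.
print(critic_target(np.array([0.0, 1.0]), np.array([False, True]), np.array([2.0, 5.0])))
# -> [1.8  1. ]
```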
49 | ## Structure of the Code 50 | 51 | There are two main sides to the code: the network and the agents. 52 | 53 | The network is generated using Pandapower (https://pandapower.readthedocs.io/en/v1.6.1/index.html). 54 | 55 | The NetModel class in `powerflow/pp_network.py` maintains the network 56 | object throughout the simulation. It controls how the agent can interact with the network 57 | and with the powerflow simulations, with methods to step in time, calculate the reward, reset the network, 58 | report the state to the agent, and update the network devices. The network includes both uncontrollable and controllable devices: 59 | loads and static generators are set by an uncontrollable, unknown feed; the powers of storage units and diesel generators are 60 | controlled by the agent. 61 | 62 | The initial network is generated by functions in `powerflow/network_generation.py` using configurations stored 63 | in configs. Each config defines all the parameters behind one test setup, including those of the network and some 64 | elements of the agent setup. 65 | 66 | The ActorNetwork and CriticNetwork objects are created in `agents/actor_network.py` and `agents/critic_network.py`, and the 67 | DDPG object uses them to learn the optimal policy. DDPG manages the training of the actor/critic networks 68 | and controls the interactions with the grid network model. 69 | 70 | 71 | #### Code organization 72 | 73 | The main folder contains scratch notebooks for testing, developing, and interacting with the environments. 74 | 75 | The `scripts` folder contains scripts to run the algorithms. For example, change the environment name or config name 76 | in `run_ddpg.py` and then run 77 | 78 | python run_ddpg.py 79 | 80 | to start the simulation. 81 | 82 | The `virtual_microgrids` folder contains all the pieces of the simulation. You do not need to change anything in here to run the code, 83 | but to change parameters or swap the algorithm you will need to work with these files. 84 | - The subfolder `agents` contains the classes 85 | used to build the actor and critic network objects. 86 | - The `algorithms` subfolder contains the classes which run the PG and DDPG implementations. 87 | - The `configs` subfolder contains the configuration files for each test case and network. To create a new or altered test case, 88 | create a new config file in the style of `six_bus_mvp1.py`, for example (a sketch follows at the end of this README). 89 | - The `powerflow` subfolder contains a class to manage the power network and functions to create the networks from the config files. 90 | - The `utils` subfolder contains tools used throughout the other methods and functions, including the schedules used to generate the noise. 91 | 92 | 93 | The `results` folder contains the outputs from running the algorithm. Running the command 94 | ``` 95 | tensorboard --logdir [path to results folder] 96 | ``` 97 | and then visiting 98 | 99 | localhost:6006 100 | 101 | in your browser will let you inspect the tensorflow setup and see plots of the results.
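For example, a new test case could look like the hypothetical sketch below — the file name, class name, and parameter values are placeholders, and the full set of required fields can be copied from `six_bus_mvp1.py`:

```python
# virtual_microgrids/configs/my_case.py (hypothetical)
import numpy as np
from virtual_microgrids.configs.config_base import ConfigBase

class ConfigMyCase(ConfigBase):
    """An illustrative custom test case in the style of six_bus_mvp1.py."""
    def __init__(self, use_baseline, actor):
        self.env_name = 'My_Case'
        super().__init__(use_baseline, actor, self.env_name)

        # environment generation (placeholder values)
        self.tstep = 1. / 60
        self.net_zero_reward = 1.0
        self.static_feeds = {3: -10 * np.ones(self.max_ep_len + 1),
                             4: 10 * np.ones(self.max_ep_len + 1)}
        self.battery_locations = [3, 4]
        self.init_soc = 0.5
        self.energy_capacity = 20.0
        self.gen_locations = None

        # action space
        self.storage_p_min = -5.0
        self.storage_p_max = 5.0

        # state space and reward
        self.with_soc = False
        self.reward_epsilon = 0.001
        self.cont_reward_lambda = 0.1

# Register the new case with a matching branch in get_config()
# (virtual_microgrids/configs/config.py) so the scripts can load it by name.
```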
102 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | pandas 3 | jupyter 4 | pandapower 5 | packaging 6 | plotly 7 | numba 8 | tensorflow 9 | matplotlib 10 | python-igraph -------------------------------------------------------------------------------- /scripts/run_ddpg.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append('..') 3 | 4 | from virtual_microgrids.configs import get_config 5 | from virtual_microgrids.powerflow import NetModel 6 | from virtual_microgrids.algorithms import DDPG 7 | 8 | if __name__ == '__main__': 9 | config = get_config('Six_Bus_POC', algorithm='DDPG') 10 | env = NetModel(config=config) 11 | # train model 12 | model = DDPG(env, config) 13 | model.run() -------------------------------------------------------------------------------- /scripts/run_pg.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append('..') 3 | 4 | from virtual_microgrids.configs import get_config 5 | from virtual_microgrids.powerflow import NetModel 6 | from virtual_microgrids.algorithms import PG 7 | 8 | if __name__ == '__main__': 9 | config = get_config('Six_Bus_POC', algorithm='PG') 10 | env = NetModel(config=config) 11 | # train model 12 | model = PG(env, config) 13 | model.run() -------------------------------------------------------------------------------- /virtual_microgrids/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bmeyers/VirtualMicrogridSegmentation/cd9e7ef1a2ccc438a855765e4c07904740ec12ee/virtual_microgrids/__init__.py -------------------------------------------------------------------------------- /virtual_microgrids/agents/__init__.py: -------------------------------------------------------------------------------- 1 | from virtual_microgrids.agents.actor_network import ActorNetwork 2 | from virtual_microgrids.agents.critic_network import CriticNetwork -------------------------------------------------------------------------------- /virtual_microgrids/agents/actor_network.py: -------------------------------------------------------------------------------- 1 | # Actor and Critic DNNs 2 | # Based on code published by Patrick Emami on his blog "Deep 3 | # Deterministic Policy Gradients in TensorFlow": 4 | # https://pemami4911.github.io/blog/2016/08/21/ddpg-rl.html 5 | 6 | import tensorflow as tf 7 | 8 | class ActorNetwork(object): 9 | """ 10 | Input to the network is the state, output is the action 11 | under a deterministic policy. 12 | 13 | The output layer activation is a tanh, which is individually scaled and 14 | recentered for each action dimension, to keep each action between p_min and p_max 15 | for the given device.
16 | """ 17 | 18 | def __init__(self, sess, state_dim, action_dim, tau, 19 | n_layers, size, min_p, max_p, batch_size): 20 | self.sess = sess 21 | self.s_dim = state_dim 22 | self.a_dim = action_dim 23 | self.tau = tau 24 | self.n_layers = n_layers 25 | self.size = size 26 | self.min_p = min_p 27 | self.max_p = max_p 28 | self.batch_size = batch_size 29 | 30 | self.actor_lr_placeholder = tf.placeholder(shape=None, dtype=tf.float32) 31 | 32 | # Actor Network 33 | self.inputs, self.out, self.scaled_out, self.in_training = self.create_actor_network() 34 | 35 | self.network_params = tf.trainable_variables() 36 | 37 | # Target Network 38 | self.target_inputs, self.target_out, self.target_scaled_out, self.target_in_training = self.create_actor_network() 39 | 40 | self.target_network_params = tf.trainable_variables()[ 41 | len(self.network_params):] 42 | 43 | # Op for periodically updating target network with online network 44 | # weights 45 | self.update_target_network_params = \ 46 | [self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) + 47 | tf.multiply(self.target_network_params[i], 1. - self.tau)) 48 | for i in range(len(self.target_network_params))] 49 | 50 | extra_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) 51 | with tf.control_dependencies(extra_ops): 52 | # This gradient will be provided by the critic network 53 | self.action_gradient = tf.placeholder(tf.float32, [None, self.a_dim]) 54 | 55 | # Combine the gradients here 56 | self.unnormalized_actor_gradients = tf.gradients( 57 | self.scaled_out, self.network_params, -self.action_gradient) 58 | self.actor_gradients = list(map(lambda x: tf.div(x, self.batch_size), self.unnormalized_actor_gradients)) 59 | 60 | # Optimization Op 61 | self.optimize = tf.train.AdamOptimizer(self.actor_lr_placeholder). 
\ 62 | apply_gradients(zip(self.actor_gradients, self.network_params)) 63 | 64 | self.num_trainable_vars = len( 65 | self.network_params) + len(self.target_network_params) 66 | 67 | def create_actor_network(self): 68 | 69 | inputs = tf.placeholder(shape=[None, self.s_dim], 70 | dtype=tf.float32, 71 | name='states') 72 | out = tf.layers.flatten(inputs) 73 | in_training_mode = tf.placeholder(tf.bool) 74 | for i in range(self.n_layers): 75 | out = tf.keras.layers.Dense(units=self.size, activation=None)(out) 76 | #out = tf.keras.layers.BatchNormalization()(out, training=in_training_mode) 77 | out = tf.keras.activations.relu(out) 78 | # Final layer weights are init to Uniform[-3e-3, 3e-3] 79 | w_init = tf.initializers.random_uniform(minval=-0.003, maxval=0.003) 80 | out = tf.keras.layers.Dense(units=self.a_dim, activation=None, 81 | kernel_initializer=w_init)(out) 82 | #out = tf.keras.layers.BatchNormalization()(out, training=in_training_mode) 83 | out = tf.keras.activations.tanh(out) 84 | # Rescale the tanh output from [-1, 1] to [min_p, max_p] for each device 85 | centers = (self.min_p + self.max_p) / 2.0 86 | scales = (self.max_p - self.min_p) / 2.0 87 | scaled_out = tf.multiply(out, scales) + centers 88 | 89 | return inputs, out, scaled_out, in_training_mode 90 | 91 | def train(self, inputs, a_gradient, learning_rate): 92 | self.sess.run(self.optimize, feed_dict={ 93 | self.inputs: inputs, 94 | self.action_gradient: a_gradient, 95 | self.actor_lr_placeholder: learning_rate, 96 | self.in_training: True 97 | }) 98 | 99 | def predict(self, inputs): 100 | return self.sess.run(self.scaled_out, feed_dict={ 101 | self.inputs: inputs, 102 | self.in_training: False 103 | }) 104 | 105 | def predict_target(self, inputs): 106 | return self.sess.run(self.target_scaled_out, feed_dict={ 107 | self.target_inputs: inputs, 108 | self.target_in_training: False 109 | }) 110 | 111 | def update_target_network(self): 112 | self.sess.run(self.update_target_network_params) 113 | 114 | def get_num_trainable_vars(self): 115 | return self.num_trainable_vars 116 | -------------------------------------------------------------------------------- /virtual_microgrids/agents/critic_network.py: -------------------------------------------------------------------------------- 1 | # Actor and Critic DNNs 2 | # Based on code published by Patrick Emami on his blog "Deep 3 | # Deterministic Policy Gradients in TensorFlow": 4 | # https://pemami4911.github.io/blog/2016/08/21/ddpg-rl.html 5 | 6 | import tensorflow as tf 7 | 8 | 9 | class CriticNetwork(object): 10 | """ 11 | Input to the network is the state and action, output is Q(s,a). 12 | The action must be obtained from the output of the Actor network.
13 | 14 | """ 15 | 16 | def __init__(self, sess, state_dim, action_dim, tau, gamma, 17 | n_layers, size, num_actor_vars): 18 | self.sess = sess 19 | self.s_dim = state_dim 20 | self.a_dim = action_dim 21 | self.tau = tau 22 | self.gamma = gamma 23 | self.n_layers = n_layers 24 | self.size = size 25 | 26 | self.critic_lr_placeholder = tf.placeholder(shape=None, dtype=tf.float32) 27 | 28 | # Create the critic network 29 | self.inputs, self.action, self.out, self.in_training = self.create_critic_network() 30 | 31 | self.network_params = tf.trainable_variables()[num_actor_vars:] 32 | 33 | # Target Network 34 | self.target_inputs, self.target_action, self.target_out, self.target_in_training = self.create_critic_network() 35 | 36 | self.target_network_params = tf.trainable_variables()[(len(self.network_params) + num_actor_vars):] 37 | 38 | # Op for periodically updating target network with online network 39 | # weights with regularization 40 | self.update_target_network_params = \ 41 | [self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) \ 42 | + tf.multiply(self.target_network_params[i], 1. - self.tau)) 43 | for i in range(len(self.target_network_params))] 44 | 45 | # Network target (y_i) 46 | self.predicted_q_value = tf.placeholder(tf.float32, [None, 1]) 47 | 48 | extra_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) 49 | with tf.control_dependencies(extra_ops): 50 | # Define loss and optimization Op 51 | self.loss = tf.losses.mean_squared_error(self.predicted_q_value, self.out) 52 | self.optimize = tf.train.AdamOptimizer( 53 | self.critic_lr_placeholder).minimize(self.loss) 54 | 55 | # Get the gradient of the net w.r.t. the action. 56 | # For each action in the minibatch (i.e., for each x in xs), 57 | # this will sum up the gradients of each critic output in the minibatch 58 | # w.r.t. that action. Each output is independent of all 59 | # actions except for one. 
60 | self.action_grads = tf.gradients(self.out, self.action) 61 | 62 | def create_critic_network(self): 63 | 64 | inputs = tf.placeholder(shape=[None, self.s_dim], 65 | dtype=tf.float32, 66 | name='observation') 67 | action = tf.placeholder(shape=[None, self.a_dim], 68 | dtype=tf.float32, 69 | name='action') 70 | in_training_mode = tf.placeholder(tf.bool) 71 | 72 | out = tf.layers.flatten(inputs) 73 | out = tf.keras.layers.Dense(units=self.size, activation=None)(out) 74 | #out = tf.keras.layers.BatchNormalization()(out,training=in_training_mode) 75 | out = tf.keras.activations.relu(out) 76 | 77 | t1 = tf.keras.layers.Dense(units=self.size, activation=None)(out) 78 | t2 = tf.keras.layers.Dense(units=self.size, use_bias=False, activation=None)(action) 79 | out = tf.keras.layers.Add()([t1, t2]) 80 | #out = tf.keras.layers.BatchNormalization()(out, training=in_training_mode) 81 | out = tf.keras.activations.relu(out) 82 | for i in range(max(self.n_layers - 2, 0)): 83 | out = tf.keras.layers.Dense(units=self.size, activation=None)(out) 84 | #out = tf.keras.layers.BatchNormalization()(out, training=in_training_mode) 85 | out = tf.keras.activations.relu(out) 86 | 87 | # Final layer weights are init to Uniform[-3e-3, 3e-3] 88 | w_init = tf.initializers.random_uniform(minval=-0.003, maxval=0.003) 89 | out = tf.keras.layers.Dense(units=1, activation=None, 90 | kernel_initializer=w_init)(out) 91 | #out = tf.keras.layers.BatchNormalization()(out, training=in_training_mode) 92 | 93 | 94 | return inputs, action, out, in_training_mode 95 | 96 | def train(self, inputs, action, predicted_q_value, learning_rate): 97 | return self.sess.run([self.out, self.optimize], feed_dict={ 98 | self.inputs: inputs, 99 | self.action: action, 100 | self.predicted_q_value: predicted_q_value, 101 | self.critic_lr_placeholder: learning_rate, 102 | self.in_training: True 103 | }) 104 | 105 | def predict(self, inputs, action): 106 | return self.sess.run(self.out, feed_dict={ 107 | self.inputs: inputs, 108 | self.action: action, 109 | self.in_training: False 110 | }) 111 | 112 | def predict_target(self, inputs, action): 113 | return self.sess.run(self.target_out, feed_dict={ 114 | self.target_inputs: inputs, 115 | self.target_action: action, 116 | self.target_in_training: False 117 | }) 118 | 119 | def action_gradients(self, inputs, actions): 120 | return self.sess.run(self.action_grads, feed_dict={ 121 | self.inputs: inputs, 122 | self.action: actions, 123 | self.in_training: True 124 | }) 125 | 126 | def update_target_network(self): 127 | self.sess.run(self.update_target_network_params) 128 | -------------------------------------------------------------------------------- /virtual_microgrids/algorithms/__init__.py: -------------------------------------------------------------------------------- 1 | from virtual_microgrids.algorithms.ddpg import DDPG 2 | from virtual_microgrids.algorithms.pg import PG -------------------------------------------------------------------------------- /virtual_microgrids/algorithms/ddpg.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | """The base of this code was prepared for a homework by course staff for CS234 at Stanford, Winter 2019. We have since 3 | altered it to implement DDPG rather than traditional PG.
Also inspired by code published by Patrick Emami on his blog 4 | "Deep Deterministic Policy Gradients in TensorFlow": https://pemami4911.github.io/blog/2016/08/21/ddpg-rl.html 5 | """ 6 | 7 | import os 8 | import argparse 9 | import sys 10 | import logging 11 | import time 12 | import numpy as np 13 | import tensorflow as tf 14 | import scipy.signal 15 | 16 | 17 | import inspect 18 | import matplotlib.pyplot as plt 19 | 20 | sys.path.append('..') 21 | from virtual_microgrids.powerflow import NetModel 22 | from virtual_microgrids.utils.general import get_logger, Progbar, export_plot 23 | from virtual_microgrids.configs import get_config 24 | from virtual_microgrids.utils import ReplayBuffer, LinearSchedule, LogSchedule, OrnsteinUhlenbeckActionNoise 25 | from virtual_microgrids.agents import ActorNetwork, CriticNetwork 26 | 27 | parser = argparse.ArgumentParser() 28 | parser.add_argument('--env_name', required=True, type=str, 29 | choices=['Six_Bus_POC', 'rural_1', 'rural_2', 'village_1', 'village_2', 'suburb_1']) 30 | 31 | 32 | class DDPG(object): 33 | """ 34 | Class implementing the deep deterministic policy gradient (DDPG) algorithm 35 | """ 36 | def __init__(self, env, config, logger=None): 37 | """ 38 | Initialize the DDPG class 39 | 40 | Args: 41 | env: an OpenAI Gym environment 42 | config: class with hyperparameters 43 | logger: logger instance from the logging module 44 | 45 | Written by course staff. 46 | """ 47 | # directory for training outputs 48 | if not os.path.exists(config.output_path): 49 | os.makedirs(config.output_path) 50 | 51 | # store hyperparameters 52 | self.config = config 53 | self.logger = logger 54 | if logger is None: 55 | self.logger = get_logger(config.log_path) 56 | self.env = env 57 | 58 | self.state_dim = self.env.observation_dim 59 | self.action_dim = self.env.action_dim 60 | 61 | # self.actor_lr = self.config.actor_learning_rate_start 62 | # self.critic_lr = self.config.critic_learning_rate_start 63 | self.gamma = self.config.gamma 64 | self.tau = self.config.tau 65 | self.batch_size = self.config.minibatch_size 66 | 67 | # self.actor_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(self.action_dim)) 68 | self.actor_noise = lambda noise_level: np.random.normal(0, noise_level, size=self.action_dim) # changed from 0.2 69 | 70 | # action space limits 71 | min_p = [] 72 | max_p = [] 73 | if len(env.net.gen)>0: 74 | min_p.append(env.net.gen.min_p_kw) 75 | max_p.append(env.net.gen.max_p_kw) 76 | if len(env.net.storage)>0: 77 | min_p.append(env.net.storage.min_p_kw) 78 | max_p.append(env.net.storage.max_p_kw) 79 | self.min_p = np.array(min_p) 80 | self.max_p = np.array(max_p) 81 | 82 | # build model 83 | self.actor = None 84 | self.critic = None 85 | 86 | def initialize(self): 87 | """ 88 | Creates a tf Session, instantiates the actor and critic networks, 89 | and runs the initializer of variables 90 | 91 | Written by course staff.
92 | """ 93 | # create tf session 94 | self.sess = tf.Session() 95 | # Initialize networks 96 | self.actor = ActorNetwork(self.sess, self.state_dim, self.action_dim, self.tau, self.config.n_layers, 97 | self.config.layer_size, self.min_p, self.max_p, 98 | self.config.minibatch_size) 99 | self.critic = CriticNetwork(self.sess, self.state_dim, self.action_dim, self.tau, self.gamma, 100 | self.config.n_layers, self.config.layer_size, 101 | self.actor.get_num_trainable_vars()) 102 | # tensorboard stuff 103 | self.add_summary() 104 | # initialize all variables 105 | init = tf.global_variables_initializer() 106 | self.sess.run(init) 107 | 108 | def add_summary(self): 109 | """ 110 | Tensorboard stuff. Written by course staff. 111 | """ 112 | # extra placeholders to log stuff from python 113 | self.avg_reward_placeholder = tf.placeholder(tf.float32, shape=(), name="avg_reward") 114 | self.max_reward_placeholder = tf.placeholder(tf.float32, shape=(), name="max_reward") 115 | self.std_reward_placeholder = tf.placeholder(tf.float32, shape=(), name="std_reward") 116 | self.eval_reward_placeholder = tf.placeholder(tf.float32, shape=(), name="eval_reward") 117 | # new DDPG placeholders 118 | self.max_q_placeholder = tf.placeholder(tf.float32, shape=(), name='max_q') 119 | 120 | # extra summaries from python -> placeholders 121 | tf.summary.scalar("Avg_Reward", self.avg_reward_placeholder) 122 | tf.summary.scalar("Max_Reward", self.max_reward_placeholder) 123 | tf.summary.scalar("Std_Reward", self.std_reward_placeholder) 124 | tf.summary.scalar("Eval_Reward", self.eval_reward_placeholder) 125 | # new DDPG summary 126 | tf.summary.scalar("Max_Q_Value", self.max_q_placeholder) 127 | 128 | # logging 129 | self.merged = tf.summary.merge_all() 130 | self.file_writer = tf.summary.FileWriter(self.config.output_path,self.sess.graph) 131 | 132 | def init_averages(self): 133 | """ 134 | Defines extra attributes for tensorboard. Written by course staff. 135 | """ 136 | self.avg_reward = 0. 137 | self.max_reward = 0. 138 | self.std_reward = 0. 139 | self.eval_reward = 0. 140 | self.avg_max_q = 0. 141 | 142 | def update_averages(self, rewards, scores_eval, avg_max_q): 143 | """ 144 | Update the averages. Written by course staff. 145 | 146 | Args: 147 | rewards: deque 148 | scores_eval: list 149 | """ 150 | self.avg_reward = np.mean(rewards) 151 | self.max_reward = np.max(rewards) 152 | self.std_reward = np.sqrt(np.var(rewards) / len(rewards)) 153 | self.avg_max_q = np.mean(avg_max_q) 154 | 155 | if len(scores_eval) > 0: 156 | self.eval_reward = scores_eval[-1] 157 | 158 | def record_summary(self, t): 159 | """ 160 | Add summary to tensorboard. Written by course staff. 161 | """ 162 | 163 | fd = { 164 | self.avg_reward_placeholder: self.avg_reward, 165 | self.max_reward_placeholder: self.max_reward, 166 | self.std_reward_placeholder: self.std_reward, 167 | self.eval_reward_placeholder: self.eval_reward, 168 | self.max_q_placeholder: self.avg_max_q 169 | } 170 | summary = self.sess.run(self.merged, feed_dict=fd) 171 | # tensorboard stuff 172 | self.file_writer.add_summary(summary, t) 173 | 174 | def train(self): 175 | """ 176 | Performs training. 
177 | """ 178 | 179 | actor_lr_schedule = LinearSchedule(self.config.actor_learning_rate_start, self.config.actor_learning_rate_end, 180 | self.config.reasonable_max_episodes*self.config.max_ep_steps) 181 | critic_lr_schedule = LinearSchedule(self.config.critic_learning_rate_start, self.config.critic_learning_rate_end, 182 | self.config.reasonable_max_episodes*self.config.max_ep_steps) 183 | noise_schedule = LogSchedule(0.5, 0.0001, self.config.reasonable_max_episodes*self.config.max_ep_steps) 184 | 185 | # noise_schedule = LinearSchedule(0.5, 0.01, self.config.reasonable_max_episodes*self.config.max_ep_steps) 186 | 187 | self.actor.update_target_network() 188 | self.critic.update_target_network() 189 | replay_buffer = ReplayBuffer(self.config.buffer_size) 190 | total_rewards = [] 191 | scores_eval = [] 192 | ave_max_q = [] 193 | 194 | for i in range(self.config.max_episodes): 195 | s = self.env.reset() 196 | ep_reward = 0 197 | ep_ave_max_q = 0 198 | best_ep_reward = 0 199 | 200 | best_r = 0.0 201 | best_reward_logical = None 202 | 203 | soc_track = np.zeros((self.config.max_ep_steps, self.env.net.storage.shape[0])) 204 | p_track = np.zeros((self.config.max_ep_steps, self.env.net.storage.shape[0])) 205 | reward_track = np.zeros((self.config.max_ep_steps, 1)) 206 | 207 | for j in range(self.config.max_ep_steps): 208 | a = self.actor.predict(s[None, :]) + self.actor_noise(noise_schedule.epsilon) 209 | s2, r, done, info = self.env.step(a[0]) 210 | replay_buffer.add(np.reshape(s, (self.state_dim)), 211 | np.reshape(a, (self.action_dim)), 212 | r, done, 213 | np.reshape(s2, (self.state_dim))) 214 | # Keep adding experience to the memory until 215 | # there are at least minibatch size samples 216 | if replay_buffer.size() > self.config.minibatch_size: 217 | s_batch, a_batch, r_batch, t_batch, s2_batch = \ 218 | replay_buffer.sample_batch(self.config.minibatch_size) 219 | # Calc targets 220 | target_q = self.critic.predict_target( 221 | s2_batch, self.actor.predict_target(s2_batch) 222 | ) 223 | y_i = np.array(r_batch) 224 | y_i[~t_batch] = (r_batch + 225 | self.gamma * target_q.squeeze())[~t_batch] 226 | # Update critic given targets 227 | predicted_q_val, _ = self.critic.train(s_batch, a_batch, y_i[:, None], critic_lr_schedule.epsilon) 228 | ep_ave_max_q += np.max(predicted_q_val) 229 | # Update the actor policy using the sampled gradient 230 | a_outs = self.actor.predict(s_batch) 231 | grads = self.critic.action_gradients(s_batch, a_outs) 232 | self.actor.train(s_batch, grads[0], actor_lr_schedule.epsilon) 233 | # Update target networks 234 | self.actor.update_target_network() 235 | self.critic.update_target_network() 236 | actor_lr_schedule.update(i*self.config.max_ep_steps + j) 237 | critic_lr_schedule.update(i * self.config.max_ep_steps + j) 238 | noise_schedule.update(i * self.config.max_ep_steps + j) 239 | # Housekeeping 240 | if r > best_r: 241 | best_r = r 242 | c1 = np.abs(self.env.net.res_line.p_to_kw - self.env.net.res_line.pl_kw) < self.config.reward_epsilon 243 | c2 = np.abs(self.env.net.res_line.p_from_kw - self.env.net.res_line.pl_kw) < self.config.reward_epsilon 244 | best_reward_logical = np.logical_or(c1.values, c2.values) 245 | 246 | soc_track[j, :] = self.env.net.storage.soc_percent 247 | p_track[j, :] = self.env.net.storage.p_kw 248 | reward_track[j] = r 249 | 250 | s = s2 251 | ep_reward += r 252 | if done: 253 | if ep_reward > best_ep_reward: 254 | best_ep_reward = ep_reward 255 | total_rewards.append(ep_reward) 256 | ep_ave_max_q /= j 257 | 
ave_max_q.append(ep_ave_max_q) 258 | break 259 | 260 | # tf stuff 261 | if (i % self.config.summary_freq2 == 0): 262 | scores_eval.extend(total_rewards) 263 | self.update_averages(np.array(total_rewards), np.array(scores_eval), np.array(ave_max_q)) 264 | self.record_summary(i) 265 | 266 | # compute reward statistics for this batch and log 267 | avg_reward = np.mean(total_rewards) 268 | sigma_reward = np.sqrt(np.var(total_rewards) / len(total_rewards)) 269 | avg_q = np.mean(ave_max_q) 270 | s1 = "---------------------------------------------------------\n" \ 271 | +"Average reward: {:04.2f} +/- {:04.2f} Average Max Q: {:.2f}" 272 | msg = s1.format(avg_reward, sigma_reward, avg_q) 273 | self.logger.info(msg) 274 | msg4 = "Best episode reward: {}".format(best_ep_reward) 275 | self.logger.info(msg4) 276 | 277 | msg2 = "Max single reward: "+str(best_r) 278 | msg3 = "Max reward happened on lines: "+str(best_reward_logical) 279 | end = "\n--------------------------------------------------------" 280 | self.logger.info(msg2) 281 | self.logger.info(msg3 + end) 282 | 283 | fig, ax = plt.subplots(nrows=3, sharex=True) 284 | xs = np.arange(self.config.max_ep_steps) 285 | for k_step in range(self.env.net.storage.shape[0]): 286 | ax[1].plot(xs, soc_track[:, k_step].ravel(), marker='.', 287 | label='soc_{}'.format(k_step + 1)) 288 | ax[0].plot(xs, p_track[:, k_step].ravel(), marker='.', 289 | label='pset_{}'.format(k_step + 1)) 290 | ax[0].legend() 291 | ax[1].legend() 292 | ax[2].stem(xs, reward_track, label='reward') 293 | ax[2].legend() 294 | ax[2].set_xlabel('time') 295 | ax[0].set_ylabel('Power (kW)') 296 | ax[1].set_ylabel('State of Charge') 297 | ax[2].set_ylabel('Reward Received') 298 | ax[0].set_title('Battery Behavior and Rewards') 299 | plt.tight_layout() 300 | plt.savefig(self.config.output_path + 'soc_plot_{}.png'.format(i)) 301 | plt.close() 302 | 303 | total_rewards = [] 304 | ave_max_q = [] 305 | best_ep_reward = 0 306 | 307 | self.logger.info("- Training done.") 308 | export_plot(scores_eval, "Score", self.config.env_name, self.config.plot_output) 309 | 310 | def evaluate(self, env=None, num_episodes=1): 311 | """ 312 | Evaluates the return for num_episodes episodes. Written by course staff. 313 | Not used right now, all evaluation statistics are computed during training 314 | episodes. 315 | """ 316 | if env is None: env = self.env 317 | paths, rewards = self.sample_path(env, num_episodes) 318 | avg_reward = np.mean(rewards) 319 | sigma_reward = np.sqrt(np.var(rewards) / len(rewards)) 320 | msg = "Average reward: {:04.2f} +/- {:04.2f}".format(avg_reward, sigma_reward) 321 | self.logger.info(msg) 322 | return avg_reward 323 | 324 | def run(self): 325 | """ 326 | Apply procedures of training for DDPG. Written by course staff.
327 | """ 328 | # initialize 329 | self.initialize() 330 | # model 331 | self.train() 332 | 333 | if __name__ == '__main__': 334 | 335 | #config = get_config('Six_Bus_POC', algorithm='DDPG') 336 | config = get_config('Six_Bus_MVP3', algorithm='DDPG') 337 | env = NetModel(config=config) 338 | # train model 339 | model = DDPG(env, config) 340 | model.run() 341 | -------------------------------------------------------------------------------- /virtual_microgrids/algorithms/pg.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | """The base of this code was prepared for a homework by course staff for CS234 at Stanford, Winter 2019.""" 3 | 4 | import argparse 5 | import numpy as np 6 | import tensorflow as tf 7 | import os 8 | import matplotlib.pyplot as plt 9 | 10 | from virtual_microgrids.powerflow import NetModel 11 | from virtual_microgrids.utils.general import get_logger, Progbar, export_plot 12 | from virtual_microgrids.configs import get_config 13 | 14 | parser = argparse.ArgumentParser() 15 | parser.add_argument('--env_name', required=True, type=str, 16 | choices=['Six_Bus_POC', 'rural_1', 'rural_2', 'village_1', 'village_2', 'suburb_1']) 17 | parser.add_argument('--baseline', dest='use_baseline', action='store_true') 18 | parser.add_argument('--no-baseline', dest='use_baseline', action='store_false') 19 | parser.set_defaults(use_baseline=True) 20 | 21 | 22 | def build_mlp(mlp_input, output_size, scope, n_layers, size, in_training_mode, 23 | output_activation=None): 24 | """ 25 | Build a feed forward network (multi-layer perceptron, or mlp) 26 | with 'n_layers' hidden layers, each of size 'size' units. 27 | Use tf.nn.relu nonlinearity between layers. 28 | Args: 29 | mlp_input: the input to the multi-layer perceptron 30 | output_size: the output layer size 31 | scope: the scope of the neural network 32 | n_layers: the number of hidden layers of the network 33 | size: the size of each layer: 34 | output_activation: the activation of output layer 35 | Returns: 36 | The tensor output of the network 37 | """ 38 | 39 | with tf.variable_scope(scope): 40 | out = tf.layers.flatten(mlp_input) 41 | for i in range(n_layers): 42 | out = tf.keras.layers.Dense(units=size, activation=None)(out) 43 | #out = tf.keras.layers.BatchNormalization()(out, training=in_training_mode) 44 | out = tf.keras.activations.relu(out) 45 | w_init = tf.initializers.random_uniform(minval=-0.003, maxval=0.003) 46 | out = tf.layers.Dense(units=output_size, activation=output_activation, 47 | kernel_initializer=w_init)(out) 48 | 49 | return out 50 | 51 | 52 | class PG(object): 53 | """ 54 | Abstract Class for implementing a Policy Gradient Based Algorithm 55 | """ 56 | def __init__(self, env, config, logger=None): 57 | """ 58 | Initialize Policy Gradient Class 59 | 60 | Args: 61 | env: an OpenAI Gym environment 62 | config: class with hyperparameters 63 | logger: logger instance from the logging module 64 | 65 | Written by course staff. 
66 | """ 67 | # directory for training outputs 68 | if not os.path.exists(config.output_path): 69 | os.makedirs(config.output_path) 70 | 71 | # store hyperparameters 72 | self.config = config 73 | self.logger = logger 74 | if logger is None: 75 | self.logger = get_logger(config.log_path) 76 | self.env = env 77 | 78 | self.observation_dim = self.env.observation_dim 79 | self.action_dim = self.env.action_dim 80 | 81 | self.lr = self.config.learning_rate 82 | 83 | # build model 84 | self.build() 85 | 86 | def add_placeholders_op(self): 87 | """ 88 | Add placeholders for observation, action, and advantage: 89 | self.observation_placeholder, type: tf.float32 90 | self.action_placeholder, type: depends on the self.discrete 91 | self.advantage_placeholder, type: tf.float32 92 | """ 93 | self.observation_placeholder = tf.placeholder(shape=[None, self.observation_dim], 94 | dtype=tf.float32, 95 | name='observation') 96 | self.action_placeholder = tf.placeholder(shape=[None, self.action_dim], 97 | dtype=tf.float32, 98 | name='action') 99 | 100 | # Define a placeholder for advantages 101 | self.advantage_placeholder = tf.placeholder(shape=[None], 102 | dtype=tf.float32, 103 | name='advantage') 104 | self.in_training_placeholder = tf.placeholder(tf.bool) 105 | 106 | def build_policy_network_op(self, scope = "policy_network"): 107 | """ 108 | Build the policy network, construct the tensorflow operation to sample 109 | actions from the policy network outputs, and compute the log probabilities 110 | of the actions taken (for computing the loss later). These operations are 111 | stored in self.sampled_action and self.logprob. 112 | 113 | Args: 114 | scope: the scope of the neural network 115 | """ 116 | action_means = build_mlp(self.observation_placeholder, self.action_dim, 117 | scope, self.config.n_layers, self.config.layer_size, 118 | self.in_training_placeholder, output_activation=None) 119 | with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): 120 | log_std = tf.get_variable("log_std", [self.action_dim]) 121 | self.sampled_action = action_means + tf.multiply(tf.exp(log_std), tf.random_normal(tf.shape(action_means))) 122 | mvn = tf.contrib.distributions.MultivariateNormalDiag(loc=action_means, scale_diag=tf.exp(log_std)) 123 | self.logprob = mvn.log_prob(self.action_placeholder) 124 | 125 | def add_loss_op(self): 126 | """ 127 | Compute the loss, averaged for a given batch. 128 | 129 | Recall the update for REINFORCE with advantage: 130 | θ = θ + α ∇_θ log π_θ(a_t|s_t) A_t 131 | """ 132 | 133 | self.loss = - tf.reduce_mean(tf.multiply(self.logprob, self.advantage_placeholder)) 134 | 135 | def add_optimizer_op(self): 136 | """ 137 | Set 'self.train_op' using AdamOptimizer 138 | """ 139 | extra_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) 140 | with tf.control_dependencies(extra_ops): 141 | optimizer = tf.train.AdamOptimizer(learning_rate=self.lr) 142 | self.train_op = optimizer.minimize(self.loss) 143 | 144 | def add_baseline_op(self, scope = "baseline"): 145 | """ 146 | Build the baseline network within the scope. 147 | 148 | In this function we will build the baseline network. 149 | Use build_mlp with the same parameters as the policy network to 150 | get the baseline estimate. You also have to setup a target 151 | placeholder and an update operation so the baseline can be trained. 
152 | 153 | Args: 154 | scope: the scope of the baseline network 155 | 156 | """ 157 | 158 | self.baseline_in_training_placeholder = tf.placeholder(tf.bool) 159 | self.baseline = tf.squeeze(build_mlp(self.observation_placeholder, 1, scope, 160 | self.config.n_layers, self.config.layer_size, 161 | self.baseline_in_training_placeholder)) 162 | 163 | self.baseline_target_placeholder = tf.placeholder(shape=[None], dtype=tf.float32, name='baseline') 164 | 165 | self.baseline_loss = tf.losses.mean_squared_error(labels=self.baseline_target_placeholder, 166 | predictions=self.baseline) 167 | extra_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) 168 | with tf.control_dependencies(extra_ops): 169 | optimizer = tf.train.AdamOptimizer(learning_rate=self.lr) 170 | self.update_baseline_op = optimizer.minimize(self.baseline_loss) 171 | 172 | def build(self): 173 | """ 174 | Build the model by adding all necessary variables. 175 | 176 | Written by course staff. 177 | Calling all the operations you already defined above to build the tensorflow graph. 178 | """ 179 | 180 | # add placeholders 181 | self.add_placeholders_op() 182 | # create policy net 183 | self.build_policy_network_op() 184 | # add square loss 185 | self.add_loss_op() 186 | # add optimizer for the main networks 187 | self.add_optimizer_op() 188 | 189 | # add baseline 190 | if self.config.use_baseline: 191 | self.add_baseline_op() 192 | 193 | def initialize(self): 194 | """ 195 | Assumes the graph has been constructed (have called self.build()) 196 | Creates a tf Session and run initializer of variables 197 | 198 | Written by course staff. 199 | """ 200 | # create tf session 201 | self.sess = tf.Session() 202 | # tensorboard stuff 203 | self.add_summary() 204 | # initialize all variables 205 | init = tf.global_variables_initializer() 206 | self.sess.run(init) 207 | 208 | def add_summary(self): 209 | """ 210 | Tensorboard stuff. Written by course staff. 211 | """ 212 | # extra placeholders to log stuff from python 213 | self.avg_reward_placeholder = tf.placeholder(tf.float32, shape=(), name="avg_reward") 214 | self.max_reward_placeholder = tf.placeholder(tf.float32, shape=(), name="max_reward") 215 | self.std_reward_placeholder = tf.placeholder(tf.float32, shape=(), name="std_reward") 216 | 217 | self.eval_reward_placeholder = tf.placeholder(tf.float32, shape=(), name="eval_reward") 218 | 219 | # extra summaries from python -> placeholders 220 | tf.summary.scalar("Avg Reward", self.avg_reward_placeholder) 221 | tf.summary.scalar("Max Reward", self.max_reward_placeholder) 222 | tf.summary.scalar("Std Reward", self.std_reward_placeholder) 223 | tf.summary.scalar("Eval Reward", self.eval_reward_placeholder) 224 | 225 | # logging 226 | self.merged = tf.summary.merge_all() 227 | self.file_writer = tf.summary.FileWriter(self.config.output_path,self.sess.graph) 228 | 229 | def init_averages(self): 230 | """ 231 | Defines extra attributes for tensorboard. Written by course staff. 232 | """ 233 | self.avg_reward = 0. 234 | self.max_reward = 0. 235 | self.std_reward = 0. 236 | self.eval_reward = 0. 237 | 238 | def update_averages(self, rewards, scores_eval): 239 | """ 240 | Update the averages. Written by course staff. 
241 | 242 | Args: 243 | rewards: deque 244 | scores_eval: list 245 | """ 246 | self.avg_reward = np.mean(rewards) 247 | self.max_reward = np.max(rewards) 248 | self.std_reward = np.sqrt(np.var(rewards) / len(rewards)) 249 | 250 | if len(scores_eval) > 0: 251 | self.eval_reward = scores_eval[-1] 252 | 253 | def record_summary(self, t): 254 | """ 255 | Add summary to tensorboard. Written by course staff. 256 | """ 257 | 258 | fd = { 259 | self.avg_reward_placeholder: self.avg_reward, 260 | self.max_reward_placeholder: self.max_reward, 261 | self.std_reward_placeholder: self.std_reward, 262 | self.eval_reward_placeholder: self.eval_reward, 263 | } 264 | summary = self.sess.run(self.merged, feed_dict=fd) 265 | # tensorboard stuff 266 | self.file_writer.add_summary(summary, t) 267 | 268 | def sample_path(self, env, num_episodes = None): 269 | """ 270 | Sample paths (trajectories) from the environment. 271 | 272 | Args: 273 | num_episodes: the number of episodes to be sampled 274 | if none, sample one batch (size indicated by config file) 275 | env: OpenAI Gym environment 276 | 277 | Returns: 278 | paths: a list of paths. Each path in paths is a dictionary with 279 | path["observation"] a numpy array of ordered observations in the path 280 | path["actions"] a numpy array of the corresponding actions in the path 281 | path["reward"] a numpy array of the corresponding rewards in the path 282 | total_rewards: the sum of all rewards encountered during this "path" 283 | 284 | Written by course staff. 285 | """ 286 | episode = 0 287 | episode_rewards = [] 288 | paths = [] 289 | t = 0 290 | best_r = 0.0 291 | best_reward_logical = None 292 | 293 | while (num_episodes or t < self.config.batch_size): 294 | state = env.reset() 295 | states, actions, rewards = [], [], [] 296 | episode_reward = 0 297 | 298 | soc_track = np.zeros((self.config.max_ep_steps, self.env.net.storage.shape[0])) 299 | p_track = np.zeros((self.config.max_ep_steps, self.env.net.storage.shape[0])) 300 | reward_track = np.zeros((self.config.max_ep_steps, 1)) 301 | 302 | for step in range(self.config.max_ep_len): 303 | states.append(state) 304 | action = self.sess.run(self.sampled_action, feed_dict={self.observation_placeholder : states[-1][None], 305 | self.in_training_placeholder: False})[0] 306 | state, reward, done, info = env.step(action) 307 | actions.append(action) 308 | rewards.append(reward) 309 | episode_reward += reward 310 | soc_track[step, :] = self.env.net.storage.soc_percent 311 | p_track[step, :] = self.env.net.storage.p_kw 312 | reward_track[step] = reward 313 | if reward > best_r: 314 | best_r = reward 315 | c1 = np.abs(env.net.res_line.p_to_kw - self.env.net.res_line.pl_kw) < self.config.reward_epsilon 316 | c2 = np.abs(env.net.res_line.p_from_kw - self.env.net.res_line.pl_kw) < self.config.reward_epsilon 317 | best_reward_logical = np.logical_or(c1.values, c2.values) 318 | t += 1 319 | if (done or step == self.config.max_ep_len-1): 320 | episode_rewards.append(episode_reward) 321 | break 322 | 323 | path = {"observation" : np.array(states), 324 | "reward" : np.array(rewards), 325 | "action" : np.array(actions)} 326 | paths.append(path) 327 | episode += 1 328 | if num_episodes and episode >= num_episodes: 329 | break 330 | 331 | return paths, episode_rewards, best_r, best_reward_logical, soc_track, p_track, reward_track 332 | 333 | def get_returns(self, paths): 334 | """ 335 | Calculate the returns G_t for each timestep 336 | 337 | Args: 338 | paths: recorded sample paths. See sample_path() for details.
339 | 340 | Return: 341 | returns: return G_t for each timestep 342 | 343 | After acting in the environment, we record the observations, actions, and 344 | rewards. To get the advantages that we need for the policy update, we have 345 | to convert the rewards into returns, G_t, which are themselves an estimate 346 | of Q^π (s_t, a_t): 347 | 348 | G_t = r_t + γ r_{t+1} + γ^2 r_{t+2} + ... + γ^{T-t} r_T 349 | 350 | where T is the last timestep of the episode. 351 | """ 352 | 353 | all_returns = [] 354 | for path in paths: 355 | rewards = path["reward"] 356 | 357 | dim_rewards = np.shape(np.ravel(rewards))[0] # Each path has a different length 358 | returns = np.zeros((dim_rewards,)) 359 | for i in range(dim_rewards): 360 | for j in range(dim_rewards-i): 361 | returns[i] += rewards[i+j]*np.power(self.config.gamma, j) # Implement the sum in the G_t formula 362 | 363 | all_returns.append(returns) 364 | returns = np.concatenate(all_returns) 365 | 366 | return returns 367 | 368 | def calculate_advantage(self, returns, observations): 369 | """ 370 | Calculate the advantage 371 | 372 | Args: 373 | returns: all discounted future returns for each step 374 | observations: observations 375 | Returns: 376 | adv: Advantage 377 | 378 | Calculate the advantages, using baseline adjustment if necessary, 379 | and normalizing the advantages if necessary. 380 | If neither of these options are True, just return returns. 381 | """ 382 | adv = returns 383 | 384 | if self.config.use_baseline: 385 | adv = returns - self.sess.run(self.baseline, feed_dict={self.observation_placeholder: observations, 386 | self.baseline_target_placeholder: returns, 387 | self.baseline_in_training_placeholder: False}) 388 | 389 | if self.config.normalize_advantage: 390 | adv = (adv - np.mean(adv))/np.std(adv) 391 | 392 | return adv 393 | 394 | def update_baseline(self, returns, observations): 395 | """ 396 | Update the baseline from given returns and observation. 397 | 398 | Args: 399 | returns: Returns from get_returns 400 | observations: observations 401 | """ 402 | self.sess.run(self.update_baseline_op, feed_dict={self.observation_placeholder: observations, 403 | self.baseline_target_placeholder: returns, 404 | self.baseline_in_training_placeholder: True}) 405 | 406 | def train(self): 407 | """ 408 | Performs training. Written by course staff. 
409 | """ 410 | last_eval = 0 411 | last_record = 0 412 | scores_eval = [] 413 | 414 | self.init_averages() 415 | scores_eval = [] # list of scores computed at iteration time 416 | 417 | for t in range(self.config.num_batches): 418 | 419 | # collect a minibatch of samples 420 | paths, total_rewards, best_r, best_reward_logical, soc_track, p_track, reward_track = self.sample_path(self.env) 421 | scores_eval = scores_eval + total_rewards 422 | observations = np.concatenate([path["observation"] for path in paths]) 423 | actions = np.concatenate([path["action"] for path in paths]) 424 | rewards = np.concatenate([path["reward"] for path in paths]) 425 | # compute Q-val estimates (discounted future returns) for each time step 426 | returns = self.get_returns(paths) 427 | advantages = self.calculate_advantage(returns, observations) 428 | 429 | # run training operations 430 | if self.config.use_baseline: 431 | self.update_baseline(returns, observations) 432 | self.sess.run(self.train_op, feed_dict={ 433 | self.observation_placeholder : observations, 434 | self.action_placeholder : actions, 435 | self.advantage_placeholder : advantages, 436 | self.in_training_placeholder: True}) 437 | 438 | # tf stuff 439 | if (t % self.config.summary_freq == 0): 440 | self.update_averages(total_rewards, scores_eval) 441 | self.record_summary(t) 442 | 443 | # compute reward statistics for this batch and log 444 | avg_reward = np.mean(total_rewards) 445 | best_ep_reward = np.max(total_rewards) 446 | sigma_reward = np.sqrt(np.var(total_rewards) / len(total_rewards)) 447 | s1 = "---------------------------------------------------------\n" \ 448 | + "Average reward: {:04.2f} +/- {:04.2f}" 449 | msg = s1.format(avg_reward, sigma_reward) 450 | self.logger.info(msg) 451 | msg4 = "Best episode reward: {}".format(best_ep_reward) 452 | self.logger.info(msg4) 453 | 454 | msg2 = "Max single reward: " + str(best_r) 455 | msg3 = "Max reward happened on lines: " + str(best_reward_logical) 456 | end = "\n--------------------------------------------------------" 457 | self.logger.info(msg2) 458 | self.logger.info(msg3 + end) 459 | 460 | fig, ax = plt.subplots(nrows=3, sharex=True) 461 | xs = np.arange(self.config.max_ep_steps) 462 | for k_step in range(self.env.net.storage.shape[0]): 463 | ax[1].plot(xs, soc_track[:, k_step].ravel(), marker='.', 464 | label='soc_{}'.format(k_step + 1)) 465 | ax[0].plot(xs, p_track[:, k_step].ravel(), marker='.', 466 | label='pset_{}'.format(k_step + 1)) 467 | ax[0].legend() 468 | ax[1].legend() 469 | ax[2].stem(xs, reward_track, label='reward') 470 | ax[2].legend() 471 | ax[2].set_xlabel('time') 472 | ax[0].set_ylabel('Power (kW)') 473 | ax[1].set_ylabel('State of Charge') 474 | ax[2].set_ylabel('Reward Received') 475 | ax[0].set_title('Battery Behavior and Rewards') 476 | plt.tight_layout() 477 | plt.savefig(self.config.output_path + 'soc_plot_{}.png'.format(t)) 478 | plt.close() 479 | 480 | self.logger.info("- Training done.") 481 | export_plot(scores_eval, "Score", self.config.env_name, self.config.plot_output) 482 | 483 | def evaluate(self, env=None, num_episodes=1): 484 | """ 485 | Evaluates the return for num_episodes episodes. Written by course staff. 486 | Not used right now, all evaluation statistics are computed during training 487 | episodes. 
483 |     def evaluate(self, env=None, num_episodes=1):
484 |         """
485 |         Evaluates the return for num_episodes episodes. Written by course staff.
486 |         Not used right now; all evaluation statistics are computed during training
487 |         episodes.
488 |         """
489 |         if env is None: env = self.env
490 |         paths, rewards = self.sample_path(env, num_episodes)
491 |         avg_reward = np.mean(rewards)
492 |         sigma_reward = np.sqrt(np.var(rewards) / len(rewards))
493 |         msg = "Average reward: {:04.2f} +/- {:04.2f}".format(avg_reward, sigma_reward)
494 |         self.logger.info(msg)
495 |         return avg_reward
496 | 
497 |     def run(self):
498 |         """
499 |         Apply procedures of training for a PG. Written by course staff.
500 |         """
501 |         # initialize
502 |         self.initialize()
503 |         # model
504 |         self.train()
505 | 
506 | if __name__ == '__main__':
507 |     #args = parser.parse_args()
508 |     #config = get_config(args.env_name, args.use_baseline)
509 |     config = get_config('Six_Bus_POC', algorithm='PG')
510 |     env = NetModel(config=config)
511 |     # train model
512 |     model = PG(env, config)
513 |     model.run()
514 | 
--------------------------------------------------------------------------------
/virtual_microgrids/configs/__init__.py:
--------------------------------------------------------------------------------
1 | from virtual_microgrids.configs.config import get_config
--------------------------------------------------------------------------------
/virtual_microgrids/configs/config.py:
--------------------------------------------------------------------------------
1 | from virtual_microgrids.configs.six_bus_poc import ConfigSixBusPOC
2 | from virtual_microgrids.configs.six_bus_mvp1 import ConfigSixBusMVP1
3 | from virtual_microgrids.configs.six_bus_mvp2 import ConfigSixBusMVP2
4 | from virtual_microgrids.configs.six_bus_mvp3 import ConfigSixBusMVP3
5 | from virtual_microgrids.configs.standard_lv_network import StandardLVNetwork
6 | 
7 | 
8 | def get_config(env_name, baseline=True, algorithm='ddpg'):
9 |     """Given an environment name and the baseline option, return the configuration."""
10 |     if env_name == 'Six_Bus_POC':
11 |         return ConfigSixBusPOC(baseline, algorithm)
12 |     if env_name == 'Six_Bus_MVP1':
13 |         return ConfigSixBusMVP1(baseline, algorithm)
14 |     if env_name == 'Six_Bus_MVP2':
15 |         return ConfigSixBusMVP2(baseline, algorithm)
16 |     if env_name == 'Six_Bus_MVP3':
17 |         return ConfigSixBusMVP3(baseline, algorithm)
18 |     if env_name in ['rural_1', 'rural_2', 'village_1', 'village_2', 'suburb_1']:
19 |         return StandardLVNetwork(env_name, baseline, algorithm)
20 |     raise ValueError('Unknown environment name: {}'.format(env_name))  # fail loudly instead of silently returning None
21 | 
--------------------------------------------------------------------------------
/virtual_microgrids/configs/config_base.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from datetime import datetime as dt
3 | 
4 | class ConfigBase(object):
5 |     """A base class for configurations"""
6 |     def __init__(self, use_baseline, actor, env_name):
7 | 
8 |         # output config
9 |         now = dt.now()
10 |         now = ''.join('_'.join(str(now).split(' ')).split(':'))
11 |         baseline_str = 'baseline' if use_baseline else 'no_baseline'
12 |         self.output_path = "results/{}-{}-{}_{}/".format(env_name, baseline_str, actor, now)
13 |         self.model_output = self.output_path + "model.weights/"
14 |         self.log_path = self.output_path + "log.txt"
15 |         self.plot_output = self.output_path + "scores.png"
16 |         self.record_path = self.output_path
17 |         self.record_freq = 5
18 |         self.summary_freq = 1
19 |         self.summary_freq2 = 20
20 |         self.actor = actor
21 | 
22 |         # model and training - general
23 |         self.gamma = 0.9  # the discount factor
24 | 
25 |         # model and training config - PG
26 |         self.num_batches = 150  # number of batches trained on
27 |         self.batch_size = 1000  # number of steps used to compute each
policy update 28 | self.max_ep_len = 60 # maximum episode length 29 | self.learning_rate = 3e-2 30 | self.use_baseline = use_baseline 31 | self.normalize_advantage = True 32 | 33 | # model and training config - DDPG 34 | self.tau = 0.001 35 | 36 | self.buffer_size = 1e6 37 | self.minibatch_size = self.max_ep_len * 4 38 | self.max_episodes = 1000 39 | self.reasonable_max_episodes = min(600, self.max_episodes) 40 | self.max_ep_steps = self.max_ep_len 41 | 42 | self.actor_learning_rate_start = 1e-3 43 | self.actor_learning_rate_end = 1e-6 44 | self.critic_learning_rate_start = 1e-2 45 | self.critic_learning_rate_end = 1e-3 46 | # self.actor_learning_rate_nsteps = self.max_episodes * self.max_ep_steps # What should this be? 47 | 48 | self.randomize_env = False 49 | 50 | # parameters for the policy and baseline models 51 | self.n_layers = 1 52 | self.layer_size = 16 53 | self.activation = None 54 | 55 | # since we start new episodes for each batch 56 | assert self.max_ep_len <= self.batch_size 57 | if self.max_ep_len < 0: 58 | self.max_ep_len = self.batch_size -------------------------------------------------------------------------------- /virtual_microgrids/configs/six_bus_mvp1.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from virtual_microgrids.configs.config_base import ConfigBase 3 | 4 | class ConfigSixBusMVP1(ConfigBase): 5 | """The configurations for the proof of concept (POC) simplest network used in this project. 6 | 7 | The configurations include parameters for the learning algorithm as well as for building and initializing the 8 | network components. The 6 bus POC is a symmetrical network (actually with 8 buses in this build out), designed 9 | to show that the two sides can be isolated from each other. To change the values initialized here, change config 10 | after it is instantiated before using it to build the network. 11 | """ 12 | def __init__(self, use_baseline, actor): 13 | self.env_name = 'Six_Bus_MVP1' 14 | super().__init__(use_baseline, actor, self.env_name) 15 | 16 | # environment generation 17 | self.tstep = 1. / 60 18 | self.net_zero_reward = 1.0 19 | self.vn_high = 20 20 | self.vn_low = 0.4 21 | self.length_km = 0.03 22 | self.std_type = 'NAYY 4x50 SE' 23 | self.static_feeds = { 24 | 3: -10 * np.ones(self.max_ep_len + 1), 25 | 6: -10.5 * np.ones(self.max_ep_len + 1), 26 | 4: 10.5 * np.ones(self.max_ep_len + 1), 27 | 7: 10 * np.ones(self.max_ep_len + 1) 28 | } 29 | self.battery_locations = [3, 6] 30 | self.init_soc = 0.5 31 | self.energy_capacity = 21.0 # changed from 20 to see if endpoint problem 32 | 33 | # Generation 34 | self.gen_locations = None 35 | 36 | # Action space 37 | self.gen_p_min = -50.0 38 | self.gen_p_max = 0.0 39 | self.storage_p_min = -5.0 40 | self.storage_p_max = 5.0 41 | 42 | # state space 43 | self.with_soc = False 44 | 45 | # reward function 46 | self.reward_epsilon = 0.001 47 | self.cont_reward_lambda = 0.1 -------------------------------------------------------------------------------- /virtual_microgrids/configs/six_bus_mvp2.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from virtual_microgrids.configs.config_base import ConfigBase 3 | 4 | class ConfigSixBusMVP2(ConfigBase): 5 | """The configurations for the proof of concept (POC) simplest network used in this project. 6 | 7 | The configurations include parameters for the learning algorithm as well as for building and initializing the 8 | network components. 
The 6 bus POC is a symmetrical network (actually with 8 buses in this build out), designed 9 | to show that the two sides can be isolated from each other. To change the values initialized here, change config 10 | after it is instantiated before using it to build the network. 11 | """ 12 | def __init__(self, use_baseline, actor): 13 | self.env_name = 'Six_Bus_MVP2' 14 | super().__init__(use_baseline, actor, self.env_name) 15 | 16 | # environment generation 17 | self.tstep = 1. / 60 18 | self.net_zero_reward = 1.0 19 | self.vn_high = 20 20 | self.vn_low = 0.4 21 | self.length_km = 0.03 22 | self.std_type = 'NAYY 4x50 SE' 23 | n = self.max_ep_len + 1 24 | self.static_feeds = { 25 | 3: -10 * np.ones(n), 26 | 6: -10.5 * np.ones(n), 27 | 4: 10.5 * np.ones(n), 28 | 7: 10 * np.ones(n) 29 | } 30 | self.static_feeds[7] += np.sin(2 * np.pi * np.arange(n) / n) 31 | self.battery_locations = [3, 6] 32 | self.init_soc = 0.5 33 | self.energy_capacity = 21.0 # changed from 20 to see if endpoint problem 34 | 35 | # Generation 36 | self.gen_locations = None 37 | 38 | # Action space 39 | self.gen_p_min = -50.0 40 | self.gen_p_max = 0.0 41 | self.storage_p_min = -5.0 42 | self.storage_p_max = 5.0 43 | 44 | # state space 45 | self.with_soc = False 46 | 47 | # reward function 48 | self.reward_epsilon = 0.01 49 | self.cont_reward_lambda = 0.1 50 | 51 | # parameters for the policy and baseline models 52 | self.n_layers = 2 53 | self.layer_size = 64 -------------------------------------------------------------------------------- /virtual_microgrids/configs/six_bus_mvp3.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.signal import triang 3 | from virtual_microgrids.configs.config_base import ConfigBase 4 | 5 | class ConfigSixBusMVP3(ConfigBase): 6 | """The configurations for the proof of concept (POC) simplest network used in this project. 7 | 8 | The configurations include parameters for the learning algorithm as well as for building and initializing the 9 | network components. The 6 bus POC is a symmetrical network (actually with 8 buses in this build out), designed 10 | to show that the two sides can be isolated from each other. To change the values initialized here, change config 11 | after it is instantiated before using it to build the network. 12 | """ 13 | def __init__(self, use_baseline, actor): 14 | self.env_name = 'Six_Bus_MVP3' 15 | super().__init__(use_baseline, actor, self.env_name) 16 | 17 | self.max_ep_len = 120 # maximum episode length 18 | self.buffer_size = 1e6 19 | self.minibatch_size = self.max_ep_len * 4 20 | self.max_episodes = 1000 21 | self.reasonable_max_episodes = min(600, self.max_episodes) 22 | self.max_ep_steps = self.max_ep_len 23 | self.randomize_env = True 24 | 25 | # environment generation 26 | self.tstep = 1. 
/ 60 / 2 27 | self.net_zero_reward = 1.0 28 | self.vn_high = 20 29 | self.vn_low = 0.4 30 | self.length_km = 0.03 31 | self.std_type = 'NAYY 4x50 SE' 32 | n = self.max_ep_len + 1 33 | self.static_feeds = { 34 | 3: -10 * np.ones(n), 35 | 6: -10 * np.ones(n), 36 | 4: np.random.uniform(9, 11) * np.ones(n), 37 | 7: np.random.uniform(9, 11) * np.ones(n) 38 | } 39 | load_types = np.random.choice(['sine', 'triangle', 'atan'], size=2) 40 | for load_type, feed in zip(load_types, [self.static_feeds[4], self.static_feeds[7]]): 41 | if load_type == 'sine': 42 | a = np.random.uniform(-1, 1) 43 | scale = np.random.uniform(0.5, 2) 44 | feed += a * np.sin(2 * np.pi * np.arange(n) * scale / n) 45 | elif load_type == 'triangle': 46 | a = np.random.uniform(-1, 1) 47 | roll = np.random.randint(0, n) 48 | feed += a * 2 * np.roll(triang(n) - 0.5, roll) 49 | elif load_type == 'atan': 50 | a = np.random.uniform(-1, 1) 51 | xs = np.linspace(-5, 5, n) 52 | feed += a * 2 * np.arctan(xs) / np.pi 53 | self.battery_locations = [3, 6] 54 | self.init_soc = 0.5 55 | self.energy_capacity = 21.0 # changed from 20 to see if endpoint problem 56 | 57 | # Generation 58 | self.gen_locations = None 59 | 60 | # Action space 61 | self.gen_p_min = -50.0 62 | self.gen_p_max = 0.0 63 | self.storage_p_min = -5.0 64 | self.storage_p_max = 5.0 65 | 66 | # state space 67 | self.with_soc = False 68 | 69 | # reward function 70 | self.reward_epsilon = 0.01 71 | self.cont_reward_lambda = 0.1 72 | 73 | # parameters for the policy and baseline models 74 | self.n_layers = 2 75 | self.layer_size = 64 76 | 77 | if __name__ == "__main__": 78 | env = ConfigSixBusMVP3(True, 'DDPG') 79 | -------------------------------------------------------------------------------- /virtual_microgrids/configs/six_bus_poc.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from virtual_microgrids.configs.config_base import ConfigBase 3 | 4 | class ConfigSixBusPOC(ConfigBase): 5 | """The configurations for the proof of concept (POC) simplest network used in this project. 6 | 7 | The configurations include parameters for the learning algorithm as well as for building and initializing the 8 | network components. The 6 bus POC is a symmetrical network (actually with 8 buses in this build out), designed 9 | to show that the two sides can be isolated from each other. To change the values initialized here, change config 10 | after it is instantiated before using it to build the network. 11 | """ 12 | def __init__(self, use_baseline, actor): 13 | self.env_name = 'Six_Bus_POC' 14 | super().__init__(use_baseline, actor, self.env_name) 15 | 16 | # environment generation 17 | self.tstep = 1. 
/ 60 18 | self.net_zero_reward = 1.0 19 | self.vn_high = 20 20 | self.vn_low = 0.4 21 | self.length_km = 0.03 22 | self.std_type = 'NAYY 4x50 SE' 23 | self.static_feeds = { 24 | 3: -10 * np.ones(self.max_ep_len + 1), 25 | 6: -10 * np.ones(self.max_ep_len + 1), 26 | 4: 10 * np.ones(self.max_ep_len + 1), 27 | 7: 10 * np.ones(self.max_ep_len + 1) 28 | } 29 | self.battery_locations = [3, 6] 30 | self.init_soc = 0.5 31 | self.energy_capacity = 21.0 # changed from 20 to see if endpoint problem 32 | 33 | # Generation 34 | self.gen_locations = None 35 | 36 | # Action space 37 | self.gen_p_min = -50.0 38 | self.gen_p_max = 0.0 39 | self.storage_p_min = -5.0 40 | self.storage_p_max = 5.0 41 | 42 | # state space 43 | self.with_soc = False 44 | 45 | # reward function 46 | self.reward_epsilon = 0.001 47 | self.cont_reward_lambda = 0.1 48 | -------------------------------------------------------------------------------- /virtual_microgrids/configs/standard_lv_network.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pandapower.networks import create_synthetic_voltage_control_lv_network as mknet 3 | 4 | class StandardLVNetwork(object): 5 | """The configurations for using any of the standard low voltage (LV) test networks shipped with pandapower. 6 | 7 | Options in this set up include choices to remove the generation and load elements built in to the test network, and 8 | the option to remove all sources and sinks of reactive power, q. By adding to the dictionary static_feeds_new you 9 | can create new loads or static generators on a custom schedule. 10 | 11 | To add controllable resources you can specify the 12 | locations of new generators, or specify the addition of batteries: either give their locations (by bus number), or 13 | have them assigned randomly. If percent_battery_buses is non zero (must be in the interval [0, 1]) and 14 | batteries_on_leaf_nodes_only is False, then percent_battery_buses percent of all the buses will be assigned storage. 15 | If batteries_on_leaf_nodes_only is True, then percent_battery_buses percent of all the leaf node buses will be 16 | assigned storage. The initial states of charge (soc) and the capacities can also be changed: these can either be 17 | floats or lists with length equal to the number of storage elements in the network. 
18 |     """
19 |     def __init__(self, env_name, use_baseline, actor):
20 |         self.env_name = env_name
21 |         self.actor = actor  # NetModel.reset() reads config.actor when regenerating the environment
22 |         # output config
23 |         baseline_str = 'baseline' if use_baseline else 'no_baseline'
24 |         self.output_path = "results/{}-{}-{}/".format(self.env_name, baseline_str, actor)
25 |         self.model_output = self.output_path + "model.weights/"
26 |         self.log_path = self.output_path + "log.txt"
27 |         self.plot_output = self.output_path + "scores.png"
28 |         self.record_path = self.output_path
29 |         self.record_freq = 5
30 |         self.summary_freq = 1
31 |         self.summary_freq2 = 1000
32 | 
33 |         # model and training - general
34 |         self.gamma = 0.9  # the discount factor
35 | 
36 |         # model and training config - PG
37 |         self.num_batches = 500  # number of batches trained on
38 |         self.batch_size = 1000  # number of steps used to compute each policy update
39 |         self.max_ep_len = 60  # maximum episode length
40 |         self.learning_rate = 3e-2
41 |         self.use_baseline = use_baseline
42 |         self.normalize_advantage = True
43 |         self.cont_reward_lambda = 0.1  # weight of the continuous line-flow reward term (read by NetModel.calculate_reward)
44 |         # model and training config - DDPG
45 |         self.tau = 0.001
46 |         self.reward_epsilon = 0.001
47 |         self.actor_learning_rate = 1e-3
48 |         self.critic_learning_rate = 1e-2
49 |         self.buffer_size = 1e6
50 |         self.minibatch_size = 64
51 |         self.max_episodes = 500
52 |         self.max_ep_steps = self.max_ep_len
53 |         self.randomize_env = False  # NetModel.reset() checks this flag
54 |         self.remove_q = True
55 |         self.clear_loads_sgen = False
56 |         self.clear_gen = True
57 |         self.with_soc = False  # NetModel uses this to size the observation vector
58 |         # environment generation
59 |         self.tstep = 1. / 60
60 |         self.net_zero_reward = 1.0
61 |         self.static_feeds_new = None  # Acts like static_feeds in the 6BusPOC config
62 | 
63 |         # Fill static_feeds with the loads and static generators that ship with the network
64 |         if self.static_feeds_new is None:
65 |             self.static_feeds = {}
66 |         else:
67 |             self.static_feeds = self.static_feeds_new.copy()
68 |         net = mknet(network_class=env_name)
69 |         if not self.clear_loads_sgen:
70 |             if net.load.shape[0] > 0:
71 |                 for idx, row in net.load.iterrows():
72 |                     self.static_feeds[row['bus']] = row['p_kw'] * np.ones(self.max_ep_len + 1)  # +1: NetModel.step() indexes feeds up to max_ep_len
73 |             if net.sgen.shape[0] > 0:
74 |                 for idx, row in net.sgen.iterrows():
75 |                     self.static_feeds[row['bus']] = row['p_kw'] * np.ones(self.max_ep_len + 1)
76 | 
77 |         self.battery_locations = None  # Specify specific locations, or can pick options for random generation:
78 |         self.percent_battery_buses = 0.5  # How many of the buses should be assigned batteries
79 |         self.batteries_on_leaf_nodes_only = True
80 | 
81 |         # Action space
82 |         self.gen_p_min = -50.0
83 |         self.gen_p_max = 0.0
84 |         self.storage_p_min = -50.0
85 |         self.storage_p_max = 50.0
86 | 
87 |         # Generation
88 |         self.gen_locations = [4]
89 |         self.gen_max_p_kw = [20.0]
90 | 
91 |         self.init_soc = 0.5
92 |         self.energy_capacity = 20.0
93 | 
94 |         # parameters for the policy and baseline models
95 |         self.n_layers = 1
96 |         self.layer_size = 16
97 |         self.activation = None
98 | 
99 |         # since we start new episodes for each batch
100 |         assert self.max_ep_len <= self.batch_size
101 |         if self.max_ep_len < 0:
102 |             self.max_ep_len = self.batch_size
--------------------------------------------------------------------------------
/virtual_microgrids/powerflow/__init__.py:
--------------------------------------------------------------------------------
1 | from virtual_microgrids.powerflow.pp_network import NetModel
--------------------------------------------------------------------------------
/virtual_microgrids/powerflow/network_generation.py:
--------------------------------------------------------------------------------
1 | import pandapower as pp
2 | import numpy as np
3 | 
from pandapower.networks import create_synthetic_voltage_control_lv_network as mknet 4 | 5 | 6 | def get_net(config): 7 | """Given the configuration, call a function to create the network object.""" 8 | if 'Six_Bus' in config.env_name: 9 | return six_bus(config.vn_high, config.vn_low, config.length_km, 10 | config.std_type, config.battery_locations, config.init_soc, 11 | config.energy_capacity, config.static_feeds, config.gen_locations, 12 | config.gen_p_max, config.gen_p_min, config.storage_p_max, 13 | config.storage_p_min) 14 | if config.env_name in ['rural_1', 'rural_2', 'village_1', 'village_2', 'suburb_1']: 15 | return standard_lv(config.env_name, config.remove_q, config.static_feeds_new, config.clear_loads_sgen, 16 | config.clear_gen, config.battery_locations, config.percent_battery_buses, 17 | config.batteries_on_leaf_nodes_only, config.init_soc, config.energy_capacity, 18 | config.gen_locations, config.gen_p_max, config.gen_p_min, config.storage_p_max, 19 | config.storage_p_min) 20 | 21 | 22 | def add_battery(net, bus_number, p_init, energy_capacity, init_soc=0.5, 23 | max_p=50, min_p=-50, eff=1.0, capital_cost=0, min_e=0.): 24 | """Change the network by adding a battery / storage unit. 25 | 26 | This function creates a storage element in net, and adds two non-standard columns: efficiency and capital cost. 27 | 28 | Parameters 29 | ---------- 30 | net: class 31 | The pandapower network model 32 | bus_number: int 33 | Where the battery will be added 34 | p_init: float 35 | The power draw / input of the battery on initialization 36 | init_soc: float 37 | The state of charge 38 | max_p: float 39 | The max rate that power can be drawn by the battery 40 | min_p: float 41 | The max rate that power can be pulled from the battery (negative). 42 | eff: float 43 | The efficiency 44 | capital_cost: float 45 | The capital cost of the battery 46 | min_e: float 47 | The minimum energy in the battery 48 | """ 49 | pp.create_storage(net, bus_number, p_init, energy_capacity, 50 | soc_percent=init_soc, max_p_kw=max_p, min_p_kw=min_p, 51 | min_e_kwh=min_e) 52 | idx = net.storage.index[-1] 53 | net.storage.loc[idx, 'eff'] = eff 54 | net.storage.loc[idx, 'cap_cost'] = capital_cost 55 | 56 | 57 | def six_bus(vn_high=20, vn_low=0.4, length_km=0.03, std_type='NAYY 4x50 SE', battery_locations=[3, 6], init_soc=0.5, 58 | energy_capacity=20.0, static_feeds=None, gen_locations=None, gen_p_max=0.0, gen_p_min=-50.0, 59 | storage_p_max=50.0, storage_p_min=-50.0): 60 | """This function creates the network model for the 6 bus POC network from scratch. 61 | 62 | Buses and lines are added to an empty network based on a hard-coded topology and parameters from the config file 63 | (seen as inputs). The only controllable storage added in this network are batteries, and the input static_feeds is 64 | used to add loads and static generators which are not controlled by the agent. The first value in the series is 65 | taken for initialization of those elements. 66 | """ 67 | net = pp.create_empty_network(name='6bus', f_hz=60., sn_kva=100.) 
68 | # create buses 69 | for i in range(8): 70 | nm = 'bus{}'.format(i) 71 | if i == 0: 72 | pp.create_bus(net, name=nm, vn_kv=vn_high) 73 | elif i == 1: 74 | pp.create_bus(net, name=nm, vn_kv=vn_low) 75 | else: 76 | if i <= 4: 77 | zn = 'Side1' 78 | else: 79 | zn = 'Side2' 80 | pp.create_bus(net, name=nm, zone=zn, vn_kv=vn_low) 81 | # create grid connection 82 | pp.create_ext_grid(net, 0) 83 | # create lines 84 | pp.create_line(net, 0, 1, length_km=length_km, std_type=std_type, 85 | name='line0') 86 | pp.create_line(net, 1, 2, length_km=length_km, std_type=std_type, 87 | name='line1') 88 | pp.create_line(net, 2, 3, length_km=length_km, std_type=std_type, 89 | name='line2') 90 | pp.create_line(net, 2, 4, length_km=length_km, std_type=std_type, 91 | name='line3') 92 | pp.create_line(net, 1, 5, length_km=length_km, std_type=std_type, 93 | name='line4') 94 | pp.create_line(net, 5, 6, length_km=length_km, std_type=std_type, 95 | name='line5') 96 | pp.create_line(net, 5, 7, length_km=length_km, std_type=std_type, 97 | name='line6') 98 | 99 | # add controllable storage 100 | for idx, bus_number in enumerate(battery_locations): 101 | energy_capacity_here = energy_capacity 102 | init_soc_here = init_soc 103 | if np.size(energy_capacity) > 1: 104 | energy_capacity_here = energy_capacity[idx] 105 | if np.size(init_soc) > 1: 106 | init_soc_here = init_soc[idx] 107 | 108 | add_battery(net, bus_number=bus_number, p_init=0.0, energy_capacity=energy_capacity_here, 109 | init_soc=init_soc_here, max_p=storage_p_max, min_p=storage_p_min) 110 | 111 | # Add controllable generator 112 | if gen_locations is not None: 113 | for idx, bus_number in enumerate(gen_locations): 114 | pp.create_gen(net, bus_number, p_kw=0.0, min_q_kvar=0.0, max_q_kvar=0.0, min_p_kw=gen_p_min, 115 | max_p_kw=gen_p_max) 116 | 117 | ##### TODO : Have different limits for different generators and storage ##### 118 | 119 | # add loads and static generation 120 | if static_feeds is None: 121 | print('No loads or generation assigned to network') 122 | else: 123 | if len(static_feeds) > 0: 124 | for key, val in static_feeds.items(): 125 | init_flow = val[0] 126 | print('init_flow: ', init_flow, 'at bus: ', key) 127 | if init_flow > 0: 128 | pp.create_load(net, bus=key, p_kw=init_flow, q_kvar=0) 129 | else: 130 | pp.create_sgen(net, bus=key, p_kw=init_flow, q_kvar=0) 131 | 132 | return net 133 | 134 | 135 | def standard_lv(env_name, remove_q=True, static_feeds_new=None, clear_loads_sgen=False, clear_gen=True, 136 | battery_locations=None, percent_battery_buses=0.5, batteries_on_leaf_nodes_only=True, init_soc=0.5, 137 | energy_capacity=20.0, gen_locations=None, gen_p_max=0.0, gen_p_min=-50.0, 138 | storage_p_max=50.0, storage_p_min=-50.0): 139 | """This function creates a network model using the set of synthetic voltage control low voltage (LV) networks from 140 | pandapower. 141 | 142 | The environment name, env_name, chooses which of the models to create out of 'rural_1', 'rural_2', 'village_1', 143 | 'village_2', and 'suburb_1'. 144 | 145 | Then options can be triggered to remove all reactive power components from the network (as we do in this project), 146 | or to remove static generators, loads, and generators that come with the standard model of the network. New 147 | batteries and generators are added which will be used as controllable resources by the agent. 
148 | 
149 |     static_feeds is a dictionary used by other functions to define the state of the network as we step through time, and
150 |     contains the power values of the non-controllable elements: static generators and loads. In this method we use
151 |     static_feeds_new, a subset of static_feeds, to create new loads and static generators in the network that did not
152 |     ship with the model.
153 |     """
154 | 
155 |     net = mknet(network_class=env_name)
156 | 
157 |     # Remove q components
158 |     if remove_q:
159 |         net.load.q_kvar = 0
160 |         net.sgen.q_kvar = 0
161 |         net.gen.q_kvar = 0
162 |         net.gen.min_q_kvar = 0
163 |         net.gen.max_q_kvar = 0
164 |         net.shunt.in_service = False
165 | 
166 |     # Remove built in loads and generators
167 |     if clear_loads_sgen:
168 |         net.load.in_service = False
169 |         net.sgen.in_service = False
170 |     if clear_gen:
171 |         net.gen.in_service = False
172 |     net.storage.in_service = False
173 | 
174 |     # add controllable storage
175 |     if battery_locations is not None:
176 |         applied_battery_locations = battery_locations
177 |     elif percent_battery_buses > 0:
178 |         if batteries_on_leaf_nodes_only:
179 |             leaf_nodes = []
180 |             for i in net.line.to_bus.values:
181 |                 if i not in net.line.from_bus.values:
182 |                     leaf_nodes.append(i)
183 |             applied_battery_locations = np.random.choice(leaf_nodes, int(percent_battery_buses * len(leaf_nodes)),
184 |                                                          replace=False)
185 |         else:
186 |             applied_battery_locations = np.random.choice(net.bus.shape[0],
187 |                                                          int(percent_battery_buses * net.bus.shape[0]), replace=False)
188 |     if len(applied_battery_locations) > 0:
189 |         num_batteries = len(applied_battery_locations)
190 |         for idx, bus_number in enumerate(applied_battery_locations):
191 |             energy_capacity_here = energy_capacity
192 |             init_soc_here = init_soc
193 |             if np.size(energy_capacity) > 1:
194 |                 energy_capacity_here = energy_capacity[0]
195 |                 if np.size(energy_capacity) == num_batteries:
196 |                     energy_capacity_here = energy_capacity[idx]
197 |             if np.size(init_soc) > 1:
198 |                 init_soc_here = init_soc[0]
199 |                 if np.size(init_soc) == num_batteries:  # was checking energy_capacity: copy-paste bug
200 |                     init_soc_here = init_soc[idx]
201 |             add_battery(net, bus_number=bus_number, p_init=0.0, energy_capacity=energy_capacity_here,
202 |                         init_soc=init_soc_here, max_p=storage_p_max, min_p=storage_p_min)
203 |     # Add controllable generator
204 |     if gen_locations is not None:
205 |         for idx, bus_number in enumerate(gen_locations):
206 |             pp.create_gen(net, bus_number, p_kw=0.0, min_q_kvar=0.0, max_q_kvar=0.0, max_p_kw=gen_p_max,
207 |                           min_p_kw=gen_p_min)
208 | 
209 |     if static_feeds_new is None:
210 |         print('No loads or generation added to network')
211 |     else:
212 |         if len(static_feeds_new) > 0:
213 |             for key, val in static_feeds_new.items():
214 |                 init_flow = val[0]
215 |                 print('init_flow: ', init_flow, 'at bus: ', key)
216 |                 if init_flow > 0:
217 |                     pp.create_load(net, bus=key, p_kw=init_flow, q_kvar=0)
218 |                 else:
219 |                     pp.create_sgen(net, bus=key, p_kw=init_flow, q_kvar=0)
220 | 
221 |     # Name buses for plotting
222 |     for i in range(net.bus.name.shape[0]):
223 |         net.bus.name.at[i] = 'bus' + str(i)
224 | 
225 |     return net
226 | 
227 | 
228 | 
--------------------------------------------------------------------------------
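As a quick usage sketch (assumed values, not part of the repo), the six bus network
above can be built directly and a single power flow run on it:

import numpy as np
import pandapower as pp
from virtual_microgrids.powerflow.network_generation import six_bus

static_feeds = {3: -10 * np.ones(61), 6: -10 * np.ones(61),   # negative p_kw -> sgens
                4: 10 * np.ones(61), 7: 10 * np.ones(61)}     # positive p_kw -> loads
net = six_bus(static_feeds=static_feeds)   # defaults put batteries on buses 3 and 6
pp.runpp(net)                              # results land in net.res_bus, net.res_line, ...
print(net.res_line[['p_from_kw', 'p_to_kw', 'pl_kw']])   # the flows the reward inspects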
/virtual_microgrids/powerflow/pp_network.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | import pandapower as pp
4 | from copy import deepcopy
5 | from virtual_microgrids.configs import get_config
6 | from virtual_microgrids.powerflow.network_generation import get_net
7 | from virtual_microgrids.utils import Graph
8 | 
9 | 
10 | class NetModel(object):
11 |     """Building and interacting with a network model to simulate power flow.
12 | 
13 |     In this class we model all of the network components including loads,
14 |     generators, batteries, lines, buses, and transformers. The state of each is
15 |     tracked in a pandapower network object.
16 |     """
17 |     def __init__(self, config=None, env_name='Six_Bus_POC', baseline=True,
18 |                  actor='DDPG'):
19 |         """Initialize attributes of the object and zero out certain components
20 |         in the standard test network."""
21 | 
22 |         if config is not None:
23 |             self.config = config
24 |         else:
25 |             self.config = get_config(env_name, baseline, actor)
26 |         self.net = get_net(self.config)
27 | 
28 | 
29 |         self.reward_val = 0.0
30 | 
31 |         self.tstep = self.config.tstep
32 |         self.net_zero_reward = self.config.net_zero_reward
33 |         self.initial_net = deepcopy(self.net)
34 |         self.time = 0
35 |         self.n_load = len(self.net.load)
36 |         self.n_sgen = len(self.net.sgen)
37 |         self.n_gen = len(self.net.gen)
38 |         self.n_storage = len(self.net.storage)
39 |         if self.config.with_soc:
40 |             self.observation_dim = self.n_load + self.n_sgen + self.n_storage
41 |         else:
42 |             self.observation_dim = self.n_load + self.n_sgen
43 |         self.observation_dim *= 2
44 |         self.action_dim = self.n_gen + self.n_storage
45 |         self.graph = Graph(len(self.net.bus))
46 |         for idx, entry in self.net.line.iterrows():
47 |             self.graph.addEdge(entry.from_bus, entry.to_bus)
48 |         self.current_state = None
49 |         self.last_state = None
50 | 
51 |     def reset(self):
52 |         """Reset the network and reward values back to how they were initialized."""
53 |         if not self.config.randomize_env:
54 |             self.net = deepcopy(self.initial_net)
55 |         else:
56 |             self.config = get_config(self.config.env_name, self.config.use_baseline,
57 |                                      self.config.actor)
58 |             self.net = get_net(self.config)
59 |         self.reward_val = 0.0
60 |         self.time = 0
61 |         self.run_powerflow()
62 |         self.current_state = self.get_state(self.config.with_soc)
63 |         self.last_state = deepcopy(self.current_state)
64 |         return np.concatenate([self.current_state, self.current_state - self.last_state])
65 | 
66 |     def step(self, p_set):
67 |         """Update the simulation by one step
68 | 
69 |         :param p_set: 1D numpy array of floats, the action for the agent
70 |         :return: tuple of (observation, reward, done, info), gym-style
71 |         """
72 |         # Increment the time
73 |         self.time += 1
74 |         self.last_state = deepcopy(self.current_state)
75 |         # Update non-controllable resources from their predefined data feeds
76 |         new_loads = pd.Series(index=self.net.load.bus, dtype=float)
77 |         new_sgens = pd.Series(index=self.net.sgen.bus, dtype=float)
78 |         for bus, feed in self.config.static_feeds.items():
79 |             p_new = feed[self.time]
80 |             if p_new > 0:
81 |                 new_loads[bus] = p_new
82 |             else:
83 |                 new_sgens[bus] = p_new
84 |         self.update_loads(new_p=new_loads.values)
85 |         self.update_static_generation(new_p=new_sgens.values)
86 |         # Update controllable resources
87 |         new_gens = p_set[:self.n_gen]
88 |         new_storage = p_set[self.n_gen:]
89 |         self.update_generation(new_p=new_gens)
90 |         self.update_batteries(new_p=new_storage)
91 |         # Run power flow
92 |         self.run_powerflow()
93 |         # Collect items to return
94 |         state = self.get_state(self.config.with_soc)
95 |         self.current_state = state
96 |         reward = self.calculate_reward(eps=self.config.reward_epsilon)
97 |         done = self.time >= self.config.max_ep_len
98 |         info = ''
99 |         return np.concatenate([self.current_state, self.current_state - self.last_state]), reward, done, info
100 | 
101 |     def get_state(self, with_soc=False):
102 |         """Get the current state of the game
103 | 
104 |         The state is given by the power supplied or consumed by devices
105 |         on the network, plus the state of charge (SoC) of the batteries. This
106 |         method defines a "global ordering" for this vector:
107 |           - Non-controllable loads (power, kW)
108 |           - Non-controllable generators (power, kW)
109 |           - SoC for batteries (soc, fraction, no units; only when with_soc is True)
110 | 
111 |         Controllable generator and battery powers are not included here.
112 | 
113 |         We are not currently considering reactive power (Q) as part of the
114 |         problem.
115 | 
116 |         :return: A 1D numpy array containing the current state
117 |         """
118 |         p_load = self.net.res_load.p_kw
119 |         p_sgen = self.net.res_sgen.p_kw
120 |         p_gen = self.net.res_gen.p_kw
121 |         p_storage = self.net.res_storage.p_kw
122 |         if with_soc:
123 |             soc_storage = self.net.storage.soc_percent
124 |             state = np.concatenate([p_load, p_sgen, soc_storage])
125 |         else:
126 |             state = np.concatenate([p_load, p_sgen])
127 |         return state
128 | 
129 |     def update_loads(self, new_p=None, new_q=None):
130 | 
131 |         """Update the loads in the network.
132 | 
133 |         This method assumes that the orders match, i.e. the order of the buses in
134 |         self.net.load.bus matches where the loads in new_p and new_q should be
135 |         applied based on their indexing.
136 | 
137 |         Parameters
138 |         ----------
139 |         new_p, new_q: array_like
140 |             New values for the real and reactive load powers, shape (number of load buses, 1).
141 | 
142 |         Attributes
143 |         ----------
144 |         self.net.load: object
145 |             The load values in the network object are updated.
146 |         """
147 |         if new_p is not None:
148 |             self.net.load.p_kw = new_p
149 |         if new_q is not None:
150 |             self.net.load.q_kvar = new_q
151 | 
152 |     def update_static_generation(self, new_p=None, new_q=None):
153 |         """Update the static generation in the network.
154 | 
155 |         This method assumes that the orders match, i.e. the order of the buses in
156 |         self.net.sgen.bus matches where the generation values in new_p and
157 |         new_q should be applied based on their indexing.
158 | 
159 |         Parameters
160 |         ----------
161 |         new_p, new_q: array_like
162 |             New values for the real and reactive static generation, shape
163 |             (number of static generators, 1).
164 | 
165 |         Attributes
166 |         ----------
167 |         self.net.sgen: object
168 |             The static generation values in the network object are updated.
169 |         """
170 |         if new_p is not None:
171 |             self.net.sgen.p_kw = new_p
172 |         if new_q is not None:
173 |             self.net.sgen.q_kvar = new_q
174 | 
175 |     def update_generation(self, new_p=None, new_q=None):
176 |         """Update the traditional (not static) generation in the network.
177 | 
178 |         This method assumes that the orders match, i.e. the order of the buses in
179 |         self.net.gen.bus matches where the generation values in new_p
180 |         should be applied based on their indexing.
181 | 
182 |         Parameters
183 |         ----------
184 |         new_p, new_q: array_like
185 |             New values for the real and reactive generation, shape (number of
186 |             traditional generators, 1).
187 | 
188 |         Attributes
189 |         ----------
190 |         self.net.gen: object
191 |             The traditional generation values in the network object are updated.
192 |         """
193 |         if new_p is not None:
194 |             self.net.gen.p_kw = new_p
195 |         if new_q is not None:
196 |             self.net.gen.q_kvar = new_q
197 | 
198 |     def update_batteries(self, new_p):
199 |         """Update the batteries / storage units in the network.
200 | 
201 |         This method assumes that the orders match, i.e. the order of the buses in
202 |         self.net.storage.bus matches where the power values in new_p
203 |         should be applied based on their indexing.
204 | 
205 |         Parameters
206 |         ----------
207 |         new_p: array_like
208 |             The power flow into / out of each battery, shape (number of storage units, 1).
209 | 
210 |         Attributes
211 |         ----------
212 |         self.net.storage: object
213 |             The storage values in the network object are updated.
214 |         """
215 |         soc = self.net.storage.soc_percent
216 |         cap = self.net.storage.max_e_kwh
217 |         eff = self.net.storage.eff
218 |         pmin = self.net.storage.min_p_kw
219 |         pmin_soc = -1 * soc * cap * eff / self.tstep  # cannot discharge more energy than is stored
220 |         pmin = np.max([pmin, pmin_soc], axis=0)
221 |         pmax = self.net.storage.max_p_kw
222 |         pmax_soc = (1. - soc) * cap / (eff * self.tstep)  # cannot charge past full
223 |         pmax = np.min([pmax, pmax_soc], axis=0)
224 |         ps = np.clip(new_p, pmin, pmax)
225 |         self.net.storage.p_kw = ps
226 |         soc_next = soc + ps * self.tstep * eff / cap
227 |         msk = ps < 0
228 |         soc_next[msk] = (soc + ps * self.tstep / (eff * cap))[msk]  # discharging: internal energy drawn exceeds power delivered when eff < 1
229 |         self.net.storage.soc_percent = soc_next
230 | 
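To make the state-of-charge clipping above concrete, here is a standalone numeric
check with illustrative values (not taken from the configs): a nearly empty battery
binds on the SoC limit rather than on its kW rating.

import numpy as np

soc, cap, eff, tstep = 0.01, 20.0, 1.0, 1.0 / 60   # 1% SoC, 20 kWh, 1-minute step
p_rating_min, p_rating_max = -50.0, 50.0           # kW; charging is positive

pmin = max(p_rating_min, -soc * cap * eff / tstep)           # -12 kW: the energy left binds, not the rating
pmax = min(p_rating_max, (1.0 - soc) * cap / (eff * tstep))  # 50 kW: here the rating binds
applied = np.clip(-80.0, pmin, pmax)                         # the agent asked for -80 kW
soc_next = soc + applied * tstep / (eff * cap)               # discharging branch (applied < 0)
print(pmin, pmax, applied, soc_next)                         # -12.0 50.0 -12.0 0.0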
231 |     def run_powerflow(self):
232 |         """Evaluate the power flow. Results are stored in the results matrices
233 |         of the net object, e.g. self.net.res_bus.
234 | 
235 |         Attributes
236 |         ----------
237 |         self.net: object
238 |             The network matrices are updated to reflect the results.
239 |             Specifically: self.net.res_bus, self.net.res_line, self.net.res_gen,
240 |             self.net.res_sgen, self.net.res_trafo, self.net.res_storage.
241 |         """
242 |         try:
243 |             pp.runpp(self.net, enforce_q_lims=True,
244 |                      calculate_voltage_angles=False,
245 |                      voltage_depend_loads=False)
246 |         except Exception:
247 |             print("There was an error running the powerflow! pp.runpp() didn't work")
248 | 
249 |     def calculate_reward(self, eps=0.001, type=4):
250 |         """Calculate the reward associated with a power flow result.
251 | 
252 |         We count zero flow through the line as when the power flowing into the
253 |         line is equal to the power lost in it. This gives a positive reward.
254 | 
255 |         A cost (negative reward) is incurred for running the batteries, based
256 |         on the capital cost of the battery and the expected lifetime (currently
257 |         hardcoded to 1000 cycles). So, if the capital cost of the battery is set
258 |         to zero, then producing or consuming power with the battery is free to
259 |         use.
260 | 
261 |         Parameters
262 |         ----------
263 |         eps: float
264 |             Tolerance
265 |         type: int, selecting which of the reward formulations (1 to 5) below to use
266 |         Attributes
267 |         ----------
268 |         reward_val: The value of the reward function is returned.
269 |         """
270 |         c1 = np.abs(self.net.res_line.p_to_kw - self.net.res_line.pl_kw) < eps
271 |         c2 = np.abs(self.net.res_line.p_from_kw - self.net.res_line.pl_kw) < eps
272 |         zeroed_lines = np.logical_or(c1.values, c2.values)
273 |         # Type 1 Reward: count of lines with zero-net-flow
274 |         if type == 1:
275 |             self.reward_val = np.sum(zeroed_lines, dtype=float)
276 |         # Type 2 Reward: count of nodes not pulling power from grid
277 |         elif type in [2, 3, 4]:
278 |             graph_new = deepcopy(self.graph)
279 |             for line_idx, zeroed in enumerate(zeroed_lines):
280 |                 if zeroed:
281 |                     v = self.net.line.from_bus[line_idx]
282 |                     w = self.net.line.to_bus[line_idx]
283 |                     graph_new.removeEdge(v, w)
284 |             self.reward_val = 0
285 |             ext_connections = self.net.ext_grid.bus.values
286 |             num_vmgs = 0
287 |             for subgraph in graph_new.connectedComponents():
288 |                 if not np.any([item in subgraph for item in ext_connections]):
289 |                     self.reward_val += len(subgraph)
290 |                     num_vmgs += 1
291 |             self.reward_val *= num_vmgs
292 |         elif type == 5:
293 |             pass
294 | 
295 |         # Add distance function:
296 |         if type == 3:
297 |             line_flow_values = np.maximum(np.abs(self.net.res_line.p_to_kw),
298 |                                           np.abs(self.net.res_line.p_from_kw)) - self.net.res_line.pl_kw
299 |             self.reward_val -= self.config.cont_reward_lambda * np.linalg.norm(line_flow_values, 1)
300 |         elif type == 4:
301 |             line_flow_values = np.maximum(np.abs(self.net.res_line.p_to_kw),
302 |                                           np.abs(self.net.res_line.p_from_kw)) - self.net.res_line.pl_kw
303 |             self.reward_val -= self.config.cont_reward_lambda * np.sum(np.minimum(np.abs(line_flow_values),
304 |                                                                        1.0*np.ones(np.shape(line_flow_values)[0])))
305 |         # Costs for running batteries
306 |         cap_costs = self.net.storage.cap_cost
307 |         max_e = self.net.storage.max_e_kwh
308 |         min_e = self.net.storage.min_e_kwh
309 |         betas = cap_costs / (2 * 1000 * (max_e - min_e))
310 |         incurred_costs = betas * np.abs(self.net.storage.p_kw)
311 |         for c in incurred_costs:
312 |             self.reward_val -= c
313 |         return self.reward_val
314 | 
315 | if __name__ == "__main__":
316 |     env1 = NetModel(env_name='Six_Bus_POC')
317 |     #env1 = NetModel(env_name='Six_Bus_MVP3')
318 |     env1.config.reward_epsilon = 0.1
319 |     env1.reset()
320 |     env1.step([-0.02, -0.02])
321 | 
--------------------------------------------------------------------------------
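The type 2 reward above can be exercised in isolation with the repo's own Graph
utility; the topology and the "zeroed" line here are made up for illustration:

from virtual_microgrids.utils import Graph

g = Graph(4)                       # bus0 (external grid) - bus1 - bus2 - bus3
for v, w in [(0, 1), (1, 2), (2, 3)]:
    g.addEdge(v, w)
g.removeEdge(1, 2)                 # pretend line 1-2 carried zero net flow

ext_connections = [0]              # the external grid sits on bus 0
reward, num_vmgs = 0, 0
for subgraph in g.connectedComponents():
    if not any(item in subgraph for item in ext_connections):
        reward += len(subgraph)    # buses islanded from the grid
        num_vmgs += 1
reward *= num_vmgs
print(reward)                      # 2: the {2, 3} island forms one virtual microgrid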
/virtual_microgrids/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from virtual_microgrids.utils.replay_buffer import ReplayBuffer
2 | from virtual_microgrids.utils.linear_schedule import LinearSchedule
3 | from virtual_microgrids.utils.log_schedule import LogSchedule
4 | from virtual_microgrids.utils.orstein_uhlenbeck_action_noise import OrnsteinUhlenbeckActionNoise
5 | from virtual_microgrids.utils.graph import Graph
6 | 
--------------------------------------------------------------------------------
/virtual_microgrids/utils/general.py:
--------------------------------------------------------------------------------
1 | """The base of this code was prepared for a homework by course staff for CS234
2 | at Stanford, Winter 2019."""
3 | 
4 | import time
5 | import sys
6 | import logging
7 | import numpy as np
8 | from collections import deque
9 | import matplotlib
10 | matplotlib.use('agg')
11 | import matplotlib.pyplot as plt
12 | 
13 | 
14 | def export_plot(ys, ylabel, title, filename):
15 |     """
16 |     Export a plot in filename
17 | 
18 |     Args:
19 |         ys: (list) of float / int to plot
20 |         ylabel, title, filename: (string) y-axis label, plot title, and output path
21 |     """
22 |     plt.figure()
23 |     plt.plot(range(len(ys)), ys)
24 |     plt.xlabel("Training Episode")
25 |     plt.ylabel(ylabel)
26 |     plt.title(title)
27 |     plt.savefig(filename)
28 |     plt.close()
29 | 
30 | 
31 | def get_logger(filename):
32 |     """
33 |     Return a logger instance to a file
34 |     """
35 |     logger = logging.getLogger('logger')
36 |     logger.setLevel(logging.DEBUG)
37 |     logging.basicConfig(format='%(message)s', level=logging.DEBUG)
38 |     handler = logging.FileHandler(filename)
39 |     handler.setLevel(logging.DEBUG)
40 |     handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
41 |     logging.getLogger().addHandler(handler)
42 |     return logger
43 | 
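A short usage sketch for the two helpers above (the paths are made up, and the
results directory must already exist for the FileHandler to open the log file):

from virtual_microgrids.utils.general import get_logger, export_plot

logger = get_logger('results/demo/log.txt')
logger.info('starting a demo run')
export_plot([1.0, 2.5, 3.0], 'Score', 'Demo Run', 'results/demo/scores.png')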
44 | 
45 | class Progbar(object):
46 |     """Progbar class copied from keras (https://github.com/fchollet/keras/)
47 | 
48 |     Displays a progress bar.
49 |     Small edit: added strict arg to update
50 |     # Arguments
51 |         target: Total number of steps expected.
52 |         interval: Minimum visual progress update interval (in seconds).
53 |     """
54 | 
55 |     def __init__(self, target, width=30, verbose=1, discount=0.9):
56 |         self.width = width
57 |         self.target = target
58 |         self.sum_values = {}
59 |         self.exp_avg = {}
60 |         self.unique_values = []
61 |         self.start = time.time()
62 |         self.total_width = 0
63 |         self.seen_so_far = 0
64 |         self.verbose = verbose
65 |         self.discount = discount
66 | 
67 |     def update(self, current, values=[], exact=[], strict=[], exp_avg=[]):
68 |         """
69 |         Updates the progress bar.
70 |         # Arguments
71 |             current: Index of current step.
72 |             values: List of tuples (name, value_for_last_step).
73 |                 The progress bar will display averages for these values.
74 |             exact: List of tuples (name, value_for_last_step).
75 |                 The progress bar will display these values directly.
76 |         """
77 | 
78 |         for k, v in values:
79 |             if k not in self.sum_values:
80 |                 self.sum_values[k] = [v * (current - self.seen_so_far), current - self.seen_so_far]
81 |                 self.unique_values.append(k)
82 |             else:
83 |                 self.sum_values[k][0] += v * (current - self.seen_so_far)
84 |                 self.sum_values[k][1] += (current - self.seen_so_far)
85 |         for k, v in exact:
86 |             if k not in self.sum_values:
87 |                 self.unique_values.append(k)
88 |             self.sum_values[k] = [v, 1]
89 |         for k, v in strict:
90 |             if k not in self.sum_values:
91 |                 self.unique_values.append(k)
92 |             self.sum_values[k] = v
93 |         for k, v in exp_avg:
94 |             if k not in self.exp_avg:
95 |                 self.exp_avg[k] = v
96 |             else:
97 |                 self.exp_avg[k] *= self.discount
98 |                 self.exp_avg[k] += (1-self.discount)*v
99 | 
100 |         self.seen_so_far = current
101 | 
102 |         now = time.time()
103 |         if self.verbose == 1:
104 |             prev_total_width = self.total_width
105 |             sys.stdout.write("\b" * prev_total_width)
106 |             sys.stdout.write("\r")
107 | 
108 |             numdigits = int(np.floor(np.log10(self.target))) + 1
109 |             barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)
110 |             bar = barstr % (current, self.target)
111 |             prog = float(current)/self.target
112 |             prog_width = int(self.width*prog)
113 |             if prog_width > 0:
114 |                 bar += ('='*(prog_width-1))
115 |                 if current < self.target:
116 |                     bar += '>'
117 |                 else:
118 |                     bar += '='
119 |             bar += ('.'*(self.width-prog_width))
120 |             bar += ']'
121 |             sys.stdout.write(bar)
122 |             self.total_width = len(bar)
123 | 
124 |             if current:
125 |                 time_per_unit = (now - self.start) / current
126 |             else:
127 |                 time_per_unit = 0
128 |             eta = time_per_unit*(self.target - current)
129 |             info = ''
130 |             if current < self.target:
131 |                 info += ' - ETA: %ds' % eta
132 |             else:
133 |                 info += ' - %ds' % (now - self.start)
134 |             for k in self.unique_values:
135 |                 if type(self.sum_values[k]) is list:
136 |                     info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))
137 |                 else:
138 |                     info += ' - %s: %s' % (k, self.sum_values[k])
139 | 
140 |             for k, v in self.exp_avg.items():
141 |                 info += ' - %s: %.4f' % (k, v)
142 | 
143 |             self.total_width += len(info)
144 |             if prev_total_width > self.total_width:
145 |                 info += ((prev_total_width-self.total_width) * " ")
146 | 
147 |             sys.stdout.write(info)
148 |             sys.stdout.flush()
149 | 
150 |             if current >= self.target:
151 |                 sys.stdout.write("\n")
152 | 
153 |         if self.verbose == 2:
154 |             if current >= self.target:
155 |                 info = '%ds' % (now - self.start)
156 |                 for k in self.unique_values:
157 |                     info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))
158 |                 sys.stdout.write(info + "\n")
159 | 
160 |     def add(self, n, values=[]):
161 |         self.update(self.seen_so_far+n, values)
162 | 
--------------------------------------------------------------------------------
/virtual_microgrids/utils/graph.py:
--------------------------------------------------------------------------------
1 | class Graph:
2 |     # init function to declare class variables
3 |     def __init__(self, V):
4 |         self.V = V
5 |         self.adj = [[] for i in range(V)]
6 | 
7 |     def DFSUtil(self, temp, v, visited):
8 | 
9 |         # Mark the current vertex as visited
10 |         visited[v] = True
11 | 
12 |         # Store the vertex to list
13 |         temp.append(v)
14 | 
15 |         # Repeat for all vertices adjacent
16 |         # to this vertex v
17 |         for i in self.adj[v]:
18 |             if visited[i] == False:
19 |                 # Update the list
20 |                 temp = self.DFSUtil(temp, i, visited)
21 |         return temp
22 | 
23 |     # method to add an undirected edge
24 | 
25 |     def addEdge(self, v, w):
26 |         self.adj[v].append(w)
27 | 
self.adj[w].append(v) 28 | 29 | def removeEdge(self, v, w): 30 | self.adj[v].remove(w) 31 | self.adj[w].remove(v) 32 | 33 | # Method to retrieve connected components 34 | # in an undirected graph 35 | def connectedComponents(self): 36 | visited = [] 37 | cc = [] 38 | for i in range(self.V): 39 | visited.append(False) 40 | for v in range(self.V): 41 | if visited[v] == False: 42 | temp = [] 43 | cc.append(self.DFSUtil(temp, v, visited)) 44 | return cc 45 | -------------------------------------------------------------------------------- /virtual_microgrids/utils/linear_schedule.py: -------------------------------------------------------------------------------- 1 | class LinearSchedule(object): 2 | def __init__(self, eps_begin, eps_end, nsteps): 3 | """ 4 | Args: 5 | eps_begin: initial exploration 6 | eps_end: end exploration 7 | nsteps: number of steps between the two values of eps 8 | """ 9 | self.epsilon = eps_begin 10 | self.eps_begin = eps_begin 11 | self.eps_end = eps_end 12 | self.nsteps = nsteps 13 | 14 | def update(self, t): 15 | """ 16 | Updates epsilon 17 | 18 | Args: 19 | t: int 20 | frame number 21 | """ 22 | if t <= self.nsteps: 23 | self.epsilon = self.eps_begin + (self.eps_end - self.eps_begin) * t / self.nsteps 24 | else: 25 | self.epsilon = self.eps_end -------------------------------------------------------------------------------- /virtual_microgrids/utils/log_schedule.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class LogSchedule(object): 5 | def __init__(self, eps_begin, eps_end, nsteps): 6 | """ 7 | Args: 8 | eps_begin: initial exploration 9 | eps_end: end exploration 10 | nsteps: number of steps between the two values of eps 11 | """ 12 | self.epsilon = eps_begin 13 | self.eps_begin = eps_begin 14 | self.eps_end = eps_end 15 | self.begin_exp = np.log10(self.eps_begin) 16 | self.end_exp = np.log10(self.eps_end) 17 | self.nsteps = nsteps 18 | 19 | def update(self, t): 20 | """ 21 | Updates epsilon 22 | 23 | Args: 24 | t: int 25 | frame number 26 | """ 27 | 28 | if t <= self.nsteps: 29 | inter_exp = self.begin_exp + (self.end_exp - self.begin_exp) * t / self.nsteps 30 | self.epsilon = np.power(10, inter_exp) 31 | else: 32 | self.epsilon = self.eps_end 33 | -------------------------------------------------------------------------------- /virtual_microgrids/utils/orstein_uhlenbeck_action_noise.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | class OrnsteinUhlenbeckActionNoise(object): 4 | """ 5 | Implementation of an Ornstein–Uhlenbeck process for exploration. 
Based on: 6 | https://github.com/openai/baselines/blob/master/baselines/ddpg/noise.py 7 | https://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab 8 | """ 9 | def __init__(self, mu, sigma=0.1 , theta=.15, dt=1e-2, x0=None): 10 | self.theta = theta 11 | self.mu = mu 12 | self.sigma = sigma 13 | self.dt = dt 14 | self.x0 = x0 15 | self.reset() 16 | 17 | def __call__(self): 18 | x = (self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + 19 | self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)) 20 | self.x_prev = x 21 | return x 22 | 23 | def reset(self): 24 | self.x_prev = self.x0 if self.x0 is not None else np.zeros_like(self.mu) 25 | 26 | def __repr__(self): 27 | return 'OrnsteinUhlenbeckActionNoise(mu={}, sigma={})'.format(self.mu, 28 | self.sigma) -------------------------------------------------------------------------------- /virtual_microgrids/utils/replay_buffer.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from collections import deque 3 | import random 4 | 5 | class ReplayBuffer(object): 6 | """ 7 | A data structure to hold the replay buffer. Based on this blog post: 8 | https://pemami4911.github.io/blog/2016/08/21/ddpg-rl.html 9 | """ 10 | def __init__(self, buffer_size): 11 | self.buffer_size = buffer_size 12 | self.count = 0 13 | self.buffer = deque() 14 | 15 | def add(self, s, a, r, t, s2): 16 | experience = (s, a, r, t, s2) 17 | if self.count < self.buffer_size: 18 | self.buffer.append(experience) 19 | self.count += 1 20 | else: 21 | self.buffer.popleft() 22 | self.buffer.append(experience) 23 | 24 | def size(self): 25 | return self.count 26 | 27 | def sample_batch(self, batch_size): 28 | ''' 29 | batch_size specifies the number of experiences to add 30 | to the batch. If the replay buffer has less than batch_size 31 | elements, simply return all of the elements within the buffer. 32 | Generally, you'll want to wait until the buffer has at least 33 | batch_size elements before beginning to sample from it. 34 | ''' 35 | batch = [] 36 | 37 | if self.count < batch_size: 38 | batch = random.sample(self.buffer, self.count) 39 | else: 40 | batch = random.sample(self.buffer, batch_size) 41 | 42 | s_batch = np.array([_[0] for _ in batch]) 43 | a_batch = np.array([_[1] for _ in batch]) 44 | r_batch = np.array([_[2] for _ in batch]) 45 | t_batch = np.array([_[3] for _ in batch]) 46 | s2_batch = np.array([_[4] for _ in batch]) 47 | 48 | return s_batch, a_batch, r_batch, t_batch, s2_batch 49 | 50 | def clear(self): 51 | self.buffer.clear() 52 | self.count = 0 --------------------------------------------------------------------------------