├── .gitignore
├── .idea
├── VirtualMicrogridSegmentation.iml
├── encodings.xml
├── misc.xml
├── modules.xml
└── vcs.xml
├── Functioning_environment_test.ipynb
├── Interacting_with_pp_network.ipynb
├── LICENSE
├── POC_6bus.ipynb
├── README.md
├── Siobhan_scratch_testing.ipynb
├── requirements.txt
├── scripts
├── run_ddpg.py
└── run_pg.py
├── testing.ipynb
└── virtual_microgrids
├── __init__.py
├── agents
├── __init__.py
├── actor_network.py
└── critic_network.py
├── algorithms
├── __init__.py
├── ddpg.py
└── pg.py
├── configs
├── __init__.py
├── config.py
├── config_base.py
├── six_bus_mvp1.py
├── six_bus_mvp2.py
├── six_bus_mvp3.py
├── six_bus_poc.py
└── standard_lv_network.py
├── powerflow
├── __init__.py
├── network_generation.py
└── pp_network.py
└── utils
├── __init__.py
├── general.py
├── graph.py
├── linear_schedule.py
├── log_schedule.py
├── orstein_uhlenbeck_action_noise.py
└── replay_buffer.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Tensorboard Results
2 | results/
3 |
4 | # Created by https://www.gitignore.io/api/python,pycharm,sublimetext,jupyternotebook
5 | # Edit at https://www.gitignore.io/?templates=python,pycharm,sublimetext,jupyternotebook
6 |
7 | ### JupyterNotebook ###
8 | .ipynb_checkpoints
9 | */.ipynb_checkpoints/*
10 |
11 | # Remove previous ipynb_checkpoints
12 | # git rm -r .ipynb_checkpoints/
13 | #
14 |
15 | ### PyCharm ###
16 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
17 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
18 |
19 | # User-specific stuff
20 | .idea/**/workspace.xml
21 | .idea/**/tasks.xml
22 | .idea/**/usage.statistics.xml
23 | .idea/**/dictionaries
24 | .idea/**/shelf
25 |
26 | # Generated files
27 | .idea/**/contentModel.xml
28 |
29 | # Sensitive or high-churn files
30 | .idea/**/dataSources/
31 | .idea/**/dataSources.ids
32 | .idea/**/dataSources.local.xml
33 | .idea/**/sqlDataSources.xml
34 | .idea/**/dynamic.xml
35 | .idea/**/uiDesigner.xml
36 | .idea/**/dbnavigator.xml
37 |
38 | # Gradle
39 | .idea/**/gradle.xml
40 | .idea/**/libraries
41 |
42 | # Gradle and Maven with auto-import
43 | # When using Gradle or Maven with auto-import, you should exclude module files,
44 | # since they will be recreated, and may cause churn. Uncomment if using
45 | # auto-import.
46 | # .idea/modules.xml
47 | # .idea/*.iml
48 | # .idea/modules
49 |
50 | # CMake
51 | cmake-build-*/
52 |
53 | # Mongo Explorer plugin
54 | .idea/**/mongoSettings.xml
55 |
56 | # File-based project format
57 | *.iws
58 |
59 | # IntelliJ
60 | out/
61 |
62 | # mpeltonen/sbt-idea plugin
63 | .idea_modules/
64 |
65 | # JIRA plugin
66 | atlassian-ide-plugin.xml
67 |
68 | # Cursive Clojure plugin
69 | .idea/replstate.xml
70 |
71 | # Crashlytics plugin (for Android Studio and IntelliJ)
72 | com_crashlytics_export_strings.xml
73 | crashlytics.properties
74 | crashlytics-build.properties
75 | fabric.properties
76 |
77 | # Editor-based Rest Client
78 | .idea/httpRequests
79 |
80 | # Android studio 3.1+ serialized cache file
81 | .idea/caches/build_file_checksums.ser
82 |
83 | ### PyCharm Patch ###
84 | # Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
85 |
86 | # *.iml
87 | # modules.xml
88 | # .idea/misc.xml
89 | # *.ipr
90 |
91 | # Sonarlint plugin
92 | .idea/sonarlint
93 |
94 | ### Python ###
95 | # Byte-compiled / optimized / DLL files
96 | __pycache__/
97 | *.py[cod]
98 | *$py.class
99 |
100 | # C extensions
101 | *.so
102 |
103 | # Distribution / packaging
104 | .Python
105 | build/
106 | develop-eggs/
107 | dist/
108 | downloads/
109 | eggs/
110 | .eggs/
111 | lib/
112 | lib64/
113 | parts/
114 | sdist/
115 | var/
116 | wheels/
117 | pip-wheel-metadata/
118 | share/python-wheels/
119 | *.egg-info/
120 | .installed.cfg
121 | *.egg
122 | MANIFEST
123 |
124 | # PyInstaller
125 | # Usually these files are written by a python script from a template
126 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
127 | *.manifest
128 | *.spec
129 |
130 | # Installer logs
131 | pip-log.txt
132 | pip-delete-this-directory.txt
133 |
134 | # Unit test / coverage reports
135 | htmlcov/
136 | .tox/
137 | .nox/
138 | .coverage
139 | .coverage.*
140 | .cache
141 | nosetests.xml
142 | coverage.xml
143 | *.cover
144 | .hypothesis/
145 | .pytest_cache/
146 |
147 | # Translations
148 | *.mo
149 | *.pot
150 |
151 | # Django stuff:
152 | *.log
153 | local_settings.py
154 | db.sqlite3
155 |
156 | # Flask stuff:
157 | instance/
158 | .webassets-cache
159 |
160 | # Scrapy stuff:
161 | .scrapy
162 |
163 | # Sphinx documentation
164 | docs/_build/
165 |
166 | # PyBuilder
167 | target/
168 |
169 | # Jupyter Notebook
170 |
171 | # IPython
172 | profile_default/
173 | ipython_config.py
174 |
175 | # pyenv
176 | .python-version
177 |
178 | # celery beat schedule file
179 | celerybeat-schedule
180 |
181 | # SageMath parsed files
182 | *.sage.py
183 |
184 | # Environments
185 | .env
186 | .venv
187 | env/
188 | venv/
189 | ENV/
190 | env.bak/
191 | venv.bak/
192 |
193 | # Spyder project settings
194 | .spyderproject
195 | .spyproject
196 |
197 | # Rope project settings
198 | .ropeproject
199 |
200 | # mkdocs documentation
201 | /site
202 |
203 | # mypy
204 | .mypy_cache/
205 | .dmypy.json
206 | dmypy.json
207 |
208 | # Pyre type checker
209 | .pyre/
210 |
211 | ### Python Patch ###
212 | .venv/
213 |
214 | ### SublimeText ###
215 | # Cache files for Sublime Text
216 | *.tmlanguage.cache
217 | *.tmPreferences.cache
218 | *.stTheme.cache
219 |
220 | # Workspace files are user-specific
221 | *.sublime-workspace
222 |
223 | # Project files should be checked into the repository, unless a significant
224 | # proportion of contributors will probably not be using Sublime Text
225 | # *.sublime-project
226 |
227 | # SFTP configuration file
228 | sftp-config.json
229 |
230 | # Package control specific files
231 | Package Control.last-run
232 | Package Control.ca-list
233 | Package Control.ca-bundle
234 | Package Control.system-ca-bundle
235 | Package Control.cache/
236 | Package Control.ca-certs/
237 | Package Control.merged-ca-bundle
238 | Package Control.user-ca-bundle
239 | oscrypto-ca-bundle.crt
240 | bh_unicode_properties.cache
241 |
242 | # Sublime-github package stores a github token in this file
243 | # https://packagecontrol.io/packages/sublime-github
244 | GitHub.sublime-settings
245 |
246 | ### macOS ###
247 | # General
248 | .DS_Store
249 | .AppleDouble
250 | .LSOverride
251 |
252 | # Icon must end with two \r
253 | Icon
254 |
255 | # Thumbnails
256 | ._*
257 |
258 | # Files that might appear in the root of a volume
259 | .DocumentRevisions-V100
260 | .fseventsd
261 | .Spotlight-V100
262 | .TemporaryItems
263 | .Trashes
264 | .VolumeIcon.icns
265 | .com.apple.timemachine.donotpresent
266 |
267 | # Directories potentially created on remote AFP share
268 | .AppleDB
269 | .AppleDesktop
270 | Network Trash Folder
271 | Temporary Items
272 | .apdisk
273 |
274 | # End of https://www.gitignore.io/api/python,pycharm,sublimetext,jupyternotebook
275 |
--------------------------------------------------------------------------------
/.idea/VirtualMicrogridSegmentation.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
--------------------------------------------------------------------------------
/.idea/encodings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/Interacting_with_pp_network.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# How to work with pp_network.py\n",
8 | "\n",
9 | "*pp_network.py* has a class called *NetModel* that handles the whole network side of our simulation. The network state object includes a pandapower object called *net* as well as methods to implement actions, calculate the reward, and run simulations on *net*.\n",
10 | "\n",
11 | "Another pandapower note: net.line has an attribute 'in_service' so it should be easy for us to simulate a fallen line\n"
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": 2,
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "import matplotlib.pyplot as plt\n",
21 | "import numpy as np\n",
22 | "import pandas as pd\n",
23 | "import pandapower as pp\n",
24 | "import pandapower.networks\n",
25 | "import pandapower.plotting\n",
26 | "import pp_network\n",
27 | "from pp_network import *"
28 | ]
29 | },
30 | {
31 | "cell_type": "markdown",
32 | "metadata": {},
33 | "source": [
34 | "## Initialize network\n",
35 | "\n",
36 | "Options to initialize: 'case5','case9','case14','rural_1','rural_2','village_1','village_2','suburb_1','iceland',etc... Basically pick anything from https://pandapower.readthedocs.io/en/v1.6.0/networks.html"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": 4,
42 | "metadata": {},
43 | "outputs": [],
44 | "source": [
45 | "network_model = NetModel(network_name='rural_1', zero_out_gen_shunt_storage=True)"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": 5,
51 | "metadata": {},
52 | "outputs": [
53 | {
54 | "name": "stdout",
55 | "output_type": "stream",
56 | "text": [
57 | "Number of load buses: 14\nWhich buses are they on: [ 3 8 9 10 11 19 20 21 22 23 24 25 7 13]\n"
58 | ]
59 | }
60 | ],
61 | "source": [
62 | "print('Number of load buses: ',network_model.net.load.shape[0])\n",
63 | "print('Which buses are they on: ',network_model.net.load.bus.values)"
64 | ]
65 | },
66 | {
67 | "cell_type": "code",
68 | "execution_count": 6,
69 | "metadata": {},
70 | "outputs": [
71 | {
72 | "data": {
73 | "text/html": [
74 | "
\n",
75 | "\n",
88 | "
\n",
89 | " \n",
90 | " \n",
91 | " | \n",
92 | " name | \n",
93 | " bus | \n",
94 | " p_kw | \n",
95 | " q_kvar | \n",
96 | " sn_kva | \n",
97 | " scaling | \n",
98 | " in_service | \n",
99 | " type | \n",
100 | "
\n",
101 | " \n",
102 | " \n",
103 | " \n",
104 | " 0 | \n",
105 | " None | \n",
106 | " 8 | \n",
107 | " 0 | \n",
108 | " 0 | \n",
109 | " NaN | \n",
110 | " 1.0 | \n",
111 | " True | \n",
112 | " None | \n",
113 | "
\n",
114 | " \n",
115 | " 1 | \n",
116 | " None | \n",
117 | " 9 | \n",
118 | " 0 | \n",
119 | " 0 | \n",
120 | " NaN | \n",
121 | " 1.0 | \n",
122 | " True | \n",
123 | " None | \n",
124 | "
\n",
125 | " \n",
126 | " 2 | \n",
127 | " None | \n",
128 | " 11 | \n",
129 | " 0 | \n",
130 | " 0 | \n",
131 | " NaN | \n",
132 | " 1.0 | \n",
133 | " True | \n",
134 | " None | \n",
135 | "
\n",
136 | " \n",
137 | " 3 | \n",
138 | " None | \n",
139 | " 22 | \n",
140 | " 0 | \n",
141 | " 0 | \n",
142 | " NaN | \n",
143 | " 1.0 | \n",
144 | " True | \n",
145 | " None | \n",
146 | "
\n",
147 | " \n",
148 | " 4 | \n",
149 | " None | \n",
150 | " 23 | \n",
151 | " 0 | \n",
152 | " 0 | \n",
153 | " NaN | \n",
154 | " 1.0 | \n",
155 | " True | \n",
156 | " None | \n",
157 | "
\n",
158 | " \n",
159 | "
\n",
160 | "
"
161 | ],
162 | "text/plain": [
163 | "\n",
164 | "\n",
177 | "
\n",
178 | " \n",
179 | " \n",
180 | " | \n",
181 | " name | \n",
182 | " bus | \n",
183 | " p_kw | \n",
184 | " q_kvar | \n",
185 | " sn_kva | \n",
186 | " scaling | \n",
187 | " in_service | \n",
188 | " type | \n",
189 | "
\n",
190 | " \n",
191 | " \n",
192 | " \n",
193 | " 0 | \n",
194 | " None | \n",
195 | " 8 | \n",
196 | " 0 | \n",
197 | " 0 | \n",
198 | " NaN | \n",
199 | " 1.0 | \n",
200 | " True | \n",
201 | " None | \n",
202 | "
\n",
203 | " \n",
204 | " 1 | \n",
205 | " None | \n",
206 | " 9 | \n",
207 | " 0 | \n",
208 | " 0 | \n",
209 | " NaN | \n",
210 | " 1.0 | \n",
211 | " True | \n",
212 | " None | \n",
213 | "
\n",
214 | " \n",
215 | " 2 | \n",
216 | " None | \n",
217 | " 11 | \n",
218 | " 0 | \n",
219 | " 0 | \n",
220 | " NaN | \n",
221 | " 1.0 | \n",
222 | " True | \n",
223 | " None | \n",
224 | "
\n",
225 | " \n",
226 | " 3 | \n",
227 | " None | \n",
228 | " 22 | \n",
229 | " 0 | \n",
230 | " 0 | \n",
231 | " NaN | \n",
232 | " 1.0 | \n",
233 | " True | \n",
234 | " None | \n",
235 | "
\n",
236 | " \n",
237 | " 4 | \n",
238 | " None | \n",
239 | " 23 | \n",
240 | " 0 | \n",
241 | " 0 | \n",
242 | " NaN | \n",
243 | " 1.0 | \n",
244 | " True | \n",
245 | " None | \n",
246 | "
\n",
247 | " \n",
248 | "
\n",
249 | "
"
250 | ]
251 | },
252 | "execution_count": 6,
253 | "metadata": {},
254 | "output_type": "execute_result"
255 | }
256 | ],
257 | "source": [
258 | "network_model.net.sgen"
259 | ]
260 | },
261 | {
262 | "cell_type": "markdown",
263 | "metadata": {},
264 | "source": [
265 | "### Look at powerflows to begin with: "
266 | ]
267 | },
268 | {
269 | "cell_type": "code",
270 | "execution_count": 7,
271 | "metadata": {},
272 | "outputs": [
273 | {
274 | "name": "stderr",
275 | "output_type": "stream",
276 | "text": [
277 | "numba cannot be imported and numba functions are disabled.\nProbably the execution is slow.\nPlease install numba to gain a massive speedup.\n(or if you prefer slow execution, set the flag numba=False to avoid this warning!)\n\n"
278 | ]
279 | }
280 | ],
281 | "source": [
282 | "network_model.run_powerflow()"
283 | ]
284 | },
285 | {
286 | "cell_type": "code",
287 | "execution_count": 8,
288 | "metadata": {
289 | "scrolled": true
290 | },
291 | "outputs": [
292 | {
293 | "data": {
294 | "text/html": [
295 | "\n",
296 | "\n",
309 | "
\n",
310 | " \n",
311 | " \n",
312 | " | \n",
313 | " vm_pu | \n",
314 | " va_degree | \n",
315 | " p_kw | \n",
316 | " q_kvar | \n",
317 | "
\n",
318 | " \n",
319 | " \n",
320 | " \n",
321 | " 0 | \n",
322 | " 1.000000 | \n",
323 | " 0.000000 | \n",
324 | " -78.80322 | \n",
325 | " -1.780376 | \n",
326 | "
\n",
327 | " \n",
328 | " 1 | \n",
329 | " 0.995039 | \n",
330 | " -1.090849 | \n",
331 | " 0.00000 | \n",
332 | " 0.000000 | \n",
333 | "
\n",
334 | " \n",
335 | " 2 | \n",
336 | " 0.993303 | \n",
337 | " -1.129337 | \n",
338 | " 0.00000 | \n",
339 | " 0.000000 | \n",
340 | "
\n",
341 | " \n",
342 | " 3 | \n",
343 | " 0.992705 | \n",
344 | " -1.133796 | \n",
345 | " 5.10000 | \n",
346 | " 0.000000 | \n",
347 | "
\n",
348 | " \n",
349 | " 4 | \n",
350 | " 0.990056 | \n",
351 | " -1.200407 | \n",
352 | " 0.00000 | \n",
353 | " 0.000000 | \n",
354 | "
\n",
355 | " \n",
356 | " 5 | \n",
357 | " 0.985967 | \n",
358 | " -1.291037 | \n",
359 | " 0.00000 | \n",
360 | " 0.000000 | \n",
361 | "
\n",
362 | " \n",
363 | " 6 | \n",
364 | " 0.982775 | \n",
365 | " -1.362257 | \n",
366 | " 0.00000 | \n",
367 | " 0.000000 | \n",
368 | "
\n",
369 | " \n",
370 | " 7 | \n",
371 | " 0.980482 | \n",
372 | " -1.413681 | \n",
373 | " 7.90000 | \n",
374 | " 0.000000 | \n",
375 | "
\n",
376 | " \n",
377 | " 8 | \n",
378 | " 0.989456 | \n",
379 | " -1.204895 | \n",
380 | " 5.10000 | \n",
381 | " 0.000000 | \n",
382 | "
\n",
383 | " \n",
384 | " 9 | \n",
385 | " 0.985365 | \n",
386 | " -1.295563 | \n",
387 | " 5.10000 | \n",
388 | " 0.000000 | \n",
389 | "
\n",
390 | " \n",
391 | " 10 | \n",
392 | " 0.982171 | \n",
393 | " -1.366812 | \n",
394 | " 5.10000 | \n",
395 | " 0.000000 | \n",
396 | "
\n",
397 | " \n",
398 | " 11 | \n",
399 | " 0.979876 | \n",
400 | " -1.418257 | \n",
401 | " 5.10000 | \n",
402 | " 0.000000 | \n",
403 | "
\n",
404 | " \n",
405 | " 12 | \n",
406 | " 0.991120 | \n",
407 | " -1.177043 | \n",
408 | " 0.00000 | \n",
409 | " 0.000000 | \n",
410 | "
\n",
411 | " \n",
412 | " 13 | \n",
413 | " 0.987658 | \n",
414 | " -1.253721 | \n",
415 | " 7.90000 | \n",
416 | " 0.000000 | \n",
417 | "
\n",
418 | " \n",
419 | " 14 | \n",
420 | " 0.985361 | \n",
421 | " -1.304816 | \n",
422 | " 0.00000 | \n",
423 | " 0.000000 | \n",
424 | "
\n",
425 | " \n",
426 | " 15 | \n",
427 | " 0.983523 | \n",
428 | " -1.345863 | \n",
429 | " 0.00000 | \n",
430 | " 0.000000 | \n",
431 | "
\n",
432 | " \n",
433 | " 16 | \n",
434 | " 0.982144 | \n",
435 | " -1.376749 | \n",
436 | " 0.00000 | \n",
437 | " 0.000000 | \n",
438 | "
\n",
439 | " \n",
440 | " 17 | \n",
441 | " 0.981149 | \n",
442 | " -1.397388 | \n",
443 | " 0.00000 | \n",
444 | " 0.000000 | \n",
445 | "
\n",
446 | " \n",
447 | " 18 | \n",
448 | " 0.980651 | \n",
449 | " -1.407722 | \n",
450 | " 0.00000 | \n",
451 | " 0.000000 | \n",
452 | "
\n",
453 | " \n",
454 | " 19 | \n",
455 | " 0.990521 | \n",
456 | " -1.181522 | \n",
457 | " 5.10000 | \n",
458 | " 0.000000 | \n",
459 | "
\n",
460 | " \n",
461 | " 20 | \n",
462 | " 0.987057 | \n",
463 | " -1.258232 | \n",
464 | " 5.10000 | \n",
465 | " 0.000000 | \n",
466 | "
\n",
467 | " \n",
468 | " 21 | \n",
469 | " 0.984759 | \n",
470 | " -1.309347 | \n",
471 | " 5.10000 | \n",
472 | " 0.000000 | \n",
473 | "
\n",
474 | " \n",
475 | " 22 | \n",
476 | " 0.982919 | \n",
477 | " -1.350411 | \n",
478 | " 5.10000 | \n",
479 | " 0.000000 | \n",
480 | "
\n",
481 | " \n",
482 | " 23 | \n",
483 | " 0.981539 | \n",
484 | " -1.381310 | \n",
485 | " 5.10000 | \n",
486 | " 0.000000 | \n",
487 | "
\n",
488 | " \n",
489 | " 24 | \n",
490 | " 0.980543 | \n",
491 | " -1.401958 | \n",
492 | " 5.10000 | \n",
493 | " 0.000000 | \n",
494 | "
\n",
495 | " \n",
496 | " 25 | \n",
497 | " 0.980046 | \n",
498 | " -1.412297 | \n",
499 | " 5.10000 | \n",
500 | " 0.000000 | \n",
501 | "
\n",
502 | " \n",
503 | "
\n",
504 | "
"
505 | ],
506 | "text/plain": [
507 | "\n",
508 | "\n",
521 | "
\n",
522 | " \n",
523 | " \n",
524 | " | \n",
525 | " vm_pu | \n",
526 | " va_degree | \n",
527 | " p_kw | \n",
528 | " q_kvar | \n",
529 | "
\n",
530 | " \n",
531 | " \n",
532 | " \n",
533 | " 0 | \n",
534 | " 1.000000 | \n",
535 | " 0.000000 | \n",
536 | " -78.80322 | \n",
537 | " -1.780376 | \n",
538 | "
\n",
539 | " \n",
540 | " 1 | \n",
541 | " 0.995039 | \n",
542 | " -1.090849 | \n",
543 | " 0.00000 | \n",
544 | " 0.000000 | \n",
545 | "
\n",
546 | " \n",
547 | " 2 | \n",
548 | " 0.993303 | \n",
549 | " -1.129337 | \n",
550 | " 0.00000 | \n",
551 | " 0.000000 | \n",
552 | "
\n",
553 | " \n",
554 | " 3 | \n",
555 | " 0.992705 | \n",
556 | " -1.133796 | \n",
557 | " 5.10000 | \n",
558 | " 0.000000 | \n",
559 | "
\n",
560 | " \n",
561 | " 4 | \n",
562 | " 0.990056 | \n",
563 | " -1.200407 | \n",
564 | " 0.00000 | \n",
565 | " 0.000000 | \n",
566 | "
\n",
567 | " \n",
568 | " 5 | \n",
569 | " 0.985967 | \n",
570 | " -1.291037 | \n",
571 | " 0.00000 | \n",
572 | " 0.000000 | \n",
573 | "
\n",
574 | " \n",
575 | " 6 | \n",
576 | " 0.982775 | \n",
577 | " -1.362257 | \n",
578 | " 0.00000 | \n",
579 | " 0.000000 | \n",
580 | "
\n",
581 | " \n",
582 | " 7 | \n",
583 | " 0.980482 | \n",
584 | " -1.413681 | \n",
585 | " 7.90000 | \n",
586 | " 0.000000 | \n",
587 | "
\n",
588 | " \n",
589 | " 8 | \n",
590 | " 0.989456 | \n",
591 | " -1.204895 | \n",
592 | " 5.10000 | \n",
593 | " 0.000000 | \n",
594 | "
\n",
595 | " \n",
596 | " 9 | \n",
597 | " 0.985365 | \n",
598 | " -1.295563 | \n",
599 | " 5.10000 | \n",
600 | " 0.000000 | \n",
601 | "
\n",
602 | " \n",
603 | " 10 | \n",
604 | " 0.982171 | \n",
605 | " -1.366812 | \n",
606 | " 5.10000 | \n",
607 | " 0.000000 | \n",
608 | "
\n",
609 | " \n",
610 | " 11 | \n",
611 | " 0.979876 | \n",
612 | " -1.418257 | \n",
613 | " 5.10000 | \n",
614 | " 0.000000 | \n",
615 | "
\n",
616 | " \n",
617 | " 12 | \n",
618 | " 0.991120 | \n",
619 | " -1.177043 | \n",
620 | " 0.00000 | \n",
621 | " 0.000000 | \n",
622 | "
\n",
623 | " \n",
624 | " 13 | \n",
625 | " 0.987658 | \n",
626 | " -1.253721 | \n",
627 | " 7.90000 | \n",
628 | " 0.000000 | \n",
629 | "
\n",
630 | " \n",
631 | " 14 | \n",
632 | " 0.985361 | \n",
633 | " -1.304816 | \n",
634 | " 0.00000 | \n",
635 | " 0.000000 | \n",
636 | "
\n",
637 | " \n",
638 | " 15 | \n",
639 | " 0.983523 | \n",
640 | " -1.345863 | \n",
641 | " 0.00000 | \n",
642 | " 0.000000 | \n",
643 | "
\n",
644 | " \n",
645 | " 16 | \n",
646 | " 0.982144 | \n",
647 | " -1.376749 | \n",
648 | " 0.00000 | \n",
649 | " 0.000000 | \n",
650 | "
\n",
651 | " \n",
652 | " 17 | \n",
653 | " 0.981149 | \n",
654 | " -1.397388 | \n",
655 | " 0.00000 | \n",
656 | " 0.000000 | \n",
657 | "
\n",
658 | " \n",
659 | " 18 | \n",
660 | " 0.980651 | \n",
661 | " -1.407722 | \n",
662 | " 0.00000 | \n",
663 | " 0.000000 | \n",
664 | "
\n",
665 | " \n",
666 | " 19 | \n",
667 | " 0.990521 | \n",
668 | " -1.181522 | \n",
669 | " 5.10000 | \n",
670 | " 0.000000 | \n",
671 | "
\n",
672 | " \n",
673 | " 20 | \n",
674 | " 0.987057 | \n",
675 | " -1.258232 | \n",
676 | " 5.10000 | \n",
677 | " 0.000000 | \n",
678 | "
\n",
679 | " \n",
680 | " 21 | \n",
681 | " 0.984759 | \n",
682 | " -1.309347 | \n",
683 | " 5.10000 | \n",
684 | " 0.000000 | \n",
685 | "
\n",
686 | " \n",
687 | " 22 | \n",
688 | " 0.982919 | \n",
689 | " -1.350411 | \n",
690 | " 5.10000 | \n",
691 | " 0.000000 | \n",
692 | "
\n",
693 | " \n",
694 | " 23 | \n",
695 | " 0.981539 | \n",
696 | " -1.381310 | \n",
697 | " 5.10000 | \n",
698 | " 0.000000 | \n",
699 | "
\n",
700 | " \n",
701 | " 24 | \n",
702 | " 0.980543 | \n",
703 | " -1.401958 | \n",
704 | " 5.10000 | \n",
705 | " 0.000000 | \n",
706 | "
\n",
707 | " \n",
708 | " 25 | \n",
709 | " 0.980046 | \n",
710 | " -1.412297 | \n",
711 | " 5.10000 | \n",
712 | " 0.000000 | \n",
713 | "
\n",
714 | " \n",
715 | "
\n",
716 | "
"
717 | ]
718 | },
719 | "execution_count": 8,
720 | "metadata": {},
721 | "output_type": "execute_result"
722 | }
723 | ],
724 | "source": [
725 | "network_model.net.res_bus"
726 | ]
727 | },
728 | {
729 | "cell_type": "code",
730 | "execution_count": 9,
731 | "metadata": {
732 | "scrolled": true
733 | },
734 | "outputs": [
735 | {
736 | "data": {
737 | "text/html": [
738 | "\n",
739 | "\n",
752 | "
\n",
753 | " \n",
754 | " \n",
755 | " | \n",
756 | " p_from_kw | \n",
757 | " q_from_kvar | \n",
758 | " p_to_kw | \n",
759 | " q_to_kvar | \n",
760 | " pl_kw | \n",
761 | " ql_kvar | \n",
762 | " i_from_ka | \n",
763 | " i_to_ka | \n",
764 | " i_ka | \n",
765 | " loading_percent | \n",
766 | "
\n",
767 | " \n",
768 | " \n",
769 | " \n",
770 | " 0 | \n",
771 | " 5.111992 | \n",
772 | " 0.000155 | \n",
773 | " -5.103071 | \n",
774 | " -9.521132e-05 | \n",
775 | " 0.008921 | \n",
776 | " 0.000060 | \n",
777 | " 0.007415 | \n",
778 | " 0.007415 | \n",
779 | " 0.007415 | \n",
780 | " 2.746411 | \n",
781 | "
\n",
782 | " \n",
783 | " 1 | \n",
784 | " 5.103071 | \n",
785 | " 0.000095 | \n",
786 | " -5.100000 | \n",
787 | " -2.283533e-12 | \n",
788 | " 0.003071 | \n",
789 | " 0.000095 | \n",
790 | " 0.007415 | \n",
791 | " 0.007415 | \n",
792 | " 0.007415 | \n",
793 | " 5.222050 | \n",
794 | "
\n",
795 | " \n",
796 | " 2 | \n",
797 | " 28.641596 | \n",
798 | " 0.120211 | \n",
799 | " -28.498339 | \n",
800 | " -6.683071e-02 | \n",
801 | " 0.143257 | \n",
802 | " 0.053380 | \n",
803 | " 0.041547 | \n",
804 | " 0.041547 | \n",
805 | " 0.041547 | \n",
806 | " 15.387799 | \n",
807 | "
\n",
808 | " \n",
809 | " 3 | \n",
810 | " 23.395247 | \n",
811 | " 0.066731 | \n",
812 | " -23.298701 | \n",
813 | " -3.130114e-02 | \n",
814 | " 0.096546 | \n",
815 | " 0.035430 | \n",
816 | " 0.034107 | \n",
817 | " 0.034107 | \n",
818 | " 0.034107 | \n",
819 | " 12.632382 | \n",
820 | "
\n",
821 | " \n",
822 | " 4 | \n",
823 | " 18.195584 | \n",
824 | " 0.031196 | \n",
825 | " -18.136699 | \n",
826 | " -1.023829e-02 | \n",
827 | " 0.058885 | \n",
828 | " 0.020957 | \n",
829 | " 0.026637 | \n",
830 | " 0.026637 | \n",
831 | " 0.026637 | \n",
832 | " 9.865516 | \n",
833 | "
\n",
834 | " \n",
835 | " 5 | \n",
836 | " 13.033562 | \n",
837 | " 0.010128 | \n",
838 | " -13.003152 | \n",
839 | " -1.134231e-04 | \n",
840 | " 0.030410 | \n",
841 | " 0.010015 | \n",
842 | " 0.019142 | \n",
843 | " 0.019142 | \n",
844 | " 0.019142 | \n",
845 | " 7.089650 | \n",
846 | "
\n",
847 | " \n",
848 | " 6 | \n",
849 | " 5.103091 | \n",
850 | " 0.000100 | \n",
851 | " -5.100000 | \n",
852 | " -5.377196e-12 | \n",
853 | " 0.003091 | \n",
854 | " 0.000100 | \n",
855 | " 0.007440 | \n",
856 | " 0.007440 | \n",
857 | " 0.007440 | \n",
858 | " 5.239195 | \n",
859 | "
\n",
860 | " \n",
861 | " 7 | \n",
862 | " 5.103117 | \n",
863 | " 0.000106 | \n",
864 | " -5.100000 | \n",
865 | " -1.019192e-11 | \n",
866 | " 0.003117 | \n",
867 | " 0.000106 | \n",
868 | " 0.007471 | \n",
869 | " 0.007471 | \n",
870 | " 0.007471 | \n",
871 | " 5.260949 | \n",
872 | "
\n",
873 | " \n",
874 | " 8 | \n",
875 | " 5.103137 | \n",
876 | " 0.000110 | \n",
877 | " -5.100000 | \n",
878 | " -1.612224e-11 | \n",
879 | " 0.003137 | \n",
880 | " 0.000110 | \n",
881 | " 0.007495 | \n",
882 | " 0.007495 | \n",
883 | " 0.007495 | \n",
884 | " 5.278058 | \n",
885 | "
\n",
886 | " \n",
887 | " 9 | \n",
888 | " 5.103152 | \n",
889 | " 0.000113 | \n",
890 | " -5.100000 | \n",
891 | " -2.119764e-11 | \n",
892 | " 0.003152 | \n",
893 | " 0.000113 | \n",
894 | " 0.007512 | \n",
895 | " 0.007512 | \n",
896 | " 0.007512 | \n",
897 | " 5.290419 | \n",
898 | "
\n",
899 | " \n",
900 | " 10 | \n",
901 | " 44.062897 | \n",
902 | " 0.163912 | \n",
903 | " -43.889547 | \n",
904 | " -9.811871e-02 | \n",
905 | " 0.173350 | \n",
906 | " 0.065793 | \n",
907 | " 0.063917 | \n",
908 | " 0.063917 | \n",
909 | " 0.063917 | \n",
910 | " 23.672899 | \n",
911 | "
\n",
912 | " \n",
913 | " 11 | \n",
914 | " 38.786462 | \n",
915 | " 0.098020 | \n",
916 | " -38.651079 | \n",
917 | " -4.682349e-02 | \n",
918 | " 0.135383 | \n",
919 | " 0.051197 | \n",
920 | " 0.056485 | \n",
921 | " 0.056485 | \n",
922 | " 0.056485 | \n",
923 | " 20.920441 | \n",
924 | "
\n",
925 | " \n",
926 | " 12 | \n",
927 | " 25.647973 | \n",
928 | " 0.046720 | \n",
929 | " -25.588359 | \n",
930 | " -2.466014e-02 | \n",
931 | " 0.059614 | \n",
932 | " 0.022060 | \n",
933 | " 0.037482 | \n",
934 | " 0.037482 | \n",
935 | " 0.037482 | \n",
936 | " 13.882340 | \n",
937 | "
\n",
938 | " \n",
939 | " 13 | \n",
940 | " 20.485238 | \n",
941 | " 0.024554 | \n",
942 | " -20.447031 | \n",
943 | " -1.072323e-02 | \n",
944 | " 0.038207 | \n",
945 | " 0.013830 | \n",
946 | " 0.030007 | \n",
947 | " 0.030007 | \n",
948 | " 0.030007 | \n",
949 | " 11.113770 | \n",
950 | "
\n",
951 | " \n",
952 | " 14 | \n",
953 | " 15.343898 | \n",
954 | " 0.010614 | \n",
955 | " -15.322383 | \n",
956 | " -3.200644e-03 | \n",
957 | " 0.021516 | \n",
958 | " 0.007414 | \n",
959 | " 0.022518 | \n",
960 | " 0.022518 | \n",
961 | " 0.022518 | \n",
962 | " 8.340016 | \n",
963 | "
\n",
964 | " \n",
965 | " 15 | \n",
966 | " 10.219241 | \n",
967 | " 0.003090 | \n",
968 | " -10.208889 | \n",
969 | " -2.781422e-04 | \n",
970 | " 0.010353 | \n",
971 | " 0.002811 | \n",
972 | " 0.015018 | \n",
973 | " 0.015018 | \n",
974 | " 0.015018 | \n",
975 | " 6.205941 | \n",
976 | "
\n",
977 | " \n",
978 | " 16 | \n",
979 | " 5.105741 | \n",
980 | " 0.000166 | \n",
981 | " -5.103151 | \n",
982 | " -1.131805e-04 | \n",
983 | " 0.002590 | \n",
984 | " 0.000052 | \n",
985 | " 0.007511 | \n",
986 | " 0.007511 | \n",
987 | " 0.007511 | \n",
988 | " 3.103759 | \n",
989 | "
\n",
990 | " \n",
991 | " 17 | \n",
992 | " 5.103085 | \n",
993 | " 0.000098 | \n",
994 | " -5.100000 | \n",
995 | " -3.835731e-12 | \n",
996 | " 0.003085 | \n",
997 | " 0.000098 | \n",
998 | " 0.007432 | \n",
999 | " 0.007432 | \n",
1000 | " 0.007432 | \n",
1001 | " 5.233563 | \n",
1002 | "
\n",
1003 | " \n",
1004 | " 18 | \n",
1005 | " 5.103106 | \n",
1006 | " 0.000103 | \n",
1007 | " -5.100000 | \n",
1008 | " -7.074921e-12 | \n",
1009 | " 0.003106 | \n",
1010 | " 0.000103 | \n",
1011 | " 0.007458 | \n",
1012 | " 0.007458 | \n",
1013 | " 0.007458 | \n",
1014 | " 5.251930 | \n",
1015 | "
\n",
1016 | " \n",
1017 | " 19 | \n",
1018 | " 5.103121 | \n",
1019 | " 0.000106 | \n",
1020 | " -5.100000 | \n",
1021 | " -1.006943e-11 | \n",
1022 | " 0.003121 | \n",
1023 | " 0.000106 | \n",
1024 | " 0.007475 | \n",
1025 | " 0.007475 | \n",
1026 | " 0.007475 | \n",
1027 | " 5.264187 | \n",
1028 | "
\n",
1029 | " \n",
1030 | " 20 | \n",
1031 | " 5.103133 | \n",
1032 | " 0.000109 | \n",
1033 | " -5.100000 | \n",
1034 | " -1.314385e-11 | \n",
1035 | " 0.003133 | \n",
1036 | " 0.000109 | \n",
1037 | " 0.007489 | \n",
1038 | " 0.007489 | \n",
1039 | " 0.007489 | \n",
1040 | " 5.274039 | \n",
1041 | "
\n",
1042 | " \n",
1043 | " 21 | \n",
1044 | " 5.103142 | \n",
1045 | " 0.000111 | \n",
1046 | " -5.100000 | \n",
1047 | " -1.544812e-11 | \n",
1048 | " 0.003142 | \n",
1049 | " 0.000111 | \n",
1050 | " 0.007500 | \n",
1051 | " 0.007500 | \n",
1052 | " 0.007500 | \n",
1053 | " 5.281455 | \n",
1054 | "
\n",
1055 | " \n",
1056 | " 22 | \n",
1057 | " 5.103148 | \n",
1058 | " 0.000112 | \n",
1059 | " -5.100000 | \n",
1060 | " -1.815659e-11 | \n",
1061 | " 0.003148 | \n",
1062 | " 0.000112 | \n",
1063 | " 0.007507 | \n",
1064 | " 0.007507 | \n",
1065 | " 0.007507 | \n",
1066 | " 5.286818 | \n",
1067 | "
\n",
1068 | " \n",
1069 | " 23 | \n",
1070 | " 5.103151 | \n",
1071 | " 0.000113 | \n",
1072 | " -5.100000 | \n",
1073 | " -1.904379e-11 | \n",
1074 | " 0.003151 | \n",
1075 | " 0.000113 | \n",
1076 | " 0.007511 | \n",
1077 | " 0.007511 | \n",
1078 | " 0.007511 | \n",
1079 | " 5.289504 | \n",
1080 | "
\n",
1081 | " \n",
1082 | "
\n",
1083 | "
"
1084 | ],
1085 | "text/plain": [
1086 | "\n",
1087 | "\n",
1100 | "
\n",
1101 | " \n",
1102 | " \n",
1103 | " | \n",
1104 | " p_from_kw | \n",
1105 | " q_from_kvar | \n",
1106 | " p_to_kw | \n",
1107 | " q_to_kvar | \n",
1108 | " pl_kw | \n",
1109 | " ql_kvar | \n",
1110 | " i_from_ka | \n",
1111 | " i_to_ka | \n",
1112 | " i_ka | \n",
1113 | " loading_percent | \n",
1114 | "
\n",
1115 | " \n",
1116 | " \n",
1117 | " \n",
1118 | " 0 | \n",
1119 | " 5.111992 | \n",
1120 | " 0.000155 | \n",
1121 | " -5.103071 | \n",
1122 | " -9.521132e-05 | \n",
1123 | " 0.008921 | \n",
1124 | " 0.000060 | \n",
1125 | " 0.007415 | \n",
1126 | " 0.007415 | \n",
1127 | " 0.007415 | \n",
1128 | " 2.746411 | \n",
1129 | "
\n",
1130 | " \n",
1131 | " 1 | \n",
1132 | " 5.103071 | \n",
1133 | " 0.000095 | \n",
1134 | " -5.100000 | \n",
1135 | " -2.283533e-12 | \n",
1136 | " 0.003071 | \n",
1137 | " 0.000095 | \n",
1138 | " 0.007415 | \n",
1139 | " 0.007415 | \n",
1140 | " 0.007415 | \n",
1141 | " 5.222050 | \n",
1142 | "
\n",
1143 | " \n",
1144 | " 2 | \n",
1145 | " 28.641596 | \n",
1146 | " 0.120211 | \n",
1147 | " -28.498339 | \n",
1148 | " -6.683071e-02 | \n",
1149 | " 0.143257 | \n",
1150 | " 0.053380 | \n",
1151 | " 0.041547 | \n",
1152 | " 0.041547 | \n",
1153 | " 0.041547 | \n",
1154 | " 15.387799 | \n",
1155 | "
\n",
1156 | " \n",
1157 | " 3 | \n",
1158 | " 23.395247 | \n",
1159 | " 0.066731 | \n",
1160 | " -23.298701 | \n",
1161 | " -3.130114e-02 | \n",
1162 | " 0.096546 | \n",
1163 | " 0.035430 | \n",
1164 | " 0.034107 | \n",
1165 | " 0.034107 | \n",
1166 | " 0.034107 | \n",
1167 | " 12.632382 | \n",
1168 | "
\n",
1169 | " \n",
1170 | " 4 | \n",
1171 | " 18.195584 | \n",
1172 | " 0.031196 | \n",
1173 | " -18.136699 | \n",
1174 | " -1.023829e-02 | \n",
1175 | " 0.058885 | \n",
1176 | " 0.020957 | \n",
1177 | " 0.026637 | \n",
1178 | " 0.026637 | \n",
1179 | " 0.026637 | \n",
1180 | " 9.865516 | \n",
1181 | "
\n",
1182 | " \n",
1183 | " 5 | \n",
1184 | " 13.033562 | \n",
1185 | " 0.010128 | \n",
1186 | " -13.003152 | \n",
1187 | " -1.134231e-04 | \n",
1188 | " 0.030410 | \n",
1189 | " 0.010015 | \n",
1190 | " 0.019142 | \n",
1191 | " 0.019142 | \n",
1192 | " 0.019142 | \n",
1193 | " 7.089650 | \n",
1194 | "
\n",
1195 | " \n",
1196 | " 6 | \n",
1197 | " 5.103091 | \n",
1198 | " 0.000100 | \n",
1199 | " -5.100000 | \n",
1200 | " -5.377196e-12 | \n",
1201 | " 0.003091 | \n",
1202 | " 0.000100 | \n",
1203 | " 0.007440 | \n",
1204 | " 0.007440 | \n",
1205 | " 0.007440 | \n",
1206 | " 5.239195 | \n",
1207 | "
\n",
1208 | " \n",
1209 | " 7 | \n",
1210 | " 5.103117 | \n",
1211 | " 0.000106 | \n",
1212 | " -5.100000 | \n",
1213 | " -1.019192e-11 | \n",
1214 | " 0.003117 | \n",
1215 | " 0.000106 | \n",
1216 | " 0.007471 | \n",
1217 | " 0.007471 | \n",
1218 | " 0.007471 | \n",
1219 | " 5.260949 | \n",
1220 | "
\n",
1221 | " \n",
1222 | " 8 | \n",
1223 | " 5.103137 | \n",
1224 | " 0.000110 | \n",
1225 | " -5.100000 | \n",
1226 | " -1.612224e-11 | \n",
1227 | " 0.003137 | \n",
1228 | " 0.000110 | \n",
1229 | " 0.007495 | \n",
1230 | " 0.007495 | \n",
1231 | " 0.007495 | \n",
1232 | " 5.278058 | \n",
1233 | "
\n",
1234 | " \n",
1235 | " 9 | \n",
1236 | " 5.103152 | \n",
1237 | " 0.000113 | \n",
1238 | " -5.100000 | \n",
1239 | " -2.119764e-11 | \n",
1240 | " 0.003152 | \n",
1241 | " 0.000113 | \n",
1242 | " 0.007512 | \n",
1243 | " 0.007512 | \n",
1244 | " 0.007512 | \n",
1245 | " 5.290419 | \n",
1246 | "
\n",
1247 | " \n",
1248 | " 10 | \n",
1249 | " 44.062897 | \n",
1250 | " 0.163912 | \n",
1251 | " -43.889547 | \n",
1252 | " -9.811871e-02 | \n",
1253 | " 0.173350 | \n",
1254 | " 0.065793 | \n",
1255 | " 0.063917 | \n",
1256 | " 0.063917 | \n",
1257 | " 0.063917 | \n",
1258 | " 23.672899 | \n",
1259 | "
\n",
1260 | " \n",
1261 | " 11 | \n",
1262 | " 38.786462 | \n",
1263 | " 0.098020 | \n",
1264 | " -38.651079 | \n",
1265 | " -4.682349e-02 | \n",
1266 | " 0.135383 | \n",
1267 | " 0.051197 | \n",
1268 | " 0.056485 | \n",
1269 | " 0.056485 | \n",
1270 | " 0.056485 | \n",
1271 | " 20.920441 | \n",
1272 | "
\n",
1273 | " \n",
1274 | " 12 | \n",
1275 | " 25.647973 | \n",
1276 | " 0.046720 | \n",
1277 | " -25.588359 | \n",
1278 | " -2.466014e-02 | \n",
1279 | " 0.059614 | \n",
1280 | " 0.022060 | \n",
1281 | " 0.037482 | \n",
1282 | " 0.037482 | \n",
1283 | " 0.037482 | \n",
1284 | " 13.882340 | \n",
1285 | "
\n",
1286 | " \n",
1287 | " 13 | \n",
1288 | " 20.485238 | \n",
1289 | " 0.024554 | \n",
1290 | " -20.447031 | \n",
1291 | " -1.072323e-02 | \n",
1292 | " 0.038207 | \n",
1293 | " 0.013830 | \n",
1294 | " 0.030007 | \n",
1295 | " 0.030007 | \n",
1296 | " 0.030007 | \n",
1297 | " 11.113770 | \n",
1298 | "
\n",
1299 | " \n",
1300 | " 14 | \n",
1301 | " 15.343898 | \n",
1302 | " 0.010614 | \n",
1303 | " -15.322383 | \n",
1304 | " -3.200644e-03 | \n",
1305 | " 0.021516 | \n",
1306 | " 0.007414 | \n",
1307 | " 0.022518 | \n",
1308 | " 0.022518 | \n",
1309 | " 0.022518 | \n",
1310 | " 8.340016 | \n",
1311 | "
\n",
1312 | " \n",
1313 | " 15 | \n",
1314 | " 10.219241 | \n",
1315 | " 0.003090 | \n",
1316 | " -10.208889 | \n",
1317 | " -2.781422e-04 | \n",
1318 | " 0.010353 | \n",
1319 | " 0.002811 | \n",
1320 | " 0.015018 | \n",
1321 | " 0.015018 | \n",
1322 | " 0.015018 | \n",
1323 | " 6.205941 | \n",
1324 | "
\n",
1325 | " \n",
1326 | " 16 | \n",
1327 | " 5.105741 | \n",
1328 | " 0.000166 | \n",
1329 | " -5.103151 | \n",
1330 | " -1.131805e-04 | \n",
1331 | " 0.002590 | \n",
1332 | " 0.000052 | \n",
1333 | " 0.007511 | \n",
1334 | " 0.007511 | \n",
1335 | " 0.007511 | \n",
1336 | " 3.103759 | \n",
1337 | "
\n",
1338 | " \n",
1339 | " 17 | \n",
1340 | " 5.103085 | \n",
1341 | " 0.000098 | \n",
1342 | " -5.100000 | \n",
1343 | " -3.835731e-12 | \n",
1344 | " 0.003085 | \n",
1345 | " 0.000098 | \n",
1346 | " 0.007432 | \n",
1347 | " 0.007432 | \n",
1348 | " 0.007432 | \n",
1349 | " 5.233563 | \n",
1350 | "
\n",
1351 | " \n",
1352 | " 18 | \n",
1353 | " 5.103106 | \n",
1354 | " 0.000103 | \n",
1355 | " -5.100000 | \n",
1356 | " -7.074921e-12 | \n",
1357 | " 0.003106 | \n",
1358 | " 0.000103 | \n",
1359 | " 0.007458 | \n",
1360 | " 0.007458 | \n",
1361 | " 0.007458 | \n",
1362 | " 5.251930 | \n",
1363 | "
\n",
1364 | " \n",
1365 | " 19 | \n",
1366 | " 5.103121 | \n",
1367 | " 0.000106 | \n",
1368 | " -5.100000 | \n",
1369 | " -1.006943e-11 | \n",
1370 | " 0.003121 | \n",
1371 | " 0.000106 | \n",
1372 | " 0.007475 | \n",
1373 | " 0.007475 | \n",
1374 | " 0.007475 | \n",
1375 | " 5.264187 | \n",
1376 | "
\n",
1377 | " \n",
1378 | " 20 | \n",
1379 | " 5.103133 | \n",
1380 | " 0.000109 | \n",
1381 | " -5.100000 | \n",
1382 | " -1.314385e-11 | \n",
1383 | " 0.003133 | \n",
1384 | " 0.000109 | \n",
1385 | " 0.007489 | \n",
1386 | " 0.007489 | \n",
1387 | " 0.007489 | \n",
1388 | " 5.274039 | \n",
1389 | "
\n",
1390 | " \n",
1391 | " 21 | \n",
1392 | " 5.103142 | \n",
1393 | " 0.000111 | \n",
1394 | " -5.100000 | \n",
1395 | " -1.544812e-11 | \n",
1396 | " 0.003142 | \n",
1397 | " 0.000111 | \n",
1398 | " 0.007500 | \n",
1399 | " 0.007500 | \n",
1400 | " 0.007500 | \n",
1401 | " 5.281455 | \n",
1402 | "
\n",
1403 | " \n",
1404 | " 22 | \n",
1405 | " 5.103148 | \n",
1406 | " 0.000112 | \n",
1407 | " -5.100000 | \n",
1408 | " -1.815659e-11 | \n",
1409 | " 0.003148 | \n",
1410 | " 0.000112 | \n",
1411 | " 0.007507 | \n",
1412 | " 0.007507 | \n",
1413 | " 0.007507 | \n",
1414 | " 5.286818 | \n",
1415 | "
\n",
1416 | " \n",
1417 | " 23 | \n",
1418 | " 5.103151 | \n",
1419 | " 0.000113 | \n",
1420 | " -5.100000 | \n",
1421 | " -1.904379e-11 | \n",
1422 | " 0.003151 | \n",
1423 | " 0.000113 | \n",
1424 | " 0.007511 | \n",
1425 | " 0.007511 | \n",
1426 | " 0.007511 | \n",
1427 | " 5.289504 | \n",
1428 | "
\n",
1429 | " \n",
1430 | "
\n",
1431 | "
"
1432 | ]
1433 | },
1434 | "execution_count": 9,
1435 | "metadata": {},
1436 | "output_type": "execute_result"
1437 | }
1438 | ],
1439 | "source": [
1440 | "network_model.net.res_line"
1441 | ]
1442 | },
1443 | {
1444 | "cell_type": "code",
1445 | "execution_count": 10,
1446 | "metadata": {},
1447 | "outputs": [
1448 | {
1449 | "data": {
1450 | "text/html": [
1451 | "\n",
1452 | "\n",
1465 | "
\n",
1466 | " \n",
1467 | " \n",
1468 | " | \n",
1469 | " p_kw | \n",
1470 | " q_kvar | \n",
1471 | "
\n",
1472 | " \n",
1473 | " \n",
1474 | " \n",
1475 | " 0 | \n",
1476 | " 0.0 | \n",
1477 | " 0.0 | \n",
1478 | "
\n",
1479 | " \n",
1480 | " 1 | \n",
1481 | " 0.0 | \n",
1482 | " 0.0 | \n",
1483 | "
\n",
1484 | " \n",
1485 | " 2 | \n",
1486 | " 0.0 | \n",
1487 | " 0.0 | \n",
1488 | "
\n",
1489 | " \n",
1490 | " 3 | \n",
1491 | " 0.0 | \n",
1492 | " 0.0 | \n",
1493 | "
\n",
1494 | " \n",
1495 | " 4 | \n",
1496 | " 0.0 | \n",
1497 | " 0.0 | \n",
1498 | "
\n",
1499 | " \n",
1500 | "
\n",
1501 | "
"
1502 | ],
1503 | "text/plain": [
1504 | "\n",
1505 | "\n",
1518 | "
\n",
1519 | " \n",
1520 | " \n",
1521 | " | \n",
1522 | " p_kw | \n",
1523 | " q_kvar | \n",
1524 | "
\n",
1525 | " \n",
1526 | " \n",
1527 | " \n",
1528 | " 0 | \n",
1529 | " 0.0 | \n",
1530 | " 0.0 | \n",
1531 | "
\n",
1532 | " \n",
1533 | " 1 | \n",
1534 | " 0.0 | \n",
1535 | " 0.0 | \n",
1536 | "
\n",
1537 | " \n",
1538 | " 2 | \n",
1539 | " 0.0 | \n",
1540 | " 0.0 | \n",
1541 | "
\n",
1542 | " \n",
1543 | " 3 | \n",
1544 | " 0.0 | \n",
1545 | " 0.0 | \n",
1546 | "
\n",
1547 | " \n",
1548 | " 4 | \n",
1549 | " 0.0 | \n",
1550 | " 0.0 | \n",
1551 | "
\n",
1552 | " \n",
1553 | "
\n",
1554 | "
"
1555 | ]
1556 | },
1557 | "execution_count": 10,
1558 | "metadata": {},
1559 | "output_type": "execute_result"
1560 | }
1561 | ],
1562 | "source": [
1563 | "network_model.net.res_sgen # we set these to zero as an option in the instantiation"
1564 | ]
1565 | },
1566 | {
1567 | "cell_type": "markdown",
1568 | "metadata": {},
1569 | "source": [
1570 | "### Fake test data"
1571 | ]
1572 | },
1573 | {
1574 | "cell_type": "code",
1575 | "execution_count": 11,
1576 | "metadata": {},
1577 | "outputs": [],
1578 | "source": [
1579 | "num_times = 24\n",
1580 | "p_load_data = np.random.randn(network_model.net.load.shape[0],num_times)\n",
1581 | "q_load_data = np.random.randn(network_model.net.load.shape[0],num_times)"
1582 | ]
1583 | },
1584 | {
1585 | "cell_type": "markdown",
1586 | "metadata": {},
1587 | "source": [
1588 | "## Add generators"
1589 | ]
1590 | },
1591 | {
1592 | "cell_type": "code",
1593 | "execution_count": 12,
1594 | "metadata": {},
1595 | "outputs": [],
1596 | "source": [
1597 | "# Add static generators at bus 7 and at bus 10 and initialize their real power production as 1 kW\n",
1598 | "network_model.add_sgen(7,1.0) \n",
1599 | "network_model.add_sgen(10,1.0)"
1600 | ]
1601 | },
1602 | {
1603 | "cell_type": "code",
1604 | "execution_count": 13,
1605 | "metadata": {},
1606 | "outputs": [
1607 | {
1608 | "data": {
1609 | "text/html": [
1610 | "\n",
1611 | "\n",
1624 | "
\n",
1625 | " \n",
1626 | " \n",
1627 | " | \n",
1628 | " name | \n",
1629 | " bus | \n",
1630 | " p_kw | \n",
1631 | " q_kvar | \n",
1632 | " sn_kva | \n",
1633 | " scaling | \n",
1634 | " in_service | \n",
1635 | " type | \n",
1636 | "
\n",
1637 | " \n",
1638 | " \n",
1639 | " \n",
1640 | " 0 | \n",
1641 | " None | \n",
1642 | " 8 | \n",
1643 | " 0 | \n",
1644 | " 0 | \n",
1645 | " NaN | \n",
1646 | " 1.0 | \n",
1647 | " True | \n",
1648 | " None | \n",
1649 | "
\n",
1650 | " \n",
1651 | " 1 | \n",
1652 | " None | \n",
1653 | " 9 | \n",
1654 | " 0 | \n",
1655 | " 0 | \n",
1656 | " NaN | \n",
1657 | " 1.0 | \n",
1658 | " True | \n",
1659 | " None | \n",
1660 | "
\n",
1661 | " \n",
1662 | " 2 | \n",
1663 | " None | \n",
1664 | " 11 | \n",
1665 | " 0 | \n",
1666 | " 0 | \n",
1667 | " NaN | \n",
1668 | " 1.0 | \n",
1669 | " True | \n",
1670 | " None | \n",
1671 | "
\n",
1672 | " \n",
1673 | " 3 | \n",
1674 | " None | \n",
1675 | " 22 | \n",
1676 | " 0 | \n",
1677 | " 0 | \n",
1678 | " NaN | \n",
1679 | " 1.0 | \n",
1680 | " True | \n",
1681 | " None | \n",
1682 | "
\n",
1683 | " \n",
1684 | " 4 | \n",
1685 | " None | \n",
1686 | " 23 | \n",
1687 | " 0 | \n",
1688 | " 0 | \n",
1689 | " NaN | \n",
1690 | " 1.0 | \n",
1691 | " True | \n",
1692 | " None | \n",
1693 | "
\n",
1694 | " \n",
1695 | " 5 | \n",
1696 | " None | \n",
1697 | " 7 | \n",
1698 | " 1 | \n",
1699 | " 0 | \n",
1700 | " NaN | \n",
1701 | " 1.0 | \n",
1702 | " True | \n",
1703 | " None | \n",
1704 | "
\n",
1705 | " \n",
1706 | " 6 | \n",
1707 | " None | \n",
1708 | " 10 | \n",
1709 | " 1 | \n",
1710 | " 0 | \n",
1711 | " NaN | \n",
1712 | " 1.0 | \n",
1713 | " True | \n",
1714 | " None | \n",
1715 | "
\n",
1716 | " \n",
1717 | "
\n",
1718 | "
"
1719 | ],
1720 | "text/plain": [
1721 | "\n",
1722 | "\n",
1735 | "
\n",
1736 | " \n",
1737 | " \n",
1738 | " | \n",
1739 | " name | \n",
1740 | " bus | \n",
1741 | " p_kw | \n",
1742 | " q_kvar | \n",
1743 | " sn_kva | \n",
1744 | " scaling | \n",
1745 | " in_service | \n",
1746 | " type | \n",
1747 | "
\n",
1748 | " \n",
1749 | " \n",
1750 | " \n",
1751 | " 0 | \n",
1752 | " None | \n",
1753 | " 8 | \n",
1754 | " 0 | \n",
1755 | " 0 | \n",
1756 | " NaN | \n",
1757 | " 1.0 | \n",
1758 | " True | \n",
1759 | " None | \n",
1760 | "
\n",
1761 | " \n",
1762 | " 1 | \n",
1763 | " None | \n",
1764 | " 9 | \n",
1765 | " 0 | \n",
1766 | " 0 | \n",
1767 | " NaN | \n",
1768 | " 1.0 | \n",
1769 | " True | \n",
1770 | " None | \n",
1771 | "
\n",
1772 | " \n",
1773 | " 2 | \n",
1774 | " None | \n",
1775 | " 11 | \n",
1776 | " 0 | \n",
1777 | " 0 | \n",
1778 | " NaN | \n",
1779 | " 1.0 | \n",
1780 | " True | \n",
1781 | " None | \n",
1782 | "
\n",
1783 | " \n",
1784 | " 3 | \n",
1785 | " None | \n",
1786 | " 22 | \n",
1787 | " 0 | \n",
1788 | " 0 | \n",
1789 | " NaN | \n",
1790 | " 1.0 | \n",
1791 | " True | \n",
1792 | " None | \n",
1793 | "
\n",
1794 | " \n",
1795 | " 4 | \n",
1796 | " None | \n",
1797 | " 23 | \n",
1798 | " 0 | \n",
1799 | " 0 | \n",
1800 | " NaN | \n",
1801 | " 1.0 | \n",
1802 | " True | \n",
1803 | " None | \n",
1804 | "
\n",
1805 | " \n",
1806 | " 5 | \n",
1807 | " None | \n",
1808 | " 7 | \n",
1809 | " 1 | \n",
1810 | " 0 | \n",
1811 | " NaN | \n",
1812 | " 1.0 | \n",
1813 | " True | \n",
1814 | " None | \n",
1815 | "
\n",
1816 | " \n",
1817 | " 6 | \n",
1818 | " None | \n",
1819 | " 10 | \n",
1820 | " 1 | \n",
1821 | " 0 | \n",
1822 | " NaN | \n",
1823 | " 1.0 | \n",
1824 | " True | \n",
1825 | " None | \n",
1826 | "
\n",
1827 | " \n",
1828 | "
\n",
1829 | "
"
1830 | ]
1831 | },
1832 | "execution_count": 13,
1833 | "metadata": {},
1834 | "output_type": "execute_result"
1835 | }
1836 | ],
1837 | "source": [
1838 | "network_model.net.sgen"
1839 | ]
1840 | },
1841 | {
1842 | "cell_type": "markdown",
1843 | "metadata": {},
1844 | "source": [
1845 | "# Add battery"
1846 | ]
1847 | },
1848 | {
1849 | "cell_type": "code",
1850 | "execution_count": 14,
1851 | "metadata": {},
1852 | "outputs": [],
1853 | "source": [
1854 | "# Initialize at bus 3 with power flow 1.0 (charging), capacity 10 kWh, initially SOC 0 %\n",
1855 | "network_model.add_battery(3,1.0,10,0.0) "
1856 | ]
1857 | },
1858 | {
1859 | "cell_type": "code",
1860 | "execution_count": 15,
1861 | "metadata": {},
1862 | "outputs": [
1863 | {
1864 | "data": {
1865 | "text/plain": [
1866 | "1"
1867 | ]
1868 | },
1869 | "execution_count": 15,
1870 | "metadata": {},
1871 | "output_type": "execute_result"
1872 | }
1873 | ],
1874 | "source": [
1875 | "# How many batteries?\n",
1876 | "network_model.net.storage.shape[0]"
1877 | ]
1878 | },
1879 | {
1880 | "cell_type": "code",
1881 | "execution_count": 16,
1882 | "metadata": {},
1883 | "outputs": [
1884 | {
1885 | "data": {
1886 | "text/html": [
1887 | "\n",
1888 | "\n",
1901 | "
\n",
1902 | " \n",
1903 | " \n",
1904 | " | \n",
1905 | " name | \n",
1906 | " bus | \n",
1907 | " p_kw | \n",
1908 | " q_kvar | \n",
1909 | " sn_kva | \n",
1910 | " soc_percent | \n",
1911 | " min_e_kwh | \n",
1912 | " max_e_kwh | \n",
1913 | " scaling | \n",
1914 | " in_service | \n",
1915 | " type | \n",
1916 | "
\n",
1917 | " \n",
1918 | " \n",
1919 | " \n",
1920 | " 0 | \n",
1921 | " None | \n",
1922 | " 3 | \n",
1923 | " 1.0 | \n",
1924 | " 0.0 | \n",
1925 | " NaN | \n",
1926 | " 0.0 | \n",
1927 | " 0.0 | \n",
1928 | " 10.0 | \n",
1929 | " 1.0 | \n",
1930 | " True | \n",
1931 | " None | \n",
1932 | "
\n",
1933 | " \n",
1934 | "
\n",
1935 | "
"
1936 | ],
1937 | "text/plain": [
1938 | "\n",
1939 | "\n",
1952 | "
\n",
1953 | " \n",
1954 | " \n",
1955 | " | \n",
1956 | " name | \n",
1957 | " bus | \n",
1958 | " p_kw | \n",
1959 | " q_kvar | \n",
1960 | " sn_kva | \n",
1961 | " soc_percent | \n",
1962 | " min_e_kwh | \n",
1963 | " max_e_kwh | \n",
1964 | " scaling | \n",
1965 | " in_service | \n",
1966 | " type | \n",
1967 | "
\n",
1968 | " \n",
1969 | " \n",
1970 | " \n",
1971 | " 0 | \n",
1972 | " None | \n",
1973 | " 3 | \n",
1974 | " 1.0 | \n",
1975 | " 0.0 | \n",
1976 | " NaN | \n",
1977 | " 0.0 | \n",
1978 | " 0.0 | \n",
1979 | " 10.0 | \n",
1980 | " 1.0 | \n",
1981 | " True | \n",
1982 | " None | \n",
1983 | "
\n",
1984 | " \n",
1985 | "
\n",
1986 | "
"
1987 | ]
1988 | },
1989 | "execution_count": 16,
1990 | "metadata": {},
1991 | "output_type": "execute_result"
1992 | }
1993 | ],
1994 | "source": [
1995 | "network_model.net.storage"
1996 | ]
1997 | },
1998 | {
1999 | "cell_type": "markdown",
2000 | "metadata": {},
2001 | "source": [
2002 | "### Change the battery power"
2003 | ]
2004 | },
2005 | {
2006 | "cell_type": "code",
2007 | "execution_count": 17,
2008 | "metadata": {},
2009 | "outputs": [
2010 | {
2011 | "data": {
2012 | "text/html": [
2013 | "\n",
2014 | "\n",
2027 | "
\n",
2028 | " \n",
2029 | " \n",
2030 | " | \n",
2031 | " name | \n",
2032 | " bus | \n",
2033 | " p_kw | \n",
2034 | " q_kvar | \n",
2035 | " sn_kva | \n",
2036 | " soc_percent | \n",
2037 | " min_e_kwh | \n",
2038 | " max_e_kwh | \n",
2039 | " scaling | \n",
2040 | " in_service | \n",
2041 | " type | \n",
2042 | "
\n",
2043 | " \n",
2044 | " \n",
2045 | " \n",
2046 | " 0 | \n",
2047 | " None | \n",
2048 | " 3 | \n",
2049 | " 2.0 | \n",
2050 | " 0.0 | \n",
2051 | " NaN | \n",
2052 | " 0.2 | \n",
2053 | " 0.0 | \n",
2054 | " 10.0 | \n",
2055 | " 1.0 | \n",
2056 | " True | \n",
2057 | " None | \n",
2058 | "
\n",
2059 | " \n",
2060 | "
\n",
2061 | "
"
2062 | ],
2063 | "text/plain": [
2064 | "\n",
2065 | "\n",
2078 | "
\n",
2079 | " \n",
2080 | " \n",
2081 | " | \n",
2082 | " name | \n",
2083 | " bus | \n",
2084 | " p_kw | \n",
2085 | " q_kvar | \n",
2086 | " sn_kva | \n",
2087 | " soc_percent | \n",
2088 | " min_e_kwh | \n",
2089 | " max_e_kwh | \n",
2090 | " scaling | \n",
2091 | " in_service | \n",
2092 | " type | \n",
2093 | "
\n",
2094 | " \n",
2095 | " \n",
2096 | " \n",
2097 | " 0 | \n",
2098 | " None | \n",
2099 | " 3 | \n",
2100 | " 2.0 | \n",
2101 | " 0.0 | \n",
2102 | " NaN | \n",
2103 | " 0.2 | \n",
2104 | " 0.0 | \n",
2105 | " 10.0 | \n",
2106 | " 1.0 | \n",
2107 | " True | \n",
2108 | " None | \n",
2109 | "
\n",
2110 | " \n",
2111 | "
\n",
2112 | "
"
2113 | ]
2114 | },
2115 | "execution_count": 17,
2116 | "metadata": {},
2117 | "output_type": "execute_result"
2118 | }
2119 | ],
2120 | "source": [
2121 | "network_model.update_batteries(battery_powers=2*np.ones((network_model.net.storage.shape[0],)), dt=1)\n",
2122 | "network_model.net.storage"
2123 | ]
2124 | },
2125 | {
2126 | "cell_type": "markdown",
2127 | "metadata": {},
2128 | "source": [
2129 | "## Update loads and run a powerflow"
2130 | ]
2131 | },
2132 | {
2133 | "cell_type": "code",
2134 | "execution_count": 18,
2135 | "metadata": {},
2136 | "outputs": [],
2137 | "source": [
2138 | "network_model.update_loads(p_load_data[:,0],q_load_data[:,0])"
2139 | ]
2140 | },
2141 | {
2142 | "cell_type": "code",
2143 | "execution_count": 19,
2144 | "metadata": {
2145 | "scrolled": false
2146 | },
2147 | "outputs": [
2148 | {
2149 | "name": "stderr",
2150 | "output_type": "stream",
2151 | "text": [
2152 | "numba cannot be imported and numba functions are disabled.\nProbably the execution is slow.\nPlease install numba to gain a massive speedup.\n(or if you prefer slow execution, set the flag numba=False to avoid this warning!)\n\n"
2153 | ]
2154 | }
2155 | ],
2156 | "source": [
2157 | "network_model.run_powerflow()"
2158 | ]
2159 | },
2160 | {
2161 | "cell_type": "code",
2162 | "execution_count": 20,
2163 | "metadata": {},
2164 | "outputs": [
2165 | {
2166 | "data": {
2167 | "text/html": [
2168 | "\n",
2169 | "\n",
2182 | "
\n",
2183 | " \n",
2184 | " \n",
2185 | " | \n",
2186 | " p_from_kw | \n",
2187 | " q_from_kvar | \n",
2188 | " p_to_kw | \n",
2189 | " q_to_kvar | \n",
2190 | " pl_kw | \n",
2191 | " ql_kvar | \n",
2192 | " i_from_ka | \n",
2193 | " i_to_ka | \n",
2194 | " i_ka | \n",
2195 | " loading_percent | \n",
2196 | "
\n",
2197 | " \n",
2198 | " \n",
2199 | " \n",
2200 | " 0 | \n",
2201 | " 1.671724 | \n",
2202 | " -1.272237 | \n",
2203 | " -1.670228 | \n",
2204 | " 1.269417 | \n",
2205 | " 0.001496 | \n",
2206 | " -0.002820 | \n",
2207 | " 0.003038 | \n",
2208 | " 0.003035 | \n",
2209 | " 0.003038 | \n",
2210 | " 1.125305 | \n",
2211 | "
\n",
2212 | " \n",
2213 | " 1 | \n",
2214 | " 1.670228 | \n",
2215 | " -1.269417 | \n",
2216 | " -1.669713 | \n",
2217 | " 1.269179 | \n",
2218 | " 0.000515 | \n",
2219 | " -0.000238 | \n",
2220 | " 0.003035 | \n",
2221 | " 0.003035 | \n",
2222 | " 0.003035 | \n",
2223 | " 2.137570 | \n",
2224 | "
\n",
2225 | " \n",
2226 | " 2 | \n",
2227 | " 3.145229 | \n",
2228 | " 3.323651 | \n",
2229 | " -3.141593 | \n",
2230 | " -3.323989 | \n",
2231 | " 0.003636 | \n",
2232 | " -0.000338 | \n",
2233 | " 0.006618 | \n",
2234 | " 0.006620 | \n",
2235 | " 0.006620 | \n",
2236 | " 2.451827 | \n",
2237 | "
\n",
2238 | " \n",
2239 | " 3 | \n",
2240 | " 2.373383 | \n",
2241 | " 4.171482 | \n",
2242 | " -2.369377 | \n",
2243 | " -4.171675 | \n",
2244 | " 0.004006 | \n",
2245 | " -0.000193 | \n",
2246 | " 0.006947 | \n",
2247 | " 0.006949 | \n",
2248 | " 0.006949 | \n",
2249 | " 2.573639 | \n",
2250 | "
\n",
2251 | " \n",
2252 | " 4 | \n",
2253 | " 2.226374 | \n",
2254 | " 3.503736 | \n",
2255 | " -2.223372 | \n",
2256 | " -3.504314 | \n",
2257 | " 0.003001 | \n",
2258 | " -0.000577 | \n",
2259 | " 0.006013 | \n",
2260 | " 0.006015 | \n",
2261 | " 0.006015 | \n",
2262 | " 2.227702 | \n",
2263 | "
\n",
2264 | " \n",
2265 | " 5 | \n",
2266 | " 1.417947 | \n",
2267 | " 3.190509 | \n",
2268 | " -1.415821 | \n",
2269 | " -3.191421 | \n",
2270 | " 0.002126 | \n",
2271 | " -0.000912 | \n",
2272 | " 0.005060 | \n",
2273 | " 0.005062 | \n",
2274 | " 0.005062 | \n",
2275 | " 1.874961 | \n",
2276 | "
\n",
2277 | " \n",
2278 | " 6 | \n",
2279 | " 0.768210 | \n",
2280 | " -0.847493 | \n",
2281 | " -0.768057 | \n",
2282 | " 0.847208 | \n",
2283 | " 0.000153 | \n",
2284 | " -0.000285 | \n",
2285 | " 0.001656 | \n",
2286 | " 0.001655 | \n",
2287 | " 0.001656 | \n",
2288 | " 1.165920 | \n",
2289 | "
\n",
2290 | " \n",
2291 | " 7 | \n",
2292 | " 0.143003 | \n",
2293 | " 0.667939 | \n",
2294 | " -0.142948 | \n",
2295 | " -0.668236 | \n",
2296 | " 0.000055 | \n",
2297 | " -0.000297 | \n",
2298 | " 0.000989 | \n",
2299 | " 0.000990 | \n",
2300 | " 0.000990 | \n",
2301 | " 0.697041 | \n",
2302 | "
\n",
2303 | " \n",
2304 | " 8 | \n",
2305 | " 0.805425 | \n",
2306 | " 0.313805 | \n",
2307 | " -0.805338 | \n",
2308 | " -0.314097 | \n",
2309 | " 0.000088 | \n",
2310 | " -0.000292 | \n",
2311 | " 0.001253 | \n",
2312 | " 0.001253 | \n",
2313 | " 0.001253 | \n",
2314 | " 0.882347 | \n",
2315 | "
\n",
2316 | " \n",
2317 | " 9 | \n",
2318 | " -0.083678 | \n",
2319 | " 1.460222 | \n",
2320 | " 0.083929 | \n",
2321 | " -1.460493 | \n",
2322 | " 0.000251 | \n",
2323 | " -0.000271 | \n",
2324 | " 0.002121 | \n",
2325 | " 0.002121 | \n",
2326 | " 0.002121 | \n",
2327 | " 1.493797 | \n",
2328 | "
\n",
2329 | " \n",
2330 | " 10 | \n",
2331 | " 1.462515 | \n",
2332 | " 4.596197 | \n",
2333 | " -1.460450 | \n",
2334 | " -4.596291 | \n",
2335 | " 0.002065 | \n",
2336 | " -0.000094 | \n",
2337 | " 0.006976 | \n",
2338 | " 0.006977 | \n",
2339 | " 0.006977 | \n",
2340 | " 2.584099 | \n",
2341 | "
\n",
2342 | " \n",
2343 | " 11 | \n",
2344 | " 1.013926 | \n",
2345 | " 5.484886 | \n",
2346 | " -1.011162 | \n",
2347 | " -5.484711 | \n",
2348 | " 0.002763 | \n",
2349 | " 0.000175 | \n",
2350 | " 0.008069 | \n",
2351 | " 0.008071 | \n",
2352 | " 0.008071 | \n",
2353 | " 2.989149 | \n",
2354 | "
\n",
2355 | " \n",
2356 | " 12 | \n",
2357 | " 0.953250 | \n",
2358 | " 4.212345 | \n",
2359 | " -0.951592 | \n",
2360 | " -4.212594 | \n",
2361 | " 0.001658 | \n",
2362 | " -0.000250 | \n",
2363 | " 0.006250 | \n",
2364 | " 0.006251 | \n",
2365 | " 0.006251 | \n",
2366 | " 2.315218 | \n",
2367 | "
\n",
2368 | " \n",
2369 | " 13 | \n",
2370 | " 1.376985 | \n",
2371 | " 4.307779 | \n",
2372 | " -1.375167 | \n",
2373 | " -4.307967 | \n",
2374 | " 0.001819 | \n",
2375 | " -0.000187 | \n",
2376 | " 0.006546 | \n",
2377 | " 0.006547 | \n",
2378 | " 0.006547 | \n",
2379 | " 2.424908 | \n",
2380 | "
\n",
2381 | " \n",
2382 | " 14 | \n",
2383 | " 1.371745 | \n",
2384 | " 3.743916 | \n",
2385 | " -1.370331 | \n",
2386 | " -3.744258 | \n",
2387 | " 0.001414 | \n",
2388 | " -0.000342 | \n",
2389 | " 0.005773 | \n",
2390 | " 0.005774 | \n",
2391 | " 0.005774 | \n",
2392 | " 2.138564 | \n",
2393 | "
\n",
2394 | " \n",
2395 | " 15 | \n",
2396 | " 1.007038 | \n",
2397 | " 2.879960 | \n",
2398 | " -1.006142 | \n",
2399 | " -2.880538 | \n",
2400 | " 0.000896 | \n",
2401 | " -0.000578 | \n",
2402 | " 0.004418 | \n",
2403 | " 0.004420 | \n",
2404 | " 0.004420 | \n",
2405 | " 1.826269 | \n",
2406 | "
\n",
2407 | " \n",
2408 | " 16 | \n",
2409 | " 0.898548 | \n",
2410 | " 1.092111 | \n",
2411 | " -0.898355 | \n",
2412 | " -1.092939 | \n",
2413 | " 0.000193 | \n",
2414 | " -0.000827 | \n",
2415 | " 0.002048 | \n",
2416 | " 0.002049 | \n",
2417 | " 0.002049 | \n",
2418 | " 0.846900 | \n",
2419 | "
\n",
2420 | " \n",
2421 | " 17 | \n",
2422 | " 0.446524 | \n",
2423 | " -0.888595 | \n",
2424 | " -0.446408 | \n",
2425 | " 0.888305 | \n",
2426 | " 0.000116 | \n",
2427 | " -0.000290 | \n",
2428 | " 0.001439 | \n",
2429 | " 0.001438 | \n",
2430 | " 0.001439 | \n",
2431 | " 1.013178 | \n",
2432 | "
\n",
2433 | " \n",
2434 | " 18 | \n",
2435 | " -0.262505 | \n",
2436 | " 1.798970 | \n",
2437 | " 0.262892 | \n",
2438 | " -1.799224 | \n",
2439 | " 0.000387 | \n",
2440 | " -0.000255 | \n",
2441 | " 0.002631 | \n",
2442 | " 0.002631 | \n",
2443 | " 0.002631 | \n",
2444 | " 1.853029 | \n",
2445 | "
\n",
2446 | " \n",
2447 | " 19 | \n",
2448 | " -0.425393 | \n",
2449 | " -0.095185 | \n",
2450 | " 0.425415 | \n",
2451 | " 0.094883 | \n",
2452 | " 0.000022 | \n",
2453 | " -0.000302 | \n",
2454 | " 0.000631 | \n",
2455 | " 0.000631 | \n",
2456 | " 0.000631 | \n",
2457 | " 0.444334 | \n",
2458 | "
\n",
2459 | " \n",
2460 | " 20 | \n",
2461 | " 0.003422 | \n",
2462 | " 0.564051 | \n",
2463 | " -0.003385 | \n",
2464 | " -0.564350 | \n",
2465 | " 0.000037 | \n",
2466 | " -0.000299 | \n",
2467 | " 0.000817 | \n",
2468 | " 0.000817 | \n",
2469 | " 0.000817 | \n",
2470 | " 0.575424 | \n",
2471 | "
\n",
2472 | " \n",
2473 | " 21 | \n",
2474 | " 0.363292 | \n",
2475 | " 0.864298 | \n",
2476 | " -0.363189 | \n",
2477 | " -0.864589 | \n",
2478 | " 0.000103 | \n",
2479 | " -0.000291 | \n",
2480 | " 0.001358 | \n",
2481 | " 0.001358 | \n",
2482 | " 0.001358 | \n",
2483 | " 0.956442 | \n",
2484 | "
\n",
2485 | " \n",
2486 | " 22 | \n",
2487 | " 0.107594 | \n",
2488 | " 1.788426 | \n",
2489 | " -0.107218 | \n",
2490 | " -1.788682 | \n",
2491 | " 0.000376 | \n",
2492 | " -0.000255 | \n",
2493 | " 0.002595 | \n",
2494 | " 0.002596 | \n",
2495 | " 0.002596 | \n",
2496 | " 1.827891 | \n",
2497 | "
\n",
2498 | " \n",
2499 | " 23 | \n",
2500 | " 0.898355 | \n",
2501 | " 1.092939 | \n",
2502 | " -0.898121 | \n",
2503 | " -1.093212 | \n",
2504 | " 0.000235 | \n",
2505 | " -0.000274 | \n",
2506 | " 0.002049 | \n",
2507 | " 0.002050 | \n",
2508 | " 0.002050 | \n",
2509 | " 1.443548 | \n",
2510 | "
\n",
2511 | " \n",
2512 | "
\n",
2513 | "
"
2514 | ],
2515 | "text/plain": [
2516 | "\n",
2517 | "\n",
2530 | "
\n",
2531 | " \n",
2532 | " \n",
2533 | " | \n",
2534 | " p_from_kw | \n",
2535 | " q_from_kvar | \n",
2536 | " p_to_kw | \n",
2537 | " q_to_kvar | \n",
2538 | " pl_kw | \n",
2539 | " ql_kvar | \n",
2540 | " i_from_ka | \n",
2541 | " i_to_ka | \n",
2542 | " i_ka | \n",
2543 | " loading_percent | \n",
2544 | "
\n",
2545 | " \n",
2546 | " \n",
2547 | " \n",
2548 | " 0 | \n",
2549 | " 1.671724 | \n",
2550 | " -1.272237 | \n",
2551 | " -1.670228 | \n",
2552 | " 1.269417 | \n",
2553 | " 0.001496 | \n",
2554 | " -0.002820 | \n",
2555 | " 0.003038 | \n",
2556 | " 0.003035 | \n",
2557 | " 0.003038 | \n",
2558 | " 1.125305 | \n",
2559 | "
\n",
2560 | " \n",
2561 | " 1 | \n",
2562 | " 1.670228 | \n",
2563 | " -1.269417 | \n",
2564 | " -1.669713 | \n",
2565 | " 1.269179 | \n",
2566 | " 0.000515 | \n",
2567 | " -0.000238 | \n",
2568 | " 0.003035 | \n",
2569 | " 0.003035 | \n",
2570 | " 0.003035 | \n",
2571 | " 2.137570 | \n",
2572 | "
\n",
2573 | " \n",
2574 | " 2 | \n",
2575 | " 3.145229 | \n",
2576 | " 3.323651 | \n",
2577 | " -3.141593 | \n",
2578 | " -3.323989 | \n",
2579 | " 0.003636 | \n",
2580 | " -0.000338 | \n",
2581 | " 0.006618 | \n",
2582 | " 0.006620 | \n",
2583 | " 0.006620 | \n",
2584 | " 2.451827 | \n",
2585 | "
\n",
2586 | " \n",
2587 | " 3 | \n",
2588 | " 2.373383 | \n",
2589 | " 4.171482 | \n",
2590 | " -2.369377 | \n",
2591 | " -4.171675 | \n",
2592 | " 0.004006 | \n",
2593 | " -0.000193 | \n",
2594 | " 0.006947 | \n",
2595 | " 0.006949 | \n",
2596 | " 0.006949 | \n",
2597 | " 2.573639 | \n",
2598 | "
\n",
2599 | " \n",
2600 | " 4 | \n",
2601 | " 2.226374 | \n",
2602 | " 3.503736 | \n",
2603 | " -2.223372 | \n",
2604 | " -3.504314 | \n",
2605 | " 0.003001 | \n",
2606 | " -0.000577 | \n",
2607 | " 0.006013 | \n",
2608 | " 0.006015 | \n",
2609 | " 0.006015 | \n",
2610 | " 2.227702 | \n",
2611 | "
\n",
2612 | " \n",
2613 | " 5 | \n",
2614 | " 1.417947 | \n",
2615 | " 3.190509 | \n",
2616 | " -1.415821 | \n",
2617 | " -3.191421 | \n",
2618 | " 0.002126 | \n",
2619 | " -0.000912 | \n",
2620 | " 0.005060 | \n",
2621 | " 0.005062 | \n",
2622 | " 0.005062 | \n",
2623 | " 1.874961 | \n",
2624 | "
\n",
2625 | " \n",
2626 | " 6 | \n",
2627 | " 0.768210 | \n",
2628 | " -0.847493 | \n",
2629 | " -0.768057 | \n",
2630 | " 0.847208 | \n",
2631 | " 0.000153 | \n",
2632 | " -0.000285 | \n",
2633 | " 0.001656 | \n",
2634 | " 0.001655 | \n",
2635 | " 0.001656 | \n",
2636 | " 1.165920 | \n",
2637 | "
\n",
2638 | " \n",
2639 | " 7 | \n",
2640 | " 0.143003 | \n",
2641 | " 0.667939 | \n",
2642 | " -0.142948 | \n",
2643 | " -0.668236 | \n",
2644 | " 0.000055 | \n",
2645 | " -0.000297 | \n",
2646 | " 0.000989 | \n",
2647 | " 0.000990 | \n",
2648 | " 0.000990 | \n",
2649 | " 0.697041 | \n",
2650 | "
\n",
2651 | " \n",
2652 | " 8 | \n",
2653 | " 0.805425 | \n",
2654 | " 0.313805 | \n",
2655 | " -0.805338 | \n",
2656 | " -0.314097 | \n",
2657 | " 0.000088 | \n",
2658 | " -0.000292 | \n",
2659 | " 0.001253 | \n",
2660 | " 0.001253 | \n",
2661 | " 0.001253 | \n",
2662 | " 0.882347 | \n",
2663 | "
\n",
2664 | " \n",
2665 | " 9 | \n",
2666 | " -0.083678 | \n",
2667 | " 1.460222 | \n",
2668 | " 0.083929 | \n",
2669 | " -1.460493 | \n",
2670 | " 0.000251 | \n",
2671 | " -0.000271 | \n",
2672 | " 0.002121 | \n",
2673 | " 0.002121 | \n",
2674 | " 0.002121 | \n",
2675 | " 1.493797 | \n",
2676 | "
\n",
2677 | " \n",
2678 | " 10 | \n",
2679 | " 1.462515 | \n",
2680 | " 4.596197 | \n",
2681 | " -1.460450 | \n",
2682 | " -4.596291 | \n",
2683 | " 0.002065 | \n",
2684 | " -0.000094 | \n",
2685 | " 0.006976 | \n",
2686 | " 0.006977 | \n",
2687 | " 0.006977 | \n",
2688 | " 2.584099 | \n",
2689 | "
\n",
2690 | " \n",
2691 | " 11 | \n",
2692 | " 1.013926 | \n",
2693 | " 5.484886 | \n",
2694 | " -1.011162 | \n",
2695 | " -5.484711 | \n",
2696 | " 0.002763 | \n",
2697 | " 0.000175 | \n",
2698 | " 0.008069 | \n",
2699 | " 0.008071 | \n",
2700 | " 0.008071 | \n",
2701 | " 2.989149 | \n",
2702 | "
\n",
2703 | " \n",
2704 | " 12 | \n",
2705 | " 0.953250 | \n",
2706 | " 4.212345 | \n",
2707 | " -0.951592 | \n",
2708 | " -4.212594 | \n",
2709 | " 0.001658 | \n",
2710 | " -0.000250 | \n",
2711 | " 0.006250 | \n",
2712 | " 0.006251 | \n",
2713 | " 0.006251 | \n",
2714 | " 2.315218 | \n",
2715 | "
\n",
2716 | " \n",
2717 | " 13 | \n",
2718 | " 1.376985 | \n",
2719 | " 4.307779 | \n",
2720 | " -1.375167 | \n",
2721 | " -4.307967 | \n",
2722 | " 0.001819 | \n",
2723 | " -0.000187 | \n",
2724 | " 0.006546 | \n",
2725 | " 0.006547 | \n",
2726 | " 0.006547 | \n",
2727 | " 2.424908 | \n",
2728 | "
\n",
2729 | " \n",
2730 | " 14 | \n",
2731 | " 1.371745 | \n",
2732 | " 3.743916 | \n",
2733 | " -1.370331 | \n",
2734 | " -3.744258 | \n",
2735 | " 0.001414 | \n",
2736 | " -0.000342 | \n",
2737 | " 0.005773 | \n",
2738 | " 0.005774 | \n",
2739 | " 0.005774 | \n",
2740 | " 2.138564 | \n",
2741 | "
\n",
2742 | " \n",
2743 | " 15 | \n",
2744 | " 1.007038 | \n",
2745 | " 2.879960 | \n",
2746 | " -1.006142 | \n",
2747 | " -2.880538 | \n",
2748 | " 0.000896 | \n",
2749 | " -0.000578 | \n",
2750 | " 0.004418 | \n",
2751 | " 0.004420 | \n",
2752 | " 0.004420 | \n",
2753 | " 1.826269 | \n",
2754 | "
\n",
2755 | " \n",
2756 | " 16 | \n",
2757 | " 0.898548 | \n",
2758 | " 1.092111 | \n",
2759 | " -0.898355 | \n",
2760 | " -1.092939 | \n",
2761 | " 0.000193 | \n",
2762 | " -0.000827 | \n",
2763 | " 0.002048 | \n",
2764 | " 0.002049 | \n",
2765 | " 0.002049 | \n",
2766 | " 0.846900 | \n",
2767 | "
\n",
2768 | " \n",
2769 | " 17 | \n",
2770 | " 0.446524 | \n",
2771 | " -0.888595 | \n",
2772 | " -0.446408 | \n",
2773 | " 0.888305 | \n",
2774 | " 0.000116 | \n",
2775 | " -0.000290 | \n",
2776 | " 0.001439 | \n",
2777 | " 0.001438 | \n",
2778 | " 0.001439 | \n",
2779 | " 1.013178 | \n",
2780 | "
\n",
2781 | " \n",
2782 | " 18 | \n",
2783 | " -0.262505 | \n",
2784 | " 1.798970 | \n",
2785 | " 0.262892 | \n",
2786 | " -1.799224 | \n",
2787 | " 0.000387 | \n",
2788 | " -0.000255 | \n",
2789 | " 0.002631 | \n",
2790 | " 0.002631 | \n",
2791 | " 0.002631 | \n",
2792 | " 1.853029 | \n",
2793 | "
\n",
2794 | " \n",
2795 | " 19 | \n",
2796 | " -0.425393 | \n",
2797 | " -0.095185 | \n",
2798 | " 0.425415 | \n",
2799 | " 0.094883 | \n",
2800 | " 0.000022 | \n",
2801 | " -0.000302 | \n",
2802 | " 0.000631 | \n",
2803 | " 0.000631 | \n",
2804 | " 0.000631 | \n",
2805 | " 0.444334 | \n",
2806 | "
\n",
2807 | " \n",
2808 | " 20 | \n",
2809 | " 0.003422 | \n",
2810 | " 0.564051 | \n",
2811 | " -0.003385 | \n",
2812 | " -0.564350 | \n",
2813 | " 0.000037 | \n",
2814 | " -0.000299 | \n",
2815 | " 0.000817 | \n",
2816 | " 0.000817 | \n",
2817 | " 0.000817 | \n",
2818 | " 0.575424 | \n",
2819 | "
\n",
2820 | " \n",
2821 | " 21 | \n",
2822 | " 0.363292 | \n",
2823 | " 0.864298 | \n",
2824 | " -0.363189 | \n",
2825 | " -0.864589 | \n",
2826 | " 0.000103 | \n",
2827 | " -0.000291 | \n",
2828 | " 0.001358 | \n",
2829 | " 0.001358 | \n",
2830 | " 0.001358 | \n",
2831 | " 0.956442 | \n",
2832 | "
\n",
2833 | " \n",
2834 | " 22 | \n",
2835 | " 0.107594 | \n",
2836 | " 1.788426 | \n",
2837 | " -0.107218 | \n",
2838 | " -1.788682 | \n",
2839 | " 0.000376 | \n",
2840 | " -0.000255 | \n",
2841 | " 0.002595 | \n",
2842 | " 0.002596 | \n",
2843 | " 0.002596 | \n",
2844 | " 1.827891 | \n",
2845 | "
\n",
2846 | " \n",
2847 | " 23 | \n",
2848 | " 0.898355 | \n",
2849 | " 1.092939 | \n",
2850 | " -0.898121 | \n",
2851 | " -1.093212 | \n",
2852 | " 0.000235 | \n",
2853 | " -0.000274 | \n",
2854 | " 0.002049 | \n",
2855 | " 0.002050 | \n",
2856 | " 0.002050 | \n",
2857 | " 1.443548 | \n",
2858 | "
\n",
2859 | " \n",
2860 | "
\n",
2861 | "
"
2862 | ]
2863 | },
2864 | "execution_count": 20,
2865 | "metadata": {},
2866 | "output_type": "execute_result"
2867 | }
2868 | ],
2869 | "source": [
2870 | "network_model.net.res_line"
2871 | ]
2872 | },
2873 | {
2874 | "cell_type": "markdown",
2875 | "metadata": {},
2876 | "source": [
2877 | "# Calculate the Reward"
2878 | ]
2879 | },
2880 | {
2881 | "cell_type": "code",
2882 | "execution_count": 21,
2883 | "metadata": {},
2884 | "outputs": [
2885 | {
2886 | "data": {
2887 | "text/plain": [
2888 | "0.0"
2889 | ]
2890 | },
2891 | "execution_count": 21,
2892 | "metadata": {},
2893 | "output_type": "execute_result"
2894 | }
2895 | ],
2896 | "source": [
2897 | "network_model.calculate_reward()\n",
2898 | "network_model.reward_val"
2899 | ]
2900 | },
2901 | {
2902 | "cell_type": "markdown",
2903 | "metadata": {},
2904 | "source": [
2905 | "# Key Documentation: \n",
2906 | "\n",
2907 | "https://pandapower.readthedocs.io/en/v1.6.0/elements.html\n"
2908 | ]
2909 | },
2910 | {
2911 | "cell_type": "code",
2912 | "execution_count": null,
2913 | "metadata": {},
2914 | "outputs": [],
2915 | "source": []
2916 | }
2917 | ],
2918 | "metadata": {
2919 | "kernelspec": {
2920 | "display_name": "Python 3",
2921 | "language": "python",
2922 | "name": "python3"
2923 | },
2924 | "language_info": {
2925 | "codemirror_mode": {
2926 | "name": "ipython",
2927 | "version": 3
2928 | },
2929 | "file_extension": ".py",
2930 | "mimetype": "text/x-python",
2931 | "name": "python",
2932 | "nbconvert_exporter": "python",
2933 | "pygments_lexer": "ipython3",
2934 | "version": "3.7.0"
2935 | }
2936 | },
2937 | "nbformat": 4,
2938 | "nbformat_minor": 2
2939 | }
2940 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 2-Clause License
2 |
3 | Copyright (c) 2019, Bennet Meyers
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | * Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | * Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
20 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Virtual Microgrid Segmentation
2 | Stanford CS234 Final Project, Winter 2019
3 |
4 | Instructor: Prof. Emma Brunskill
5 |
6 | Class website: http://web.stanford.edu/class/cs234/index.html
7 |
8 | Project team: Bennet Meyers and Siobhan Powell
9 |
10 | Contact the authors: bennetm or siobhan.powell at stanford dot edu
11 |
12 | ## Overview
13 | Recent work has shown that microgrids can increase both grid flexibility and grid resiliency to unanticipated outages
14 | caused by events such as cyber attacks or extreme weather. A subclass of microgrids, known as “virtual
15 | islands”, occur when sections of a grid operate in isolation without any powerflow between them and the larger grid,
16 | despite remaining physically connected. If a grid can partition into virtual islands in anticipation of an incoming
17 | resiliency event, customers in those islands will be less likely to experience outages.
18 |
19 | The goal of this project is to train a deep reinforcement learning (RL) agent to create and maintain as many small virtual
20 | islands as possible by operating a grid's storage resources. The agent is rewarded for separating nodes from the external
21 | grid connection and for splitting the graphs into as many segments as possible.
22 |
23 | As our environment is deterministic, we implement PG (policy gradient) and DDPG (deep deterministic policy gradient) algorithms to train the agent, and
24 | apply it to a small test network. We find the DDPG performs the best, and it can successfully maintain microgrids even when
25 | the loads are time varying and change between episodes.
26 |
27 | ## The DDPG algorithm
28 |
29 | The DDPG algorithm was introduced by Lillicrap et al in "Continuous control with deep reinforcement learning", available on
30 | arXiv at https://arxiv.org/abs/1509.02971.
31 |
32 | This algorithm builds on the DPG deterministic actor-critic approach proposed by Silver et al in "Deterministic
33 | Policy Gradient Algorithms", available at http://proceedings.mlr.press/v32/silver14.pdf. DDPG combines this approach with the
34 | successes of deep learning from DQN. It is model-free, off-policy, and has been shown to learn complex continuous control
35 | tasks in high dimensions quite well.
36 |
37 | Standard stochastic PG involves taking the expectation over the distribution of actions to calculate the gradient step.
38 | DDPG simply moves the policy in the direction of the gradient of Q, removing the need for an integral over the action space,
39 | making it much more efficient at learning in our environment.
40 |
41 | In DDPG the algorithm builds a critic network to estimate the state action value function, Q(s,a). An actor network is built to
42 | learn a behaviour from the critic estimation. The algorithm learns a deterministic policy but implements a stochastic behaviour
43 | policy by adding noise to the action choice to properly explore the solution space. The tuning and scheduling of this exploration
44 | noise term is crucial to the success of the algorithm.
45 |
46 | To help with convergence and stability, the algorithm is implemented with experience replay and with semi-stationary target
47 | networks. For more information on the theory and the algorithm applied, please refer to the papers.
48 |
49 | ## Structure of the Code
50 |
51 | There are two main sides to the code: the network and the agents.
52 |
53 | The network is generated using Pandapower (https://pandapower.readthedocs.io/en/v1.6.1/index.html).
54 |
55 | The NetModel class in `powerflow/pp_network.py` maintains the network
56 | object throughout the simulation. It controls how the agent can interact with the network
57 | and with the powerflow simulations with methods to step in time, calculate the reward, reset the network,
58 | report the state to the agent, and update the network devices. These devices include uncontrollable and controllable devices:
59 | loads and static generators are set by an uncontrollable unknown feed; the powers of storage and diesel generators are
60 | controlled by the agent.
61 |
62 | The initial network is generated by functions in `powerflow/network_generation.py` using configurations stored
63 | in configs. Each config defines all the parameters behind one test set up, including those of the network and some
64 | elements of the agent set up.
65 |
66 | The ActorNetwork and CriticNetwork objects are created in `agents/actor_network.py` and `agents/critic_network.py`, and the
67 | DDPG object uses them to learn the optimal policy. DDPG manages the training of the actor/critic networks
68 | and controls the interactions with the grid network model.
69 |
70 |
71 | #### Code organization
72 |
73 | The main folder contains scratch notebooks for testing, developing, and interacting with the environments.
74 |
75 | The `scripts` folder contains scripts to run the algorithms. For example, change the environment name or config name
76 | in `run_ddpg.py` and then run
77 |
78 |     python run_ddpg.py
79 |
80 | to start the simulation.
81 |
82 | The `virtual_microgrids` folder contains all the pieces of the simulation. To run you do not need to change anything in here,
83 | but to change parameters or change the algorithm you will need to work with these files.
84 | - The subfolder `agents` contains the classes
85 | to build the actor and critic network objects.
86 | - The `algorithms` subfolder contains the classes which run the PG and DDPG implementations.
87 | - The `configs` subfolder contains the configuration files for each test case and network. To create a new or altered test case,
88 | create a new config file in the style of `six_bus_mvp1.py`, for example.
89 | - The `powerflow` subfolder contains a class to manage the power network and functions to create the networks from the config files
90 | - The `utils` subfolder contains tools used throughout the other methods and functions, including the schedules used to generate the noise
91 |
92 |
93 | The `results` folder contains the outputs from running the algorithm. Running the command
94 | ```
95 | tensorboard --logdir [path to results folder]
96 | ```
97 | and then visiting
98 |
99 | localhost:6006
100 |
101 | in your browser will let you inspect the tensorflow setup and see plots of the results.
102 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | pandas
3 | jupyter
4 | pandapower
5 | packaging
6 | plotly
7 | numba
8 | tensorflow
9 | matplotlib
10 | python-igraph
--------------------------------------------------------------------------------
/scripts/run_ddpg.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append('..')
3 |
4 | from virtual_microgrids.configs import get_config
5 | from virtual_microgrids.powerflow import NetModel
6 | from virtual_microgrids.algorithms import DDPG
7 |
8 | if __name__ == '__main__':
9 | config = get_config('Six_Bus_POC', algorithm='DDPG')
10 | env = NetModel(config=config)
11 | # train model
12 | model = DDPG(env, config)
13 | model.run()
--------------------------------------------------------------------------------
/scripts/run_pg.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append('..')
3 |
4 | from virtual_microgrids.configs import get_config
5 | from virtual_microgrids.powerflow import NetModel
6 | from virtual_microgrids.algorithms import PG
7 |
8 | if __name__ == '__main__':
9 | config = get_config('Six_Bus_POC', algorithm='PG')
10 | env = NetModel(config=config)
11 | # train model
12 | model = PG(env, config)
13 | model.run()
--------------------------------------------------------------------------------
/virtual_microgrids/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bmeyers/VirtualMicrogridSegmentation/cd9e7ef1a2ccc438a855765e4c07904740ec12ee/virtual_microgrids/__init__.py
--------------------------------------------------------------------------------
/virtual_microgrids/agents/__init__.py:
--------------------------------------------------------------------------------
1 | from virtual_microgrids.agents.actor_network import ActorNetwork
2 | from virtual_microgrids.agents.critic_network import CriticNetwork
--------------------------------------------------------------------------------
/virtual_microgrids/agents/actor_network.py:
--------------------------------------------------------------------------------
1 | # Actor and Critic DNNs
2 | # Based on code published by Patrick Emami on his blog "Deep
3 | # Deterministic Policy Gradients in TensorFlow":
4 | # https://pemami4911.github.io/blog/2016/08/21/ddpg-rl.html
5 |
6 | import tensorflow as tf
7 |
8 | class ActorNetwork(object):
9 | """
10 | Input to the network is the state, output is the action
11 | under a deterministic policy.
12 |
13 | The output layer activation is a tanh, which is individually scaled and
14 | recentered for each input, to keep each input between p_min and p_max
15 | for the given device.
16 | """
17 |
18 | def __init__(self, sess, state_dim, action_dim, tau,
19 | n_layers, size, min_p, max_p, batch_size):
20 | self.sess = sess
21 | self.s_dim = state_dim
22 | self.a_dim = action_dim
23 | self.tau = tau
24 | self.n_layers = n_layers
25 | self.size = size
26 | self.min_p = min_p
27 | self.max_p = max_p
28 | self.batch_size = batch_size
29 |
30 | self.actor_lr_placeholder = tf.placeholder(shape=None, dtype=tf.float32)
31 |
32 | # Actor Network
33 | self.inputs, self.out, self.scaled_out, self.in_training = self.create_actor_network()
34 |
35 | self.network_params = tf.trainable_variables()
36 |
37 | # Target Network
38 | self.target_inputs, self.target_out, self.target_scaled_out, self.target_in_training = self.create_actor_network()
39 |
40 | self.target_network_params = tf.trainable_variables()[
41 | len(self.network_params):]
42 |
43 | # Op for periodically updating target network with online network
44 | # weights
45 | self.update_target_network_params = \
46 | [self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) +
47 | tf.multiply(self.target_network_params[i], 1. - self.tau))
48 | for i in range(len(self.target_network_params))]
49 |
50 | extra_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
51 | with tf.control_dependencies(extra_ops):
52 | # This gradient will be provided by the critic network
53 | self.action_gradient = tf.placeholder(tf.float32, [None, self.a_dim])
54 |
55 | # Combine the gradients here
56 | self.unnormalized_actor_gradients = tf.gradients(
57 | self.scaled_out, self.network_params, -self.action_gradient)
58 | self.actor_gradients = list(map(lambda x: tf.div(x, self.batch_size), self.unnormalized_actor_gradients))
59 |
60 | # Optimization Op
61 | self.optimize = tf.train.AdamOptimizer(self.actor_lr_placeholder). \
62 | apply_gradients(zip(self.actor_gradients, self.network_params))
63 |
64 | self.num_trainable_vars = len(
65 | self.network_params) + len(self.target_network_params)
66 |
67 | def create_actor_network(self):
68 |
69 | inputs = tf.placeholder(shape=[None, self.s_dim],
70 | dtype=tf.float32,
71 | name='states')
72 | out = tf.layers.flatten(inputs)
73 | in_training_mode = tf.placeholder(tf.bool)
74 | for i in range(self.n_layers):
75 | out = tf.keras.layers.Dense(units=self.size, activation=None)(out)
76 | #out = tf.keras.layers.BatchNormalization()(out, training=in_training_mode)
77 | out = tf.keras.activations.relu(out)
78 | # Final layer weights are init to Uniform[-3e-3, 3e-3]
79 | w_init = tf.initializers.random_uniform(minval=-0.003, maxval=0.003)
80 | out = tf.keras.layers.Dense(units=self.a_dim, activation=None,
81 | kernel_initializer=w_init)(out)
82 | #out = tf.keras.layers.BatchNormalization()(out, training=in_training_mode)
83 | out = tf.keras.activations.tanh(out)
84 |
85 | centers = (self.min_p + self.max_p) / 2.0
86 | scales = (self.max_p -self.min_p) / 2.0
87 | scaled_out = tf.multiply(out, scales) + centers
88 |
89 | return inputs, out, scaled_out, in_training_mode
90 |
91 | def train(self, inputs, a_gradient, learning_rate):
92 | self.sess.run(self.optimize, feed_dict={
93 | self.inputs: inputs,
94 | self.action_gradient: a_gradient,
95 | self.actor_lr_placeholder: learning_rate,
96 | self.in_training: True
97 | })
98 |
99 | def predict(self, inputs):
100 | return self.sess.run(self.scaled_out, feed_dict={
101 | self.inputs: inputs,
102 | self.in_training: False
103 | })
104 |
105 | def predict_target(self, inputs):
106 | return self.sess.run(self.target_scaled_out, feed_dict={
107 | self.target_inputs: inputs,
108 | self.target_in_training: False
109 | })
110 |
111 | def update_target_network(self):
112 | self.sess.run(self.update_target_network_params)
113 |
114 | def get_num_trainable_vars(self):
115 | return self.num_trainable_vars
116 |
--------------------------------------------------------------------------------
/virtual_microgrids/agents/critic_network.py:
--------------------------------------------------------------------------------
1 | # Actor and Critic DNNs
2 | # Based on code published by Patrick Emami on his blog "Deep
3 | # Deterministic Policy Gradients in TensorFlow":
4 | # https://pemami4911.github.io/blog/2016/08/21/ddpg-rl.html
5 |
6 | import tensorflow as tf
7 | import os
8 |
9 | class CriticNetwork(object):
10 | """
11 | Input to the network is the state and action, output is Q(s,a).
12 | The action must be obtained from the output of the Actor network.
13 |
14 | """
15 |
16 | def __init__(self, sess, state_dim, action_dim, tau, gamma,
17 | n_layers, size, num_actor_vars):
18 | self.sess = sess
19 | self.s_dim = state_dim
20 | self.a_dim = action_dim
21 | self.tau = tau
22 | self.gamma = gamma
23 | self.n_layers = n_layers
24 | self.size = size
25 |
26 | self.critic_lr_placeholder = tf.placeholder(shape=None, dtype=tf.float32)
27 |
28 | # Create the critic network
29 | self.inputs, self.action, self.out, self.in_training = self.create_critic_network()
30 |
31 | self.network_params = tf.trainable_variables()[num_actor_vars:]
32 |
33 | # Target Network
34 | self.target_inputs, self.target_action, self.target_out, self.target_in_training = self.create_critic_network()
35 |
36 | self.target_network_params = tf.trainable_variables()[(len(self.network_params) + num_actor_vars):]
37 |
38 | # Op for periodically updating target network with online network
39 | # weights with regularization
40 | self.update_target_network_params = \
41 | [self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) \
42 | + tf.multiply(self.target_network_params[i], 1. - self.tau))
43 | for i in range(len(self.target_network_params))]
44 |
45 | # Network target (y_i)
46 | self.predicted_q_value = tf.placeholder(tf.float32, [None, 1])
47 |
48 | extra_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
49 | with tf.control_dependencies(extra_ops):
50 | # Define loss and optimization Op
51 | self.loss = tf.losses.mean_squared_error(self.predicted_q_value, self.out)
52 | self.optimize = tf.train.AdamOptimizer(
53 | self.critic_lr_placeholder).minimize(self.loss)
54 |
55 | # Get the gradient of the net w.r.t. the action.
56 | # For each action in the minibatch (i.e., for each x in xs),
57 | # this will sum up the gradients of each critic output in the minibatch
58 | # w.r.t. that action. Each output is independent of all
59 | # actions except for one.
60 | self.action_grads = tf.gradients(self.out, self.action)
61 |
62 | def create_critic_network(self):
63 |
64 | inputs = tf.placeholder(shape=[None, self.s_dim],
65 | dtype=tf.float32,
66 | name='observation')
67 | action = tf.placeholder(shape=[None, self.a_dim],
68 | dtype=tf.float32,
69 | name='action')
70 | in_training_mode = tf.placeholder(tf.bool)
71 |
72 | out = tf.layers.flatten(inputs)
73 | out = tf.keras.layers.Dense(units=self.size, activation=None)(out)
74 | #out = tf.keras.layers.BatchNormalization()(out,training=in_training_mode)
75 | out = tf.keras.activations.relu(out)
76 |
77 | t1 = tf.keras.layers.Dense(units=self.size, activation=None)(out)
78 | t2 = tf.keras.layers.Dense(units=self.size, use_bias=False, activation=None)(action)
79 | out = tf.keras.layers.Add()([t1, t2])
80 | #out = tf.keras.layers.BatchNormalization()(out, training=in_training_mode)
81 | out = tf.keras.activations.relu(out)
82 | for i in range(max(self.n_layers - 2, 0)):
83 | out = tf.keras.layers.Dense(units=self.size, activation=None)(out)
84 | #out = tf.keras.layers.BatchNormalization()(out, training=in_training_mode)
85 | out = tf.keras.activations.relu(out)
86 |
87 | # Final layer weights are init to Uniform[-3e-3, 3e-3]
88 | w_init = tf.initializers.random_uniform(minval=-0.003, maxval=0.003) # Changed from 0.003 values
89 | out = tf.keras.layers.Dense(units=1, activation=None,
90 | kernel_initializer=w_init)(out)
91 | #out = tf.keras.layers.BatchNormalization()(out, training=in_training_mode)
92 | #out = tf.keras.layers.BatchNormalization()(out, training=in_training_mode)
93 |
94 | return inputs, action, out, in_training_mode
95 |
96 | def train(self, inputs, action, predicted_q_value, learning_rate):
97 | return self.sess.run([self.out, self.optimize], feed_dict={
98 | self.inputs: inputs,
99 | self.action: action,
100 | self.predicted_q_value: predicted_q_value,
101 | self.critic_lr_placeholder: learning_rate,
102 | self.in_training: True
103 | })
104 |
105 | def predict(self, inputs, action):
106 | return self.sess.run(self.out, feed_dict={
107 | self.inputs: inputs,
108 | self.action: action,
109 | self.in_training: False
110 | })
111 |
112 | def predict_target(self, inputs, action):
113 | return self.sess.run(self.target_out, feed_dict={
114 | self.target_inputs: inputs,
115 | self.target_action: action,
116 | self.target_in_training: False
117 | })
118 |
119 | def action_gradients(self, inputs, actions):
120 | return self.sess.run(self.action_grads, feed_dict={
121 | self.inputs: inputs,
122 | self.action: actions,
123 | self.in_training: True
124 | })
125 |
126 | def update_target_network(self):
127 | self.sess.run(self.update_target_network_params)
128 |
--------------------------------------------------------------------------------
/virtual_microgrids/algorithms/__init__.py:
--------------------------------------------------------------------------------
1 | from virtual_microgrids.algorithms.ddpg import DDPG
2 | from virtual_microgrids.algorithms.pg import PG
--------------------------------------------------------------------------------
/virtual_microgrids/algorithms/ddpg.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | """The base of this code was prepared for a homework by course staff for CS234 at Stanford, Winter 2019. We have since
3 | altered it to implement DDPG rather than traditional PG. Also inspired by code published by Patrick Emami on his blog
4 | "Deep Deterministic Policy Gradients in TensorFlow": https://pemami4911.github.io/blog/2016/08/21/ddpg-rl.html
5 | """
6 |
7 | import os
8 | import argparse
9 | import sys
10 | import logging
11 | import time
12 | import numpy as np
13 | import tensorflow as tf
14 | import scipy.signal
15 | import os
16 | import time
17 | import inspect
18 | import matplotlib.pyplot as plt
19 |
20 | sys.path.append('..')
21 | from virtual_microgrids.powerflow import NetModel
22 | from virtual_microgrids.utils.general import get_logger, Progbar, export_plot
23 | from virtual_microgrids.configs import get_config
24 | from virtual_microgrids.utils import ReplayBuffer, LinearSchedule, LogSchedule, OrnsteinUhlenbeckActionNoise
25 | from virtual_microgrids.agents import ActorNetwork, CriticNetwork
26 |
27 | parser = argparse.ArgumentParser()
28 | parser.add_argument('--env_name', required=True, type=str,
29 | choices=['Six_Bus_POC', 'rural_1', 'rural_2', 'village_1', 'village_2', 'suburb_1'])
30 |
31 |
32 | class DDPG(object):
33 | """
34 | Abstract Class for implementing a Policy Gradient Based Algorithm
35 | """
36 | def __init__(self, env, config, logger=None):
37 | """
38 | Initialize Policy Gradient Class
39 |
40 | Args:
41 | env: an OpenAI Gym environment
42 | config: class with hyperparameters
43 | logger: logger instance from the logging module
44 |
45 | Written by course staff.
46 | """
47 | # directory for training outputs
48 | if not os.path.exists(config.output_path):
49 | os.makedirs(config.output_path)
50 |
51 | # store hyperparameters
52 | self.config = config
53 | self.logger = logger
54 | if logger is None:
55 | self.logger = get_logger(config.log_path)
56 | self.env = env
57 |
58 | self.state_dim = self.env.observation_dim
59 | self.action_dim = self.env.action_dim
60 |
61 | # self.actor_lr = self.config.actor_learning_rate_start
62 | # self.critic_lr = self.config.critic_learning_rate_start
63 | self.gamma = self.config.gamma
64 | self.tau = self.config.tau
65 | self.batch_size = self.config.minibatch_size
66 |
67 | # self.actor_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(self.action_dim))
68 | self.actor_noise = lambda noise_level: np.random.normal(0, noise_level, size=self.action_dim) # changed from 0.2
69 |
70 | # action space limits
71 | min_p = []
72 | max_p = []
73 | if len(env.net.gen)>0:
74 | min_p.append(env.net.gen.min_p_kw)
75 | max_p.append(env.net.gen.max_p_kw)
76 | if len(env.net.storage)>0:
77 | min_p.append(env.net.storage.min_p_kw)
78 | max_p.append(env.net.storage.max_p_kw)
79 | self.min_p = np.array(min_p)
80 | self.max_p = np.array(max_p)
81 |
82 | # build model
83 | self.actor = None
84 | self.critic = None
85 |
86 | def initialize(self):
87 | """
88 | Assumes the graph has been constructed (have called self.build())
89 | Creates a tf Session and run initializer of variables
90 |
91 | Written by course staff.
92 | """
93 | # create tf session
94 | self.sess = tf.Session()
95 | # Initialize networks
96 | self.actor = ActorNetwork(self.sess, self.state_dim, self.action_dim, self.tau, self.config.n_layers,
97 | self.config.layer_size, self.min_p, self.max_p,
98 | self.config.minibatch_size)
99 | self.critic = CriticNetwork(self.sess, self.state_dim, self.action_dim, self.tau, self.gamma,
100 | self.config.n_layers, self.config.layer_size,
101 | self.actor.get_num_trainable_vars())
102 | # tensorboard stuff
103 | self.add_summary()
104 | # initialize all variables
105 | init = tf.global_variables_initializer()
106 | self.sess.run(init)
107 |
108 | def add_summary(self):
109 | """
110 | Tensorboard stuff. Written by course staff.
111 | """
112 | # extra placeholders to log stuff from python
113 | self.avg_reward_placeholder = tf.placeholder(tf.float32, shape=(), name="avg_reward")
114 | self.max_reward_placeholder = tf.placeholder(tf.float32, shape=(), name="max_reward")
115 | self.std_reward_placeholder = tf.placeholder(tf.float32, shape=(), name="std_reward")
116 | self.eval_reward_placeholder = tf.placeholder(tf.float32, shape=(), name="eval_reward")
117 | # new DDPG placeholders
118 | self.max_q_placeholder = tf.placeholder(tf.float32, shape=(), name='max_q')
119 |
120 | # extra summaries from python -> placeholders
121 | tf.summary.scalar("Avg_Reward", self.avg_reward_placeholder)
122 | tf.summary.scalar("Max_Reward", self.max_reward_placeholder)
123 | tf.summary.scalar("Std_Reward", self.std_reward_placeholder)
124 | tf.summary.scalar("Eval_Reward", self.eval_reward_placeholder)
125 | # new DDPG summary
126 | tf.summary.scalar("Max_Q_Value", self.max_q_placeholder)
127 |
128 | # logging
129 | self.merged = tf.summary.merge_all()
130 | self.file_writer = tf.summary.FileWriter(self.config.output_path,self.sess.graph)
131 |
132 | def init_averages(self):
133 | """
134 | Defines extra attributes for tensorboard. Written by course staff.
135 | """
136 | self.avg_reward = 0.
137 | self.max_reward = 0.
138 | self.std_reward = 0.
139 | self.eval_reward = 0.
140 | self.avg_max_q = 0.
141 |
142 | def update_averages(self, rewards, scores_eval, avg_max_q):
143 | """
144 | Update the averages. Written by course staff.
145 |
146 | Args:
147 | rewards: deque
148 | scores_eval: list
149 | """
150 | self.avg_reward = np.mean(rewards)
151 | self.max_reward = np.max(rewards)
152 | self.std_reward = np.sqrt(np.var(rewards) / len(rewards))
153 | self.avg_max_q = np.mean(avg_max_q)
154 |
155 | if len(scores_eval) > 0:
156 | self.eval_reward = scores_eval[-1]
157 |
158 | def record_summary(self, t):
159 | """
160 | Add summary to tensorboard. Written by course staff.
161 | """
162 |
163 | fd = {
164 | self.avg_reward_placeholder: self.avg_reward,
165 | self.max_reward_placeholder: self.max_reward,
166 | self.std_reward_placeholder: self.std_reward,
167 | self.eval_reward_placeholder: self.eval_reward,
168 | self.max_q_placeholder: self.avg_max_q
169 | }
170 | summary = self.sess.run(self.merged, feed_dict=fd)
171 | # tensorboard stuff
172 | self.file_writer.add_summary(summary, t)
173 |
174 | def train(self):
175 | """
176 | Performs training.
177 | """
178 |
179 | actor_lr_schedule = LinearSchedule(self.config.actor_learning_rate_start, self.config.actor_learning_rate_end,
180 | self.config.reasonable_max_episodes*self.config.max_ep_steps)
181 | critic_lr_schedule = LinearSchedule(self.config.critic_learning_rate_start, self.config.critic_learning_rate_end,
182 | self.config.reasonable_max_episodes*self.config.max_ep_steps)
183 | noise_schedule = LogSchedule(0.5, 0.0001, self.config.reasonable_max_episodes*self.config.max_ep_steps)
184 |
185 | # noise_schedule = LinearSchedule(0.5, 0.01, self.config.reasonable_max_episodes*self.config.max_ep_steps)
186 |
187 | self.actor.update_target_network()
188 | self.critic.update_target_network()
189 | replay_buffer = ReplayBuffer(self.config.buffer_size)
190 | total_rewards = []
191 | scores_eval = []
192 | ave_max_q = []
193 |
194 | for i in range(self.config.max_episodes):
195 | s = self.env.reset()
196 | ep_reward = 0
197 | ep_ave_max_q = 0
198 | best_ep_reward = 0
199 |
200 | best_r = 0.0
201 | best_reward_logical = None
202 |
203 | soc_track = np.zeros((self.config.max_ep_steps, self.env.net.storage.shape[0]))
204 | p_track = np.zeros((self.config.max_ep_steps, self.env.net.storage.shape[0]))
205 | reward_track = np.zeros((self.config.max_ep_steps, 1))
206 |
207 | for j in range(self.config.max_ep_steps):
208 | a = self.actor.predict(s[None, :]) + self.actor_noise(noise_schedule.epsilon)
209 | s2, r, done, info = self.env.step(a[0])
210 | replay_buffer.add(np.reshape(s, (self.state_dim)),
211 | np.reshape(a, (self.action_dim)),
212 | r, done,
213 | np.reshape(s2, (self.state_dim)))
214 | # Keep adding experience to the memory until
215 | # there are at least minibatch size samples
216 | if replay_buffer.size() > self.config.minibatch_size:
217 | s_batch, a_batch, r_batch, t_batch, s2_batch = \
218 | replay_buffer.sample_batch(self.config.minibatch_size)
219 | # Calc targets
220 | target_q = self.critic.predict_target(
221 | s2_batch, self.actor.predict_target(s2_batch)
222 | )
223 | y_i = np.array(r_batch)
224 | y_i[~t_batch] = (r_batch +
225 | self.gamma * target_q.squeeze())[~t_batch]
226 | # Update critic given targets
227 | predicted_q_val, _ = self.critic.train(s_batch, a_batch, y_i[:, None], critic_lr_schedule.epsilon)
228 | ep_ave_max_q += np.max(predicted_q_val)
229 | # Update the actor policy using the sampled gradient
230 | a_outs = self.actor.predict(s_batch)
231 | grads = self.critic.action_gradients(s_batch, a_outs)
232 | self.actor.train(s_batch, grads[0], actor_lr_schedule.epsilon)
233 | # Update target networks
234 | self.actor.update_target_network()
235 | self.critic.update_target_network()
236 | actor_lr_schedule.update(i*self.config.max_ep_steps + j)
237 | critic_lr_schedule.update(i * self.config.max_ep_steps + j)
238 | noise_schedule.update(i * self.config.max_ep_steps + j)
239 | # Housekeeping
240 | if r > best_r:
241 | best_r = r
242 | c1 = np.abs(self.env.net.res_line.p_to_kw - self.env.net.res_line.pl_kw) < self.config.reward_epsilon
243 | c2 = np.abs(self.env.net.res_line.p_from_kw - self.env.net.res_line.pl_kw) < self.config.reward_epsilon
244 | best_reward_logical = np.logical_or(c1.values, c2.values)
245 |
246 | soc_track[j, :] = self.env.net.storage.soc_percent
247 | p_track[j, :] = self.env.net.storage.p_kw
248 | reward_track[j] = r
249 |
250 | s = s2
251 | ep_reward += r
252 | if done:
253 | if ep_reward > best_ep_reward:
254 | best_ep_reward = ep_reward
255 | total_rewards.append(ep_reward)
256 | ep_ave_max_q /= j
257 | ave_max_q.append(ep_ave_max_q)
258 | break
259 |
260 | # tf stuff
261 | if (i % self.config.summary_freq2 == 0):
262 | scores_eval.extend(total_rewards)
263 | self.update_averages(np.array(total_rewards), np.array(scores_eval), np.array(ave_max_q))
264 | self.record_summary(i)
265 |
266 | # compute reward statistics for this batch and log
267 | avg_reward = np.mean(total_rewards)
268 | sigma_reward = np.sqrt(np.var(total_rewards) / len(total_rewards))
269 | avg_q = np.mean(ave_max_q)
270 | s1 = "---------------------------------------------------------\n" \
271 | +"Average reward: {:04.2f} +/- {:04.2f} Average Max Q: {:.2f}"
272 | msg = s1.format(avg_reward, sigma_reward, avg_q)
273 | self.logger.info(msg)
274 | msg4 = "Best episode reward: {}".format(best_ep_reward)
275 | self.logger.info(msg4)
276 |
277 | msg2 = "Max single reward: "+str(best_r)
278 | msg3 = "Max reward happened on lines: "+str(best_reward_logical)
279 | end = "\n--------------------------------------------------------"
280 | self.logger.info(msg2)
281 | self.logger.info(msg3 + end)
282 |
283 | fig, ax = plt.subplots(nrows=3, sharex=True)
284 | xs = np.arange(self.config.max_ep_steps)
285 | for k_step in range(self.env.net.storage.shape[0]):
286 | ax[1].plot(xs, soc_track[:, k_step].ravel(), marker='.',
287 | label='soc_{}'.format(k_step + 1))
288 | ax[0].plot(xs, p_track[:, k_step].ravel(), marker='.',
289 | label='pset_{}'.format(k_step + 1))
290 | ax[0].legend()
291 | ax[1].legend()
292 | ax[2].stem(xs, reward_track, label='reward')
293 | ax[2].legend()
294 | ax[2].set_xlabel('time')
295 | ax[0].set_ylabel('Power (kW)')
296 | ax[1].set_ylabel('State of Charge')
297 | ax[2].set_ylabel('Reward Received')
298 | ax[0].set_title('Battery Behavior and Rewards')
299 | plt.tight_layout()
300 | plt.savefig(self.config.output_path + 'soc_plot_{}.png'.format(i))
301 | plt.close()
302 |
303 | total_rewards = []
304 | ave_max_q = []
305 | best_ep_reward = 0
306 |
307 | self.logger.info("- Training done.")
308 | export_plot(scores_eval, "Score", self.config.env_name, self.config.plot_output)
309 |
    def evaluate(self, env=None, num_episodes=1):
        """Evaluate the average return over ``num_episodes`` episodes.

        Written by course staff. Not used right now, all evaluation statistics
        are computed during training episodes.

        Args:
            env: environment to evaluate in; defaults to the training env.
            num_episodes (int): number of episodes to sample.

        Returns:
            float: the mean episode reward over the sampled episodes.
        """
        if env==None: env = self.env
        # NOTE(review): assumes self.sample_path returns exactly (paths, rewards).
        # DDPG's sample_path is not visible in this chunk -- confirm it exists and
        # returns a 2-tuple (PG's sample_path returns 7 values) before using this.
        paths, rewards = self.sample_path(env, num_episodes)
        avg_reward = np.mean(rewards)
        # Standard error of the mean, reported as the +/- spread in the log line.
        sigma_reward = np.sqrt(np.var(rewards) / len(rewards))
        msg = "Average reward: {:04.2f} +/- {:04.2f}".format(avg_reward, sigma_reward)
        self.logger.info(msg)
        return avg_reward
323 |
324 | def run(self):
325 | """
326 | Apply procedures of training for a PG. Written by course staff.
327 | """
328 | # initialize
329 | self.initialize()
330 | # model
331 | self.train()
332 |
if __name__ == '__main__':

    # Entry point: build the Six_Bus_MVP3 configuration, wrap it in a
    # pandapower network environment, and train a DDPG agent on it.
    #config = get_config('Six_Bus_POC', algorithm='DDPG')
    config = get_config('Six_Bus_MVP3', algorithm='DDPG')
    env = NetModel(config=config)
    # train model
    model = DDPG(env, config)
    model.run()
341 |
--------------------------------------------------------------------------------
/virtual_microgrids/algorithms/pg.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | """The base of this code was prepared for a homework by course staff for CS234 at Stanford, Winter 2019."""
3 |
4 | import argparse
5 | import numpy as np
6 | import tensorflow as tf
7 | import os
8 | import matplotlib.pyplot as plt
9 |
10 | from virtual_microgrids.powerflow import NetModel
11 | from virtual_microgrids.utils.general import get_logger, Progbar, export_plot
12 | from virtual_microgrids.configs import get_config
13 |
# Command-line interface for selecting the environment and the baseline flag.
# NOTE(review): the Six_Bus_MVP* configs are not listed in `choices`, and the
# __main__ block below currently hard-codes the config instead of calling
# parser.parse_args() -- this parser is effectively unused right now.
parser = argparse.ArgumentParser()
parser.add_argument('--env_name', required=True, type=str,
                    choices=['Six_Bus_POC', 'rural_1', 'rural_2', 'village_1', 'village_2', 'suburb_1'])
# --baseline / --no-baseline toggle the advantage baseline (default: on).
parser.add_argument('--baseline', dest='use_baseline', action='store_true')
parser.add_argument('--no-baseline', dest='use_baseline', action='store_false')
parser.set_defaults(use_baseline=True)
20 |
21 |
def build_mlp(mlp_input, output_size, scope, n_layers, size, in_training_mode,
              output_activation=None):
    """
    Build a feed forward network (multi-layer perceptron, or mlp)
    with 'n_layers' hidden layers, each of size 'size' units.
    Use tf.nn.relu nonlinearity between layers.
    Args:
        mlp_input: the input to the multi-layer perceptron
        output_size: the output layer size
        scope: the scope of the neural network
        n_layers: the number of hidden layers of the network
        size: the size of each layer
        in_training_mode: bool tensor; only consumed by the (currently
            disabled) batch-normalization layers
        output_activation: the activation of output layer
    Returns:
        The tensor output of the network
    """
    with tf.variable_scope(scope):
        out = tf.layers.flatten(mlp_input)
        for i in range(n_layers):
            out = tf.keras.layers.Dense(units=size, activation=None)(out)
            # Batch norm is disabled; re-enable with in_training_mode if needed:
            # out = tf.keras.layers.BatchNormalization()(out, training=in_training_mode)
            out = tf.keras.activations.relu(out)
        # Small uniform init keeps the initial outputs of the final layer near zero.
        w_init = tf.initializers.random_uniform(minval=-0.003, maxval=0.003)
        # Fix: use tf.keras.layers.Dense consistently (the original mixed the
        # deprecated tf.layers.Dense with tf.keras.layers.Dense in one network).
        out = tf.keras.layers.Dense(units=output_size, activation=output_activation,
                                    kernel_initializer=w_init)(out)

    return out
50 |
51 |
52 | class PG(object):
53 | """
54 | Abstract Class for implementing a Policy Gradient Based Algorithm
55 | """
56 | def __init__(self, env, config, logger=None):
57 | """
58 | Initialize Policy Gradient Class
59 |
60 | Args:
61 | env: an OpenAI Gym environment
62 | config: class with hyperparameters
63 | logger: logger instance from the logging module
64 |
65 | Written by course staff.
66 | """
67 | # directory for training outputs
68 | if not os.path.exists(config.output_path):
69 | os.makedirs(config.output_path)
70 |
71 | # store hyperparameters
72 | self.config = config
73 | self.logger = logger
74 | if logger is None:
75 | self.logger = get_logger(config.log_path)
76 | self.env = env
77 |
78 | self.observation_dim = self.env.observation_dim
79 | self.action_dim = self.env.action_dim
80 |
81 | self.lr = self.config.learning_rate
82 |
83 | # build model
84 | self.build()
85 |
86 | def add_placeholders_op(self):
87 | """
88 | Add placeholders for observation, action, and advantage:
89 | self.observation_placeholder, type: tf.float32
90 | self.action_placeholder, type: depends on the self.discrete
91 | self.advantage_placeholder, type: tf.float32
92 | """
93 | self.observation_placeholder = tf.placeholder(shape=[None, self.observation_dim],
94 | dtype=tf.float32,
95 | name='observation')
96 | self.action_placeholder = tf.placeholder(shape=[None, self.action_dim],
97 | dtype=tf.float32,
98 | name='action')
99 |
100 | # Define a placeholder for advantages
101 | self.advantage_placeholder = tf.placeholder(shape=[None],
102 | dtype=tf.float32,
103 | name='advantage')
104 | self.in_training_placeholder = tf.placeholder(tf.bool)
105 |
    def build_policy_network_op(self, scope = "policy_network"):
        """
        Build the policy network, construct the tensorflow operation to sample
        actions from the policy network outputs, and compute the log probabilities
        of the actions taken (for computing the loss later). These operations are
        stored in self.sampled_action and self.logprob.

        Args:
            scope: the scope of the neural network
        """
        # The MLP outputs the mean of a Gaussian policy over continuous actions.
        action_means = build_mlp(self.observation_placeholder, self.action_dim,
                                 scope, self.config.n_layers, self.config.layer_size,
                                 self.in_training_placeholder, output_activation=None)
        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
            # Learned, state-independent log standard deviation (one per action dim).
            log_std = tf.get_variable("log_std", [self.action_dim])
        # Reparameterized sample: mean + std * unit-Gaussian noise.
        self.sampled_action = action_means + tf.multiply(tf.exp(log_std), tf.random_normal(tf.shape(action_means)))
        # Diagonal multivariate normal used only to score the actions taken.
        mvn = tf.contrib.distributions.MultivariateNormalDiag(loc=action_means, scale_diag=tf.exp(log_std))
        # Log-density of the executed actions; consumed by add_loss_op().
        self.logprob = mvn.log_prob(self.action_placeholder)
124 |
125 | def add_loss_op(self):
126 | """
127 | Compute the loss, averaged for a given batch.
128 |
129 | Recall the update for REINFORCE with advantage:
130 | θ = θ + α ∇_θ log π_θ(a_t|s_t) A_t
131 | """
132 |
133 | self.loss = - tf.reduce_mean(tf.multiply(self.logprob, self.advantage_placeholder))
134 |
135 | def add_optimizer_op(self):
136 | """
137 | Set 'self.train_op' using AdamOptimizer
138 | """
139 | extra_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
140 | with tf.control_dependencies(extra_ops):
141 | optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
142 | self.train_op = optimizer.minimize(self.loss)
143 |
144 | def add_baseline_op(self, scope = "baseline"):
145 | """
146 | Build the baseline network within the scope.
147 |
148 | In this function we will build the baseline network.
149 | Use build_mlp with the same parameters as the policy network to
150 | get the baseline estimate. You also have to setup a target
151 | placeholder and an update operation so the baseline can be trained.
152 |
153 | Args:
154 | scope: the scope of the baseline network
155 |
156 | """
157 |
158 | self.baseline_in_training_placeholder = tf.placeholder(tf.bool)
159 | self.baseline = tf.squeeze(build_mlp(self.observation_placeholder, 1, scope,
160 | self.config.n_layers, self.config.layer_size,
161 | self.baseline_in_training_placeholder))
162 |
163 | self.baseline_target_placeholder = tf.placeholder(shape=[None], dtype=tf.float32, name='baseline')
164 |
165 | self.baseline_loss = tf.losses.mean_squared_error(labels=self.baseline_target_placeholder,
166 | predictions=self.baseline)
167 | extra_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
168 | with tf.control_dependencies(extra_ops):
169 | optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
170 | self.update_baseline_op = optimizer.minimize(self.baseline_loss)
171 |
172 | def build(self):
173 | """
174 | Build the model by adding all necessary variables.
175 |
176 | Written by course staff.
177 | Calling all the operations you already defined above to build the tensorflow graph.
178 | """
179 |
180 | # add placeholders
181 | self.add_placeholders_op()
182 | # create policy net
183 | self.build_policy_network_op()
184 | # add square loss
185 | self.add_loss_op()
186 | # add optimizer for the main networks
187 | self.add_optimizer_op()
188 |
189 | # add baseline
190 | if self.config.use_baseline:
191 | self.add_baseline_op()
192 |
193 | def initialize(self):
194 | """
195 | Assumes the graph has been constructed (have called self.build())
196 | Creates a tf Session and run initializer of variables
197 |
198 | Written by course staff.
199 | """
200 | # create tf session
201 | self.sess = tf.Session()
202 | # tensorboard stuff
203 | self.add_summary()
204 | # initialize all variables
205 | init = tf.global_variables_initializer()
206 | self.sess.run(init)
207 |
208 | def add_summary(self):
209 | """
210 | Tensorboard stuff. Written by course staff.
211 | """
212 | # extra placeholders to log stuff from python
213 | self.avg_reward_placeholder = tf.placeholder(tf.float32, shape=(), name="avg_reward")
214 | self.max_reward_placeholder = tf.placeholder(tf.float32, shape=(), name="max_reward")
215 | self.std_reward_placeholder = tf.placeholder(tf.float32, shape=(), name="std_reward")
216 |
217 | self.eval_reward_placeholder = tf.placeholder(tf.float32, shape=(), name="eval_reward")
218 |
219 | # extra summaries from python -> placeholders
220 | tf.summary.scalar("Avg Reward", self.avg_reward_placeholder)
221 | tf.summary.scalar("Max Reward", self.max_reward_placeholder)
222 | tf.summary.scalar("Std Reward", self.std_reward_placeholder)
223 | tf.summary.scalar("Eval Reward", self.eval_reward_placeholder)
224 |
225 | # logging
226 | self.merged = tf.summary.merge_all()
227 | self.file_writer = tf.summary.FileWriter(self.config.output_path,self.sess.graph)
228 |
229 | def init_averages(self):
230 | """
231 | Defines extra attributes for tensorboard. Written by course staff.
232 | """
233 | self.avg_reward = 0.
234 | self.max_reward = 0.
235 | self.std_reward = 0.
236 | self.eval_reward = 0.
237 |
238 | def update_averages(self, rewards, scores_eval):
239 | """
240 | Update the averages. Written by course staff.
241 |
242 | Args:
243 | rewards: deque
244 | scores_eval: list
245 | """
246 | self.avg_reward = np.mean(rewards)
247 | self.max_reward = np.max(rewards)
248 | self.std_reward = np.sqrt(np.var(rewards) / len(rewards))
249 |
250 | if len(scores_eval) > 0:
251 | self.eval_reward = scores_eval[-1]
252 |
253 | def record_summary(self, t):
254 | """
255 | Add summary to tensorboard. Written by course staff.
256 | """
257 |
258 | fd = {
259 | self.avg_reward_placeholder: self.avg_reward,
260 | self.max_reward_placeholder: self.max_reward,
261 | self.std_reward_placeholder: self.std_reward,
262 | self.eval_reward_placeholder: self.eval_reward,
263 | }
264 | summary = self.sess.run(self.merged, feed_dict=fd)
265 | # tensorboard stuff
266 | self.file_writer.add_summary(summary, t)
267 |
268 | def sample_path(self, env, num_episodes = None):
269 | """
270 | Sample paths (trajectories) from the environment.
271 |
272 | Args:
273 | num_episodes: the number of episodes to be sampled
274 | if none, sample one batch (size indicated by config file)
275 | env: open AI Gym envinronment
276 |
277 | Returns:
278 | paths: a list of paths. Each path in paths is a dictionary with
279 | path["observation"] a numpy array of ordered observations in the path
280 | path["actions"] a numpy array of the corresponding actions in the path
281 | path["reward"] a numpy array of the corresponding rewards in the path
282 | total_rewards: the sum of all rewards encountered during this "path"
283 |
284 | Written by course staff.
285 | """
286 | episode = 0
287 | episode_rewards = []
288 | paths = []
289 | t = 0
290 | best_r = 0.0
291 | best_reward_logical = None
292 |
293 | while (num_episodes or t < self.config.batch_size):
294 | state = env.reset()
295 | states, actions, rewards = [], [], []
296 | episode_reward = 0
297 |
298 | soc_track = np.zeros((self.config.max_ep_steps, self.env.net.storage.shape[0]))
299 | p_track = np.zeros((self.config.max_ep_steps, self.env.net.storage.shape[0]))
300 | reward_track = np.zeros((self.config.max_ep_steps, 1))
301 |
302 | for step in range(self.config.max_ep_len):
303 | states.append(state)
304 | action = self.sess.run(self.sampled_action, feed_dict={self.observation_placeholder : states[-1][None],
305 | self.in_training_placeholder: False})[0]
306 | state, reward, done, info = env.step(action)
307 | actions.append(action)
308 | rewards.append(reward)
309 | episode_reward += reward
310 | soc_track[step, :] = self.env.net.storage.soc_percent
311 | p_track[step, :] = self.env.net.storage.p_kw
312 | reward_track[step] = reward
313 | if reward > best_r:
314 | best_r = reward
315 | c1 = np.abs(env.net.res_line.p_to_kw - self.env.net.res_line.pl_kw) < self.config.reward_epsilon
316 | c2 = np.abs(env.net.res_line.p_from_kw - self.env.net.res_line.pl_kw) < self.config.reward_epsilon
317 | best_reward_logical = np.logical_or(c1.values, c2.values)
318 | t += 1
319 | if (done or step == self.config.max_ep_len-1):
320 | episode_rewards.append(episode_reward)
321 | break
322 |
323 | path = {"observation" : np.array(states),
324 | "reward" : np.array(rewards),
325 | "action" : np.array(actions)}
326 | paths.append(path)
327 | episode += 1
328 | if num_episodes and episode >= num_episodes:
329 | break
330 |
331 | return paths, episode_rewards, best_r, best_reward_logical, soc_track, p_track, reward_track
332 |
333 | def get_returns(self, paths):
334 | """
335 | Calculate the returns G_t for each timestep
336 |
337 | Args:
338 | paths: recorded sample paths. See sample_path() for details.
339 |
340 | Return:
341 | returns: return G_t for each timestep
342 |
343 | After acting in the environment, we record the observations, actions, and
344 | rewards. To get the advantages that we need for the policy update, we have
345 | to convert the rewards into returns, G_t, which are themselves an estimate
346 | of Q^π (s_t, a_t):
347 |
348 | G_t = r_t + γ r_{t+1} + γ^2 r_{t+2} + ... + γ^{T-t} r_T
349 |
350 | where T is the last timestep of the episode.
351 | """
352 |
353 | all_returns = []
354 | for path in paths:
355 | rewards = path["reward"]
356 |
357 | dim_rewards = np.shape(np.ravel(rewards))[0] # Each path has a different length
358 | returns = np.zeros((dim_rewards,))
359 | for i in range(dim_rewards):
360 | for j in range(dim_rewards-i):
361 | returns[i] += rewards[i+j]*np.power(self.config.gamma, j) # Implement the sum in the G_t formula
362 |
363 | all_returns.append(returns)
364 | returns = np.concatenate(all_returns)
365 |
366 | return returns
367 |
368 | def calculate_advantage(self, returns, observations):
369 | """
370 | Calculate the advantage
371 |
372 | Args:
373 | returns: all discounted future returns for each step
374 | observations: observations
375 | Returns:
376 | adv: Advantage
377 |
378 | Calculate the advantages, using baseline adjustment if necessary,
379 | and normalizing the advantages if necessary.
380 | If neither of these options are True, just return returns.
381 | """
382 | adv = returns
383 |
384 | if self.config.use_baseline:
385 | adv = returns - self.sess.run(self.baseline, feed_dict={self.observation_placeholder: observations,
386 | self.baseline_target_placeholder: returns,
387 | self.baseline_in_training_placeholder: False})
388 |
389 | if self.config.normalize_advantage:
390 | adv = (adv - np.mean(adv))/np.std(adv)
391 |
392 | return adv
393 |
394 | def update_baseline(self, returns, observations):
395 | """
396 | Update the baseline from given returns and observation.
397 |
398 | Args:
399 | returns: Returns from get_returns
400 | observations: observations
401 | """
402 | self.sess.run(self.update_baseline_op, feed_dict={self.observation_placeholder: observations,
403 | self.baseline_target_placeholder: returns,
404 | self.baseline_in_training_placeholder: True})
405 |
    def train(self):
        """
        Performs training. Written by course staff.

        For each of num_batches iterations: sample a batch of trajectories,
        compute discounted returns and advantages, take one baseline step
        (if enabled) and one policy-gradient step, then log statistics and
        save a battery-behavior plot for the batch.
        """
        # NOTE(review): last_eval / last_record are never read below.
        last_eval = 0
        last_record = 0
        scores_eval = []

        self.init_averages()
        scores_eval = [] # list of scores computed at iteration time

        for t in range(self.config.num_batches):

            # collect a minibatch of samples
            paths, total_rewards, best_r, best_reward_logical, soc_track, p_track, reward_track = self.sample_path(self.env)
            scores_eval = scores_eval + total_rewards
            observations = np.concatenate([path["observation"] for path in paths])
            actions = np.concatenate([path["action"] for path in paths])
            rewards = np.concatenate([path["reward"] for path in paths])
            # compute Q-val estimates (discounted future returns) for each time step
            returns = self.get_returns(paths)
            advantages = self.calculate_advantage(returns, observations)

            # run training operations: fit the baseline first, then take the
            # policy-gradient step on the whole batch
            if self.config.use_baseline:
                self.update_baseline(returns, observations)
            self.sess.run(self.train_op, feed_dict={
                        self.observation_placeholder : observations,
                        self.action_placeholder : actions,
                        self.advantage_placeholder : advantages,
                        self.in_training_placeholder: True})

            # tf stuff: push reward statistics to tensorboard
            if (t % self.config.summary_freq == 0):
                self.update_averages(total_rewards, scores_eval)
                self.record_summary(t)

            # compute reward statistics for this batch and log
            avg_reward = np.mean(total_rewards)
            best_ep_reward = np.max(total_rewards)
            # sigma here is the standard error of the mean, not the std dev
            sigma_reward = np.sqrt(np.var(total_rewards) / len(total_rewards))
            s1 = "---------------------------------------------------------\n" \
                 + "Average reward: {:04.2f} +/- {:04.2f}"
            msg = s1.format(avg_reward, sigma_reward)
            self.logger.info(msg)
            msg4 = "Best episode reward: {}".format(best_ep_reward)
            self.logger.info(msg4)

            msg2 = "Max single reward: " + str(best_r)
            msg3 = "Max reward happened on lines: " + str(best_reward_logical)
            end = "\n--------------------------------------------------------"
            self.logger.info(msg2)
            self.logger.info(msg3 + end)

            # Plot battery setpoints, state of charge, and rewards; the tracks
            # returned by sample_path cover only the LAST episode of the batch.
            fig, ax = plt.subplots(nrows=3, sharex=True)
            xs = np.arange(self.config.max_ep_steps)
            for k_step in range(self.env.net.storage.shape[0]):
                ax[1].plot(xs, soc_track[:, k_step].ravel(), marker='.',
                           label='soc_{}'.format(k_step + 1))
                ax[0].plot(xs, p_track[:, k_step].ravel(), marker='.',
                           label='pset_{}'.format(k_step + 1))
            ax[0].legend()
            ax[1].legend()
            ax[2].stem(xs, reward_track, label='reward')
            ax[2].legend()
            ax[2].set_xlabel('time')
            ax[0].set_ylabel('Power (kW)')
            ax[1].set_ylabel('State of Charge')
            ax[2].set_ylabel('Reward Received')
            ax[0].set_title('Battery Behavior and Rewards')
            plt.tight_layout()
            plt.savefig(self.config.output_path + 'soc_plot_{}.png'.format(t))
            plt.close()

        self.logger.info("- Training done.")
        export_plot(scores_eval, "Score", self.config.env_name, self.config.plot_output)
482 |
483 | def evaluate(self, env=None, num_episodes=1):
484 | """
485 | Evaluates the return for num_episodes episodes. Written by course staff.
486 | Not used right now, all evaluation statistics are computed during training
487 | episodes.
488 | """
489 | if env==None: env = self.env
490 | paths, rewards = self.sample_path(env, num_episodes)
491 | avg_reward = np.mean(rewards)
492 | sigma_reward = np.sqrt(np.var(rewards) / len(rewards))
493 | msg = "Average reward: {:04.2f} +/- {:04.2f}".format(avg_reward, sigma_reward)
494 | self.logger.info(msg)
495 | return avg_reward
496 |
497 | def run(self):
498 | """
499 | Apply procedures of training for a PG. Written by course staff.
500 | """
501 | # initialize
502 | self.initialize()
503 | # model
504 | self.train()
505 |
if __name__ == '__main__':
    # Command-line parsing is currently disabled; the POC config is hard-coded.
    #args = parser.parse_args()
    #config = get_config(args.env_name, args.use_baseline)
    config = get_config('Six_Bus_POC', algorithm='PG')
    env = NetModel(config=config)
    # train model
    model = PG(env, config)
    model.run()
514 |
--------------------------------------------------------------------------------
/virtual_microgrids/configs/__init__.py:
--------------------------------------------------------------------------------
1 | from virtual_microgrids.configs.config import get_config
--------------------------------------------------------------------------------
/virtual_microgrids/configs/config.py:
--------------------------------------------------------------------------------
1 | from virtual_microgrids.configs.six_bus_poc import ConfigSixBusPOC
2 | from virtual_microgrids.configs.six_bus_mvp1 import ConfigSixBusMVP1
3 | from virtual_microgrids.configs.six_bus_mvp2 import ConfigSixBusMVP2
4 | from virtual_microgrids.configs.six_bus_mvp3 import ConfigSixBusMVP3
5 | from virtual_microgrids.configs.standard_lv_network import StandardLVNetwork
6 |
7 |
def get_config(env_name, baseline=True, algorithm='ddpg'):
    """Given an environment name and the baseline option, return the configuration.

    Args:
        env_name (str): one of the six-bus scenarios or a standard LV network name.
        baseline (bool): whether a policy-gradient baseline is used.
        algorithm (str): learning-algorithm label, e.g. 'ddpg' or 'PG'.

    Returns:
        The matching configuration object.

    Raises:
        ValueError: if env_name matches no known environment. (The original
            silently returned None, deferring a confusing crash to the caller.)
    """
    if env_name == 'Six_Bus_POC':
        return ConfigSixBusPOC(baseline, algorithm)
    if env_name == 'Six_Bus_MVP1':
        return ConfigSixBusMVP1(baseline, algorithm)
    if env_name == 'Six_Bus_MVP2':
        return ConfigSixBusMVP2(baseline, algorithm)
    if env_name == 'Six_Bus_MVP3':
        return ConfigSixBusMVP3(baseline, algorithm)
    if env_name in ['rural_1', 'rural_2', 'village_1', 'village_2', 'suburb_1']:
        return StandardLVNetwork(env_name, baseline, algorithm)
    raise ValueError('Unknown environment name: {}'.format(env_name))
20 |
21 |
--------------------------------------------------------------------------------
/virtual_microgrids/configs/config_base.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from datetime import datetime as dt
3 |
class ConfigBase(object):
    """A base class for configurations shared by the PG and DDPG trainers."""

    def __init__(self, use_baseline, actor, env_name):
        # --- output / logging locations ---
        # timestamp like 2019-01-01_123456.789012 (spaces -> '_', colons removed)
        stamp = str(dt.now()).replace(' ', '_').replace(':', '')
        baseline_str = 'baseline' if use_baseline else 'no_baseline'
        self.output_path = "results/{}-{}-{}_{}/".format(env_name, baseline_str, actor, stamp)
        self.model_output = self.output_path + "model.weights/"
        self.log_path = self.output_path + "log.txt"
        self.plot_output = self.output_path + "scores.png"
        self.record_path = self.output_path
        self.record_freq = 5
        self.summary_freq = 1
        self.summary_freq2 = 20
        self.actor = actor

        # --- shared training hyperparameters ---
        self.gamma = 0.9  # the discount factor

        # --- policy-gradient specific ---
        self.num_batches = 150  # number of batches trained on
        self.batch_size = 1000  # number of steps used to compute each policy update
        self.max_ep_len = 60    # maximum episode length
        self.learning_rate = 3e-2
        self.use_baseline = use_baseline
        self.normalize_advantage = True

        # --- DDPG specific ---
        self.tau = 0.001
        self.buffer_size = 1e6
        self.minibatch_size = self.max_ep_len * 4
        self.max_episodes = 1000
        self.reasonable_max_episodes = min(600, self.max_episodes)
        self.max_ep_steps = self.max_ep_len

        self.actor_learning_rate_start = 1e-3
        self.actor_learning_rate_end = 1e-6
        self.critic_learning_rate_start = 1e-2
        self.critic_learning_rate_end = 1e-3

        self.randomize_env = False

        # --- policy and baseline network architecture ---
        self.n_layers = 1
        self.layer_size = 16
        self.activation = None

        # Episodes restart within each batch, so an episode can never exceed a
        # batch; a negative max_ep_len means "use one full batch per episode".
        assert self.max_ep_len <= self.batch_size
        if self.max_ep_len < 0:
            self.max_ep_len = self.batch_size
--------------------------------------------------------------------------------
/virtual_microgrids/configs/six_bus_mvp1.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from virtual_microgrids.configs.config_base import ConfigBase
3 |
class ConfigSixBusMVP1(ConfigBase):
    """Configuration for the first six-bus MVP network.

    Bundles learning-algorithm hyperparameters (via ConfigBase) with the
    parameters used to build and initialize the pandapower network. The
    network is symmetric (actually 8 buses in this build-out), designed to
    show that the two sides can be isolated from each other. To customize a
    run, change attributes on the instance before the network is built.
    """
    def __init__(self, use_baseline, actor):
        self.env_name = 'Six_Bus_MVP1'
        super().__init__(use_baseline, actor, self.env_name)

        # environment generation
        self.tstep = 1. / 60
        self.net_zero_reward = 1.0
        self.vn_high = 20
        self.vn_low = 0.4
        self.length_km = 0.03
        self.std_type = 'NAYY 4x50 SE'
        n_steps = self.max_ep_len + 1
        # constant generation (-) and load (+) profiles, keyed by bus index
        self.static_feeds = {
            3: -10 * np.ones(n_steps),
            6: -10.5 * np.ones(n_steps),
            4: 10.5 * np.ones(n_steps),
            7: 10 * np.ones(n_steps)
        }
        self.battery_locations = [3, 6]
        self.init_soc = 0.5
        self.energy_capacity = 21.0  # changed from 20 to see if endpoint problem

        # Generation
        self.gen_locations = None

        # Action space
        self.gen_p_min = -50.0
        self.gen_p_max = 0.0
        self.storage_p_min = -5.0
        self.storage_p_max = 5.0

        # state space
        self.with_soc = False

        # reward function
        self.reward_epsilon = 0.001
        self.cont_reward_lambda = 0.1
--------------------------------------------------------------------------------
/virtual_microgrids/configs/six_bus_mvp2.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from virtual_microgrids.configs.config_base import ConfigBase
3 |
class ConfigSixBusMVP2(ConfigBase):
    """The configurations for the second six-bus MVP network used in this project.

    The configurations include parameters for the learning algorithm as well as for building and initializing the
    network components. This variant matches MVP1 but superimposes a sinusoid on the bus-7 load, relaxes the
    reward tolerance (0.01 vs 0.001), and uses a larger policy network (2 layers of 64). To change the values
    initialized here, change config after it is instantiated before using it to build the network.
    """
    def __init__(self, use_baseline, actor):
        self.env_name = 'Six_Bus_MVP2'
        super().__init__(use_baseline, actor, self.env_name)

        # environment generation
        self.tstep = 1. / 60
        self.net_zero_reward = 1.0
        self.vn_high = 20
        self.vn_low = 0.4
        self.length_km = 0.03
        self.std_type = 'NAYY 4x50 SE'
        n = self.max_ep_len + 1
        # static generation (-) / load (+) profiles, keyed by bus index
        self.static_feeds = {
            3: -10 * np.ones(n),
            6: -10.5 * np.ones(n),
            4: 10.5 * np.ones(n),
            7: 10 * np.ones(n)
        }
        # one full sine period over the episode, added to bus 7's load
        self.static_feeds[7] += np.sin(2 * np.pi * np.arange(n) / n)
        self.battery_locations = [3, 6]
        self.init_soc = 0.5
        self.energy_capacity = 21.0  # changed from 20 to see if endpoint problem

        # Generation
        self.gen_locations = None

        # Action space
        self.gen_p_min = -50.0
        self.gen_p_max = 0.0
        self.storage_p_min = -5.0
        self.storage_p_max = 5.0

        # state space
        self.with_soc = False

        # reward function
        self.reward_epsilon = 0.01
        self.cont_reward_lambda = 0.1

        # parameters for the policy and baseline models
        self.n_layers = 2
        self.layer_size = 64
--------------------------------------------------------------------------------
/virtual_microgrids/configs/six_bus_mvp3.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy.signal import triang
3 | from virtual_microgrids.configs.config_base import ConfigBase
4 |
class ConfigSixBusMVP3(ConfigBase):
    """The configurations for the proof of concept (POC) simplest network used in this project.

    The configurations include parameters for the learning algorithm as well as for building and initializing the
    network components. The 6 bus POC is a symmetrical network (actually with 8 buses in this build out), designed
    to show that the two sides can be isolated from each other. To change the values initialized here, change config
    after it is instantiated before using it to build the network.

    Unlike MVP1/MVP2, this config draws random load magnitudes and waveforms at
    construction time, so each instantiation describes a different environment.
    """
    def __init__(self, use_baseline, actor):
        """Initialize all MVP3 parameters, sampling the randomized load profiles."""
        self.env_name = 'Six_Bus_MVP3'
        super().__init__(use_baseline, actor, self.env_name)

        self.max_ep_len = 120  # maximum episode length
        self.buffer_size = 1e6
        self.minibatch_size = self.max_ep_len * 4
        self.max_episodes = 1000
        self.reasonable_max_episodes = min(600, self.max_episodes)
        self.max_ep_steps = self.max_ep_len
        # With randomize_env the environment re-instantiates this config on
        # every reset, re-drawing the random feeds below.
        self.randomize_env = True

        # environment generation
        self.tstep = 1. / 60 / 2  # half the MVP1/MVP2 step; presumably hours -- TODO confirm
        self.net_zero_reward = 1.0
        self.vn_high = 20   # nominal voltage on the high side (kV)
        self.vn_low = 0.4   # nominal voltage on the low side (kV)
        self.length_km = 0.03
        self.std_type = 'NAYY 4x50 SE'
        # One feed value per step, plus one for the initial state.
        n = self.max_ep_len + 1
        # Static generators (negative, buses 3 and 6) are fixed; the two loads
        # (positive, buses 4 and 7) get random magnitudes in [9, 11].
        self.static_feeds = {
            3: -10 * np.ones(n),
            6: -10 * np.ones(n),
            4: np.random.uniform(9, 11) * np.ones(n),
            7: np.random.uniform(9, 11) * np.ones(n)
        }
        # Perturb each load with a randomly chosen waveform; amplitudes are in
        # [-1, 1] so the perturbation stays small relative to the base load.
        load_types = np.random.choice(['sine', 'triangle', 'atan'], size=2)
        for load_type, feed in zip(load_types, [self.static_feeds[4], self.static_feeds[7]]):
            if load_type == 'sine':
                a = np.random.uniform(-1, 1)
                scale = np.random.uniform(0.5, 2)
                feed += a * np.sin(2 * np.pi * np.arange(n) * scale / n)
            elif load_type == 'triangle':
                # Zero-centered triangular window, circularly shifted by a random offset.
                a = np.random.uniform(-1, 1)
                roll = np.random.randint(0, n)
                feed += a * 2 * np.roll(triang(n) - 0.5, roll)
            elif load_type == 'atan':
                # Smooth monotone ramp from roughly -a to +a across the episode.
                a = np.random.uniform(-1, 1)
                xs = np.linspace(-5, 5, n)
                feed += a * 2 * np.arctan(xs) / np.pi
        self.battery_locations = [3, 6]  # buses that receive controllable storage
        self.init_soc = 0.5
        self.energy_capacity = 21.0  # changed from 20 to see if endpoint problem

        # Generation
        self.gen_locations = None  # no controllable generators in this config

        # Action space
        self.gen_p_min = -50.0
        self.gen_p_max = 0.0
        self.storage_p_min = -5.0
        self.storage_p_max = 5.0

        # state space
        self.with_soc = False  # exclude battery SoC from the observation vector

        # reward function
        self.reward_epsilon = 0.01
        self.cont_reward_lambda = 0.1

        # parameters for the policy and baseline models
        self.n_layers = 2
        self.layer_size = 64
76 |
if __name__ == "__main__":
    # Smoke test: constructing the config exercises the random feed sampling.
    config = ConfigSixBusMVP3(True, 'DDPG')
79 |
--------------------------------------------------------------------------------
/virtual_microgrids/configs/six_bus_poc.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from virtual_microgrids.configs.config_base import ConfigBase
3 |
class ConfigSixBusPOC(ConfigBase):
    """The configurations for the proof of concept (POC) simplest network used in this project.

    The configurations include parameters for the learning algorithm as well as for building and initializing the
    network components. The 6 bus POC is a symmetrical network (actually with 8 buses in this build out), designed
    to show that the two sides can be isolated from each other. To change the values initialized here, change config
    after it is instantiated before using it to build the network.
    """
    def __init__(self, use_baseline, actor):
        """Initialize all POC parameters; edit attributes after construction to customize.

        Parameters
        ----------
        use_baseline: bool
            Whether the learning algorithm uses a baseline network.
        actor: str
            Name of the learning algorithm, used for the output paths.
        """
        self.env_name = 'Six_Bus_POC'
        # The base class fills in shared training/output defaults, including
        # max_ep_len which is read below.
        super().__init__(use_baseline, actor, self.env_name)

        # environment generation
        self.tstep = 1. / 60  # simulation time step; presumably hours (one minute) -- TODO confirm
        self.net_zero_reward = 1.0
        self.vn_high = 20   # nominal voltage on the high side (kV)
        self.vn_low = 0.4   # nominal voltage on the low side (kV)
        self.length_km = 0.03
        self.std_type = 'NAYY 4x50 SE'
        # One feed value per step, plus one for the initial state (hoisted for
        # consistency with the MVP2/MVP3 configs).
        n = self.max_ep_len + 1
        # Non-controllable elements keyed by bus: negative values become static
        # generators, positive values become loads. The sides balance exactly.
        self.static_feeds = {
            3: -10 * np.ones(n),
            6: -10 * np.ones(n),
            4: 10 * np.ones(n),
            7: 10 * np.ones(n)
        }
        self.battery_locations = [3, 6]  # buses that receive controllable storage
        self.init_soc = 0.5
        self.energy_capacity = 21.0  # changed from 20 to see if endpoint problem

        # Generation
        self.gen_locations = None  # no controllable generators in this config

        # Action space
        self.gen_p_min = -50.0
        self.gen_p_max = 0.0
        self.storage_p_min = -5.0
        self.storage_p_max = 5.0

        # state space
        self.with_soc = False  # exclude battery SoC from the observation vector

        # reward function
        self.reward_epsilon = 0.001
        self.cont_reward_lambda = 0.1
48 |
--------------------------------------------------------------------------------
/virtual_microgrids/configs/standard_lv_network.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from pandapower.networks import create_synthetic_voltage_control_lv_network as mknet
3 |
class StandardLVNetwork(object):
    """The configurations for using any of the standard low voltage (LV) test networks shipped with pandapower.

    Options in this set up include choices to remove the generation and load elements built in to the test network, and
    the option to remove all sources and sinks of reactive power, q. By adding to the dictionary static_feeds_new you
    can create new loads or static generators on a custom schedule.

    To add controllable resources you can specify the
    locations of new generators, or specify the addition of batteries: either give their locations (by bus number), or
    have them assigned randomly. If percent_battery_buses is non zero (must be in the interval [0, 1]) and
    batteries_on_leaf_nodes_only is False, then percent_battery_buses percent of all the buses will be assigned storage.
    If batteries_on_leaf_nodes_only is True, then percent_battery_buses percent of all the leaf node buses will be
    assigned storage. The initial states of charge (soc) and the capacities can also be changed: these can either be
    floats or lists with length equal to the number of storage elements in the network.
    """
    def __init__(self, env_name, use_baseline, actor):
        """Initialize all parameters for a pandapower synthetic LV environment.

        Parameters
        ----------
        env_name: str
            One of the pandapower synthetic network classes, e.g. 'rural_1'.
        use_baseline: bool
            Whether the learning algorithm uses a baseline network.
        actor: str
            Name of the learning algorithm, used for the output paths.
        """
        self.env_name = env_name

        # output config
        baseline_str = 'baseline' if use_baseline else 'no_baseline'
        self.output_path = "results/{}-{}-{}/".format(self.env_name, baseline_str, actor)
        self.model_output = self.output_path + "model.weights/"
        self.log_path = self.output_path + "log.txt"
        self.plot_output = self.output_path + "scores.png"
        self.record_path = self.output_path
        self.record_freq = 5
        self.summary_freq = 1
        self.summary_freq2 = 1000

        # model and training - general
        self.gamma = 0.9  # the discount factor

        # model and training config - PG
        self.num_batches = 500  # number of batches trained on
        self.batch_size = 1000  # number of steps used to compute each policy update
        self.max_ep_len = 60  # maximum episode length
        self.learning_rate = 3e-2
        self.use_baseline = use_baseline
        self.normalize_advantage = True

        # model and training config - DDPG
        self.tau = 0.001  # soft target-network update rate
        self.reward_epsilon = 0.001  # tolerance for counting a line as zero-flow
        self.actor_learning_rate = 1e-3
        self.critic_learning_rate = 1e-2
        self.buffer_size = 1e6
        self.minibatch_size = 64
        self.max_episodes = 500
        self.max_ep_steps = self.max_ep_len

        # Network clean-up switches (applied in network_generation.standard_lv).
        self.remove_q = True          # zero out all reactive power elements
        self.clear_loads_sgen = False  # keep the loads/sgens that ship with the network
        self.clear_gen = True          # disable the built-in generators and storage

        # environment generation
        self.tstep = 1. / 60  # time step; presumably hours (one minute) -- TODO confirm
        self.net_zero_reward = 1.0
        self.static_feeds_new = None  # Acts how static_feeds does in the 6BusPOC config

        # Fill static_feeds with the loads and static generators that ship with the network
        # NOTE(review): static_feeds_new is hard-coded to None two lines above, so the
        # else branch only runs if this attribute assignment is edited first.
        if self.static_feeds_new is None:
            self.static_feeds = {}
        else:
            self.static_feeds = self.static_feeds_new.copy()
        net = mknet(network_class=env_name)
        if not self.clear_loads_sgen:
            if net.load.shape[0] > 0:
                for idx, row in net.load.iterrows():
                    # NOTE(review): the six-bus configs allocate max_ep_len + 1 feed
                    # values (initial state plus one per step); this uses max_ep_len --
                    # possible off-by-one at the final step, confirm against NetModel.step.
                    self.static_feeds[row['bus']] = row['p_kw'] * np.ones(self.max_ep_len)
            if net.sgen.shape[0] > 0:
                for idx, row in net.sgen.iterrows():
                    self.static_feeds[row['bus']] = row['p_kw'] * np.ones(self.max_ep_len)

        self.battery_locations = None  # Specify specific locations, or can pick options for random generation:
        self.percent_battery_buses = 0.5  # How many of the buses should be assigned batteries
        self.batteries_on_leaf_nodes_only = True

        # Action space
        self.gen_p_min = -50.0
        self.gen_p_max = 0.0
        self.storage_p_min = -50.0
        self.storage_p_max = 50.0

        # Generation
        self.gen_locations = [4]
        self.gen_max_p_kw = [20.0]

        self.init_soc = 0.5
        self.energy_capacity = 20.0

        # parameters for the policy and baseline models
        self.n_layers = 1
        self.layer_size = 16
        self.activation = None

        # since we start new episodes for each batch
        # NOTE(review): the assert runs before the negative-value fallback below,
        # so a negative max_ep_len passes the assert and is then replaced.
        assert self.max_ep_len <= self.batch_size
        if self.max_ep_len < 0:
            self.max_ep_len = self.batch_size
--------------------------------------------------------------------------------
/virtual_microgrids/powerflow/__init__.py:
--------------------------------------------------------------------------------
1 | from virtual_microgrids.powerflow.pp_network import NetModel
--------------------------------------------------------------------------------
/virtual_microgrids/powerflow/network_generation.py:
--------------------------------------------------------------------------------
1 | import pandapower as pp
2 | import numpy as np
3 | from pandapower.networks import create_synthetic_voltage_control_lv_network as mknet
4 |
5 |
def get_net(config):
    """Given the configuration, build and return the pandapower network object.

    Dispatches on ``config.env_name``: names containing 'Six_Bus' build the
    hand-made six bus POC network, while the pandapower synthetic LV network
    names build a synthetic voltage-control network.

    Raises
    ------
    ValueError
        If ``config.env_name`` matches no known environment. (Previously the
        function fell through and silently returned None, deferring the
        failure to the caller.)
    """
    if 'Six_Bus' in config.env_name:
        return six_bus(config.vn_high, config.vn_low, config.length_km,
                       config.std_type, config.battery_locations, config.init_soc,
                       config.energy_capacity, config.static_feeds, config.gen_locations,
                       config.gen_p_max, config.gen_p_min, config.storage_p_max,
                       config.storage_p_min)
    if config.env_name in ['rural_1', 'rural_2', 'village_1', 'village_2', 'suburb_1']:
        return standard_lv(config.env_name, config.remove_q, config.static_feeds_new, config.clear_loads_sgen,
                           config.clear_gen, config.battery_locations, config.percent_battery_buses,
                           config.batteries_on_leaf_nodes_only, config.init_soc, config.energy_capacity,
                           config.gen_locations, config.gen_p_max, config.gen_p_min, config.storage_p_max,
                           config.storage_p_min)
    raise ValueError('Unknown environment name: {}'.format(config.env_name))
20 |
21 |
def add_battery(net, bus_number, p_init, energy_capacity, init_soc=0.5,
                max_p=50, min_p=-50, eff=1.0, capital_cost=0, min_e=0.):
    """Change the network by adding a battery / storage unit.

    This function creates a storage element in net, and adds two non-standard columns: efficiency and capital cost.

    Parameters
    ----------
    net: class
        The pandapower network model
    bus_number: int
        Where the battery will be added
    p_init: float
        The power draw / input of the battery on initialization
    energy_capacity: float
        The maximum energy capacity of the battery (kWh)
    init_soc: float
        The state of charge
    max_p: float
        The max rate that power can be drawn by the battery
    min_p: float
        The max rate that power can be pulled from the battery (negative).
    eff: float
        The efficiency
    capital_cost: float
        The capital cost of the battery
    min_e: float
        The minimum energy in the battery
    """
    pp.create_storage(net, bus_number, p_init, energy_capacity,
                      soc_percent=init_soc, max_p_kw=max_p, min_p_kw=min_p,
                      min_e_kwh=min_e)
    # pandapower has no native columns for efficiency or capital cost; store
    # them on the new storage row (the last index) for the reward calculation.
    idx = net.storage.index[-1]
    net.storage.loc[idx, 'eff'] = eff
    net.storage.loc[idx, 'cap_cost'] = capital_cost
55 |
56 |
def six_bus(vn_high=20, vn_low=0.4, length_km=0.03, std_type='NAYY 4x50 SE', battery_locations=None, init_soc=0.5,
            energy_capacity=20.0, static_feeds=None, gen_locations=None, gen_p_max=0.0, gen_p_min=-50.0,
            storage_p_max=50.0, storage_p_min=-50.0):
    """This function creates the network model for the 6 bus POC network from scratch.

    Buses and lines are added to an empty network based on a hard-coded topology and parameters from the config file
    (seen as inputs). The only controllable storage added in this network are batteries, and the input static_feeds is
    used to add loads and static generators which are not controlled by the agent. The first value in the series is
    taken for initialization of those elements.

    battery_locations: iterable of int or None
        Buses that receive storage. None means the default buses (3, 6); a
        None sentinel replaces the previous mutable list default, which is
        shared across calls in Python.
    """
    # Resolve the sentinel default without sharing a mutable object across calls.
    if battery_locations is None:
        battery_locations = (3, 6)
    net = pp.create_empty_network(name='6bus', f_hz=60., sn_kva=100.)
    # create buses: 0 is the HV grid connection, 1 the LV root, 2-4 form
    # 'Side1' and 5-7 form 'Side2' of the symmetric network.
    for i in range(8):
        nm = 'bus{}'.format(i)
        if i == 0:
            pp.create_bus(net, name=nm, vn_kv=vn_high)
        elif i == 1:
            pp.create_bus(net, name=nm, vn_kv=vn_low)
        else:
            if i <= 4:
                zn = 'Side1'
            else:
                zn = 'Side2'
            pp.create_bus(net, name=nm, zone=zn, vn_kv=vn_low)
    # create grid connection
    pp.create_ext_grid(net, 0)
    # create lines: two symmetric subtrees rooted at bus 1
    pp.create_line(net, 0, 1, length_km=length_km, std_type=std_type,
                   name='line0')
    pp.create_line(net, 1, 2, length_km=length_km, std_type=std_type,
                   name='line1')
    pp.create_line(net, 2, 3, length_km=length_km, std_type=std_type,
                   name='line2')
    pp.create_line(net, 2, 4, length_km=length_km, std_type=std_type,
                   name='line3')
    pp.create_line(net, 1, 5, length_km=length_km, std_type=std_type,
                   name='line4')
    pp.create_line(net, 5, 6, length_km=length_km, std_type=std_type,
                   name='line5')
    pp.create_line(net, 5, 7, length_km=length_km, std_type=std_type,
                   name='line6')

    # add controllable storage; scalar capacity / soc apply to every battery,
    # otherwise each battery takes its own entry by position
    for idx, bus_number in enumerate(battery_locations):
        energy_capacity_here = energy_capacity
        init_soc_here = init_soc
        if np.size(energy_capacity) > 1:
            energy_capacity_here = energy_capacity[idx]
        if np.size(init_soc) > 1:
            init_soc_here = init_soc[idx]

        add_battery(net, bus_number=bus_number, p_init=0.0, energy_capacity=energy_capacity_here,
                    init_soc=init_soc_here, max_p=storage_p_max, min_p=storage_p_min)

    # Add controllable generator
    if gen_locations is not None:
        for idx, bus_number in enumerate(gen_locations):
            pp.create_gen(net, bus_number, p_kw=0.0, min_q_kvar=0.0, max_q_kvar=0.0, min_p_kw=gen_p_min,
                          max_p_kw=gen_p_max)

    ##### TODO : Have different limits for different generators and storage #####

    # add loads and static generation; positive feed values become loads,
    # negative values become static generators
    if static_feeds is None:
        print('No loads or generation assigned to network')
    else:
        if len(static_feeds) > 0:
            for key, val in static_feeds.items():
                init_flow = val[0]
                print('init_flow: ', init_flow, 'at bus: ', key)
                if init_flow > 0:
                    pp.create_load(net, bus=key, p_kw=init_flow, q_kvar=0)
                else:
                    pp.create_sgen(net, bus=key, p_kw=init_flow, q_kvar=0)

    return net
133 |
134 |
def standard_lv(env_name, remove_q=True, static_feeds_new=None, clear_loads_sgen=False, clear_gen=True,
                battery_locations=None, percent_battery_buses=0.5, batteries_on_leaf_nodes_only=True, init_soc=0.5,
                energy_capacity=20.0, gen_locations=None, gen_p_max=0.0, gen_p_min=-50.0,
                storage_p_max=50.0, storage_p_min=-50.0):
    """This function creates a network model using the set of synthetic voltage control low voltage (LV) networks from
    pandapower.

    The environment name, env_name, chooses which of the models to create out of 'rural_1', 'rural_2', 'village_1',
    'village_2', and 'suburb_1'.

    Then options can be triggered to remove all reactive power components from the network (as we do in this project),
    or to remove static generators, loads, and generators that come with the standard model of the network. New
    batteries and generators are added which will be used as controllable resources by the agent.

    Static_feeds is a dictionary used by other functions to define the state of the network as we step through time, and
    contains the power values of the non-controllable elements: static generators and loads. In this method we use
    static_feeds_new, a subset of static_feeds, to create new loads and static generators in the network that did not
    ship with the model.
    """

    net = mknet(network_class=env_name)

    # Remove q components
    if remove_q:
        net.load.q_kvar = 0
        net.sgen.q_kvar = 0
        net.gen.q_kvar = 0
        net.gen.min_q_kvar = 0
        net.gen.max_q_kvar = 0
        net.shunt.in_service = False

    # Remove built in loads and generators
    if clear_loads_sgen:
        net.load.in_service = False
        net.sgen.in_service = False
    if clear_gen:
        net.gen.in_service = False
        net.storage.in_service = False

    # add controllable storage: explicit locations win; otherwise sample a
    # fraction of the buses (or of the leaf buses only) at random
    if battery_locations is not None:
        applied_battery_locations = battery_locations
    elif percent_battery_buses > 0:
        if batteries_on_leaf_nodes_only:
            # A leaf is a bus that appears only as a line endpoint, never as a source.
            leaf_nodes = []
            for i in net.line.to_bus.values:
                if i not in net.line.from_bus.values:
                    leaf_nodes.append(i)
            applied_battery_locations = np.random.choice(leaf_nodes, int(percent_battery_buses * len(leaf_nodes)),
                                                         replace=False)
        else:
            applied_battery_locations = np.random.choice(net.bus.shape[0],
                                                         int(percent_battery_buses * net.bus.shape[0]), replace=False)
    if len(applied_battery_locations) > 0:
        num_batteries = len(applied_battery_locations)
        for idx, bus_number in enumerate(applied_battery_locations):
            # Scalars apply to every battery; a sequence matching the battery
            # count is indexed per battery, any other sequence falls back to
            # its first element.
            energy_capacity_here = energy_capacity
            init_soc_here = init_soc
            if np.size(energy_capacity) > 1:
                energy_capacity_here = energy_capacity[0]
                if np.size(energy_capacity) == num_batteries:
                    energy_capacity_here = energy_capacity[idx]
            if np.size(init_soc) > 1:
                init_soc_here = init_soc[0]
                # Bug fix: this previously tested np.size(energy_capacity),
                # so per-battery initial SoCs were ignored whenever the two
                # sequences had different lengths.
                if np.size(init_soc) == num_batteries:
                    init_soc_here = init_soc[idx]
            add_battery(net, bus_number=bus_number, p_init=0.0, energy_capacity=energy_capacity_here,
                        init_soc=init_soc_here, max_p=storage_p_max, min_p=storage_p_min)
    # Add controllable generator
    if gen_locations is not None:
        for idx, bus_number in enumerate(gen_locations):
            pp.create_gen(net, bus_number, p_kw=0.0, min_q_kvar=0.0, max_q_kvar=0.0, max_p_kw=gen_p_max,
                          min_p_kw=gen_p_min)

    # add new loads (positive feed) and static generators (negative feed)
    if static_feeds_new is None:
        print('No loads or generation added to network')
    else:
        if len(static_feeds_new) > 0:
            for key, val in static_feeds_new.items():
                init_flow = val[0]
                print('init_flow: ', init_flow, 'at bus: ', key)
                if init_flow > 0:
                    pp.create_load(net, bus=key, p_kw=init_flow, q_kvar=0)
                else:
                    pp.create_sgen(net, bus=key, p_kw=init_flow, q_kvar=0)

    # Name buses for plotting
    for i in range(net.bus.name.shape[0]):
        net.bus.name.at[i] = 'bus' + str(i)

    return net
226 |
227 |
228 |
--------------------------------------------------------------------------------
/virtual_microgrids/powerflow/pp_network.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | import pandapower as pp
4 | from copy import deepcopy
5 | from virtual_microgrids.configs import get_config
6 | from virtual_microgrids.powerflow.network_generation import get_net
7 | from virtual_microgrids.utils import Graph
8 |
9 |
class NetModel(object):
    """Building and interacting with a network model to simulate power flow.

    In this class we model all of the network component including loads,
    generators, batteries, lines, buses, and transformers. The state of each is
    tracked in a pandapower network object.
    """
    def __init__(self, config=None, env_name='Six_Bus_POC', baseline=True,
                 actor='DDPG'):
        """Initialize attributes of the object and zero out certain components
        in the standard test network.

        Parameters
        ----------
        config: object, optional
            A pre-built configuration; when None, one is created from
            env_name / baseline / actor.
        env_name: str
            Environment name used when config is None.
        baseline: bool
            Whether the learning algorithm uses a baseline (used when config is None).
        actor: str
            Learning algorithm name (used when config is None).
        """
        if config is not None:
            self.config = config
        else:
            self.config = get_config(env_name, baseline, actor)
        # Build the pandapower network once the config is settled (the
        # original duplicated this call in both branches).
        self.net = get_net(self.config)

        self.reward_val = 0.0

        self.tstep = self.config.tstep
        self.net_zero_reward = self.config.net_zero_reward
        # Pristine copy so reset() can restore the as-built network.
        self.initial_net = pp.copy.deepcopy(self.net)
        self.time = 0
        self.n_load = len(self.net.load)
        self.n_sgen = len(self.net.sgen)
        self.n_gen = len(self.net.gen)
        self.n_storage = len(self.net.storage)
        # Observation is the feed powers (optionally plus battery SoC),
        # concatenated with their first difference -- hence the factor of 2.
        if self.config.with_soc:
            self.observation_dim = self.n_load + self.n_sgen + self.n_storage
        else:
            self.observation_dim = self.n_load + self.n_sgen
        self.observation_dim *= 2
        self.action_dim = self.n_gen + self.n_storage
        # Undirected bus-connectivity graph used to find islands for the reward.
        self.graph = Graph(len(self.net.bus))
        for idx, entry in self.net.line.iterrows():
            self.graph.addEdge(entry.from_bus, entry.to_bus)
        self.current_state = None
        self.last_state = None

    def reset(self):
        """Reset the network and reward values back to how they were initialized.

        When the config requests a randomized environment, a fresh config (and
        therefore fresh random feeds) is drawn instead of restoring the copy.

        :return: 1D numpy array, the initial observation (state plus a zero
            difference component).
        """
        if not self.config.randomize_env:
            self.net = pp.copy.deepcopy(self.initial_net)
        else:
            self.config = get_config(self.config.env_name, self.config.use_baseline,
                                     self.config.actor)
            self.net = get_net(self.config)
        self.reward_val = 0.0
        self.time = 0
        self.run_powerflow()
        self.current_state = self.get_state(self.config.with_soc)
        self.last_state = deepcopy(self.current_state)
        # current - last is all zeros immediately after a reset.
        return np.concatenate([self.current_state, self.current_state - self.last_state])

    def step(self, p_set):
        """Update the simulation by one step

        :param p_set: 1D numpy array of floats, the action for the agent --
            generator set points first, then storage set points.
        :return: (observation, reward, done, info) tuple in the usual gym style.
        """
        # Increment the time
        self.time += 1
        self.last_state = deepcopy(self.current_state)
        # Update non-controllable resources from their predefined data feeds.
        # NaN-initialized so buses without a feed entry keep an unset value
        # (pd.Series(data=None, ...) is deprecated in modern pandas).
        new_loads = pd.Series(np.nan, index=self.net.load.bus)
        new_sgens = pd.Series(np.nan, index=self.net.sgen.bus)
        for bus, feed in self.config.static_feeds.items():
            p_new = feed[self.time]
            # Positive power is a load, negative a static generator -- mirrors
            # how network_generation creates the elements.
            if p_new > 0:
                new_loads[bus] = p_new
            else:
                new_sgens[bus] = p_new
        self.update_loads(new_p=new_loads.values)
        self.update_static_generation(new_p=new_sgens.values)
        # Update controllable resources
        new_gens = p_set[:self.n_gen]
        new_storage = p_set[self.n_gen:]
        self.update_generation(new_p=new_gens)
        self.update_batteries(new_p=new_storage)
        # Run power flow
        self.run_powerflow()
        # Collect items to return
        state = self.get_state(self.config.with_soc)
        self.current_state = state
        reward = self.calculate_reward(eps=self.config.reward_epsilon)
        done = self.time >= self.config.max_ep_len
        info = ''
        return np.concatenate([self.current_state, self.current_state - self.last_state]), reward, done, info

    def get_state(self, with_soc=False):
        """Get the current state of the game

        The state is given by the power supplied or consumed by all devices
        on the network, plus the state of charge (SoC) of the batteries. This
        method defines a "global ordering" for this vector:
        - Non-controllable loads (power, kW)
        - Non-controllable generators (power, kW)
        - SoC for batteries (soc, no units), only when with_soc is True

        We are not currently considering reactive power (Q) as part of the
        problem.

        :return: A 1D numpy array containing the current state
        """
        p_load = self.net.res_load.p_kw
        p_sgen = self.net.res_sgen.p_kw
        # NOTE(review): gen and storage powers are read but not included in
        # the returned state; preserved as-is.
        p_gen = self.net.res_gen.p_kw
        p_storage = self.net.res_storage.p_kw
        if with_soc:
            soc_storage = self.net.storage.soc_percent
            state = np.concatenate([p_load, p_sgen, soc_storage])
        else:
            state = np.concatenate([p_load, p_sgen])
        return state

    def update_loads(self, new_p=None, new_q=None):

        """Update the loads in the network.

        This method assumes that the orders match, i.e. the order the buses in
        self.net.load.bus matches where the loads in new_p and new_q should be
        applied based on their indexing.

        Parameters
        ----------
        new_p, new_q: array_like
            New values for the real and reactive load powers, shape (number of load buses, 1).

        Attributes
        ----------
        self.net.load: object
            The load values in the network object are updated.
        """
        if new_p is not None:
            self.net.load.p_kw = new_p
        if new_q is not None:
            self.net.load.q_kvar = new_q

    def update_static_generation(self, new_p=None, new_q=None):
        """Update the static generation in the network.

        This method assumes that the orders match, i.e. the order the buses in
        self.net.sgen.bus matches where the generation values in new_p and
        new_q should be applied based on their indexing.

        Parameters
        ----------
        new_p, new_q: array_like
            New values for the real and reactive static generation, shape
            (number of static generators, 1).

        Attributes
        ----------
        self.net.sgen: object
            The static generation values in the network object are updated.
        """
        if new_p is not None:
            self.net.sgen.p_kw = new_p
        if new_q is not None:
            self.net.sgen.q_kvar = new_q

    def update_generation(self, new_p=None, new_q=None):
        """Update the traditional (not static) generation in the network.

        This method assumes that the orders match, i.e. the order the buses in
        self.net.gen.bus matches where the generation values in new_p
        should be applied based on their indexing.

        Parameters
        ----------
        new_p, new_q: array_like
            New values for the real and reactive generation, shape (number of
            traditional generators, 1).

        Attributes
        ----------
        self.net.gen: object
            The traditional generation values in the network object are updated.
        """
        if new_p is not None:
            self.net.gen.p_kw = new_p
        if new_q is not None:
            self.net.gen.q_kvar = new_q

    def update_batteries(self, new_p):
        """Update the batteries / storage units in the network.

        This method assumes that the orders match, i.e. the order the buses in
        self.net.storage.bus matches where the power values in new_p should be
        applied based on their indexing. Requested powers are clipped to the
        battery's rated limits and to what the current state of charge allows
        in one time step; the SoC is then advanced, applying the efficiency on
        charge and discharge.

        Parameters
        ----------
        new_p: array_like
            The power flow into / out of each battery, shape (number of storage units, 1).

        Attributes
        ----------
        self.net.storage: object
            The storage values in the network object are updated.
        """
        soc = self.net.storage.soc_percent
        cap = self.net.storage.max_e_kwh
        eff = self.net.storage.eff
        pmin = self.net.storage.min_p_kw
        # Cannot discharge more energy than is stored this step.
        pmin_soc = -1 * soc * cap * eff / self.tstep
        pmin = np.max([pmin, pmin_soc], axis=0)
        pmax = self.net.storage.max_p_kw
        # Cannot charge past full capacity this step.
        pmax_soc = (1. - soc) * cap / (eff * self.tstep)
        pmax = np.min([pmax, pmax_soc], axis=0)
        ps = np.clip(new_p, pmin, pmax)
        self.net.storage.p_kw = ps
        soc_next = soc + ps * self.tstep * eff / cap
        # Discharging (ps < 0) divides by the efficiency instead of multiplying.
        msk = ps < 0
        soc_next[msk] = (soc + ps * self.tstep / (eff * cap))[msk]
        self.net.storage.soc_percent = soc_next

    def run_powerflow(self):
        """Evaluate the power flow. Results are stored in the results matrices
        of the net object, e.g. self.net.res_bus.

        Attributes
        ----------
        self.net: object
            The network matrices are updated to reflect the results.
            Specifically: self.net.res_bus, self.net.res_line, self.net.res_gen,
            self.net.res_sgen, self.net.res_trafo, self.net.res_storage.
        """
        try:
            pp.runpp(self.net, enforce_q_lims=True,
                     calculate_voltage_angles=False,
                     voltage_depend_loads=False)
        # Narrowed from a bare except, which would also swallow
        # KeyboardInterrupt / SystemExit.
        except Exception:
            print('There was an error running the powerflow! pp.runpp() didnt work')

    def calculate_reward(self, eps=0.001, type=4):
        """Calculate the reward associated with a power flow result.

        We count zero flow through the line as when the power flowing into the
        line is equal to the power lost in it. This gives a positive reward.

        A cost (negative reward) is incurred for running the batteries, based
        on the capital cost of the battery and the expected lifetime (currently
        hardcoded to 1000 cycles). So, if the capital cost of the battery is set
        to zero, then producing or consuming power with the battery is free to
        use.

        Parameters
        ----------
        eps: float
            Tolerance
        type: int
            Reward variant selector (name shadows the builtin but is kept for
            backward compatibility with callers using type= keyword).

        Attributes
        ----------
        reward_val: The value of the reward function is returned.
        """
        # A line is "zeroed" when the power entering from either end equals the line losses.
        c1 = np.abs(self.net.res_line.p_to_kw - self.net.res_line.pl_kw) < eps
        c2 = np.abs(self.net.res_line.p_from_kw - self.net.res_line.pl_kw) < eps
        zeroed_lines = np.logical_or(c1.values, c2.values)
        # Type 1 Reward: count of lines with zero-net-flow
        if type == 1:
            # dtype=float: np.float was removed from NumPy (1.20 deprecation).
            self.reward_val = np.sum(zeroed_lines, dtype=float)
        # Type 2 Reward: count of nodes not pulling power from grid
        elif type in [2, 3, 4]:
            # Remove zeroed lines and reward connected components that are
            # isolated from every external-grid bus (virtual microgrids).
            graph_new = deepcopy(self.graph)
            for line_idx, zeroed in enumerate(zeroed_lines):
                if zeroed:
                    v = self.net.line.from_bus[line_idx]
                    w = self.net.line.to_bus[line_idx]
                    graph_new.removeEdge(v, w)
            self.reward_val = 0
            ext_connections = self.net.ext_grid.bus.values
            num_vmgs = 0
            for subgraph in graph_new.connectedComponents():
                if not np.any([item in subgraph for item in ext_connections]):
                    self.reward_val += len(subgraph)
                    num_vmgs += 1
            self.reward_val *= num_vmgs
        elif type == 5:
            pass

        # Add distance function: a continuous penalty on residual line flow
        if type == 3:
            line_flow_values = np.maximum(np.abs(self.net.res_line.p_to_kw),
                                          np.abs(self.net.res_line.p_from_kw)) - self.net.res_line.pl_kw
            self.reward_val -= self.config.cont_reward_lambda * np.linalg.norm(line_flow_values, 1)
        elif type == 4:
            # Same penalty but saturated at 1.0 per line.
            line_flow_values = np.maximum(np.abs(self.net.res_line.p_to_kw),
                                          np.abs(self.net.res_line.p_from_kw)) - self.net.res_line.pl_kw
            self.reward_val -= self.config.cont_reward_lambda * np.sum(np.minimum(np.abs(line_flow_values),
                                                                      1.0 * np.ones(np.shape(line_flow_values)[0])))
        # Costs for running batteries: capital cost amortized over 1000 full cycles.
        cap_costs = self.net.storage.cap_cost
        max_e = self.net.storage.max_e_kwh
        min_e = self.net.storage.min_e_kwh
        betas = cap_costs / (2 * 1000 * (max_e - min_e))
        incurred_costs = betas * np.abs(self.net.storage.p_kw)
        for c in incurred_costs:
            self.reward_val -= c
        return self.reward_val
314 |
if __name__ == "__main__":
    # Quick manual check: build the POC environment and advance one step.
    model = NetModel(env_name='Six_Bus_POC')
    model.config.reward_epsilon = 0.1
    model.reset()
    model.step([-0.02, -0.02])
321 |
--------------------------------------------------------------------------------
/virtual_microgrids/utils/__init__.py:
--------------------------------------------------------------------------------
# Public re-exports for the utils package.
from virtual_microgrids.utils.replay_buffer import ReplayBuffer
from virtual_microgrids.utils.linear_schedule import LinearSchedule
from virtual_microgrids.utils.log_schedule import LogSchedule
# (A duplicate of the next import was removed.)
from virtual_microgrids.utils.orstein_uhlenbeck_action_noise import OrnsteinUhlenbeckActionNoise
from virtual_microgrids.utils.graph import Graph
7 |
--------------------------------------------------------------------------------
/virtual_microgrids/utils/general.py:
--------------------------------------------------------------------------------
1 | """The base of this code was prepared for a homework by course staff for CS234
2 | at Stanford, Winter 2019."""
3 |
4 | import time
5 | import sys
6 | import logging
7 | import numpy as np
8 | from collections import deque
9 | import matplotlib
10 | matplotlib.use('agg')
11 | import matplotlib.pyplot as plt
12 |
13 |
def export_plot(ys, ylabel, title, filename):
    """
    Save a line plot of *ys* against training episode to *filename*.

    Args:
        ys: (list) of float / int values, one per training episode
        ylabel: (string) label for the vertical axis
        title: (string) title placed above the plot
        filename: (string) path the figure is written to
    """
    plt.figure()
    episodes = range(len(ys))
    plt.plot(episodes, ys)
    plt.title(title)
    plt.ylabel(ylabel)
    plt.xlabel("Training Episode")
    plt.savefig(filename)
    plt.close()
29 |
30 |
def get_logger(filename):
    """
    Return a logger instance that records DEBUG-and-above messages to a file.

    The file handler is attached to the returned named logger (not the root
    logger, as before) and only added once, so repeated calls no longer stack
    duplicate handlers and produce duplicated log lines.

    Args:
        filename: (string) path of the log file to append to
    Returns:
        logging.Logger: the logger named 'logger'
    """
    logger = logging.getLogger('logger')
    logger.setLevel(logging.DEBUG)
    # Console output goes through the root logger; basicConfig is a no-op if
    # the root logger is already configured.
    logging.basicConfig(format='%(message)s', level=logging.DEBUG)
    # Guard: only attach a file handler the first time this is called.
    if not logger.handlers:
        handler = logging.FileHandler(filename)
        handler.setLevel(logging.DEBUG)
        handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
        logger.addHandler(handler)
    return logger
43 |
44 |
class Progbar(object):
    """Progbar class copied from keras (https://github.com/fchollet/keras/)

    Displays a progress bar.
    Small edit : added strict arg to update
    # Arguments
        target: Total number of steps expected.
        interval: Minimum visual progress update interval (in seconds).
    """

    def __init__(self, target, width=30, verbose=1, discount=0.9):
        self.width = width          # character width of the bar itself
        self.target = target        # total number of steps expected
        self.sum_values = {}        # per-metric [weighted sum, weight]
        self.exp_avg = {}           # per-metric exponential moving average
        self.unique_values = []     # metric names in first-seen order
        self.start = time.time()
        self.total_width = 0
        self.seen_so_far = 0
        self.verbose = verbose
        self.discount = discount    # decay factor for the exp_avg metrics

    def update(self, current, values=None, exact=None, strict=None, exp_avg=None):
        """
        Updates the progress bar.
        # Arguments
            current: Index of current step.
            values: List of tuples (name, value_for_last_step).
                The progress bar will display averages for these values.
            exact: List of tuples (name, value_for_last_step).
                The progress bar will display these values directly.
            strict: List of tuples (name, value) displayed verbatim.
            exp_avg: List of tuples (name, value) folded into an exponential
                moving average with decay `discount`.
        """
        # Fixed: mutable default arguments replaced by None sentinels.
        values = values or []
        exact = exact or []
        strict = strict or []
        exp_avg = exp_avg or []

        for k, v in values:
            if k not in self.sum_values:
                self.sum_values[k] = [v * (current - self.seen_so_far), current - self.seen_so_far]
                self.unique_values.append(k)
            else:
                # Weight each reported value by the number of steps it covers.
                self.sum_values[k][0] += v * (current - self.seen_so_far)
                self.sum_values[k][1] += (current - self.seen_so_far)
        for k, v in exact:
            if k not in self.sum_values:
                self.unique_values.append(k)
            self.sum_values[k] = [v, 1]
        for k, v in strict:
            if k not in self.sum_values:
                self.unique_values.append(k)
            self.sum_values[k] = v
        for k, v in exp_avg:
            if k not in self.exp_avg:
                self.exp_avg[k] = v
            else:
                self.exp_avg[k] *= self.discount
                self.exp_avg[k] += (1 - self.discount) * v

        self.seen_so_far = current

        now = time.time()
        if self.verbose == 1:
            prev_total_width = self.total_width
            sys.stdout.write("\b" * prev_total_width)
            sys.stdout.write("\r")

            numdigits = int(np.floor(np.log10(self.target))) + 1
            barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)
            bar = barstr % (current, self.target)
            prog = float(current) / self.target
            prog_width = int(self.width * prog)
            if prog_width > 0:
                bar += ('=' * (prog_width - 1))
                # '>' marks an in-progress bar; '=' a finished one.
                if current < self.target:
                    bar += '>'
                else:
                    bar += '='
            bar += ('.' * (self.width - prog_width))
            bar += ']'
            sys.stdout.write(bar)
            self.total_width = len(bar)

            if current:
                time_per_unit = (now - self.start) / current
            else:
                time_per_unit = 0
            eta = time_per_unit * (self.target - current)
            info = ''
            if current < self.target:
                info += ' - ETA: %ds' % eta
            else:
                info += ' - %ds' % (now - self.start)
            for k in self.unique_values:
                if type(self.sum_values[k]) is list:
                    info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))
                else:
                    info += ' - %s: %s' % (k, self.sum_values[k])

            # Bug fix: dict.iteritems() is Python 2 only and raised
            # AttributeError on Python 3 whenever exp_avg was used.
            for k, v in self.exp_avg.items():
                info += ' - %s: %.4f' % (k, v)

            self.total_width += len(info)
            # Pad with spaces so a shorter line fully overwrites the previous one.
            if prev_total_width > self.total_width:
                info += ((prev_total_width - self.total_width) * " ")

            sys.stdout.write(info)
            sys.stdout.flush()

            if current >= self.target:
                sys.stdout.write("\n")

        if self.verbose == 2:
            if current >= self.target:
                info = '%ds' % (now - self.start)
                for k in self.unique_values:
                    info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))
                sys.stdout.write(info + "\n")

    def add(self, n, values=None):
        """Advance the bar by n steps, logging *values* for that increment."""
        self.update(self.seen_so_far + n, values)
162 |
--------------------------------------------------------------------------------
/virtual_microgrids/utils/graph.py:
--------------------------------------------------------------------------------
class Graph:
    """Undirected graph on vertices 0..V-1 stored as adjacency lists."""

    def __init__(self, V):
        # One (initially empty) neighbor list per vertex.
        self.V = V
        self.adj = [[] for _ in range(V)]

    def DFSUtil(self, temp, v, visited):
        """Depth-first search from *v*, appending reached vertices to *temp*.

        Marks every vertex it reaches in *visited* and returns *temp*
        extended with those vertices in DFS pre-order.
        """
        visited[v] = True
        temp.append(v)
        # Recurse into each neighbor that has not been marked yet,
        # preserving adjacency-list order.
        for neighbor in self.adj[v]:
            if not visited[neighbor]:
                temp = self.DFSUtil(temp, neighbor, visited)
        return temp

    def addEdge(self, v, w):
        """Insert the undirected edge (v, w)."""
        self.adj[v].append(w)
        self.adj[w].append(v)

    def removeEdge(self, v, w):
        """Delete the undirected edge (v, w)."""
        self.adj[v].remove(w)
        self.adj[w].remove(v)

    def connectedComponents(self):
        """Return the connected components as lists of vertices.

        Components are ordered by their smallest vertex; vertices within a
        component appear in DFS pre-order.
        """
        visited = [False] * self.V
        components = []
        for start in range(self.V):
            if not visited[start]:
                components.append(self.DFSUtil([], start, visited))
        return components
45 |
--------------------------------------------------------------------------------
/virtual_microgrids/utils/linear_schedule.py:
--------------------------------------------------------------------------------
class LinearSchedule(object):
    """Linearly anneals the exploration parameter epsilon over nsteps frames."""

    def __init__(self, eps_begin, eps_end, nsteps):
        """
        Args:
            eps_begin: initial exploration
            eps_end: end exploration
            nsteps: number of steps between the two values of eps
        """
        self.epsilon = eps_begin
        self.eps_begin = eps_begin
        self.eps_end = eps_end
        self.nsteps = nsteps

    def update(self, t):
        """
        Updates epsilon by linear interpolation between eps_begin and eps_end.

        Args:
            t: int
                frame number
        """
        # Robustness fix: nsteps == 0 previously raised ZeroDivisionError on
        # update(0); with no ramp the schedule jumps straight to eps_end.
        if self.nsteps <= 0 or t > self.nsteps:
            self.epsilon = self.eps_end
        else:
            self.epsilon = self.eps_begin + (self.eps_end - self.eps_begin) * t / self.nsteps
--------------------------------------------------------------------------------
/virtual_microgrids/utils/log_schedule.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
class LogSchedule(object):
    """Anneals epsilon geometrically (i.e. linearly in log10 space)."""

    def __init__(self, eps_begin, eps_end, nsteps):
        """
        Args:
            eps_begin: initial exploration (must be > 0 for log10)
            eps_end: end exploration (must be > 0 for log10)
            nsteps: number of steps between the two values of eps
        """
        self.epsilon = eps_begin
        self.eps_begin = eps_begin
        self.eps_end = eps_end
        # Interpolation happens between the base-10 exponents of the bounds.
        self.begin_exp = np.log10(self.eps_begin)
        self.end_exp = np.log10(self.eps_end)
        self.nsteps = nsteps

    def update(self, t):
        """
        Updates epsilon along a log-linear path from eps_begin to eps_end.

        Args:
            t: int
                frame number
        """
        # Robustness fix: nsteps == 0 previously raised ZeroDivisionError on
        # update(0); with no ramp the schedule jumps straight to eps_end.
        if self.nsteps <= 0 or t > self.nsteps:
            self.epsilon = self.eps_end
        else:
            inter_exp = self.begin_exp + (self.end_exp - self.begin_exp) * t / self.nsteps
            self.epsilon = np.power(10, inter_exp)
33 |
--------------------------------------------------------------------------------
/virtual_microgrids/utils/orstein_uhlenbeck_action_noise.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
class OrnsteinUhlenbeckActionNoise(object):
    """
    Implementation of an Ornstein–Uhlenbeck process for exploration. Based on:
    https://github.com/openai/baselines/blob/master/baselines/ddpg/noise.py
    https://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab
    """

    def __init__(self, mu, sigma=0.1, theta=.15, dt=1e-2, x0=None):
        # Long-run mean, volatility, mean-reversion rate, time step, and
        # optional initial state of the process.
        self.theta = theta
        self.mu = mu
        self.sigma = sigma
        self.dt = dt
        self.x0 = x0
        self.reset()

    def __call__(self):
        """Advance the process one step and return the new noise sample."""
        drift = self.theta * (self.mu - self.x_prev) * self.dt
        diffusion = self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)
        x = self.x_prev + drift + diffusion
        self.x_prev = x
        return x

    def reset(self):
        """Restart the process at x0, or at zeros when no x0 was given."""
        if self.x0 is None:
            self.x_prev = np.zeros_like(self.mu)
        else:
            self.x_prev = self.x0

    def __repr__(self):
        return 'OrnsteinUhlenbeckActionNoise(mu={}, sigma={})'.format(self.mu,
                                                                      self.sigma)
--------------------------------------------------------------------------------
/virtual_microgrids/utils/replay_buffer.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from collections import deque
3 | import random
4 |
class ReplayBuffer(object):
    """
    A data structure to hold the replay buffer. Based on this blog post:
    https://pemami4911.github.io/blog/2016/08/21/ddpg-rl.html
    """

    def __init__(self, buffer_size):
        # Capacity cap; once reached, the oldest experience is evicted first.
        self.buffer_size = buffer_size
        self.count = 0
        self.buffer = deque()

    def add(self, s, a, r, t, s2):
        """Store one (state, action, reward, terminal, next_state) transition."""
        experience = (s, a, r, t, s2)
        if self.count >= self.buffer_size:
            # At capacity: make room by dropping the oldest entry.
            self.buffer.popleft()
        else:
            self.count += 1
        self.buffer.append(experience)

    def size(self):
        """Return the number of experiences currently stored."""
        return self.count

    def sample_batch(self, batch_size):
        '''
        batch_size specifies the number of experiences to add
        to the batch. If the replay buffer has less than batch_size
        elements, simply return all of the elements within the buffer.
        Generally, you'll want to wait until the buffer has at least
        batch_size elements before beginning to sample from it.
        '''
        sample_size = min(self.count, batch_size)
        batch = random.sample(self.buffer, sample_size)

        # Split the sampled tuples into one array per field.
        s_batch = np.array([exp[0] for exp in batch])
        a_batch = np.array([exp[1] for exp in batch])
        r_batch = np.array([exp[2] for exp in batch])
        t_batch = np.array([exp[3] for exp in batch])
        s2_batch = np.array([exp[4] for exp in batch])

        return s_batch, a_batch, r_batch, t_batch, s2_batch

    def clear(self):
        """Empty the buffer and reset the count."""
        self.buffer.clear()
        self.count = 0
--------------------------------------------------------------------------------