├── data
├── __init__.py
├── HSCExampleInputs
│ ├── area
│ │ ├── area.cpg
│ │ ├── area.dbf
│ │ ├── area.shp
│ │ ├── area.shx
│ │ └── area.prj
│ ├── GasGridPoints
│ │ ├── GasGridPoints.cpg
│ │ ├── GasGridPoints.dbf
│ │ ├── GasGridPoints.shp
│ │ ├── GasGridPoints.shx
│ │ ├── GasGridPoints.prj
│ │ └── GasGridPoints.qpj
│ ├── StreetLines
│ │ ├── StreetLines.cpg
│ │ ├── StreetLines.shp
│ │ ├── StreetLines.shx
│ │ └── StreetLines.prj
│ ├── randomSources
│ │ ├── randomSources.cpg
│ │ ├── randomSources.shp
│ │ ├── randomSources.shx
│ │ ├── randomSources.dbf
│ │ └── randomSources.prj
│ └── randomFuelingStations
│ │ ├── randomFuelingStations.cpg
│ │ ├── randomFuelingStations.dbf
│ │ ├── randomFuelingStations.shp
│ │ ├── randomFuelingStations.shx
│ │ └── randomFuelingStations.prj
├── icons
│ ├── LH2.png
│ ├── RES.png
│ ├── Grid.png
│ ├── None.png
│ ├── GH2-Tank.png
│ ├── LH2-Ship.png
│ ├── LH2-Tank.png
│ ├── LH2Pump.png
│ ├── LOHCPump.png
│ ├── Nothing2.png
│ ├── Pipeline.png
│ ├── Compressor.png
│ ├── GH2-Cavern.png
│ ├── GH2-Truck.png
│ ├── LH2-Truck.png
│ ├── LOHC (NG).png
│ ├── LOHC-Ship.png
│ ├── LOHC-Tank.png
│ ├── LOHC-Truck.png
│ ├── Electrolyzer.png
│ ├── Evaporation.png
│ ├── GH2 (Trailer).png
│ ├── GH2-Truck(H2).png
│ ├── Hydrogenation.png
│ ├── Liquefaction.png
│ ├── LohcStationEl.png
│ ├── LohcStationH2.png
│ ├── None - Kopie.png
│ ├── Dehydrogenation.png
│ ├── GH2 (Pipeline).png
│ ├── GH2 (Speicher).png
│ ├── GH2-DepletedGas.png
│ ├── LH2-Tank(scaled).png
│ └── LohcStationDiesel.png
├── Scenarios.xlsx
├── ImportTablesTechnologies.xlsx
└── matplotlibrcEES.mplstyle
├── HIM
├── workflow
│ ├── __init__.py
│ ├── scenarioExample.py
│ ├── truckOptimization.py
│ ├── preprocFunc.py
│ └── workflowFunctionsClean.py
├── __init__.py
├── utils.py
├── optiSetup.py
├── dataHandling.py
├── hscAbstract.py
└── hscTotal.py
├── MANIFEST.in
├── apps
└── results
│ ├── SupplyChain.png
│ └── FigureComparison.png
├── environment.yml
├── .gitattributes
├── setup.py
├── .gitignore
├── LICENSE.txt
└── README.md
/data/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/HIM/workflow/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/data/HSCExampleInputs/area/area.cpg:
--------------------------------------------------------------------------------
1 | ISO-8859-1
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | recursive-include HIM *.py
2 | recursive-include data *
--------------------------------------------------------------------------------
/data/HSCExampleInputs/GasGridPoints/GasGridPoints.cpg:
--------------------------------------------------------------------------------
1 | ISO-8859-1
--------------------------------------------------------------------------------
/data/HSCExampleInputs/StreetLines/StreetLines.cpg:
--------------------------------------------------------------------------------
1 | ISO-8859-1
--------------------------------------------------------------------------------
/data/HSCExampleInputs/randomSources/randomSources.cpg:
--------------------------------------------------------------------------------
1 | ISO-8859-1
--------------------------------------------------------------------------------
/data/HSCExampleInputs/randomFuelingStations/randomFuelingStations.cpg:
--------------------------------------------------------------------------------
1 | ISO-8859-1
--------------------------------------------------------------------------------
/data/icons/LH2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/LH2.png
--------------------------------------------------------------------------------
/data/icons/RES.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/RES.png
--------------------------------------------------------------------------------
/data/Scenarios.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/Scenarios.xlsx
--------------------------------------------------------------------------------
/data/icons/Grid.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/Grid.png
--------------------------------------------------------------------------------
/data/icons/None.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/None.png
--------------------------------------------------------------------------------
/data/icons/GH2-Tank.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/GH2-Tank.png
--------------------------------------------------------------------------------
/data/icons/LH2-Ship.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/LH2-Ship.png
--------------------------------------------------------------------------------
/data/icons/LH2-Tank.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/LH2-Tank.png
--------------------------------------------------------------------------------
/data/icons/LH2Pump.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/LH2Pump.png
--------------------------------------------------------------------------------
/data/icons/LOHCPump.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/LOHCPump.png
--------------------------------------------------------------------------------
/data/icons/Nothing2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/Nothing2.png
--------------------------------------------------------------------------------
/data/icons/Pipeline.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/Pipeline.png
--------------------------------------------------------------------------------
/data/icons/Compressor.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/Compressor.png
--------------------------------------------------------------------------------
/data/icons/GH2-Cavern.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/GH2-Cavern.png
--------------------------------------------------------------------------------
/data/icons/GH2-Truck.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/GH2-Truck.png
--------------------------------------------------------------------------------
/data/icons/LH2-Truck.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/LH2-Truck.png
--------------------------------------------------------------------------------
/data/icons/LOHC (NG).png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/LOHC (NG).png
--------------------------------------------------------------------------------
/data/icons/LOHC-Ship.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/LOHC-Ship.png
--------------------------------------------------------------------------------
/data/icons/LOHC-Tank.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/LOHC-Tank.png
--------------------------------------------------------------------------------
/data/icons/LOHC-Truck.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/LOHC-Truck.png
--------------------------------------------------------------------------------
/apps/results/SupplyChain.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/apps/results/SupplyChain.png
--------------------------------------------------------------------------------
/data/icons/Electrolyzer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/Electrolyzer.png
--------------------------------------------------------------------------------
/data/icons/Evaporation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/Evaporation.png
--------------------------------------------------------------------------------
/data/icons/GH2 (Trailer).png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/GH2 (Trailer).png
--------------------------------------------------------------------------------
/data/icons/GH2-Truck(H2).png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/GH2-Truck(H2).png
--------------------------------------------------------------------------------
/data/icons/Hydrogenation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/Hydrogenation.png
--------------------------------------------------------------------------------
/data/icons/Liquefaction.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/Liquefaction.png
--------------------------------------------------------------------------------
/data/icons/LohcStationEl.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/LohcStationEl.png
--------------------------------------------------------------------------------
/data/icons/LohcStationH2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/LohcStationH2.png
--------------------------------------------------------------------------------
/data/icons/None - Kopie.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/None - Kopie.png
--------------------------------------------------------------------------------
/data/icons/Dehydrogenation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/Dehydrogenation.png
--------------------------------------------------------------------------------
/data/icons/GH2 (Pipeline).png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/GH2 (Pipeline).png
--------------------------------------------------------------------------------
/data/icons/GH2 (Speicher).png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/GH2 (Speicher).png
--------------------------------------------------------------------------------
/data/icons/GH2-DepletedGas.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/GH2-DepletedGas.png
--------------------------------------------------------------------------------
/apps/results/FigureComparison.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/apps/results/FigureComparison.png
--------------------------------------------------------------------------------
/data/icons/LH2-Tank(scaled).png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/LH2-Tank(scaled).png
--------------------------------------------------------------------------------
/data/icons/LohcStationDiesel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/icons/LohcStationDiesel.png
--------------------------------------------------------------------------------
/data/HSCExampleInputs/area/area.dbf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/HSCExampleInputs/area/area.dbf
--------------------------------------------------------------------------------
/data/HSCExampleInputs/area/area.shp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/HSCExampleInputs/area/area.shp
--------------------------------------------------------------------------------
/data/HSCExampleInputs/area/area.shx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/HSCExampleInputs/area/area.shx
--------------------------------------------------------------------------------
/data/ImportTablesTechnologies.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/ImportTablesTechnologies.xlsx
--------------------------------------------------------------------------------
/HIM/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = ['aidFunctions', 'dataHandling', 'hscClasses', 'optiSetup',
2 | 'plotFunctions', 'hscTotal']
--------------------------------------------------------------------------------
/data/HSCExampleInputs/StreetLines/StreetLines.shp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/HSCExampleInputs/StreetLines/StreetLines.shp
--------------------------------------------------------------------------------
/data/HSCExampleInputs/StreetLines/StreetLines.shx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/HSCExampleInputs/StreetLines/StreetLines.shx
--------------------------------------------------------------------------------
/data/HSCExampleInputs/GasGridPoints/GasGridPoints.dbf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/HSCExampleInputs/GasGridPoints/GasGridPoints.dbf
--------------------------------------------------------------------------------
/data/HSCExampleInputs/GasGridPoints/GasGridPoints.shp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/HSCExampleInputs/GasGridPoints/GasGridPoints.shp
--------------------------------------------------------------------------------
/data/HSCExampleInputs/GasGridPoints/GasGridPoints.shx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/HSCExampleInputs/GasGridPoints/GasGridPoints.shx
--------------------------------------------------------------------------------
/data/HSCExampleInputs/randomSources/randomSources.shp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/HSCExampleInputs/randomSources/randomSources.shp
--------------------------------------------------------------------------------
/data/HSCExampleInputs/randomSources/randomSources.shx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/HSCExampleInputs/randomSources/randomSources.shx
--------------------------------------------------------------------------------
/data/HSCExampleInputs/randomFuelingStations/randomFuelingStations.dbf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/HSCExampleInputs/randomFuelingStations/randomFuelingStations.dbf
--------------------------------------------------------------------------------
/data/HSCExampleInputs/randomFuelingStations/randomFuelingStations.shp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/HSCExampleInputs/randomFuelingStations/randomFuelingStations.shp
--------------------------------------------------------------------------------
/data/HSCExampleInputs/randomFuelingStations/randomFuelingStations.shx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FZJ-IEK3-VSA/HIM/HEAD/data/HSCExampleInputs/randomFuelingStations/randomFuelingStations.shx
--------------------------------------------------------------------------------
/data/HSCExampleInputs/randomSources/randomSources.dbf:
--------------------------------------------------------------------------------
1 | w
2 | A GEWICHT N
0.745268855755945 1.183619376605455 1.065627573330314 1.418932176596074 0.777320817359646 1.447560712633287 1.359717181502158 0.808762003174311 1.186028264628351 1.010495285363056
--------------------------------------------------------------------------------
/data/HSCExampleInputs/area/area.prj:
--------------------------------------------------------------------------------
1 | PROJCS["UTM_Zone_32_Northern_Hemisphere",GEOGCS["GCS_GRS 1980(IUGG, 1980)",DATUM["D_unknown",SPHEROID["GRS80",6378137,298.257222101]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",9],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["Meter",1]]
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: him
2 | channels:
3 | - conda-forge
4 | - bryanwweber
5 | dependencies:
6 | - olefile
7 | - python=3.6
8 | - conda-forge::geopandas
9 | - bryanwweber::coolprop
10 | - conda-forge::pyomo
11 | - conda-forge::jupyter
12 | - matplotlib
13 | - networkx=1
14 | - xlsxwriter
15 | - pillow
16 | - xlrd
17 | - openpyxl
18 | - pip
19 | - descartes
20 | - scikit-learn
21 | - pip:
22 |     - .
--------------------------------------------------------------------------------
/data/HSCExampleInputs/StreetLines/StreetLines.prj:
--------------------------------------------------------------------------------
1 | PROJCS["UTM_Zone_32_Northern_Hemisphere",GEOGCS["GCS_GRS 1980(IUGG, 1980)",DATUM["D_unknown",SPHEROID["GRS80",6378137,298.257222101]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",9],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["Meter",1]]
--------------------------------------------------------------------------------
/data/HSCExampleInputs/GasGridPoints/GasGridPoints.prj:
--------------------------------------------------------------------------------
1 | PROJCS["UTM_Zone_32_Northern_Hemisphere",GEOGCS["GCS_GRS 1980(IUGG, 1980)",DATUM["D_unknown",SPHEROID["GRS80",6378137,298.257222101]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",9],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["Meter",1]]
--------------------------------------------------------------------------------
/data/HSCExampleInputs/randomSources/randomSources.prj:
--------------------------------------------------------------------------------
1 | PROJCS["UTM_Zone_32_Northern_Hemisphere",GEOGCS["GCS_GRS 1980(IUGG, 1980)",DATUM["D_unknown",SPHEROID["GRS80",6378137,298.257222101]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",9],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["Meter",1]]
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
4 | # Custom for Visual Studio
5 | *.cs diff=csharp
6 |
7 | # Standard to msysgit
8 | *.doc diff=astextplain
9 | *.DOC diff=astextplain
10 | *.docx diff=astextplain
11 | *.DOCX diff=astextplain
12 | *.dot diff=astextplain
13 | *.DOT diff=astextplain
14 | *.pdf diff=astextplain
15 | *.PDF diff=astextplain
16 | *.rtf diff=astextplain
17 | *.RTF diff=astextplain
18 |
--------------------------------------------------------------------------------
/data/HSCExampleInputs/randomFuelingStations/randomFuelingStations.prj:
--------------------------------------------------------------------------------
1 | PROJCS["UTM_Zone_32_Northern_Hemisphere",GEOGCS["GCS_GRS 1980(IUGG, 1980)",DATUM["D_unknown",SPHEROID["GRS80",6378137,298.257222101]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",9],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["Meter",1]]
--------------------------------------------------------------------------------
/data/HSCExampleInputs/GasGridPoints/GasGridPoints.qpj:
--------------------------------------------------------------------------------
1 | PROJCS["UTM Zone 32, Northern Hemisphere",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9108"]],AUTHORITY["EPSG","4326"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",9],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["Meter",1]]
2 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 | setup(
4 | name='HIM',
5 | version='1.0',
6 | description='Evaluation of hydrogen infrastructure via geospatial datatools.',
7 |
8 | url='http://www.fz-juelich.de/iek/iek-3/DE/Home/home_node.html',
9 | author='Markus Reuss',
10 | author_email='m.reuss@fz-juelich.de',
11 | license='',
12 | include_package_data=True,
13 | packages=find_packages(),
14 | install_requires=[
15 | 'CoolProp',
16 | 'geopandas',
17 | 'jupyter',
18 | 'networkx<2.0',
19 | 'openpyxl',
20 | 'Pyomo',
21 | 'xlrd',
22 | 'XlsxWriter',
23 | 'descartes',
24 | ]
25 | )
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Windows image file caches
2 | Thumbs.db
3 | ehthumbs.db
4 |
5 | # Folder config file
6 | Desktop.ini
7 |
8 | # Recycle Bin used on file shares
9 | $RECYCLE.BIN/
10 |
11 | # Windows Installer files
12 | *.cab
13 | *.msi
14 | *.msm
15 | *.msp
16 |
17 | # Windows shortcuts
18 | *.lnk
19 |
20 | # =========================
21 | # Operating System Files
22 | # =========================
23 |
24 | # OSX
25 | # =========================
26 |
27 | .DS_Store
28 | .AppleDouble
29 | .LSOverride
30 |
31 | # Thumbnails
32 | ._*
33 |
34 | # Files that might appear in the root of a volume
35 | .DocumentRevisions-V100
36 | .fseventsd
37 | .Spotlight-V100
38 | .TemporaryItems
39 | .Trashes
40 | .VolumeIcon.icns
41 |
42 | # Directories potentially created on remote AFP share
43 | env/*
44 | *.egg-info/
45 | .ipynb_checkpoints
46 | */.ipynb_checkpoints/*
47 | __pycache__
48 | .AppleDB
49 | .AppleDesktop
50 | Network Trash Folder
51 | Temporary Items
52 | .apdisk
53 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (C) 2016-2019 Markus Reuss (FZJ IEK-3), Thomas Grube (FZJ IEK-3), Martin Robinius (FZJ IEK-3), Detlef Stolten (FZJ IEK-3)
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/HIM/workflow/scenarioExample.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Wed Nov 22 11:00:59 2017
4 |
5 | @author: m.reuss
6 | """
7 | from HIM.utils import *
8 | import os
9 | #%%
10 | dataPath=path.join(os.path.dirname(os.path.realpath(__file__)),"..","..","data")
11 | dfTable = pd.read_excel(path.join(dataPath,"ImportTablesTechnologies.xlsx"), sheet_name=None, index_col=0)
12 | dfTable["General"]=pd.read_excel(path.join(dataPath,"Scenarios.xlsx"), sheet_name="General", index_col=0).append(pd.read_excel(path.join(dataPath,"Scenarios.xlsx"), sheet_name="Example", index_col=0))
13 | specificDemand=dfTable["General"].loc["specificDemand","General"]
14 | mileage=dfTable["General"].loc["mileage","General"]
15 | speed={"motorway":dfTable["General"].loc["truckSpeedHighway","General"],
16 | "urban": dfTable["General"].loc["truckSpeedRural","General"],
17 | "beeline": dfTable["General"].loc["truckSpeed","General"]}
18 | beeline=[False, False]
19 | weight="time"
20 | clustering=bool(dfTable["General"].loc["clustering","General"])
21 | clusterSize=dfTable["General"].loc["clusterSize","General"]
22 | #targetFS=9968
23 | targetCapacityFS=dfTable["General"].loc["targetStationSize","General"]
24 | fuelingMax_kg_d=dfTable["General"].loc["utilization Station","General"]*targetCapacityFS
25 | detourFactorPipeline=dfTable["General"].loc["detourFactorPipeline","General"]
26 | detourFactorTruck=dfTable["General"].loc["detourFactorTruck","General"]
27 | #%%
28 | weightFtoF=detourFactorPipeline*pd.Series([1., 1.25, 1.25, 1.5, 1.5, 1.5, 1.75, 1.75, 2.],
29 | index=["1to1","1to2","2to1","2to2","1to3","3to1","2to3","3to2","3to3"])
30 |
31 | crs={'ellps': 'GRS80', 'no_defs': True, 'proj': 'utm', 'units': 'm', 'zone': 32}
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # HIM - Hydrogen Infrastructure Model for Python
4 |
5 | HSC offers the functionality to calculate predefined hydrogen supply chain architectures with respect to spatial resolution for the analysis of explicit nationwide infrastructures.
6 |
7 | ## Installation and application
8 |
9 | First, download and install [Anaconda](https://www.anaconda.com/). Then, clone a local copy of this repository to your computer with git
10 |
11 |     git clone https://github.com/FZJ-IEK3-VSA/HIM.git
12 |
13 | or download it directly. Move to the folder
14 |
15 | cd HIM
16 |
17 | and install the required Python environment via
18 |
19 | conda env create -f environment.yml
20 |
21 | To determine the optimal pipeline design, a mathematical optimization solver is required. [Gurobi](https://www.gurobi.com/) is used as default solver, but other optimization solvers can be used as well.
22 |
23 | ## Examples
24 |
25 | A number of [**examples**](apps/) shows the capabilities of HIM. Either for [abstract costs analyses](apps/Example%20-%20Abstract%20analysis%20without%20geoferenced%20locations.ipynb)
26 |
27 |
28 |
29 | or for [exact infrastructure design](apps/Example%20Hydrogen%20Supply%20Chain%20Cost%20Generation.ipynb)
30 |
31 |
32 |
33 |
34 | ## License
35 |
36 | MIT License
37 |
38 | Copyright (C) 2016-2019 Markus Reuss (FZJ IEK-3), Thomas Grube (FZJ IEK-3), Martin Robinius (FZJ IEK-3), Detlef Stolten (FZJ IEK-3)
39 |
40 | You should have received a copy of the MIT License along with this program.
41 | If not, see https://opensource.org/licenses/MIT
42 |
43 | ## About Us
44 |
45 |
46 | We are the [Techno-Economic Energy Systems Analysis](http://www.fz-juelich.de/iek/iek-3/EN/Forschung/_Process-and-System-Analysis/_node.html) department at the [Institute of Energy and Climate Research: Electrochemical Process Engineering (IEK-3)](http://www.fz-juelich.de/iek/iek-3/EN/Home/home_node.html) belonging to the [Forschungszentrum Jülich](https://www.fz-juelich.de/). Our interdisciplinary department's research is focusing on energy-related process and systems analyses. Data searches and system simulations are used to determine energy and mass balances, as well as to evaluate performance, emissions and costs of energy systems. The results are used for performing comparative assessment studies between the various systems. Our current priorities include the development of energy strategies, in accordance with the German Federal Government’s greenhouse gas reduction targets, by designing new infrastructures for sustainable and secure energy supply chains and by conducting cost analysis studies for integrating new technologies into future energy market frameworks.
47 |
48 |
49 | ## Acknowledgment
50 |
51 | This work was supported by the Helmholtz Association under the Joint Initiative ["Energy System 2050 – A Contribution of the Research Field Energy"](https://www.helmholtz.de/en/research/energy/energy_system_2050/).
52 |
53 |
54 |
--------------------------------------------------------------------------------
/HIM/workflow/truckOptimization.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Fri Dec 1 13:06:45 2017
4 |
5 | @author: Markus Reuß and Paris Dimos
6 | """
7 | from HIM import dataHandling as sFun
8 | from HIM import optiSetup as optiFun
9 | import pandas as pd
10 | import networkx as nx
11 | import numpy as np
12 | import geopandas as gpd
13 | #%%
def shortest_path_calculation(nxGraph, dfSources, dfSinks, weight="weightedDistance", distribution=False):
    '''
    Create the shortest-path distance matrix from all sources to all sinks.

    input:
        nxGraph: networkx graph with all edges and weights
        dfSources: geopandas dataframe with source locations (index = node ids)
        dfSinks: geopandas dataframe with sink locations (index = node ids)
        weight: str, name of the edge attribute used as Dijkstra weight
        distribution: boolean; if True, for every source only the sinks
            assigned to it via the sinks' "ClusterID" column are evaluated
    return:
        pandas dataframe with the distance matrix: one column per source,
        one row per sink; unreachable sinks get the sentinel distance
    '''
    # Large sentinel distance for sinks that cannot be reached from a source.
    UNREACHABLE_DIST = 10000000
    print("-------Calculation time for shortest_path_length begins-------")
    # One single-source Dijkstra per source node.
    lengthsPerSource = {}
    for sourceID in dfSources.index:
        lengthsPerSource[sourceID] = nx.shortest_path_length(nxGraph, source=sourceID, weight=weight)
    distances = {}
    sinkIDs = dfSinks.index
    for sourceID in dfSources.index:
        distances[sourceID] = {}
        if distribution:
            # Restrict to the sinks clustered to this source.
            sinkIDs = dfSinks.loc[dfSinks["ClusterID"] == sourceID].index
        for FSID in sinkIDs:
            try:
                distances[sourceID][FSID] = lengthsPerSource[sourceID][FSID]
            except KeyError:
                # Sink not reachable from this source: use the sentinel so
                # the optimization never selects this connection.
                distances[sourceID][FSID] = UNREACHABLE_DIST
    return pd.DataFrame(distances)
44 | #%%
def fillValues(GeoDataFrame, Graph, coords, Source, weight, name="F"):
    '''
    calculate all Source to sink paths

    Maps the optimized source-to-sink connections back onto the street
    graph, accumulating the edge attributes of `Graph` along every shortest
    path and summing the transported capacity on every used street edge.

    Parameters:
        GeoDataFrame: optimized connections with columns "inputID",
            "targetID" and "capacity"
        Graph: street network; must expose `.attr` (list of edge attribute
            names, see PipeNetWork.initializeEdges)
        coords: dataframe with the point geometry per node id
        Source: dataframe of source locations (index = node ids)
        weight: edge attribute used for the shortest-path search
        name: substring identifying sink ids (default "F"); decides the
            source->sink lookup direction when writing results back
    Returns:
        (GeoDataFrame with accumulated attributes plus "beeDist"/"detour",
         GeoDataFrame of used street edges with summed capacity)
    '''
    import shapely as shp

    # K_new: coarse source-to-sink network; K: street-level edges actually
    # used by the shortest paths, with accumulated capacities.
    K_new=nx.Graph()
    K=nx.Graph()

    # Collect all non-ID edge attributes that shall be summed along paths.
    attributes=[]
    for attr in Graph.attr:
        if "ID" not in attr:
            attributes.append(attr)
    dicAttr={attr:{} for attr in attributes}

    for x, y, z in zip(GeoDataFrame['inputID'],GeoDataFrame['targetID'], GeoDataFrame["capacity"]):
        K_new.add_edge(x, y, weight=z)

    for source in Source.index:
        # Shortest paths from this source to every reachable node.
        paths=nx.shortest_path(Graph, source=source, weight=weight)
        for attr in attributes:
            dicAttr[attr][source]={}
        for target in K_new[source]:
            path=paths[target]
            data=K_new[source][target]
            for i in range(len(path)-1):
                # Accumulate the connection's capacity on every street edge
                # of the path; the except branch creates the edge on first
                # use. NOTE(review): add_edge with a positional attribute
                # dict is networkx 1.x API — confirm the pinned version.
                try:
                    for key in data.keys():
                        K[path[i]][path[i+1]][key]=K[path[i]][path[i+1]][key]+data[key]
                except: K.add_edge(path[i], path[i+1], data)
                for attr in attributes:
                    if "ID" in attr:
                        continue
                    # Sum the street-edge attribute along the path; the
                    # except branch initializes the entry on the first edge.
                    try: dicAttr[attr][source][target]=dicAttr[attr][source][target]+Graph[path[i]][path[i+1]][attr]
                    except: dicAttr[attr][source][target]=Graph[path[i]][path[i+1]][attr]

    # Write the accumulated attributes back to the connection dataframe.
    # Lookup direction depends on which end is the sink (id contains `name`).
    dicList={attr:[] for attr in attributes}
    for s, t in zip(GeoDataFrame['inputID'],GeoDataFrame['targetID']):
        if name in t:
            for attr in attributes:
                dicList[attr].append(dicAttr[attr][s][t])
        else:
            for attr in attributes:
                dicList[attr].append(dicAttr[attr][t][s])

    for attr in dicAttr.keys():
        GeoDataFrame[attr]=dicList[attr]

    # Beeline distance [km] and detour factor of the routed distance.
    GeoDataFrame["beeDist"]=GeoDataFrame.length/1000
    GeoDataFrame["detour"]=GeoDataFrame["weightedDistance"]/GeoDataFrame["beeDist"]
    #
    # Build a GeoDataFrame of all used street edges with their geometry.
    # NOTE(review): .ix was removed in modern pandas (use .loc) — this code
    # relies on a legacy pandas version.
    y=np.array(K.edges())
    inputIDarr=y[:,0]
    targetIDarr=y[:,1]
    LinesIn=coords.ix[list(inputIDarr)]
    LinesOut=coords.ix[list(targetIDarr)]

    EdgesCoords=gpd.GeoDataFrame(index=K.edges())
    EdgesCoords["inputCoords"]=LinesIn.geometry.values
    EdgesCoords["outputCoords"]=LinesOut.geometry.values
    EdgesCoords["geometry"]=""

    EdgesCoords["geometry"]=[shp.geometry.LineString([values["inputCoords"], values["outputCoords"]]) for key, values in EdgesCoords.iterrows()]
    EdgesTotalLine=gpd.GeoDataFrame(EdgesCoords["geometry"])
    # Accumulated capacity, scaled by 1000 (presumably kt -> t; TODO confirm).
    EdgesTotalLine["capacity"]=pd.Series(nx.get_edge_attributes(K,"weight"))*1000

    return GeoDataFrame, EdgesTotalLine
112 | #%%
def truckOptimization(Graph, coords, dfSource, dfFueling, weight="weightedDistance", distribution=False, name="F"):
    '''
    Calculate the optimal truck routes from sources to fueling stations.

    Builds the source-to-sink distance matrix, optimizes the transport
    problem as an LP, and maps the resulting connections back onto the
    street network.

    Returns:
        (routed edges with accumulated attributes,
         used street edges with summed capacity,
         production per source node)
    '''
    distMatrix = shortest_path_calculation(Graph, dfSource, dfFueling, weight=weight, distribution=distribution)
    # Flatten the matrix to one row per (source, sink) pair, dropping
    # missing combinations.
    edgeTable = pd.DataFrame(distMatrix.unstack(level=0)).reset_index()
    edgeTable = edgeTable[~edgeTable[0].isnull()]
    edgeTable.columns = ["inputID", "targetID", weight]
    edgeTable.index = [(row["inputID"], row["targetID"]) for idx, row in edgeTable.iterrows()]
    # Set up and solve the LP transport problem on the coarse network.
    truckNet = optiFun.PipeNetWork()
    truckNet.initializeEdges(edgeTable)
    nx.set_node_attributes(truckNet, "productionMax", dfSource.H2ProdCap_kt.to_dict())
    nx.set_node_attributes(truckNet, "demand", dfFueling["H2Demand_kt_F"].to_dict())
    truckNet.initOptiTruck(weight=weight)
    truckNet.optModel()
    prodNodes = truckNet.getProductionNodes()
    usedEdges = truckNet.getEdgesAsGpd(coords, analysisType="truck")
    routedEdges, streetEdges = fillValues(usedEdges, Graph, coords, dfSource, weight, name=name)
    return (routedEdges, streetEdges, prodNodes)
--------------------------------------------------------------------------------
/HIM/utils.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Fri Nov 11 13:08:41 2016
4 |
5 | @author: m.reuss
6 | """
7 |
8 | import errno
9 | from datetime import datetime
10 | import numpy as np
11 | import geopandas as gpd
12 | import os
13 | import networkx as nx
14 | import pandas as pd
15 | import time
16 | import sys
17 | from IPython.display import display
18 | from scipy.cluster import vq
19 | from shapely.geometry import Point
20 | from shapely.geometry import LineString
21 | from cycler import cycler
22 | import os.path as path
23 | import matplotlib.pyplot as plt
24 | from matplotlib import gridspec
25 | import matplotlib as mpl
# Pick the matplotlib style sheet matching the installed matplotlib major
# version (the 1.x rc format differs from later versions).
if mpl.__version__[0]=="1":
    STYLEPATH=path.join(path.dirname(__file__),"..","data","matplotlibrcEES.mplstyle")
else:
    STYLEPATH=path.join(path.dirname(__file__),"..","data","matplotlibrc36.mplstyle")
plt.style.use(STYLEPATH)
decimalSep=","
#CP.set_config_string(
#    CP.ALTERNATIVE_REFPROP_PATH,
#    'C:\\Program Files (x86)\\REFPROP\\')
# Silence divide-by-zero / invalid-value warnings in numpy array math.
np.seterr(divide='ignore', invalid='ignore')

#%%
# Plot background colors used throughout the package.
bg_area="#fff0de"
bg_lines=(128/255, 130/255, 133/255)

# Tell matplotlib to use the locale we set above
import locale
# Set to the system locale to get a comma decimal separator where applicable
locale.setlocale(locale.LC_ALL, '')
plt.rcParams['axes.formatter.use_locale'] = True
# Default text-box style for plot annotations.
bbox=dict(facecolor='white', alpha=0.75, edgecolor='none',pad=0.15)
#%%
# Repeat the color cycle with different hatches for bar plots.
colorNumber=5
hatches=[" "]*colorNumber+["*"]*colorNumber+["\\"]*colorNumber+["/"]*colorNumber
colorList=plt.rcParams['axes.prop_cycle'].by_key()['color']
51 |
52 | #%%
53 | import types
54 | from matplotlib.backend_bases import GraphicsContextBase, RendererBase
55 | from matplotlib.collections import LineCollection
56 |
class GC(GraphicsContextBase):
    # Graphics context that defaults to round line cap style — a purely
    # cosmetic tweak for the plots produced by this module.
    def __init__(self):
        super().__init__()
        self._capstyle = 'round'

def custom_new_gc(self):
    # Factory returning the customized graphics context above.
    return GC()

# Monkey-patch matplotlib so every renderer uses the rounded cap style.
RendererBase.new_gc = types.MethodType(custom_new_gc, RendererBase)
66 | #%%H2 Constant Values at Normal Conditions
class H2Values (object):
    '''Physical constants of hydrogen at normal conditions.'''

    def __init__(self):
        # Molar mass of hydrogen [kg/kmol]
        self.M = 2.01588
        # Specific gas constant of hydrogen [J/(kg K)]
        self.R_i = 4124.48269490247
        # Density of hydrogen at normal conditions [kg/m^3]
        self.roh_n = 0.089882
        # Real gas (compressibility) factor at normal conditions [-]
        self.Z_n = 1.00062387922965
        # Lower heating value [MJ/kg]
        self.LHV_n = 119.833493175241
        # Gravitational acceleration [m/s^2]
        self.g = 9.81
        # Normal temperature [K]
        self.T_n = 273.15
        # Normal pressure [Pa]
        self.p_n = 1.01325e5
81 |
82 | #%% Supporting Functions
def parabel(para, p):
    '''Evaluate the fitted pressure correlation para[0]/1e6 * (p/1e5)**para[1] + offset (p in Pa).'''
    scaled = (para[0] / 1e6) * (p / 1e5) ** para[1]
    return scaled + 8.79676122460001e-06  # constant offset of the fit
86 |
def square(para, p):
    '''Evaluate the fitted power law para[0] * p**para[1] + para[2].'''
    prefactor, exponent, offset = para[0], para[1], para[2]
    return prefactor * p ** exponent + offset
89 |
90 |
def getDiaPress(demArr, distArr, p_1, p_min):
    '''
    Calculation of Pipeline diameter and end pressure.

    Input Parameter:
        demArr: demand array in kg/day, shape (1, nDemand)
        distArr: distance array in km, length nDist
        p_1: input pressure at start of pipeline in bar, shape (nDist, nDemand)
        p_min: minimal output pressure in bar (scalar)
    Returns:
        diameter: selected diameter class in mm, shape (nDist, nDemand)
        p_2: outlet pressure in bar, shape (nDist, nDemand)
        w_1: flow velocity at the pipe entrance in m/s, shape (nDist, nDemand)

    deprecated!!! Not used anymore!!!
    '''
    # Initialization: fitted property correlations at 20 degC (pressure in Pa)
    V_para_parabel_20 = np.array([0.000125571318762396, 1.50162559878953])  # dyn. viscosity fit
    D_para_square_20 = np.array(
        [3.24859458677547e-06, 0.912591206027628, -0.166716162511868])      # density fit
    Z_para_square_20 = np.array(
        [3.23101813258933e-09, 1.03880932425032, 1.00048097412768])         # real gas factor fit
    T_m = np.array(20 + 273.15)  # mean gas temperature [K]
    k = 0.02  # pipe roughness [mm]

    # Vanessas Diameter: discrete catalogue of average diameter classes [m]
    DK = np.array([0.1063, 0.1307, 0.1593, 0.2065, 0.3063, 0.3356,
                   0.3844, 0.432, 0.4796, 0.527, 0.578, 0.625, 0.671, 0.722, 0.7686, 0.814,
                   0.864, 0.915, 0.96, 1.011, 1.058, 1.104, 1.155, 1.249, 1.342, 1.444,
                   1.536])

    # Less diameter variances
    # DK = np.linspace(0.1, 1.0, 901)  # Average class of diameter

    propH2 = H2Values()

    demHourly = demArr / 24 / 3600  # kg/day to kg/s
    distMeter = distArr * 1000      # km to m
    p_1 = p_1 * 1e5                 # bar to Pa
    ### Calculation ###
    res1 = len(distArr)
    res2 = demArr.shape[1]

    p_2 = np.zeros((res1, res2))
    w_1 = np.zeros((res1, res2))
    Re_1 = np.zeros((res1, res2))
    diameter = np.ones((res1, res2)) / 1000
    # Index of the diameter class currently tested per case.
    # Fix: integer dtype — the original float-valued array raised an
    # IndexError when used to index DK on modern numpy.
    x = np.zeros((res1, res2), dtype=int)
    for i1 in range(demArr.shape[1]):
        for i2 in range(len(distArr)):
            # Enlarge the diameter class until the outlet pressure satisfies
            # the minimal pressure constraint (or the catalogue is exhausted).
            while p_2[i2, i1] <= p_min * 1e5 or np.isnan(p_2[i2, i1]):
                # Calculation of norm volume flow
                V_n = demHourly[0, i1] / propH2.roh_n  # m^3/s (i.N.)
                # Calculation of input density
                roh_1 = square(D_para_square_20, p_1[i2, i1])  # kg/m3
                # Volume flow at entrance
                V_1 = demHourly[0, i1] / roh_1  # m^3/s
                # Inner diameter of the pipeline.
                # Fix: clamp the index — the original evaluated DK[x] before
                # the exhaustion check below, so DK[len(DK)] raised an
                # IndexError instead of ever reaching the break.
                diameter[i2, i1] = DK[min(x[i2, i1], len(DK) - 1)]  # m
                # Velocity at the entrance
                w_1[i2, i1] = V_1 / (np.pi * diameter[i2, i1]**2 / 4)
                # Dynamic viscosity
                eta_1 = parabel(V_para_parabel_20, p_1[i2, i1])  # Pa*s
                # Kinematic viscosity
                nu_1 = eta_1 / roh_1  # m^2/s
                # Reynolds number
                Re_1[i2, i1] = w_1[i2, i1] * diameter[i2, i1] / nu_1  # -
                # Friction coefficient: blend between laminar (64/Re) and
                # turbulent correlation, weighted by alpha
                alpha = np.e**(-1 * np.e**(6.75 - 0.0025 * Re_1[i2, i1]))
                lambda_1 = (64 / Re_1[i2, i1]) * (1 - alpha) + alpha * (-2 * np.log10((2.7 * (np.log10(
                    Re_1[i2, i1]))**1.2 / Re_1[i2, i1]) + (k / (3.71 * 1000 * diameter[i2, i1]))))**(-2)  # -
                # Simplification: Re_1 = Re_m --> lambda_m = lambda_1
                lambda_m = lambda_1
                # Friction pressure drop of the pipe: kg/(m s^2)=Pa
                C_1 = (lambda_1 * distMeter[i2] * roh_1 *
                       w_1[i2, i1]**2) / (diameter[i2, i1] * 2)
                # First-guess outlet pressure
                p_20 = p_1[i2, i1] - C_1  # Pa
                # Assumption: average pressure ~ outlet pressure guess
                p_m0 = p_20  # [Pa]
                # Assumption: avg real gas factor = f(p_m0)
                Z_m = square(Z_para_square_20, p_m0)
                # Compressibility factor
                K_m = Z_m / propH2.Z_n
                # Pipe characteristics
                C = (lambda_m * 16 * propH2.roh_n * T_m * propH2.p_n *
                     K_m) / (np.pi**2 * propH2.T_n)  # kg Pa/m^3
                # Outlet pressure
                p_2[i2, i1] = (p_1[i2, i1]**2 - (C * distMeter[i2]
                                                 * V_n**2) / diameter[i2, i1]**5)**0.5  # Pa

                if x[i2, i1] == len(DK):
                    # Catalogue exhausted: accept the largest diameter.
                    break
                if p_2[i2, i1] <= p_min * 1e5 or np.isnan(p_2[i2, i1]):
                    x[i2, i1] += 1
                # Start the remaining (longer/larger) cases at the current
                # class index — diameters only grow with distance and demand.
                x[i2:, i1:] = x[i2, i1]

    p_2 = p_2 * 1e-5            # Pa -> bar
    diameter = diameter * 1000  # m -> mm
    return diameter, p_2, w_1  # Diameter in mm and outlet pressure in bar
191 |
192 | # %% Compressor Energy Demand per Stage (with isentropic coefficient)
193 | # direct Method from Tietze
194 |
195 |
def getCompressionEnergyStage(p_1, p_2, T_1, eta_is_S):
    '''
    Calculate the specific hydrogen compression energy of one compressor stage.

    Input:
        p_1: inlet pressure [bar]
        p_2: outlet pressure [bar]
        T_1: inlet temperature [K]
        eta_is_S: isentropic efficiency [-]
    Returns:
        [w_spec, T_2]: specific compression work (kJ/kg divided by 3600)
        and the real outlet temperature [K]
    '''
    import CoolProp.CoolProp as CP
    fluid = 'HYDROGEN'
    # fluid='REFPROP::HYDROGEN'
    # Inlet entropy at (T_1, p_1); pressures converted bar -> Pa.
    s_inlet = CP.PropsSI('S', 'T', T_1, 'P', p_1 * 100000, fluid)
    # Inlet enthalpy at the same state.
    h_inlet = CP.PropsSI('H', 'P', p_1 * 100000, 'S', s_inlet, fluid)
    # Outlet enthalpy of the ideal (isentropic) compression.
    h_outlet_is = CP.PropsSI('H', 'P', p_2 * 100000, 'S', s_inlet, fluid)
    # isentropic temperature
    # T_2_is = CP.PropsSI('T','P',p_2*100,'S',s,fluid); # [T]=K, [P]=kPa, [h]=J/kg
    # Isentropic specific work [kJ/kg], corrected by the isentropic
    # efficiency to obtain the real specific work.
    w_is = (h_outlet_is - h_inlet) / 1000
    w_real = w_is / eta_is_S
    w_spec = w_real / 3600
    # Real outlet enthalpy [J/kg] and the resulting outlet temperature [K].
    h_outlet = w_real * 1000 + h_inlet
    T_2 = CP.PropsSI('T', 'P', p_2 * 100000, 'H', h_outlet, fluid)
    return [w_spec, T_2]
229 |
230 | # %% CompressionDemand
231 |
232 |
def getCompressionEnergy(
        p_1,
        p_2,
        demand,
        T_1=20,
        eta_isen=0.88,
        eta_mech=0.95,
        p_highlow_max=2.1,
        max_stages=2):
    '''
    calculation of specific hydrogen compression energy
    Input:
        p_1: inlet pressure in bar
        p_2: outlet pressure in bar
        demand: hydrogen demand in kg/day (assumed iterable/array — the
            motor-efficiency fit below iterates over the shaft power)
        T_1: inlet temperature in degC (default 20)
        eta_isen: isentropic efficiency per stage (default 0.88)
        eta_mech: mechanical efficiency (default 0.95)
        p_highlow_max: maximal pressure ratio per stage (default 2.1)
        max_stages: maximal number of compressor stages (default 2)
    Returns:
        (w_total, P_el): specific compression work and electric power
        demand per case; both 0 if no compression is needed (p_2 <= p_1)
    '''
    # eta_isen=0.92-p_2/880*(0.24)
    if p_2 > p_1:

        # Number of stages needed to keep each stage below the maximal
        # pressure ratio, capped at max_stages.
        compressorStages = np.log(p_2 / p_1) / np.log(p_highlow_max)
        compressorStages = np.ceil(compressorStages).astype(int)

        if compressorStages > max_stages:
            compressorStages = max_stages
        # Equal pressure ratio for every stage.
        p_highlow = (p_2 / p_1)**(1 / compressorStages)
        # Initialize
        p_in = np.zeros(compressorStages)
        p_out = np.zeros(compressorStages)
        T_in = np.zeros(compressorStages)
        T_out = np.zeros(compressorStages)
        w_stage = np.zeros(compressorStages)
        # Stagedependent Calculation
        for i in range(compressorStages):
            if i == 0:
                p_in[i] = p_1
                T_in[i] = 273.15 + T_1
            else:
                # Intercooling to 40 degC between stages.
                p_in[i] = p_out[i - 1]
                T_in[i] = 273.15 + 40.
            p_out[i] = p_in[i] * p_highlow
            w_stage[i], T_out[i] = getCompressionEnergyStage(p_in[i],
                                                             p_out[i],
                                                             T_in[i],
                                                             eta_isen)
        T_out = T_out - 273.15
        # Total specific shaft work including mechanical losses.
        w_mech = np.sum(w_stage) / eta_mech
        P_shaft = demand * w_mech / 24
        #print(np)
        # Empirical motor efficiency as a polynomial in log(shaft power),
        # capped at 0.98.
        eta_motor = np.array([8e-5 * np.log(x)**4 - 0.0015 * np.log(x)**3 + 0.0061 * np.log(x)**2 + 0.0311 * np.log(x) + 0.7617 for x in P_shaft])
        eta_motor[eta_motor>0.98]=0.98
        P_el = P_shaft / eta_motor
        w_total = w_mech / eta_motor
    else:
        # No compression required.
        w_total = 0
        P_el = 0

    return w_total, P_el
292 |
293 | #%%
def getFuelingStationInvest(n1, C1,
                            C0=212,
                            n0=400,
                            I0=600000,
                            scale=0.7,
                            learning=0.06,
                            installationFactor=1.3):
    '''
    Scaling function for the average investment cost of hydrogen refueling
    stations, combining unit scaling and a learning curve.

    Inputs:
        n1: number of stations
        C1: capacity of the desired station
        C0: capacity of the base-case station
        n0: number of stations in the base case
        I0: investment cost per station in the base case
        scale: scaling factor for increased station size
        learning: learning rate (doubling cumulative capacity cuts cost by
            this fraction)
        installationFactor: multiplier for installation cost
    Returns:
        average investment cost per station
    '''
    # Cumulative installed capacities of base case and target case.
    cumCap0 = C0 * n0
    cumCap1 = n1 * C1
    # Learning exponent: log2(1 - learning rate).
    beta = np.log2(1 - learning)
    sizeFactor = (C1 / C0) ** scale
    learningFactor = ((cumCap1 / cumCap0) ** beta) / (1 + beta)
    return I0 * sizeFactor * learningFactor * installationFactor
318 | #%%
def createFolder(foldername):
    '''
    Create a timestamped subdirectory below *foldername* and return its path.

    The directory name is the current date/time (YYYY-mm-dd_HH-MM-SS).
    An already-existing directory is not treated as an error.
    '''
    stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    mydir = os.path.join(foldername, stamp)
    try:
        os.makedirs(mydir)
    except OSError as err:
        # Only "already exists" is acceptable; re-raise anything else.
        if err.errno != errno.EEXIST:
            raise
    return mydir
329 |
330 | #%%
def setupLogger(PATHLOG):
    '''
    Create a module-level logger that writes INFO messages to the file PATHLOG.

    Returns the configured logging.Logger instance.
    '''
    import logging
    logger = logging.getLogger(__name__)
    # Fix: without an explicit level the logger inherits WARNING from the
    # root logger, so every .info() call (used e.g. by reduceNetworkSize)
    # was silently dropped and the log file stayed empty.
    logger.setLevel(logging.INFO)
    handler = logging.FileHandler(PATHLOG)
    handler.setLevel(logging.INFO)
    logger.addHandler(handler)
    return logger
338 | #%%
339 | def testBoolColumns(gdf):
340 | '''
341 | checking geopandas dataframe for non save-able datatypes in columns
342 | converts boolean to int (False:0, True:1)
343 | '''
344 | df = gdf.copy()
345 | for colname, coltype in df.dtypes.items():
346 | if coltype == 'bool':
347 | df[colname] = df[colname].astype('int')
348 | if coltype == 'object':
349 | if colname=="geometry": continue
350 | else: df[colname] = df[colname].astype('str')
351 | df.columns=[str(x) for x in df.columns]
352 | return df
353 | #%%
def builtDemDistArray(demMax, distMax, res):
    '''
    Build matching demand and distance grids for the abstract analysis.

    res may be a single resolution or a list [resDist, resDem].
    Returns (demArr, distArr): demand as a (1, resDem) row vector and
    distance as a (resDist, 1) column vector, each linearly spaced from
    max/res to max.
    '''
    if type(res) == list:
        resDist, resDem = res[0], res[1]
    else:
        resDist = resDem = res

    # Distance axis as column vector: distMax/resDist ... distMax.
    distArr = np.array([np.linspace(distMax / resDist, distMax, resDist)]).T
    # Demand axis as row vector: demMax/resDem ... demMax.
    demArr = np.array([np.linspace(demMax / resDem, demMax, resDem)])
    return demArr, distArr
--------------------------------------------------------------------------------
/HIM/optiSetup.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Tue Apr 18 08:22:38 2017
4 |
5 | @author: M.Reuss
6 | """
7 |
8 | from HIM.utils import *
9 | from HIM import dataHandling as sFun
10 | #from HyInfraGis.HyPyOpt.expansion_compute_physical_parameters_adjusted import expansion_compute_physical_param as getPhys
11 |
12 | import pyomo.environ as pyomo
13 | import pyomo.opt as opt
14 | from scipy import sqrt
# Reuse the logger of the executing script when this module is imported;
# fall back to a local file logger when run standalone or when the main
# module does not define one.
if __name__ =="__main__":
    logger=setupLogger("logfile.txt")
else:
    try: from __main__ import logger
    except:
        logger=setupLogger("logfile.txt")
21 |
22 | #%%
23 | class PipeNetWork(nx.Graph):
24 | '''
25 | Class derived from networkx.Graph(), that defines the Network, i.e. the pipeline or truck network.
26 |
27 | It is able to transform the defined pipelineGraph into a pyomo concrete model
28 | and can optimize the capacities of the Graph.
29 | '''
    def __init__(self,**kwargs):
        '''
        Initializes an energy system network.

        Optional keyword arguments:
            pos: node positions for plotting (default False)
            minSpanTree: True if a minimum spanning tree was applied
                (default False)
        '''
        nx.Graph.__init__(self)
        self.optimized = False  # NOTE(review): never updated in the visible code — TODO confirm
        self.pos = kwargs.get('pos', False)
        self.labelpos = None
        self.M = None  # pyomo ConcreteModel, built by the init* methods
        self.minSpanTree=kwargs.get('minSpanTree', False)
41 |
    def initializeEdges(self, distMat):
        '''
        Initializes the edges of the network based on the distance matrix given.

        distMat: dataframe with one row per edge; must contain the columns
            "inputID" and "targetID"; every column is attached as an edge
            attribute and its name remembered in self.attr.
        '''
        self.add_edges_from(list(zip(distMat["inputID"], distMat["targetID"])),
                            capacity=0)
        # Remember the available edge attributes for later export.
        # NOTE(review): set_edge_attributes(G, name, values) is the
        # networkx 1.x argument order — confirm the pinned version.
        self.attr=[]
        for attr in distMat.columns:
            nx.set_edge_attributes(self, attr, distMat[attr].to_dict())
            self.attr.append(attr)

        # Every node starts without production and demand.
        self.add_nodes_from(nodes = self.nodes(),
                            productionMax = 0,
                            production = 0,
                            demand = 0)
57 |
    def useMinSpanTree(self, **kwargs):
        '''
        apply minimum spanning tree on Graph: every edge that is not part
        of the minimum spanning tree is removed from this network.
        kwargs:
            weight: attribute name to weight on (default "weight")
        '''
        weight=kwargs.get("weight", "weight")
        G=nx.minimum_spanning_tree(self, weight=weight)
        # Collect the edges missing from the spanning tree, then drop them.
        # NOTE(review): edges_iter() is networkx 1.x API.
        delEdges=[]
        for edge in self.edges_iter():
            if not G.has_edge(edge[0],edge[1]):
                delEdges.append((edge[0],edge[1]))
        self.remove_edges_from(delEdges)
        self.minSpanTree=True
72 |
    def reduceNetworkSize(self):
        '''
        input
            NetworkXGraph: networkX Graph you want to reduce
            attribute: The attribute of old edges that shall be merged with new edge
        output
            reduced NetworkX Graph
        __________
        Eliminate Nodes with two Neighbors for network reduction

        Only street nodes (id contains "G") are removed; their two incident
        edges are merged into one whose attributes are the element-wise sum.
        Repeats until the node count no longer changes.
        '''
        x=self.number_of_nodes()
        y=0
        logger.info("Number of nodes before Reduction: " + str(x))
        while x!=y:
            x=y
            for node in self.nodes():
                if "G" in node:
                    # NOTE(review): neighbors() returning a list is
                    # networkx 1.x behaviour (2.x returns an iterator).
                    neighbors=self.neighbors(node)
                    if len(neighbors)==2:

                        # Merge both incident edges: sum every attribute.
                        attrList=self[node][neighbors[0]].keys()
                        attrDic={}
                        for attr in attrList:
                            attrDic[attr]=self[node][neighbors[0]][attr] + self[node][neighbors[1]][attr]

                        self.add_edge(neighbors[0], neighbors[1], attr_dict=attrDic)

                        self.remove_node(node)
            y=self.number_of_nodes()
        logger.info("Number of nodes after Reduction: " + str(y))
103 |
    def reduceNetworkSizeWater(self):
        '''
        input
            NetworkXGraph: networkX Graph you want to reduce
            attribute: The attribute of old edges that shall be merged with new edge
        output
            reduced NetworkX Graph
        __________
        Eliminate Nodes with two Neighbors for network reduction

        Same algorithm as reduceNetworkSize, but applied to water nodes
        (id contains "Water") instead of street nodes.
        '''
        x=self.number_of_nodes()
        y=0
        logger.info("Number of nodes before Reduction: " + str(x))
        while x!=y:
            x=y
            for node in self.nodes():
                if "Water" in node:
                    # NOTE(review): neighbors() returning a list is
                    # networkx 1.x behaviour (2.x returns an iterator).
                    neighbors=self.neighbors(node)
                    if len(neighbors)==2:

                        # Merge both incident edges: sum every attribute.
                        attrList=self[node][neighbors[0]].keys()
                        attrDic={}
                        for attr in attrList:
                            attrDic[attr]=self[node][neighbors[0]][attr] + self[node][neighbors[1]][attr]

                        self.add_edge(neighbors[0], neighbors[1], attr_dict=attrDic)

                        self.remove_node(node)
            y=self.number_of_nodes()
        logger.info("Number of nodes after Reduction: " + str(y))
134 |
135 | def checkFeasibility(self):
136 | '''
137 | simple comparison between Sources and Sinks
138 | '''
139 | prod=np.ceil(sum(nx.get_node_attributes(self, "productionMax").values())*1000)/1000
140 | dem=sum(nx.get_node_attributes(self, "demand").values())
141 | if prod shall the optimization solve linear?
152 | cistBinary --> binary cost element
153 | costLinear --> Linear cost element
154 | BigM --> maximal capacity
155 | SmallM --> minimal capacity
156 |
157 | Optional keyword arguments:
158 | '''
159 |
160 | self.checkFeasibility()
161 | self.M=pyomo.ConcreteModel()
162 | self.M.isLP=kwargs.get('linear', False)
163 | self.M.isInt=kwargs.get('integer', False)
164 | self.M.A=kwargs.get('costBinary', 474.77)
165 | self.M.B=kwargs.get('costLinear', 1.3695)
166 | self.M.QPa=kwargs.get('QPa', -0.0002)
167 | self.M.QPb=kwargs.get('QPb', 2.1487)
168 | self.M.BigM=kwargs.get('BigM', 3000)
169 | self.M.SmallM=kwargs.get('SmallM', 0)
170 | self.M.weight=kwargs.get('weight', 'weightedDistance')
171 | self.M.treeStructure=kwargs.get("tree", False)
172 |
173 | self.M.edgeIndex=self.edges()
174 | self.M.edgeIndexForw=[(node1,node2) for (node1,node2) in self.M.edgeIndex]
175 | self.M.edgeIndexBack=[(node2,node1) for (node1,node2) in self.M.edgeIndex]
176 |
177 | self.M.edgeIndexFull = self.M.edgeIndexForw
178 | self.M.edgeIndexFull.extend(self.M.edgeIndexBack)
179 |
180 | self.M.edgeLength=nx.get_edge_attributes(self, self.M.weight)
181 |
182 | self.M.edgeCapacity=pyomo.Var(self.M.edgeIndex)
183 | if not self.M.isLP:
184 | self.M.edgeCapacityInt=pyomo.Var(self.M.edgeIndex, within=pyomo.Binary)
185 | self.M.edgeFlow=pyomo.Var(self.M.edgeIndexFull, within = pyomo.NonNegativeReals)
186 | self.M.nodeIndex=self.nodes()
187 |
188 | self.M.nodeProductionMax=nx.get_node_attributes(self,"productionMax")
189 | self.M.nodeDemand=nx.get_node_attributes(self,"demand")
190 |
191 | self.M.nodeNeighbour = {self.M.nodeIndex[i]: neighbours for i,neighbours in enumerate(self.adjacency_list()) }
192 |
193 | self.M.nodeProduction=pyomo.Var(self.M.nodeIndex, within = pyomo.NonNegativeReals)
194 |
195 | #Constraints
196 | def massRule(M, n_index):
197 | return (sum(M.edgeFlow[(n_neighbour,n_index)] - M.edgeFlow[(n_index,n_neighbour)] for n_neighbour in M.nodeNeighbour[n_index])+M.nodeProduction[n_index]-M.nodeDemand[n_index])==0
198 | self.M.massCon = pyomo.Constraint(self.M.nodeIndex, rule=massRule)
199 |
200 | if not self.M.isLP:
201 | def maxRule(M, e_index0, e_index1):
202 | return M.edgeCapacity[(e_index0,e_index1)]<= M.BigM*M.edgeCapacityInt[(e_index0, e_index1)]
203 | self.M.maxCon = pyomo.Constraint(self.M.edgeIndex, rule=maxRule)
204 |
205 | def capacityRule(M, e_index0, e_index1):
206 | return M.edgeFlow[(e_index0,e_index1)] + M.edgeFlow[(e_index1,e_index0)] <= M.edgeCapacity[(e_index0,e_index1)]
207 | self.M.capacityCon = pyomo.Constraint(self.M.edgeIndex, rule=capacityRule)
208 |
209 | def prodRule(M, n_index):
210 | return M.nodeProduction[n_index]<=M.nodeProductionMax[n_index]
211 | self.M.prodCon=pyomo.Constraint(self.M.nodeIndex, rule=prodRule)
212 |
213 | if self.M.treeStructure:
214 | self.M.nodeInt=pyomo.Var(self.M.nodeIndex, within=pyomo.Binary)
215 |
216 | def nodeRule1(M, n_index):
217 | return (sum(M.edgeFlow[(n_neighbour,n_index)] + M.edgeFlow[(n_index,n_neighbour)] for n_neighbour in M.nodeNeighbour[n_index]))<=2*M.BigM*M.nodeInt[n_index]
218 | def nodeRule2(M, n_index):
219 | return (sum(M.edgeFlow[(n_neighbour,n_index)] + M.edgeFlow[(n_index,n_neighbour)] for n_neighbour in M.nodeNeighbour[n_index]))>=M.nodeInt[n_index]
220 |
221 | self.M.nodeCon1 = pyomo.Constraint(self.M.nodeIndex, rule=nodeRule1)
222 | self.M.nodeCon2 = pyomo.Constraint(self.M.nodeIndex, rule=nodeRule2)
223 |
224 |
225 |
226 | def treeRule(M):
227 | return sum(intVal for intVal in M.edgeCapacityInt.values()) <= sum(intVal for intVal in M.nodeInt.values())-1
228 | self.M.treeCon=pyomo.Constraint(rule=treeRule)
229 |
230 | #Objective Function
231 | if self.M.isLP:
232 | def objRule(M):
233 | return (sum(M.edgeCapacity[e_index]*M.edgeLength[e_index] for e_index in M.edgeIndex))
234 |
235 | elif self.M.isInt:
236 | def objRule(M):
237 | return (sum(M.edgeCapacityInt[e_index]*M.edgeLength[e_index] for e_index in M.edgeIndex))
238 | else:
239 | def objRule(M):
240 | return (sum((M.edgeCapacityInt[e_index]*M.A + M.edgeCapacity[e_index]*M.B)*M.edgeLength[e_index] for e_index in M.edgeIndex))
241 | self.M.obj=pyomo.Objective(rule=objRule)
242 | #_____________________________________________________________________________
    def initOptiTruck(self,**kwargs):
        '''
        Transforms the defined Graph into Concrete Model of Pyomo:
        a pure LP transport problem for truck routing (flow conservation,
        capacity and production limits, distance-weighted objective).

        kwargs:
            weight: edge attribute used as objective cost
                (default 'weightedDistance')
        '''
        self.checkFeasibility()
        self.M=pyomo.ConcreteModel()
        self.M.isLP=True
        self.M.weight=kwargs.get('weight', 'weightedDistance')

        # Each undirected edge gets a forward and a backward directed
        # flow variable.
        self.M.edgeIndex=self.edges()
        self.M.edgeIndexForw=[(node1,node2) for (node1,node2) in self.M.edgeIndex]
        self.M.edgeIndexBack=[(node2,node1) for (node1,node2) in self.M.edgeIndex]

        self.M.edgeIndexFull = self.M.edgeIndexForw
        self.M.edgeIndexFull.extend(self.M.edgeIndexBack)

        self.M.edgeCapacity=pyomo.Var(self.M.edgeIndex)

        self.M.edgeFlow=pyomo.Var(self.M.edgeIndexFull, within = pyomo.NonNegativeReals)

        self.M.edgeLength=nx.get_edge_attributes(self, self.M.weight)

        self.M.nodeIndex=self.nodes()

        self.M.nodeProductionMax=nx.get_node_attributes(self,"productionMax")

        self.M.nodeDemand=nx.get_node_attributes(self,"demand")

        # NOTE(review): adjacency_list() is networkx 1.x API.
        self.M.nodeNeighbour = {self.M.nodeIndex[i]: neighbours for i,neighbours in enumerate(self.adjacency_list()) }

        self.M.nodeProduction=pyomo.Var(self.M.nodeIndex, within = pyomo.NonNegativeReals)

        #Constraints
        # Mass balance per node: inflow - outflow + production - demand == 0
        def massRule(M, n_index):
            return (sum(M.edgeFlow[(n_neighbour,n_index)] - M.edgeFlow[(n_index,n_neighbour)] for n_neighbour in M.nodeNeighbour[n_index])+M.nodeProduction[n_index]-M.nodeDemand[n_index])==0
        self.M.massCon = pyomo.Constraint(self.M.nodeIndex, rule=massRule)

        # Edge capacity covers the flow in both directions.
        def capacityRule(M, e_index0, e_index1):
            return M.edgeFlow[(e_index0,e_index1)] + M.edgeFlow[(e_index1,e_index0)] <= M.edgeCapacity[(e_index0,e_index1)]
        self.M.capacityCon = pyomo.Constraint(self.M.edgeIndex, rule=capacityRule)

        # Production is limited by the node's maximal production.
        def prodRule(M, n_index):
            return M.nodeProduction[n_index]<=M.nodeProductionMax[n_index]
        self.M.prodCon=pyomo.Constraint(self.M.nodeIndex, rule=prodRule)

        #Objective Function
        # Minimize transported capacity times edge length.
        if self.M.isLP:
            def objRule(M):
                return (sum(M.edgeCapacity[e_index]*M.edgeLength[e_index] for e_index in M.edgeIndex))

        self.M.obj=pyomo.Objective(rule=objRule)
297 |
    # Optimization
    def optModel(self, **kwargs):
        '''
        Solve the pyomo model built by one of the init* methods.

        kwargs:
            solver: solver name for SolverFactory (default 'gurobi')
            timeLimit: solver time limit in seconds (default 2000)
            threads: number of solver threads (default 7)
            gap: relative MIP gap (default 0.005)
            logPath: directory for the solver log file (default "")
            logfile: full log file path (overrides logPath)
            tee: print solver output to stdout (default True)
        '''

        self.solver = kwargs.get('solver','gurobi')
        self.optprob = opt.SolverFactory(self.solver)
        self.optprob.options["timeLimit"]=kwargs.get('timeLimit',2000)
        self.optprob.options["threads"]=kwargs.get('threads',7)
        self.optprob.options["MIPgap"]=kwargs.get('gap',0.005)
        # Gurobi "Heuristics" parameter: fraction of runtime spent on
        # finding incumbent solutions heuristically.
        self.optprob.options["Heuristics"]=0.5

        logfile=os.path.join(kwargs.get('logPath',""),"GurobiLog.txt")
        self.optprob.options["logfile"]=kwargs.get('logfile',logfile)
        self.optiRes = self.optprob.solve(self.M,tee=kwargs.get('tee',True))
312 | def optNLModel(self):
313 | self.solver = 'ipopt'
314 | self.solver_io = 'nl'
315 | self.optprob = opt.SolverFactory(self.solver,solver_io=self.solver_io)
316 | self.optiRes = self.optprob.solve(self.M,tee=kwargs.get('tee',True))#, warmstart=True)
317 | #______________________________________________________
318 | def getEdgesAsGpd(self, coordSeries, analysisType, minCapacity=20, weighted=True, weightedTransmission=True, costCalc="Krieg",lbExport=1e-6, **kwargs):
319 | '''
320 | input:
321 | NX Graph --> Graph to implement
322 | coordSeries: Coordinates of all potential Nodes
323 | '''
324 | '''
325 | input:
326 | pyomoVariable --> Variable from whcih to extract the values
327 | coordSeries: Coordinates of all potential Nodes
328 | '''
329 |
330 | dicEdges=self.M.edgeFlow.get_values()
331 | nx.set_edge_attributes(self, "capacity", dicEdges)
332 | dicEdges={k:v for (k,v) in dicEdges.items() if v > lbExport}
333 | EdgesTotal = gpd.GeoDataFrame([(k[0], k[1], v) for (k,v) in dicEdges.items()],
334 | index=[k for k in dicEdges.keys()],
335 | columns=["inputID","targetID", "capacity"])
336 |
337 | LinesIn=coordSeries.loc[EdgesTotal["inputID"].values].geometry.values
338 | LinesOut=coordSeries.loc[EdgesTotal["targetID"].values].geometry.values
339 | EdgeCoords=gpd.GeoDataFrame(index=EdgesTotal.index)
340 | EdgeCoords["inputCoords"]=LinesIn
341 | EdgeCoords["outputCoords"]=LinesOut
342 | EdgesTotal["geometry"]=""
343 | EdgesTotal["distribution"]=False
344 | EdgesTotal.loc[["F" in tup[0] or "F" in tup[1] for tup in EdgesTotal.index ], "distribution"]=True
345 |
346 | geodict={}
347 | for key, values in EdgeCoords.iterrows():
348 | geodict[key]=LineString([values["inputCoords"], EdgeCoords["outputCoords"][key]])
349 | EdgesTotal["geometry"]=gpd.GeoSeries(geodict)
350 | for attr in self.attr:
351 | EdgesTotal[attr]=[self[key[0]][key[1]][attr] for key in dicEdges.keys()]
352 |
353 | EdgesTotal["capacityMax"]=EdgesTotal.capacity
354 | EdgesTotal.loc[EdgesTotal.capacityMax Variable from whcih to extract the values
380 | coordSeries: Coordinates of all potential Nodes
381 | '''
382 | NodesTotal=gpd.GeoDataFrame([(v[1].value) for v in self.M.nodeProduction.iteritems()],
383 | index=[(v[0]) for v in self.M.nodeProduction.iteritems()],
384 | columns=["production"])
385 |
386 | return NodesTotal
387 |
388 |
--------------------------------------------------------------------------------
/HIM/dataHandling.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Mon Apr 10 08:14:40 2017
4 |
5 | @author: m.reuss
6 | """
7 | from HIM.utils import *
8 | from scipy import spatial as sp
9 | import shapely as shp
10 | from scipy import sqrt
11 | from shapely.geometry import LineString
12 |
13 | #%% import Shapefile as Geopandas dataFrame and change into a common crs
def import_shp(path, crs={'init' :'epsg:4326'}, name=""):
    '''
    Import a shapefile as a geopandas GeoDataFrame in a common crs.

    input: path --> file source
           crs: coordinate reference system: default WGS84
           name: if you want to have unique indices, you should put there a name (e.g. G)
    -------------------------------
    This function imports a shapefile and gives a GeoDataFrame (geopandas).
    This dataFrame has a unique id as .index ("<name><row number>") and the
    same value in the "ID" column; "intIndex" keeps the plain integer index.
    '''
    gpdDataFrame=gpd.read_file(path)
    # drop rows without geometry before anything else touches them
    gpdDataFrame=checkCorrupted(gpdDataFrame)
    # BUGFIX: reset_index returns a copy and was previously discarded,
    # leaving gaps in the index when corrupted rows had been dropped
    gpdDataFrame=gpdDataFrame.reset_index(drop=True)
    gpdDataFrame=gpdDataFrame.to_crs(crs)
    gpdDataFrame["intIndex"]=gpdDataFrame.index.values
    gpdDataFrame.index=[name+str(id1) for id1 in gpdDataFrame.index.values]
    gpdDataFrame["ID"]=gpdDataFrame.index.values
    return gpdDataFrame
32 | #%%
def checkCorrupted(GeoDataFrame):
    '''
    Drop every row whose geometry is missing (None) and return the
    filtered frame; the input frame is left untouched.
    '''
    validMask = [geom is not None for geom in GeoDataFrame.geometry]
    return GeoDataFrame.loc[validMask]
36 |
37 | #%%
def point_array(GeoDataFrame):
    '''
    create Numpy array from GeoDataFrame of Points!!!
    input: GeoSeries of points
    return: list of (x, y) tuples, or None if the frame is not all Points
    '''
    # BUGFIX: the old test was `geometry.type.all()=="Point"`, which compares
    # the boolean result of .all() against a string and is False in modern
    # pandas; compare element-wise first, then reduce.
    if (GeoDataFrame.geometry.type == "Point").all():
        x=GeoDataFrame.geometry.map(lambda p:p.x).values
        y=GeoDataFrame.geometry.map(lambda p:p.y).values
        return list(zip(x,y))
    else:
        # the message used to be a bare string expression (a no-op); make it
        # actually visible while keeping the original None return
        print("GeoDataFrame does not contains Points: point_array is not working!")
        return
50 |
51 | #%% Grabs a list of an attribute from NetworkX
def NXtoList(NXGraph, attribute):
    '''
    Collect one edge attribute of a NetworkX graph.

    input:
        NXGraph: NX.Graph()
        attribute: name of the attribute as string
    -------------------
    return: (values, mapping) -- the attribute values as a plain list plus
    the edge -> value dictionary provided by networkx
    '''
    edgeAttributes = nx.get_edge_attributes(NXGraph, attribute)
    return list(edgeAttributes.values()), edgeAttributes
62 |
63 |
64 | #%%
65 | def distMatrix(gpdIn, gpdOut, weight=1., kNN=10):
66 | '''
67 | Build distance Matrix for two geopandas DataFrames
68 | gpdIn: geopandas dataframe for start
69 | gpdOut: geopandas dataframe for target
70 | kNN: number of nearest neighbours
71 | weight: weighting factor for detouring
72 | '''
73 | if len(gpdOut)kNNmax:
137 | kNN=kNNmax
138 | if kNN>0:
139 | Coords=listC
140 | tree = sp.KDTree(Coords, leafsize=10)
141 | treeDist, treeLoc = tree.query(Coords, k=kNN)
142 | idx = (range(len(listF))+np.zeros((kNN,1),dtype=np.int)).T.ravel()
143 | inID=np.array([listF[id1] for id1 in idx])
144 | tarID=np.array([listF[id2] for id2 in treeLoc.ravel()])
145 |
146 | return (inID, tarID, treeDist.ravel()/1000)
147 |
148 |
149 | #%%
def getDiameterSquare(massflow,
                      H2Density=5.7,
                      vPipeTrans=15):
    '''
    get m² from massflow with density and pipeline velocity
    massflow: kt per year
    H2Density in kg/m³
    vPipeTrans: pipeline flow velocity in m/s
    output: diameter in m²
    '''
    # kt/a -> kg/s (365-day year)
    kgPerSecond = massflow * 1e6 / 3600 / 365 / 24
    # continuity equation: A = mdot / (rho * v), and d² = 4 A / pi
    return 4 * kgPerSecond / (H2Density * vPipeTrans * np.pi)
163 |
164 | #%%
def getSpecCost(massflow,
                f_grid=1,
                H2Density=5.7,
                vPipeTrans=15,
                source="Krieg",
                base="diameter",
                diameter=None,
                **kwargs):
    '''
    Specific pipeline investment cost.

    massflow: massflow in kt per year
    f_grid: Additional factor for weighting results (just for dijkstra
            algorithm; only used with base="throughput")
    H2Density: Density of hydrogen in kg/m³
    vPipeTrans: maximum velocity of hydrogen inside the pipeline in m/s
    source: label of the cost correlation (kept for API compatibility)
    base: "diameter" -> quadratic cost curve over the pipe diameter (mm)
          "throughput" -> linear cost curve over the yearly mass flow
    diameter: pipe diameter in mm; derived from the massflow when None

    Output: specific pipeline invest in Million €
    Raises: ValueError for an unknown `base` (previously this fell through
            to an UnboundLocalError)
    '''
    # BUGFIX: identity check instead of "==" (which misbehaves for
    # array-valued diameters and is non-idiomatic for None)
    if diameter is None:
        # getDiameterSquare returns m²; convert the diameter to mm
        diameter=np.sqrt(getDiameterSquare(massflow, H2Density, vPipeTrans))*1000
    if base=="diameter":
        # quadratic fit: cost = A*d² + B*d + C  (d in mm, cost in €/m)
        A=2.2e-3
        B=0.86
        C=247.5
        specCost=(A*diameter**2+B*diameter+C)
    elif base=="throughput":
        # linear fit over the yearly throughput
        A=474.77
        B=1.3695
        specCost=A*f_grid+B*massflow
    else:
        raise ValueError("unknown cost base: " + repr(base))
    return specCost*1e-6
195 |
196 | #%%
197 | def extractAndCalc(fullDF, minCapacity=0, zeroes=False):
198 | '''
199 | standard operations for output
200 | input: full DataFrame
201 | minCapacuty= minimum relevant capacity for pipeline design
202 |
203 | '''
204 | if zeroes: x=-1
205 | else: x=0
206 | EdgesDist=fullDF[fullDF["capacity"]>x].copy()
207 | EdgesDist.loc[EdgesDist["capacity"] Graph to implement
219 | coordSeries: Coordinates of all potential Nodes
220 | return:
221 | EdgesDist - geopandas Dataframe with extracted values from networkx graph
222 | '''
223 | y=np.array(NXGraph.edges())
224 | (inputIDarr, targetIDarr)=(y[:,0], y[:,1])
225 | LinesIn=coordSeries.loc[list(inputIDarr)].geometry.values
226 | LinesOut=coordSeries.loc[list(targetIDarr)].geometry.values
227 | EdgeCoords=gpd.GeoDataFrame(index=NXGraph.edges())
228 | EdgeRes=gpd.GeoDataFrame(index=NXGraph.edges())
229 | EdgeCoords["inputCoords"]=LinesIn
230 | EdgeCoords["outputCoords"]=LinesOut
231 | EdgeRes["geometry"]=""
232 | for key in EdgeCoords.index:
233 | EdgeRes.loc[key,"geometry"]=shp.geometry.LineString([EdgeCoords["inputCoords"][key], EdgeCoords["outputCoords"][key]])
234 |
235 | dicCap=nx.get_edge_attributes(NXGraph, "capacity")
236 | pdCap=pd.DataFrame.from_dict(dicCap, orient="index")
237 | EdgeRes["capacity"]=pdCap[0]
238 |
239 | EdgesDist=extractAndCalc(EdgeRes, minCapacity=minCapacity, zeroes=zeroes)
240 | return EdgesDist
241 | #%%
def getGpdCapaFromPyomo(pyomoVariable, coordSeries, minCapacity=0, analysisType="pipeline"):
    '''
    input:
        pyomoVariable --> Variable from which to extract the values
        coordSeries: Coordinates of all potential Nodes
        minCapacity: minimum relevant capacity for pipeline design
        analysisType: "pipeline" -> run extractAndCalc on the result,
                      "truck"    -> keep used edges and add distances in km
    return:
        EdgesDist - geopandas Dataframe with extracted values from the
                    pyomo variable (only edges with positive flow)
    '''
    dicEdges=pyomoVariable.get_values()
    # keep only edges that actually carry flow
    dicEdges={k:v for (k,v) in dicEdges.items() if v > 0}
    EdgesTotal = gpd.GeoDataFrame([(k[0], k[1], v) for (k,v) in dicEdges.items()],
                            index=[k for k in dicEdges.keys()],
                            columns=["inputID","targetID", "capacity"])

    # BUGFIX: .ix was removed from pandas; these are label-based lookups,
    # so .loc is the correct replacement (matches getEdgesAsGpd)
    LinesIn=coordSeries.loc[EdgesTotal["inputID"].values].geometry.values
    LinesOut=coordSeries.loc[EdgesTotal["targetID"].values].geometry.values
    EdgeCoords=gpd.GeoDataFrame(index=EdgesTotal.index)
    EdgeRes=gpd.GeoDataFrame(index=EdgesTotal.index)
    EdgeRes["capacity"]=EdgesTotal["capacity"]
    EdgeCoords["inputCoords"]=LinesIn
    EdgeCoords["outputCoords"]=LinesOut
    EdgeRes["geometry"]=""
    # build one straight LineString per used edge
    for key in EdgeCoords.index:
        EdgeRes.loc[key,"geometry"]=shp.geometry.LineString([EdgeCoords["inputCoords"][key], EdgeCoords["outputCoords"][key]])
    if analysisType=="pipeline":
        EdgesDist=extractAndCalc(EdgeRes, minCapacity=minCapacity)
    elif analysisType=="truck":
        EdgesDist=EdgeRes[EdgeRes["capacity"]>0].copy()
        EdgesDist["distance"]=EdgesDist.length.values/1000
    return EdgesDist
272 |
273 | #%%
def getGpdFromPyomoNodes(pyomoVariable, name):
    '''
    input:
        pyomoVariable --> Variable from which to extract the values
        name: column name for the extracted values
    return:
        GeoDataFrame indexed by the variable keys with one column of values
    '''
    # iterate the pyomo variable once instead of twice
    items = list(pyomoVariable.iteritems())
    NodesTotal = gpd.GeoDataFrame([v.value for (_, v) in items],
                                  index=[k for (k, _) in items],
                                  columns=[name])

    return NodesTotal
285 |
286 |
287 | #%%Master student Paris Dimos work!!!
288 |
def rem_dupl_un(GeoDataFrame, name="G"):

    '''
    Must first implement simple_gpd
    input: GeoDataFrame
    output: GeoDataframe with unique Points and ID's
    Need it like that because later I will have issues with distMatrix
    Re-run after the as_ID!!!
    '''
    from shapely.geometry import Point
    coordsIn=list(GeoDataFrame.coordsIn)
    coordsOut=list(GeoDataFrame.coordsOut)
    # interleave start/end coordinates pairwise, then append any overhang
    paired=min(len(coordsIn), len(coordsOut))
    interleaved=[]
    for cIn, cOut in zip(coordsIn[:paired], coordsOut[:paired]):
        interleaved.append(cIn)
        interleaved.append(cOut)
    interleaved.extend(coordsIn[paired:])
    interleaved.extend(coordsOut[paired:])
    # drop duplicates while keeping first-seen order
    seen=set()
    uniqueCoords=[]
    for coord in interleaved:
        if coord not in seen:
            seen.add(coord)
            uniqueCoords.append(coord)
    uniquePoints=gpd.GeoDataFrame()
    uniquePoints['geometry']=[Point(coord) for coord in uniqueCoords]
    uniquePoints['intIndex']=range(len(uniqueCoords))
    uniquePoints['coords']=point_array(uniquePoints)
    uniquePoints['ID']=[name+str(i) for i in range(len(uniqueCoords))]
    uniquePoints.crs=GeoDataFrame.crs
    return uniquePoints
318 |
319 | #%%
def as_ID(GeoDataFrame, GeoDataFrameListUniA):
    '''
    Assigns a unique ID to all coordinates of the DataFrame (in place).
    Input: GeoDataFrame, GeoDataFrame from rem_dupl_un function
    Output: GeoDataframe with unique "StrID" and "EndID"
    Note: GeoDataFrameListUniA is re-indexed by its coordinate tuples as a
    side effect (callers reset the index afterwards).
    '''
    # look point IDs up by coordinate tuple
    GeoDataFrameListUniA.index=GeoDataFrameListUniA['coords'].values
    startIDs=GeoDataFrameListUniA.loc[GeoDataFrame['coordsIn'].values]['ID'].values
    endIDs=GeoDataFrameListUniA.loc[GeoDataFrame['coordsOut'].values]['ID'].values
    GeoDataFrame['inputID']=startIDs
    GeoDataFrame['targetID']=endIDs
330 | #%%
def simple_gpd(GeoDataFrame):
    '''
    Creates coords, coordsIn, coordsOut columns (in place).
    Input: GeoDataFrame
    Output: GeoDataframe with first and last coord at Linestring geometry
    '''
    GeoDataFrame['distance']=GeoDataFrame.length/1000
    endpointPairs=[]
    startCoords=[]
    endCoords=[]
    for geom in GeoDataFrame.geometry:
        # slice with step len-1 keeps exactly the first and last vertex
        pair=geom.coords[::len(geom.coords)-1]
        endpointPairs.append(pair)
        startCoords.append((np.round(pair[0][0],3), np.round(pair[0][1],3)))
        endCoords.append((np.round(pair[1][0],3), np.round(pair[1][1],3)))
    GeoDataFrame['coords']=endpointPairs
    GeoDataFrame['coordsIn']=startCoords
    GeoDataFrame['coordsOut']=endCoords
341 |
342 | #%%
def splitLinesOnMaxDistance(GeoDataLineString, lMax=1000):
    '''
    split a lots of lines into smaller ones based on the length of the line

    Every line longer than lMax is replaced by ceil(length/lMax)+1 equally
    long segments; all other attribute columns are copied to each segment.
    Shorter lines are passed through unchanged.
    '''
    j=0
    attrDict={}

    for key, values in GeoDataLineString.iterrows():
        geom=values["geometry"]
        if geom.length>lMax:
            addPoints=np.ceil(geom.length/lMax)
            start=geom.coords[0]
            for i in range(int(addPoints)+1):
                attrDict[j]={}
                # NOTE(review): i runs 0..addPoints, so this branch is dead
                # code; the last interpolate() call evaluates at geom.length
                # and therefore still ends on the original line's endpoint.
                if i>addPoints:
                    end=geom.coords[-1]
                else:
                    # next split point at fraction (i+1)/(addPoints+1)
                    newPoint=geom.interpolate(geom.length/(addPoints+1)*(i+1))
                    end=newPoint.coords[0]
                # copy all attributes; only the geometry is replaced by the
                # straight sub-segment
                for attr in values.keys():
                    if attr=="geometry": attrDict[j]["geometry"]=LineString([start, end])
                    else: attrDict[j][attr]=values[attr]
                start=newPoint.coords[0]
                j+=1
        else:
            # short enough: keep the original row as-is
            attrDict[j]=values
            j+=1
    NewGrid=gpd.GeoDataFrame().from_dict(attrDict)
    NewGrid.crs=GeoDataLineString.crs
    return NewGrid
373 |
374 | #%%
def linePolyIntersectBoolean(lineDataFrame,
                             polyDataFrame,
                             name="boolStreet",
                             save=False,
                             precise=False,
                             savepath=None):
    '''
    checks if Polygon dataframe intersects with a linestring dataframe
    input:
        -lineDataFrame: geopandas dataframe with linestrings
        -polyDataFrame: geopandas dataframe with polygons
        -name: name of new column in dataframe for boolean selection
        -save: write the extended polygon frame to savepath
        -precise: run exact intersects() checks on the bounding-box hits
    return:
        -polyDataFrame: geopandas dataframe with polygons and one additional column
    '''
    hitFlags={}
    lineIndex=lineDataFrame.sindex
    for (polyID, polyRow) in polyDataFrame.iterrows():
        # cheap bounding-box pre-selection via the spatial index
        candidateIdx=list(lineIndex.intersection(polyRow["geometry"].bounds))
        candidates=lineDataFrame.iloc[candidateIdx]
        matchCount=len(candidates.index)
        if precise:
            # refine with real geometry intersection tests
            exactMatches=candidates[candidates.intersects(polyRow["geometry"])]
            matchCount=len(exactMatches.index)
        hitFlags[polyID]=matchCount>0
    # *1 converts the booleans to 0/1 as in the original output
    polyDataFrame[name]=pd.Series(hitFlags)*1
    if save:
        polyDataFrame.to_file(savepath)
    return polyDataFrame
407 | #%%
408 |
def createCluster(FuelingNew, clusterSize, ClusterGraph=None, name="Cl"):
    '''
    automatic selection of multiple or single cluster selection

    Dispatches to createSingleCluster when no graph is given, otherwise to
    createMultCluster (one clustering per connected component).
    NOTE: createSingleCluster returns only the Cluster frame, while
    createMultCluster returns (Cluster, FuelingNew).
    '''
    # BUGFIX: forward the caller's `name` (it used to be hard-coded "Cl")
    if ClusterGraph is None:
        return createSingleCluster(FuelingNew, clusterSize, name=name)
    else:
        return createMultCluster(FuelingNew, clusterSize, ClusterGraph, name=name)
417 | #%%
def createSingleCluster(FuelingNew, clusterSize, name="Cl"):
    '''
    workflow for clustering fueling stations based on kmeans algorithm
    to a given mean clustersize

    input:
        FuelingNew: Fueling station GeoDataFrame (geopandas); gets the
            columns "ClusterID" and "distToCl" added in place
        clusterSize: average number of fueling stations per cluster
        name: Unique ID-Name for created Cluster
    return:
        GeoDataFrame (geopandas) with Clusterlocations. The Fueling GeoDataFrame
        is extended by respectice ClusterID
    '''
    from scipy.cluster import vq
    from shapely.geometry import Point
    from sklearn.cluster import KMeans
    stationCoords=point_array(FuelingNew)
    # at least one cluster, otherwise ~len/clusterSize of them
    nCluster=int(max(np.round(len(FuelingNew)/clusterSize),1))
    kmeans=KMeans(n_clusters=nCluster, random_state=42).fit(stationCoords)
    # assign every station to its nearest center and keep the distance
    clusterIdx, clusterDist=vq.vq(stationCoords, kmeans.cluster_centers_)
    Cluster=gpd.GeoDataFrame(geometry=[Point(center) for center in kmeans.cluster_centers_])
    Cluster["intIndex"]=Cluster.index
    Cluster.index=[name+ str(i) for i in Cluster.intIndex]
    Cluster["ID"]=Cluster.index
    FuelingNew["ClusterID"]=[name+ str(i) for i in clusterIdx]
    FuelingNew["distToCl"]=clusterDist/1000
    # aggregate demand and station count per cluster
    Cluster["H2Demand_kt"]=FuelingNew.groupby(by="ClusterID")["H2Demand_kt_F"].sum()
    Cluster["numberOfFS"]=FuelingNew.groupby(by="ClusterID").size()
    Cluster.crs=FuelingNew.crs
    return Cluster
449 | #%%
def createMultCluster(FuelingNew, clusterSize, ClusterGraph, name="Cl"):
    '''
    Clustering of fueling stations for multiple separate regions.

    input:
        FuelingNew: Fueling station GeoDataFrame (geopandas)
        clusterSize: average number of fueling stations per cluster
        ClusterGraph: networkx graph whose connected components define the
            separate regions; nodes must match FuelingNew's index labels
        name: Unique ID-Name for created Cluster
    return:
        (Cluster, FuelingNew) tuple.
        NOTE(review): createSingleCluster returns only the Cluster frame,
        so the two paths of createCluster return different shapes.
    '''
    # enumerate the connected components (one region each)
    dic={}
    i=0
    for subgraph in nx.connected_components(ClusterGraph):
        dic[i]=subgraph
        i+=1
    dic.keys()
    # split the stations by region and cluster each region separately;
    # the per-region cluster names get the region number appended
    dicFueling={i:FuelingNew.loc[[x in dic[i] for x in FuelingNew.index]].copy() for i in dic.keys()}
    dicCluster={i:createSingleCluster(dicFueling[i], clusterSize, name=name+str(i)) for i in dicFueling.keys()}
    # concatenate the per-region results back into single frames
    Cluster=dicCluster[list(dicCluster.keys())[0]]
    FuelingNew=dicFueling[list(dicFueling.keys())[0]]
    for i in list(dicCluster.keys())[1:]:
        Cluster=Cluster.append(dicCluster[i])
        FuelingNew=FuelingNew.append(dicFueling[i])
    # restore the original station order and renumber the clusters
    FuelingNew=FuelingNew.sort_values(by="intIndex")
    Cluster["intIndex"]=range(len(Cluster.index))
    Cluster.crs=FuelingNew.crs
    return Cluster, FuelingNew
479 |
480 | #%%
def cutLineAtPoints(line, points):
    '''
    Cut a LineString at the given points and return the list of resulting
    straight sub-segments, ordered along the line.

    line: shapely LineString
    points: iterable of shapely Points (assumed to lie on/near the line)
    '''
    # BUGFIX: only LineString is imported at this module's top level, so a
    # bare `Point` would raise NameError here (unless HIM.utils happens to
    # re-export it -- importing locally is safe either way).
    from shapely.geometry import Point
    # First coords of line (start + end)
    coords = [line.coords[0], line.coords[-1]]
    # Add the coords from the points
    coords += [list(p.coords)[0] for p in points]
    # Calculate the distance along the line for each point
    dists = [line.project(Point(p)) for p in coords]
    # sort the coords based on the distances
    # see http://stackoverflow.com/questions/6618515/sorting-list-based-on-values-from-another-list
    coords = [p for (d, p) in sorted(zip(dists, coords))]
    # generate the Lines
    lines = [LineString([coords[i], coords[i+1]]) for i in range(len(coords)-1)]
    return lines
def simplifyLinesAndCrossings(gpdLines):
    '''
    input:
        Geopandas dataframe with linestrings

    output:
        Geopandas Dataframe with linestrings in separate sections, all points and cat at crossings
        Geopandas Dataframe with unique points of the linestring to select the coordinates
    '''
    # 1) explode every linestring into its single vertex-to-vertex segments
    singleLines=[]
    for line in gpdLines.geometry:
        length=len(line.coords)
        for x in range(length-1):
            singleLines.append(LineString([line.coords[x], line.coords[x+1]]))
    SingleLinesGDF=gpd.GeoDataFrame(geometry=singleLines)
    # 2) cut each segment at every intersection with any other segment
    #    (O(n²) pairwise test over all segments)
    newLines=[]
    for key, values in SingleLinesGDF.iterrows():
        iterSectionsBool=SingleLinesGDF.intersects(values["geometry"])
        iterSections=SingleLinesGDF.intersection(values["geometry"]).loc[iterSectionsBool]
        # ignore the segment's intersection with itself
        iterPoints=iterSections.loc[iterSections.index!=key]
        if iterPoints.size>0:
            lines=cutLineAtPoints(values["geometry"],[iterPoints[x] for x in iterPoints.index])
            newLines.extend(lines)
        else:
            newLines.append(values["geometry"])

    newGrid=gpd.GeoDataFrame(geometry=newLines)
    newGrid.crs=gpdLines.crs
    # 3) derive endpoints, distances (km) and unique point IDs
    newGrid["coordsIn"]=[x.coords[0] for x in newGrid.geometry]
    newGrid["coordsOut"]=[x.coords[-1] for x in newGrid.geometry]
    newGrid["distance"]=newGrid.length/1000
    newGrid["weightedDistance"]=newGrid["distance"]*1
    gridPoints=rem_dupl_un(newGrid)
    gridPoints.index=gridPoints["coords"]
    newGrid["inputID"]=gridPoints.loc[newGrid["coordsIn"].values, "ID"].values
    newGrid["targetID"]=gridPoints.loc[newGrid["coordsOut"].values, "ID"].values
    # 4) drop self loops (same start and end ID) and duplicated edges
    newGrid=newGrid.loc[[values["inputID"]!=values["targetID"] for key, values in newGrid.iterrows()]].copy()
    newGrid["ID"]=[(values["inputID"],values["targetID"]) for key, values in newGrid.iterrows()]
    newGrid=newGrid.loc[newGrid["ID"].drop_duplicates().index]

    # re-index the point frame by its IDs for downstream lookups
    gridPoints.index=gridPoints["ID"].values

    return newGrid, gridPoints
537 |
538 |
--------------------------------------------------------------------------------
/HIM/workflow/preprocFunc.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Wed Nov 22 11:00:59 2017
4 |
5 | @author: m.reuss
6 | """
7 | from HIM.utils import *
8 | from HIM import dataHandling as sFun
9 | #%%
10 | #%%
def importDLMStreets(pathStreet, speed, crs, name="W"):
    '''
    importing the Streetgrid from "Digitales Landschaftsmodell" and selecting the grid with highest nodes

    input:
        pathStreet: path to the street shapefile
        speed: dict with at least "urban" and "motorway" travel speeds
        crs: target coordinate reference system
        name: prefix for the unique street IDs
    return:
        distStreet: DataFrame with inputID/targetID/distance/weightedDistance/time
        StreetsPointsNew: unique street nodes of the dominant component
    '''
    # BUGFIX: forward the caller's `name` (it used to be hard-coded "W")
    Streets = sFun.import_shp(pathStreet, crs=crs, name=name)
    sFun.simple_gpd(Streets)
    StreetsPoints=sFun.rem_dupl_un(Streets, "Street")
    sFun.as_ID(Streets,GeoDataFrameListUniA=StreetsPoints)
    Streets.loc[Streets["BEZ"].isnull(), "BEZ"]=""
    Streets["speed"]=speed["urban"]
    # "A..." labels mark Autobahn sections -> motorway speed
    Streets.loc[["A" in bez for bez in Streets["BEZ"]], "speed"]=speed["motorway"]
    Streets["time"]=Streets.length/1000/Streets["speed"]
    Streets["weightedDistance"]=Streets["distance"]
    Reduction=nx.Graph()
    Reduction.add_edges_from(list(zip(Streets.inputID,Streets.targetID)))
    # keep the first connected component containing >90 % of all nodes
    # NOTE(review): if no component qualifies, the last one inspected is kept
    for node in Reduction.nodes():
        G_new=[x for x in nx.node_connected_component(Reduction, node)]
        if len(G_new)>0.9*len(Reduction.nodes()): break
    StreetsPoints.index=StreetsPoints["ID"].values
    StreetsPointsNew=StreetsPoints.loc[G_new].copy()
    Streets.index=[(x[1].inputID, x[1].targetID) for x in Streets.iterrows()]
    distStreet=Streets.loc[:,["inputID", "targetID", "distance", "weightedDistance", "time"]]
    return distStreet, StreetsPointsNew
34 | #%%
def correlateFuelingData(Fueling, FuelingData, ConvFabMart, Centroid, maxDistPrioFS=25):
    '''
    Converting the Fueling Station DataSet of Robinius and Grüger

    input:
        Fueling: fueling station GeoDataFrame
        FuelingData: attribute table, row-aligned with Fueling
        ConvFabMart: mapping table with "old" -> "new" district IDs
        Centroid: district centroids with "ID_KRS_BKG" and "ID" columns
        maxDistPrioFS: stations within this distance (km) of a motorway get
            priority flag BAB=1
    return:
        Fueling joined with FuelingData, centroid assignment ("ID_C"),
        centroid geometry ("CentC") and distance to centroid ("distToC")
    '''
    ConvFabMart.index=[int(val) for val in ConvFabMart["old"]]
    ConvFabMart["newID"]=[int(val) for val in ConvFabMart["new"]]
    FuelingData.index=Fueling.index
    # Copy Fueling Data to Fueling GeoDataFrame
    Fueling=Fueling.join(FuelingData, rsuffix="-")
    # stations close to a motorway get priority
    Fueling.loc[Fueling["ABST_2BAHN"]<=maxDistPrioFS,"BAB"] = 1
    # translate the old district key to the new one
    Fueling["ID_KRS_BKG"]=ConvFabMart.loc[Fueling["ID_KRS_BKG"]]["new"].values
    Centroid.index=Centroid["ID_KRS_BKG"]
    # BUGFIX: .ix was removed from pandas; these are label lookups -> .loc
    Fueling["ID_C"]=Centroid.loc[Fueling["ID_KRS_BKG"]]["ID"].values
    Centroid.index=Centroid["ID"].values
    # Add the distance to the Centroid in km
    Fueling["CentC"]=Centroid.loc[Fueling["ID_C"]].geometry.values
    Fueling["CoordsFtoC"]=list(zip(Fueling.geometry, Fueling["CentC"]))
    Fueling["EdgesFtoC"]=[LineString(ix) for ix in Fueling["CoordsFtoC"]]
    EdgesFtoC=gpd.GeoDataFrame(Fueling["EdgesFtoC"].values, columns=["geometry"], index=Fueling.index)
    Fueling["distToC"]=EdgesFtoC.length/1000
    return Fueling
57 |
58 |
59 | #%%
def getDistanceMatrices(Cluster,
                        Source,
                        FuelingNew,
                        NGGridPoint,
                        StreetsPointsNew,
                        distStreet,
                        distGtoG,
                        weightFtoF,
                        detourFactor,
                        detourFactor,
                        beeline,
                        speed,
                        clustering=False,
                        clusterSize="switch",
                        detourPipeline=1.4,
                        kNN=10,
                        kNNGas=10) if False else None
164 | #%%
def getChosenStations(Fueling, Centroid, weightFtoF):
    '''
    select fueling stations based on demand per centroid

    input:
        Fueling: fueling station GeoDataFrame
        Centroid: centroids with "minFS" (stations to pick per district)
            and "H2Demand_kt_F" (demand per station)
        weightFtoF: mapping areaID -> detour weighting factor
    return:
        FuelingNew: chosen stations, re-indexed "F0..Fn", with demand,
            centroid distance and weighted distance columns
    Raises:
        ValueError when a district demands stations but contains none
        (previously this looped forever)
    '''
    if "BAB" not in Fueling.columns:
        Fueling["BAB"]=0
    # motorway stations first, then by area type and station id
    try:
        FuelingSorted=Fueling.sort_values(by=["BAB","GEBIET","ID_TS"],ascending=[False,False, True])
    except KeyError:
        # sort columns missing in this data set -> keep the input order
        FuelingSorted=Fueling.copy()
    FuelingNew=gpd.GeoDataFrame(columns=Fueling.columns, crs=Centroid.crs)
    listInID=[]
    listTarID=[]
    listDistance=[]
    listFull=[]

    FuelingSorted["coords"]=sFun.point_array(FuelingSorted)
    for key, values in Centroid.iterrows():
        if values["minFS"]==0:
            continue
        notEnough=True
        while notEnough:
            # ">=" instead of ">": with exactly minFS stations the original
            # duplicated once and then selected the same stations anyway
            if len(FuelingSorted[FuelingSorted["ID_C"]==key].index)>=values["minFS"]:
                listF=list(FuelingSorted[FuelingSorted["ID_C"]==key].head(int(values["minFS"]))["ID"])
                listCoords=list(FuelingSorted[FuelingSorted["ID_C"]==key].head(int(values["minFS"]))["coords"])
                listFull.extend(listF)
                notEnough=False
            else:
                # not enough stations in the district: duplicate its stations
                # until the requested count can be served
                districtStations=FuelingSorted[FuelingSorted["ID_C"]==key]
                if len(districtStations.index)==0:
                    raise ValueError("no fueling station mapped to centroid "+str(key))
                FuelingSorted=FuelingSorted.append(districtStations, ignore_index=True)
        (inID, outID, distance) = sFun.selfDistMatrixFueling(listF, listCoords)
        listInID.extend(inID)
        listTarID.extend(outID)
        listDistance.extend(distance)

    FuelingNew=Fueling.loc[listFull].copy()
    FuelingNew["intIndex"]=range(len(FuelingNew.index))
    FuelingNew.index=["F"+str(x) for x in FuelingNew["intIndex"]]
    FuelingNew["ID"]=FuelingNew.index
    FuelingNew["H2Demand_kt_F"]=[Centroid.loc[ID_C, "H2Demand_kt_F"] for ID_C in FuelingNew["ID_C"]]
    FuelingNew["H2Demand_kg_d_F"]=FuelingNew["H2Demand_kt_F"]*1e6/365
    # BUGFIX: .ix was removed from pandas; label lookup -> .loc
    FuelingNew["CentC"]=Centroid.loc[FuelingNew["ID_C"]].geometry.values
    FuelingNew["CoordsFtoC"]=list(zip(FuelingNew.geometry, FuelingNew["CentC"]))
    FuelingNew["EdgesFtoC"]=[LineString(ix) for ix in FuelingNew["CoordsFtoC"]]
    EdgesFtoC=gpd.GeoDataFrame(FuelingNew["EdgesFtoC"].values, columns=["geometry"], index=FuelingNew.index)
    FuelingNew["distToC"]=EdgesFtoC.length/1000
    FuelingNew["areaID"]=[str(int(ix))+"to1" for ix in FuelingNew["GEBIET"]]
    FuelingNew["weightedDistance"]=[weightFtoF[values["areaID"]]*values["distToC"] for key, values in FuelingNew.iterrows()]
    return FuelingNew
221 |
222 | #%%
def getGasFrance(NGGridLine, multiple=True):
    '''
    Prepare the natural-gas grid: derive edge endpoints and unique point IDs,
    drop tiny disconnected islands, and build the point-to-point distances.

    input:
        NGGridLine: geopandas dataframe with the gas grid linestrings
        multiple: if True, re-index the remaining points and build a kNN
            self-distance matrix instead of using the grid topology directly
    return:
        distGtoG: distance matrix between the kept grid points
        NGGridPoint: unique grid points that survived the island filter
    '''
    NGGridLine["coordsIn"]=NGGridLine.geometry.map(lambda geom: (np.round(geom.coords[0][0]), np.round(geom.coords[0][1])))
    NGGridLine["coordsOut"]=NGGridLine.geometry.map(lambda geom: (np.round(geom.coords[-1][0]), np.round(geom.coords[-1][1])))
    NGGridLine["distance"]=NGGridLine.length/1000
    NGGridPoint=sFun.rem_dupl_un(NGGridLine, name="G")
    NGGridPoint.index=NGGridPoint.coords.values
    NGGridLine["inputID"]=NGGridPoint.loc[NGGridLine["coordsIn"].values, "ID"].values
    NGGridLine["targetID"]=NGGridPoint.loc[NGGridLine["coordsOut"].values, "ID"].values
    NGGridLine["tupleID"]=[(x[1]["inputID"], x[1]["targetID"]) for x in NGGridLine.iterrows()]
    NGGridPoint.index=NGGridPoint["ID"].values

    Reduction=nx.Graph()
    Reduction.add_edges_from(list(zip(NGGridLine.inputID,NGGridLine.targetID)))
    # drop tiny islands (<2 % of all nodes).
    # BUGFIX: nx.connected_component_subgraphs was removed in networkx 2.4;
    # iterate the component node sets instead (as importDLMStreets does).
    # The threshold is fixed up front so it does not shrink while nodes are
    # being removed (the old loop re-evaluated it on the shrinking graph).
    nNodesTotal=len(Reduction.nodes())
    for component in list(nx.connected_components(Reduction)):
        if len(component)<0.02*nNodesTotal:
            Reduction.remove_nodes_from(component)
    NGGridPoint.index=NGGridPoint["ID"].values
    NGGridPoint=NGGridPoint.loc[list(Reduction.nodes())].copy()
    # keep only lines touching the surviving component(s)
    dictStreets={}
    NewSet=set(Reduction.nodes())
    for key, values in NGGridLine.iterrows():
        if values["inputID"] in NewSet or values["targetID"] in NewSet:
            dictStreets[key]=True
        else:
            dictStreets[key]=False
    NGGridLine["keep"]=pd.Series(dictStreets)
    NGGridLine=NGGridLine[NGGridLine["keep"]]
    NGGridLine.index=[(x[1].inputID, x[1].targetID) for x in NGGridLine.iterrows()]
    distGtoG=NGGridLine.loc[:,["inputID", "targetID", "distance"]].copy()
    distGtoG["weightedDistance"]=distGtoG["distance"]
    distGtoG.index=NGGridLine["tupleID"].values
    if multiple:
        # renumber the points and replace the topological distances by a
        # kNN beeline self-distance matrix
        NGGridPointTest=NGGridPoint.reset_index(drop=True)
        NGGridPointTest["intIndex"]=NGGridPointTest.index
        NGGridPointTest.index=["G"+ str(x) for x in NGGridPointTest.intIndex]
        NGGridPointTest["ID"]=NGGridPointTest.index
        NGGridPoint=NGGridPointTest
        distGtoG=sFun.selfDistMatrix(NGGridPointTest, weight=1, kNN=10)
    return distGtoG, NGGridPoint
263 | #%%
def importFrance(dataPath, crs, speed, fuelingMax_kg_d, penetration, mileage, specificDemand,
                 sourceFile="SourcesFrance.shp",
                 productionMultiplier=1.1):
    '''
    Preprocessing of all French inputs for the spatial hydrogen supply chain.

    Loads the prepared shapefiles (hydrogen sources, fueling stations, NUTS2
    districts, arrondissements, street network, gas grid), trims the street
    network to its dominant connected components, assigns fueling stations to
    arrondissements and derives the spatial hydrogen demand per area.

    Parameters
    ----------
    dataPath : str
        folder containing the French input files
    crs : dict
        target coordinate reference system for all shapefile imports
    speed : dict
        truck speeds with keys "urban" and "motorway"
        (presumably km/h -- TODO confirm against caller)
    fuelingMax_kg_d : float
        maximum dispensed hydrogen per fueling station (kg/day)
    penetration : float
        FCEV market penetration (fraction of the car stock)
    mileage : float
        annual mileage per vehicle -- assumed km/a, TODO confirm unit
    specificDemand : float
        hydrogen consumption per distance -- unit not visible here, TODO confirm
    sourceFile : str
        filename of the hydrogen source shapefile inside dataPath
    productionMultiplier : float
        oversizing factor for total production capacity relative to demand

    Returns
    -------
    dict
        street/gas network GeoDataFrames and distance matrices, fueling
        stations, demand centroids, sources, districts and total demand.
    '''
    # The commented block below documents how "SourcesFrance.shp" was
    # originally generated from a CSV; kept for provenance.
    #df=pd.read_csv(path.join(dataPath,"NuclearPPFrance.csv"))
    #geometry = [Point(xy) for xy in zip(df['x'], df['y'])]
    #crsS = {'init': 'epsg:4326'}
    #Source = gpd.GeoDataFrame(df, crs=crsS, geometry=geometry).to_crs(crs)
    #Source.to_file(path.join(dataPath,"SourcesFrance.shp"))
    Source=sFun.import_shp(path.join(dataPath,sourceFile), name="S", crs=crs)
    # ## Adjustments of fueling stations
    # Provenance of "FuelingFrance.shp" (generated once from a CSV plus
    # urban-area classification, GEBIET 1..3); kept for reference.
    # from ast import literal_eval
    # df=pd.read_csv(path.join(dataPath,"fuelstationsFR.csv"))
    # df=df.loc[[isinstance(x, str) for x in df["latlng"]]].copy()
    # df["coords"]=[literal_eval(xy) for xy in df["latlng"]]
    # df["geometry"]=[Point(yx[1],yx[0]) for yx in df["coords"]]
    # crsF = {'init': 'epsg:4326'}
    # Fueling = gpd.GeoDataFrame(df, crs=crsF, geometry=df["geometry"]).to_crs(crs)
    # #GetGEBIETs Value
    # urbanAreas=sFun.import_shp(r"C:\Alles\Sciebo\QGIS\grump-v1-urban-ext-polygons-rev01-shp", crs=crs)
    # urbAreasFr=urbanAreas.loc[urbanAreas["Countryeng"]=="France"]
    # bigUrbAreasFr=urbAreasFr.loc[urbAreasFr["ES95POP"]>100000]
    # Fueling["GEBIET"]=1
    # for (areaID, areaValue) in urbAreasFr.iterrows():
    #     Fueling.loc[Fueling.within(areaValue.geometry), "GEBIET"]=2
    # for (areaID, areaValue) in bigUrbAreasFr.iterrows():
    #     Fueling.loc[Fueling.within(areaValue.geometry), "GEBIET"]=3
    # Fueling.loc[:,["geometry", "typeroute", "GEBIET"]].to_file(path.join(dataPath,"FuelingFrance.shp"))

    Fueling=sFun.import_shp(path.join(dataPath,"FuelingFrance.shp"),name="F", crs=crs)
    # Provenance of "PolyFrance.shp" (NUTS2 polygons for France); kept.
    #gdf=gpd.read_file(path.join(dataPath,"NUTS_RG_01M_2013.shp"))
    #gdf=gdf.loc[gdf.STAT_LEVL_==2]
    #gdf=gdf.loc[["FR" in x for x in gdf.NUTS_ID]].reset_index(drop=True)
    #gdf["intIndex"]=gdf.index
    #gdf.index=["D"+str(x) for x in gdf["intIndex"]]
    #gdf["ID"]=gdf.index
    #District=gdf.copy()
    #District.crs={'init': 'epsg:4326'}
    #District.to_file(path.join(dataPath,"PolyFrance.shp"))
    District=sFun.import_shp(path.join(dataPath,"PolyFrance.shp"), name="D", crs=crs)

    # Street network: derive node IDs, speeds per road class and travel time.
    Streets=sFun.import_shp(path.join(dataPath,"FRA_roadsNew.shp"), name="W", crs=crs)
    sFun.simple_gpd(Streets)
    StreetsPoints=sFun.rem_dupl_un(Streets, "Street")
    sFun.as_ID(Streets,GeoDataFrameListUniA=StreetsPoints)
    Streets["speed"]=speed["urban"]
    Streets.loc[Streets.RTT_DESCRI=="Primary Route", "speed"]=speed["motorway"]
    # length is in meters (projected CRS) -> time in hours at km/h speeds
    Streets["time"]=Streets.length/1000/Streets["speed"]
    Streets["weightedDistance"]=Streets["distance"]
    # Drop disconnected sub-networks smaller than 2 % of all street nodes.
    Reduction=nx.Graph()
    Reduction.add_edges_from(list(zip(Streets.inputID,Streets.targetID)))
    for subgraph in nx.connected_component_subgraphs(Reduction.copy()):
        if len(subgraph.nodes())<0.02*len(Reduction.nodes()):
            Reduction.remove_nodes_from(subgraph)
    StreetsPoints.index=StreetsPoints["ID"].values
    StreetsPointsNew=StreetsPoints.loc[Reduction.nodes()].copy()
    # Keep only edges that touch at least one surviving node.
    dictStreets={}
    NewSet=set(Reduction.nodes())
    for key, values in Streets.iterrows():
        if values["inputID"] in NewSet or values["targetID"] in NewSet:
            dictStreets[key]=True
        else:
            dictStreets[key]=False
    Streets["keep"]=pd.Series(dictStreets)
    Streets=Streets[Streets["keep"]]
    Streets.index=[(x[1].inputID, x[1].targetID) for x in Streets.iterrows()]
    distStreet=Streets.loc[:,["inputID", "targetID", "distance", "weightedDistance", "time"]]
    #StreetsNew=sFun.splitLinesOnMaxDistance(Streets).T
    #StreetsNew.loc[:,["RTT_DESCRI", "geometry", "distance"]].to_file(path+"FRA_roadsNew.shp")
    # Keep only districts that are actually crossed by the street network.
    District=sFun.linePolyIntersectBoolean(Streets,
                                           District)
    District=District.loc[District["boolStreet"]==1]
    District=District.reset_index(drop=True)
    District["intIndex"]=District.index
    District.index=["D"+str(x) for x in District["intIndex"]]
    District["ID"]=District.index

    # Attach the registered car stock (year 2016) per NUTS2 district.
    vehicleDistribution=pd.read_excel(path.join(dataPath,"vehicleStockEurope1112.xls"), header=8)
    vehicleDistribution.index=vehicleDistribution["GEO"].values
    District["nCars"]=vehicleDistribution.loc[District.NUTS_ID][2016].values
    District["name"]=vehicleDistribution.loc[District.NUTS_ID]["GEO(L)/TIME"].values
    District=District.loc[District.nCars>0]

    # Provenance of "ArrondFrance_NEW.shp": arrondissements with car stock
    # disaggregated from NUTS2 by population share; kept for reference.
    #Arrondissements=sFun.import_shp(path.join(dataPath,"arrondissements-20131220-5m.shp"), crs=crs, name="A")
    #populationDistribution=pd.read_excel(path.join(dataPath,"Population.xls"), header=7, sheetname="Arrondissements")
    #populationDistribution["insee_ar"]=[(values["Code département"] + str(values["Code arrondissement"])) for key, values in populationDistribution.iterrows()]
    #populationDistribution.index=populationDistribution["insee_ar"].values
    #Arrondissements["population"]=populationDistribution.loc[Arrondissements.insee_ar]["Population totale"].values
    #tic=time.time()
    #dictPref={}
    #dictDist={}
    #dictID={}
    #spatial_index=Arrondissements.sindex
    #
    #for (areaID, areaValue) in District.iterrows():
    #    possible_matches_index = list(spatial_index.intersection(areaValue["geometry"].bounds))
    #    possible_matches = Arrondissements.iloc[possible_matches_index]
    #    precise_matches = possible_matches[possible_matches.intersects(areaValue["geometry"])]
    #    Arrondissements.loc[precise_matches.index,"ID_C"]=areaID
    #    District.loc[areaID, "nArr"]=len(precise_matches.index)
    ##Arrondissements=Arrondissements[[isinstance(ID,str) for ID in Arrondissements["ID_C"]]]
    #Arrondissements=Arrondissements.loc[[isinstance(x, str) for x in Arrondissements["ID_C"]]].copy()
    #Arrondissements.loc[np.invert(Arrondissements["population"]>0), "population"]=0
    ##Arrondissements=Arrondissements.loc[Arrondissements["population"]>0]
    #for id_c in set(Arrondissements["ID_C"]):
    #    Arrondissements.loc[Arrondissements["ID_C"]==id_c,"pop_NUTS2"]=Arrondissements.loc[Arrondissements["ID_C"]==id_c,"population"].sum()
    #    Arrondissements.loc[Arrondissements["ID_C"]==id_c,"cars_NUTS2"]=District.loc[id_c,"nCars"]
    #Arrondissements["share"]=Arrondissements["population"]/Arrondissements["pop_NUTS2"]
    #Arrondissements["nCars"]=Arrondissements["share"]*Arrondissements["cars_NUTS2"]
    #Arrondissements=Arrondissements.reset_index(drop=True)
    #Arrondissements["intIndex"]=Arrondissements.index
    #Arrondissements.index=["A"+str(x) for x in Arrondissements["intIndex"]]
    #Arrondissements["ID"]=Arrondissements.index
    #Arrondissements.to_file(path.join(dataPath,"ArrondFrance_NEW.shp"))
    Arrondissements=sFun.import_shp(path.join(dataPath,"ArrondFrance_NEW.shp"), crs=crs, name="A")

    # Spatial join: assign every fueling station to the arrondissement that
    # contains it, and count stations per arrondissement.
    dictPref={}
    dictDist={}
    dictID={}
    spatial_index=Fueling.sindex

    for (areaID, areaValue) in Arrondissements.iterrows():
        possible_matches_index = list(spatial_index.intersection(areaValue["geometry"].bounds))
        possible_matches = Fueling.iloc[possible_matches_index]
        precise_matches = possible_matches[possible_matches.intersects(areaValue["geometry"])]
        Fueling.loc[precise_matches.index,"ID_C"]=areaID
        Arrondissements.loc[areaID, "nFuelStat"]=len(precise_matches.index)
    # Drop stations that fell outside every arrondissement (ID_C stayed NaN).
    Fueling=Fueling[[isinstance(ID,str) for ID in Fueling["ID_C"]]]
    Fueling=Fueling.reset_index(drop=True)
    Fueling["intIndex"]=Fueling.index
    Fueling.index=["F"+str(x) for x in Fueling["intIndex"]]
    Fueling["ID"]=Fueling.index
    pathGasP = path.join(dataPath, "pipeFR.shp")
    #pathGasP = path.join(dataPath, "FRA_roadsNew.shp")
    NGGridLine = sFun.import_shp(pathGasP, crs=crs, name="GG")
    distGtoG, NGGridPoint = getGasFrance(NGGridLine)
    # Demand per arrondissement centroid:
    # FCEV stock -> annual hydrogen demand -> required number of stations.
    Centroid=Arrondissements.copy()
    Centroid.geometry=Centroid.centroid
    Centroid["FCEV"]=Centroid.nCars*penetration
    # NOTE(review): the next line is a no-op (result discarded) -- likely a
    # debugging leftover.
    Centroid["FCEV"].sum()
    Centroid["H2Demand_kt"]=Centroid["FCEV"]*specificDemand*mileage*1e-6
    # kg/day -> kt/year
    fuelingMax_kt_a=fuelingMax_kg_d/1e6*365
    Centroid["minFS"]=np.ceil(Centroid["H2Demand_kt"]/fuelingMax_kt_a)
    Centroid["realFS"]=Centroid["nFuelStat"]
    Centroid.loc[Centroid["realFS"]==0,"minFS"]=0
    #lowFS=Centroid[Centroid["minFS"]>Centroid["realFS"]].index
    # "BAB": station lies on a highway ("A" route type).
    Fueling["BAB"]=[x == "A" for x in Fueling["typeroute"]]
    Centroid["highwayFS"]=[sum(Fueling[Fueling["ID_C"]==ix]["BAB"]) for ix in Centroid.index]
    #Centroid.loc[lowFS,"minFS"] = Centroid["realFS"][lowFS].values
    # NOTE(review): rows with minFS==0 but realFS>0 would divide by zero
    # here; only realFS==0 rows are zeroed below -- verify intent.
    Centroid["H2Demand_kt_F"]=Centroid["H2Demand_kt"]/Centroid["minFS"].astype(int)
    Centroid.loc[Centroid["realFS"]==0,"H2Demand_kt_F"]=0

    totalH2Demand=Centroid["H2Demand_kt"].sum()

    # Distribute production capacity proportionally to installed power
    # ("p_nom"), oversized by productionMultiplier.
    Source["H2ProdCap_kt"]=Source["p_nom"]/Source["p_nom"].sum()*totalH2Demand*productionMultiplier

    return {"Streets":Streets,
            "StreetsPointsNew":StreetsPointsNew,
            "distStreet":distStreet,
            "NGGridLine":NGGridLine,
            "NGGridPoint":NGGridPoint,
            "distGtoG":distGtoG,
            "Fueling":Fueling,
            "Centroid":Centroid,
            "Source":Source,
            "District":District,
            "totalH2Demand":totalH2Demand,
            "Arrondissements":Arrondissements}
--------------------------------------------------------------------------------
/HIM/workflow/workflowFunctionsClean.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Thu Sep 13 12:14:10 2018
4 |
5 | @author: m.reuss
6 | """
7 | from HIM import dataHandling as sFun
8 | from HIM import optiSetup as optiFun
9 | from HIM import hscTotal
10 | from HIM import plotFunctions as pFun
11 | from HIM.workflow import preprocFunc as preFun
12 | from HIM.utils import *
13 |
14 | #%%
def preProcStreets(Streets, speed, crs):
    '''
    Prepares the street network for the graph calculations.

    Builds node IDs, per-segment speed and travel time, removes small
    disconnected sub-networks (< 2 % of all nodes) and returns the edge
    distance matrix plus the surviving street nodes.

    Inputs:
        Streets: GeoDataFrame with LineString geometry (modified in place)
        speed: dict mapping streetType -> speed (km/h)
        crs: coordinate reference system (kept for interface compatibility;
             not used here)
    Outputs:
        distStreet: DataFrame with inputID, targetID, distance,
                    weightedDistance and time per kept edge
        StreetsPointsNew: GeoDataFrame of street nodes in the kept network
    '''
    sFun.simple_gpd(Streets)
    StreetsPoints=sFun.rem_dupl_un(Streets, "Street")
    sFun.as_ID(Streets, GeoDataFrameListUniA=StreetsPoints)
    Streets.index=[(x[1].inputID, x[1].targetID) for x in Streets.iterrows()]
    Streets["speed"]=[speed[x] for x in Streets["streetType"]]
    # length is in meters (projected CRS) -> time in hours at km/h speeds
    Streets["time"]=Streets.length/1000/Streets["speed"]
    Streets["weightedDistance"]=Streets["distance"]
    # Remove disconnected sub-networks smaller than 2 % of all street nodes.
    Reduction=nx.Graph()
    Reduction.add_edges_from(list(zip(Streets.inputID, Streets.targetID)))
    for subgraph in nx.connected_component_subgraphs(Reduction.copy()):
        if len(subgraph.nodes())<0.02*len(Reduction.nodes()):
            Reduction.remove_nodes_from(subgraph)
    StreetsPoints.index=StreetsPoints["ID"].values
    StreetsPointsNew=StreetsPoints.loc[Reduction.nodes()].copy()
    # Keep only edges touching at least one surviving node
    # (vectorized isin replaces the former per-row iterrows loop).
    NewSet=set(Reduction.nodes())
    Streets["keep"]=Streets["inputID"].isin(NewSet) | Streets["targetID"].isin(NewSet)
    Streets=Streets[Streets["keep"]]
    Streets.index=[(x[1].inputID, x[1].targetID) for x in Streets.iterrows()]
    # Built once here; the earlier pre-reduction copy was dead code and was
    # removed.
    distStreet=Streets.loc[:,["inputID", "targetID", "distance", "weightedDistance", "time"]]

    return distStreet, StreetsPointsNew
44 | #%%
def preProcessing(dataPath, penetration, dfTable, country):
    '''
    loads the import data and precalculates the spatial demand as well as the
    fueling station locations. In addition, creates the distance matrices for
    the graph calculations afterwards.

    Basically, does everything, before the optimization of the transport models
    start
    ###
    Inputs:
        dataPath=string:
            path where input data is located:
            --> Folder with scenarios/ImportTableTechnologies etc.
        penetration: float
            FCEV penetration,
        dfTable: pandas dataframe :
            all techno-economic as well as scenario specific assumptions,
        country: string:
            country to investigate
    ___________________________________________________________________________
    Outputs:
        dictionary with 9 objects:
        "Source": GeoDataFrame with Points geometry:
            Important column:
                "H2ProdCap_kt": Hydrogen production capacity in kt per year
        "District": GeoDataFrame with Polygon geometry:
            Important column:
                "H2Demand_kt": Hydrogen demand in kt per year
        "FuelingNew":GeoDataFrame with Points geometry:
            Important column:
                "H2Demand_kt_F": Hydrogen demand per fueling station in kt per year
        "Cluster":GeoDataFrame with Points geometry:
            Important column:
                "H2Demand_kt": Hydrogen demand in kt per year
        "pipeCoords":GeoSeries with Points geometry:
            all coordinates for pipeline calculation
        "truckCoords":GeoSeries with Points geometry:
            all coordinates for truck calculation
        "distMatTruck": DataFrame:
            includes all distance matrices for transmission truck graph
        "distMatTruck2":DataFrame:
            includes all distance matrices for distribution truck graph
        "distMatPipeline":DataFrame:
            includes all distance matrices for pipeline graph
    '''
    crs={'ellps': 'GRS80',
         'no_defs': True,
         'proj': 'utm',
         'units': 'm',
         'zone': dfTable["General"].loc["utmZone","General"]}
    dataPathCountry=path.join(dataPath, country)
    #_____________________________________Import_______________________________
    Source = sFun.import_shp(path.join(dataPathCountry, "SourceNodes"), crs=crs, name="S")

    District = sFun.import_shp(path.join(dataPathCountry, "AreaPolygons"), crs=crs, name="C")

    Fueling = sFun.import_shp(path.join(dataPathCountry, "FuelingNodes"), crs=crs, name="F")

    Streets=sFun.import_shp(path.join(dataPathCountry,"StreetLines"), crs=crs, name="W")

    NGGridPoint = sFun.import_shp(path.join(dataPathCountry,"GasNodes"), crs=crs, name="G")

    # Precomputed gas-grid distance matrix; index becomes (inputID, targetID)
    # tuples to match the street distance matrices.
    distGtoG=pd.read_csv(path.join(dataPathCountry,"distGas.csv"), index_col=0)
    distGtoG.index=[(values["inputID"], values["targetID"]) for key, values in distGtoG.iterrows()]
    #________________________________Loading Scenario Data____________________
    speed={"motorway":dfTable["General"].loc["truckSpeedHighway","General"],
           "urban": dfTable["General"].loc["truckSpeedRural","General"],
           "beeline": dfTable["General"].loc["truckSpeed","General"]}
    clustering=bool(dfTable["General"].loc["clustering","General"])
    clusterSize=dfTable["General"].loc["clusterSize","General"]
    specificDemand=dfTable["General"].loc["specificDemand","General"]
    mileage=dfTable["General"].loc["mileage","General"]
    targetCapacityFS=dfTable["General"].loc["targetStationSize","General"]
    fuelingMax_kg_d=dfTable["General"].loc["utilization Station","General"]*targetCapacityFS
    detourFactorPipeline=dfTable["General"].loc["detourFactorPipeline","General"]
    detourFactorTruck=dfTable["General"].loc["detourFactorTruck","General"]
    # Area-type dependent pipeline detour weights (rural/suburban/urban pairs).
    weightFtoF=detourFactorPipeline*pd.Series([1., 1.25, 1.25, 1.5, 1.5, 1.5, 1.75, 1.75, 2.],
                                              index=["1to1","1to2","2to1","2to2","1to3","3to1","2to3","3to2","3to3"])
    #________________________________Preparing dataframes_____________________
    # Spatial join: assign every fueling station to its containing district
    # and count stations per district.
    spatial_index=Fueling.sindex
    for (areaID, areaValue) in District.iterrows():
        possible_matches_index = list(spatial_index.intersection(areaValue["geometry"].bounds))
        possible_matches = Fueling.iloc[possible_matches_index]
        precise_matches = possible_matches[possible_matches.intersects(areaValue["geometry"])]
        Fueling.loc[precise_matches.index, "Name"]=areaValue["Name"]
        Fueling.loc[precise_matches.index,"ID_C"]=areaID
        District.loc[areaID, "nFuelStat"]=len(precise_matches.index)
    Fueling["BAB"]=0
    # shapefile column names are truncated to 10 characters
    Source["H2ProdCap_kt"]=Source["H2ProdCap_"]
    (distStreet, StreetsPointsNew) = preProcStreets(Streets, speed, crs)

    # Demand per district centroid: car stock -> FCEV -> hydrogen demand.
    Centroid=District.copy()
    Centroid.geometry=Centroid.centroid
    Centroid["FCEV"]=Centroid["Bestand"]*penetration
    Centroid["H2Demand_kt"]=Centroid["FCEV"]*specificDemand*mileage*1e-6
    # kg/day -> kt/year
    fuelingMax_kt_a=fuelingMax_kg_d/1e6*365
    Centroid["minFS"]=np.ceil(Centroid["H2Demand_kt"]/fuelingMax_kt_a)
    Centroid["realFS"]=Centroid["nFuelStat"]
    Centroid["H2Demand_kt_F"]= Centroid["H2Demand_kt"]/Centroid["minFS"]
    Centroid.loc[Centroid["minFS"]==0,["H2Demand_kt_F", "H2Demand_kt"]]=0
    totalH2Demand=Centroid["H2Demand_kt"].sum()
    if country=="France":Source["H2ProdCap_kt"]=Source["p_nom"]/Source["p_nom"].sum()*totalH2Demand*1.1
    Source.loc[Source["H2ProdCap_kt"]>totalH2Demand, "H2ProdCap_kt"]=totalH2Demand
    District["H2Demand_kt"]=Centroid["H2Demand_kt"]

    totalH2Capacity=sum(Source["H2ProdCap_kt"])
    if totalH2Demand>totalH2Capacity:
        print("Production capacity not sufficient for Demand!")

    # ## Calculate minimum numbers of fueling stations
    try:
        # NOTE(review): targetFS is not defined anywhere in this scope, so
        # this always raises NameError and the fallback below is used; the
        # bare "except:" was narrowed so other errors are no longer hidden.
        fuelingMax_kt_a=totalH2Demand/targetFS
        fuelingMax_kg_d=fuelingMax_kt_a*1e6/365
    except NameError:
        fuelingMax_kt_a=fuelingMax_kg_d/1e6*365

    Centroid["minFS"]=np.ceil(Centroid["H2Demand_kt"]/fuelingMax_kt_a)
    Centroid.loc[Centroid["realFS"]==0,"minFS"]=0

    Centroid["H2Demand_kt_F"]= Centroid["H2Demand_kt"]/Centroid["minFS"]
    # BUGFIX: this statement previously lacked the "=0" assignment and had
    # no effect, leaving inf/NaN demand per station where minFS==0
    # (mirrors the zeroing done after the first pass above).
    Centroid.loc[Centroid["minFS"]==0,"H2Demand_kt_F"]=0
    Centroid.loc[Centroid["realFS"]==0,"H2Demand_kt"]=0
    #Fueling Station Selection

    FuelingNew=preFun.getChosenStations(Fueling=Fueling, Centroid=Centroid, weightFtoF=weightFtoF)
    #____________________________Clustering____________________________________

    if clustering:
        if country=="Japan":
            # Japan uses street-routed clustering instead of beeline.
            distFtoStreet=sFun.distMatrix(FuelingNew, StreetsPointsNew, weight=detourFactorTruck, kNN=1)
            ClusterGraph=optiFun.PipeNetWork()
            ClusterGraph.initializeEdges(distFtoStreet.append(distStreet))
            Cluster, FuelingNew=sFun.createCluster(FuelingNew, clusterSize, ClusterGraph)
            FuelingNew["weightedDistance"]=[weightFtoF[values["areaID"]]*values["distToCl"] for key, values in FuelingNew.iterrows()]
        else:
            Cluster=sFun.createCluster(FuelingNew, clusterSize)
            Cluster.crs=Centroid.crs
            FuelingNew["weightedDistance"]=[weightFtoF[values["areaID"]]*values["distToCl"] for key, values in FuelingNew.iterrows()]
    else:
        # No clustering: each district centroid acts as its own hub.
        Cluster=Centroid.copy()
        FuelingNew["distToCl"]=FuelingNew["distToC"]
        FuelingNew["ClusterID"]=FuelingNew["ID_C"]
    #______________________________Distance Matrices__________________________
    pipeCoords=Cluster.geometry.append(Source.geometry).append(NGGridPoint.geometry).append(FuelingNew.geometry)
    truckCoords=Source.geometry.append(FuelingNew.geometry).append(StreetsPointsNew.geometry).append(Cluster.geometry)

    distMatTruck, distMatTruck2, distMatPipeline=preFun.getDistanceMatrices(Cluster,
                                                                            Source,
                                                                            FuelingNew,
                                                                            NGGridPoint,
                                                                            StreetsPointsNew,
                                                                            distStreet,
                                                                            distGtoG,
                                                                            weightFtoF,
                                                                            detourFactorTruck,
                                                                            speed=speed,
                                                                            clustering=clustering,
                                                                            clusterSize=clusterSize,
                                                                            beeline=[False, False])

    return {"Source":Source,
            "District":District,
            "FuelingNew":FuelingNew,
            "Cluster":Cluster,
            "pipeCoords":pipeCoords,
            "truckCoords":truckCoords,
            "distMatTruck":distMatTruck,
            "distMatTruck2":distMatTruck2,
            "distMatPipeline":distMatPipeline}
218 | #%%
def calcTransportSystem(Source,
                        FuelingNew,
                        Cluster,
                        truckCoords,
                        pipeCoords,
                        distMatTruck,
                        distMatTruck2,
                        distMatPipeline,
                        pathResults,
                        beeline=None,
                        weight="time",
                        detourFactorTruck=None,
                        speed=None,
                        ):
    '''
    calculates the transport models:
        Pipeline Transmission and distribution
        Truck Transmission
        Truck Distribution
    Inputs:
        "Source": GeoDataFrame with Points geometry:
            Important column:
                "H2ProdCap_kt": Hydrogen production capacity in kt per year
        "District": GeoDataFrame with Polygon geometry:
            Important column:
                "H2Demand_kt": Hydrogen demand in kt per year
        "FuelingNew":GeoDataFrame with Points geometry:
            Important column:
                "H2Demand_kt_F": Hydrogen demand per fueling station in kt per year
        "Cluster":GeoDataFrame with Points geometry:
            Important column:
                "H2Demand_kt": Hydrogen demand in kt per year
        "pipeCoords":GeoSeries with Points geometry:
            all coordinates for pipeline calculation
        "truckCoords":GeoSeries with Points geometry:
            all coordinates for truck calculation
        "distMatTruck": DataFrame:
            includes all distance matrices for transmission truck graph
        "distMatTruck2":DataFrame:
            includes all distance matrices for distribution truck graph
        "distMatPipeline":DataFrame:
            includes all distance matrices for pipeline graph
        "beeline": [bool, bool]: use beeline instead of routing for
            [transmission, distribution]; defaults to [False, False]
        "detourFactorTruck": float: beeline detour factor, REQUIRED when
            beeline[1] is True (previously this branch crashed with a
            NameError)
        "speed": dict with key "beeline": truck speed for beeline
            distribution, REQUIRED when beeline[1] is True
    ___________________________________________________________________________
    Outputs: Results from the transport models:
        resultsEdgesTruck: GeoDataFrame with LineStrings:
            all resulting Edges of the Truck transmission calculation
            Important columns:
                "time": travelled time of each edge
                "weightedDistance": distance of each edge
                "edge": describes if truck section is an "Endpoint" (sink or source)
        resultsEdgesTruck2: GeoDataFrame with LineStrings:
            all resulting Edges of the Truck distribution calculation
            Important columns:
                "time": travelled time of each edge
                "weightedDistance": distance of each edge
                "edge": describes if truck section is an "Endpoint" (sink or source)
        resultsEdgesPipeline: GeoDataFrame with LineStrings:
            all resulting Edges of the Truck calculation
            Important columns:
                "weightedDistance": distance of each edge
                "lineCost": total Cost of each edge
                "diameter": diameter of pipeline section
    '''
    # None-sentinel avoids the shared mutable default list.
    if beeline is None:
        beeline=[False, False]
    #PipelineCalculation
    # ## Import to NetworkX for minimum spanning tree
    GraphPipeline=optiFun.PipeNetWork()
    GraphPipeline.initializeEdges(distMatPipeline)

    # old networkx (<2.0) attribute order: (graph, name, values)
    nx.set_node_attributes(GraphPipeline, "productionMax", Source.H2ProdCap_kt.to_dict())
    nx.set_node_attributes(GraphPipeline, "demand", FuelingNew["H2Demand_kt_F"].to_dict())
    GraphPipeline.useMinSpanTree(weight="weightedDistance")
    #Test
    #GraphPipeline.reduceNetworkSize()
    # init optimization
    GraphPipeline.initOpti(linear=True)
    #Optimization
    GraphPipeline.optModel(logPath=pathResults, tee=False)
    #Extract results
    productionPipeline=GraphPipeline.getProductionNodes()
    resultsEdgesPipeline=GraphPipeline.getEdgesAsGpd(pipeCoords, "pipeline", costCalc="pressureDrop", logPath=pathResults, tee=False)
    Source["pipe_kt_a"]=productionPipeline
    Source["pipe_kg_d"]=Source["pipe_kt_a"]*1e6/365
    #_____________________________________________________________________
    #Initializing Graph
    GraphTruck=optiFun.PipeNetWork()
    GraphTruck.initializeEdges(distMatTruck)

    nx.set_node_attributes(GraphTruck, "productionMax", Source.H2ProdCap_kt.to_dict())
    nx.set_node_attributes(GraphTruck, "demand", FuelingNew["H2Demand_kt_F"].to_dict())

    GraphTruck.reduceNetworkSize()
    #Initializing the optimization
    GraphTruck.initOptiTruck(weight=weight)

    GraphTruck.optModel(logPath=pathResults, tee=False)

    resultsTruckNodes=GraphTruck.getProductionNodes()
    resultsEdgesTruck=GraphTruck.getEdgesAsGpd(truckCoords, "truck")
    #______________________________________________________________________
    Source["truck_kt_a"]=resultsTruckNodes
    Source["truck_kg_d"]=Source["truck_kt_a"]*1e6/365
    # an edge is an "endpoint" when it touches a non-street node
    resultsEdgesTruck["edge"]=[not ("Street" in x[0] and "Street" in x[1]) for x in resultsEdgesTruck.index]

    if not beeline[1]:#Optimization of Distribution trucks
        GraphTruck2=optiFun.PipeNetWork()
        GraphTruck2.initializeEdges(distMatTruck2)

        nx.set_node_attributes(GraphTruck2, "productionMax", Cluster.H2Demand_kt.to_dict())
        nx.set_node_attributes(GraphTruck2, "demand", FuelingNew["H2Demand_kt_F"].to_dict())

        GraphTruck2.reduceNetworkSize()

        GraphTruck2.initOptiTruck(weight=weight)

        GraphTruck2.optModel(logPath=pathResults, tee=False)

        resultsEdgesTruck2=GraphTruck2.getEdgesAsGpd(truckCoords, "truck")
        resultsEdgesTruck2["H2Demand_kg_d_F"]=resultsEdgesTruck2["capacity"]*1e6/365
    else:
        # BUGFIX: this branch formerly referenced detourFactorTruck and
        # speed, which were never in scope -> guaranteed NameError. They are
        # now explicit keyword parameters; fail fast with a clear message.
        if detourFactorTruck is None or speed is None:
            raise ValueError("beeline distribution requires the "
                             "'detourFactorTruck' and 'speed' arguments")
        resultsEdgesTruck2=FuelingNew.loc[:,["distToC","H2Demand_kt_F", "H2Demand_kg_d_F", "EdgesFtoC"]].copy()
        resultsEdgesTruck2.geometry=resultsEdgesTruck2["EdgesFtoC"]
        resultsEdgesTruck2["weightedDistance"]=resultsEdgesTruck2["distToC"]*detourFactorTruck
        resultsEdgesTruck2["time"]=resultsEdgesTruck2["weightedDistance"]/speed["beeline"]
    #Extract results
    resultsEdgesTruck2["edge"]=[not ("Street" in x[0] and "Street" in x[1]) for x in resultsEdgesTruck2.index]

    return resultsEdgesTruck, resultsEdgesTruck2, resultsEdgesPipeline
345 | # In[25]:
def calcSpatialHSC(resultsEdgesTruck,
                   resultsEdgesTruck2,
                   resultsEdgesPipeline,
                   hscPathways,
                   Cluster,
                   FuelingNew,
                   Source,
                   District,
                   dfTable,
                   pathResults=False,
                   beeline=None):
    '''
    calculates the Hydrogen supply chain model based on different pathways
    Inputs:
        "resultsEdgesTruck": GeoDataFrame with LineStrings:
            all resulting Edges of the Truck transmission calculation
            Important columns:
                "time": travelled time of each edge
                "weightedDistance": distance of each edge
                "edge": describes if truck section is an "Endpoint" (sink or source)
        "resultsEdgesTruck2": GeoDataFrame with LineStrings:
            all resulting Edges of the Truck distribution calculation
            Important columns:
                "time": travelled time of each edge
                "weightedDistance": distance of each edge
                "edge": describes if truck section is an "Endpoint" (sink or source)
        "resultsEdgesPipeline": GeoDataFrame with LineStrings:
            all resulting Edges of the Truck calculation
            Important columns:
                "weightedDistance": distance of each edge
                "lineCost": total Cost of each edge
                "diameter": diameter of pipeline section
        "hscPathways": dictionary:
            definition of supply chain pathways --> Which techs to use
        "Source": GeoDataFrame with Points geometry:
            Important column:
                "H2ProdCap_kt": Hydrogen production capacity in kt per year
        "District": GeoDataFrame with Polygon geometry:
            Important column:
                "H2Demand_kt": Hydrogen demand in kt per year
        "FuelingNew":GeoDataFrame with Points geometry:
            Important column:
                "H2Demand_kt_F": Hydrogen demand per fueling station in kt per year
        "Cluster":GeoDataFrame with Points geometry:
            Important column:
                "H2Demand_kt": Hydrogen demand in kt per year
        "dfTable": pandas dataframe :
            all techno-economic as well as scenario specific assumptions,
        "pathResults": str or False:
            if given, per-pathway data and shapefiles are written below it
        "beeline": [bool, bool] or None:
            beeline flags forwarded to hscTotal.HSC; defaults to
            [False, False]

    Output:
        Results: dictionary:
            Collection of Supply Chain Classes of each pathway
    '''
    # None-sentinel avoids the shared mutable default list.
    if beeline is None:
        beeline=[False, False]
    Results={}
    pipelineDistance=[resultsEdgesPipeline[resultsEdgesPipeline["distribution"]==False],
                      resultsEdgesPipeline[resultsEdgesPipeline["distribution"]]]
    truckDistance=[resultsEdgesTruck,
                   resultsEdgesTruck2]
    sourceDf={"pipeline":Source["pipe_kg_d"],
              "truck":Source["truck_kg_d"]}
    # BUGFIX: pathData must exist even when pathResults is falsy -- it was
    # previously only bound inside the if-branch, so "if pathData:" below
    # raised a NameError for the default pathResults=False.
    pathData=False
    if pathResults:
        pathData=os.path.join(pathResults, "data")
        os.makedirs(pathData)
    i=0
    for hscPathwayType in sorted(hscPathways.keys()):
        # one capacity entry per supply chain step (kg/day)
        listCapacities=[sourceDf[hscPathwayType],
                        sourceDf[hscPathwayType],
                        sourceDf[hscPathwayType],
                        sourceDf[hscPathwayType],
                        sourceDf[hscPathwayType],
                        sourceDf[hscPathwayType],
                        resultsEdgesTruck["capacity"].values*1e6/365,
                        Cluster["H2Demand_kt"]*1e6/365,
                        resultsEdgesTruck2["H2Demand_kg_d_F"],
                        FuelingNew["H2Demand_kg_d_F"]]
        for listHSC in hscPathways[hscPathwayType]:
            cumCost=0

            Results[i]=hscTotal.HSC(listHSC,
                                    dfTable,
                                    listCapacities,
                                    FuelingNew["H2Demand_kt_F"].sum()*1e6,
                                    truckDistance=truckDistance,
                                    pipelineDistance=pipelineDistance,
                                    targetCars=0,
                                    beeline=beeline)

            Results[i].calcHSC(cumCost=cumCost)
            if pathData:

                Results[i].saveHSC(pathData, i)
            i+=1
    # NOTE(review): assumes at least two pathways exist and index 1 is a
    # pipeline pathway with a "Station" step -- verify against callers.
    FuelingNew["TOTEXPipe"]=Results[1].hscClasses["Station"].TOTEX
    if pathResults:
        resultsEdgesPipeline.crs=Cluster.crs
        resultsEdgesTruck2.crs=Cluster.crs
        resultsEdgesTruck.crs=Cluster.crs
        pathSHP=path.join(pathResults, "shapefiles")
        os.makedirs(pathSHP)
        testBoolColumns(resultsEdgesTruck).to_file(path.join(pathSHP,"TrucksRoutingTransmission.shp"))
        testBoolColumns(resultsEdgesTruck2).to_file(path.join(pathSHP,"TrucksRoutingDistribution.shp"))
        pipeDistribution=resultsEdgesPipeline.loc[resultsEdgesPipeline.distribution]
        pipeTransmission=resultsEdgesPipeline.loc[resultsEdgesPipeline.distribution==False]
        testBoolColumns(pipeDistribution).to_file(path.join(pathSHP,"PipeDistribution.shp"))
        testBoolColumns(pipeTransmission).to_file(path.join(pathSHP,"PipeTransmission.shp"))
        testBoolColumns(Source).to_file(path.join(pathSHP,"Source.shp"))
        testBoolColumns(FuelingNew).to_file(path.join(pathSHP,"FuelingStation.shp"))
        testBoolColumns(Cluster).to_file(path.join(pathSHP,"Hubs.shp"))

        testBoolColumns(District).to_file(path.join(pathSHP,"Area.shp"))

    return Results
--------------------------------------------------------------------------------
/HIM/hscAbstract.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Tue Apr 10 10:43:13 2018
4 |
5 | @author: m.reuss
6 | """
7 |
8 | from HIM.utils import *
9 | from HIM import hscClasses
10 | from HIM import plotFunctions as pFun
11 | from HIM import dataHandling as sFun
12 | from openpyxl import load_workbook
13 |
14 | #%%
15 |
16 | class HSCAbstract(object):
17 | '''
18 | This class contains the automatic calculation of a predefined supply chain
19 | based on abstract distances and demands
20 | '''
    def __init__(self,
                 demArr,
                 distArr,
                 listHSC,
                 dfTable,
                 learning=False,
                 cityRadius=15):
        '''
        Initialize an abstract hydrogen supply chain: build the pathway
        table (dfHSC), derive transport distances and demand arrays.

        inputs:
            demArr: array of hydrogen demands -- presumably kg/day per case,
                TODO confirm unit against caller
            distArr: array of transmission distances -- presumably km,
                TODO confirm unit against caller
            listHSC: list of hydrogen supply technologies = pathway
            dfTable: dict-like of DataFrames with techno-economic data
                (one sheet per technology kind plus a "General" sheet)
            learning: bool, enable technology learning curves
            cityRadius: float, city radius used for the Ogden distribution
                distance estimates (same length unit as distArr)
        '''


        #####################################################################
        #Step 1: Initialize the supply chain for the given technologies
        self.demArr=demArr
        self.distArr=distArr
        self.dfTable=dfTable
        # maps technology name -> sheet (kind) it is defined on
        self.technologies=self.getAttrDict()
        self.learning=learning
        # dfHSC: one row per supply-chain step, indexed by unique step name
        self.dfHSC=pd.DataFrame(index=listHSC)
        self.dfHSC["kind"]=[self.technologies[key] for key in self.dfHSC.index]
        self.dfHSC["form"]=[dfTable[name].loc["form", key] for (key, name) in zip(self.dfHSC.index, self.dfHSC["kind"])]
        # assigns unique "name" per step (Kind, Kind1, Kind2, ...)
        self.getNames()
        self.dfHSC["technology"]=self.dfHSC.index
        self.dfHSC.index=self.dfHSC["name"].values
        self.dfHSC["intIndex"]=range(len(self.dfHSC.index))
        # result columns, filled during calcHSC
        self.dfHSC["storePart"]=False
        self.dfHSC["TOTEX"]=0
        self.dfHSC["invest"]=0
        self.dfHSC["varOPEX"]=0
        self.dfHSC["fixOPEX"]=0
        self.dfHSC["nTrucks"]=0
        self.dfHSC["pipeLength"]=0
        self.dfHSC["cumCost"]=0
        self.hscClasses={}
        self.preStorage=False
        self.geoms={}
        self.capacityFS=dfTable["General"].loc["targetStationSize","General"]
        self.energyDemand=pd.DataFrame(index=["Loss","electricityRES","electricityGrid","NaturalGas","Diesel"])
        self.transportType=[]
        # short pathway name: first 4 chars before and 2 after "-" of each
        # non-conversion technology, concatenated
        self.name=''.join((str(e).partition("-")[0][:4] + str(e).partition("-")[2][:2]) for e in self.dfHSC.loc[["Con" not in x for x in self.dfHSC.index], "technology"].values)
        #######################################################################
        #step 2: add important information
        if any(self.dfHSC["kind"]=="Storage"):
            # mark the steps directly before/after the storage step
            self.preStorage=True
            self.storageIndex=self.dfHSC.loc[self.dfHSC["kind"]=="Storage","intIndex"]
            self.dfHSC.loc[self.dfHSC["intIndex"]==self.storageIndex[0]-1,"storePart"]=True
            self.dfHSC.loc[self.dfHSC["intIndex"]==self.storageIndex[0]+1,"storePart"]=True
        self.detourTruck=self.dfTable["General"].loc["detourFactorTruck","General"]
        self.detourPipeline=self.dfTable["General"].loc["detourFactorPipeline","General"]
        # per-station demand/distance arrays shaped like demArr/distArr
        # (x**0 yields an array of ones with matching shape)
        self.demArrFS=self.dfTable["General"].loc["utilization Station", "General"]*self.capacityFS*self.demArr**0
        self.distArrFS=self.dfTable["General"].loc["distributionDistance", "General"]*self.distArr**0
        self.nFS=self.demArr/self.demArrFS
        # Ogden-style estimates of the average distribution distance per
        # station for pipeline and truck networks
        self.ogdenPipe=(2.43*(self.nFS**0.49))*cityRadius/self.nFS
        self.ogdenTruck=1.42*self.nFS*cityRadius/self.nFS
        # [transmission, distribution] distances incl. detour factors
        self.truckDistance=[self.detourTruck*(self.distArr+self.ogdenTruck),
                            self.detourTruck*self.ogdenTruck]
        self.pipeDistance=[self.detourPipeline*self.distArr,
                           self.detourPipeline*self.ogdenPipe]

        self.transportDemands=[self.demArr, self.demArrFS]
        self.TOTEX={}
        self.cumCost={}
97 |
98 | #%%
99 | #get dictionary of attributes from the ImportTablesTechnology.xslx
100 | def getAttrDict(self):
101 | attr={}
102 | for key in self.dfTable:
103 | if "lr6" in key:
104 | continue
105 | for tech in self.dfTable[key].columns:
106 | attr[tech]=key
107 |
108 | return attr
109 | #%%
110 | def getNames(self):
111 | names=[]
112 | for (key, name) in zip(self.dfHSC.index, self.dfHSC["kind"]):
113 | count=len(self.dfHSC[self.dfHSC["kind"]==name]["kind"])
114 | if count<=1:
115 | names.append(name)
116 | else:
117 | x=1
118 | while name + str(x) in names:
119 | x+=1
120 | names.append(name+str(x))
121 | self.dfHSC["name"]=names
122 | #%%
123 | def calcFSCost(self ,targetNumber, learning = False):
124 | '''
125 | calculate Fuelstation cost based on learning rate and scaleup
126 | '''
127 | baseCost=self.dfTable["General"].loc["baseStationCost","General"]
128 | baseSize=self.dfTable["General"].loc["baseStationSize","General"]
129 | baseNumber=self.dfTable["General"].loc["baseStationNumber","General"]
130 | learningRate=self.dfTable["General"].loc["learningRate","General"]
131 |
132 | if learning:
133 | V0=baseSize*baseNumber
134 | V1=self.capacityFS*targetNumber
135 | beta=np.log2(1-learningRate)
136 | learningFactor=((V1/V0)**beta)/(1+beta)
137 | else:
138 | learningFactor=1
139 |
140 | self.dfTable["Station"].loc["stationInvest",:]=baseCost*self.dfTable["Station"].loc["stationMult",:]*(self.capacityFS/baseSize)**self.dfTable["Station"].loc["stationScaling",:]*learningFactor
141 | #%%
    def calcHSC(self,
                cumCost=0):
        '''
        calculates the hydrogen supply chain:
            1. initialize the chain parts
            2. calculate the results
            3. extract the results
        Resulting dataframe: HSC.dfHSCRes

        Parameters
        ----------
        cumCost : float, optional (default 0)
            Hydrogen cost accumulated upstream of this chain; handed to each
            step as costH2In and grown by each step's TOTEX.
        '''
        self.startCost=cumCost
        # iTrans counts Transport steps seen so far; it indexes the paired
        # transmission/distribution lists built in __init__
        # (transportDemands, truckDistance, pipeDistance).
        iTrans=0
        for (key, (kind, technology, storagePartition, index)) in self.dfHSC.loc[:,[ "kind","technology","storePart", "intIndex"]].iterrows():
            capacity=self.demArr
            if kind=="Production":
                self.hscClasses[key]=hscClasses.Production(capacity,
                                                           technology,
                                                           self.dfTable)
            elif kind=="Import":
                self.hscClasses[key]=hscClasses.Import(capacity,
                                                       technology,
                                                       self.dfTable)
            elif kind=="Storage":
                self.hscClasses[key]=hscClasses.Storage(capacity,
                                                        technology,
                                                        self.dfTable,
                                                        costH2In=cumCost)
                # everything after the storage step is no longer "pre-storage"
                self.preStorage=False
            elif kind=="Transport":
                #check if pipeline or Truck
                if technology=="Pipeline":
                    self.hscClasses[key]=hscClasses.PipelineSingle(self.transportDemands[iTrans],
                                                                   self.pipeDistance[iTrans],
                                                                   self.dfTable,
                                                                   costH2In=cumCost)
                    self.transportType.append("Pipeline")
                else:
                    #self.geoms[key]=self.truckDistance[iTrans].geometry
                    self.hscClasses[key]=hscClasses.Truck(self.transportDemands[iTrans],
                                                          self.truckDistance[iTrans],
                                                          technology,
                                                          self.dfTable,
                                                          costH2In=cumCost)
                    #self.truckGPD[iTrans]["TOTEX"]=self.hscClasses[key].getTOTEX()
                    #self.truckGPD[iTrans]["nTruckPerDay"]=self.hscClasses[key].nTruckPerDay

                    self.transportType.append("Truck")

                iTrans+=1

            elif kind=="Station":
                # refresh the station invest for the current scale-up first
                if "learningRate" in self.dfTable["General"].index:
                    self.calcFSCost(self.demArr/self.capacityFS, learning = self.learning)
                self.hscClasses[key]=hscClasses.Station(self.demArrFS,
                                                        technology,
                                                        self.dfTable,
                                                        costH2In=cumCost)




            elif kind=="Connector":
                #The conversion technology needs additional information about the
                #inlet and outlet pressure, in case it is an compressor
                pressureIn=0
                pressureOut=0
                nextTech=0
                if technology=="Compressor":
                    #previous Technology --> elaborating starting pressure
                    # NOTE(review): positional [0] on a label-indexed Series
                    # relies on a deprecated pandas fallback; .iloc[0] would
                    # be the explicit spelling.
                    prevKind=self.dfHSC.loc[[x == index-1 for x in self.dfHSC["intIndex"]],"kind"][0]
                    prevTech=self.dfHSC.loc[[x == index-1 for x in self.dfHSC["intIndex"]],"technology"][0]
                    i=1
                    # walk upstream past placeholder "None" steps
                    while prevTech=="None" and index-i>0:
                        i+=1
                        prevKind=self.dfHSC.loc[[x == index-i for x in self.dfHSC["intIndex"]],"kind"][0]
                        prevTech=self.dfHSC.loc[[x == index-i for x in self.dfHSC["intIndex"]],"technology"][0]
                    #since conversion pressure are not fixed: try method
                    # NOTE(review): bare except also hides typos/KeyErrors;
                    # consider narrowing to KeyError.
                    try:
                        pressureIn=self.dfTable[prevKind].loc["pressureOut",prevTech]
                    except:
                        if prevTech=="Dehydrogenation":
                            pressureIn=2.
                        else: pressureIn=999.
                    #following Technology --> elaborating starting pressure
                    nextKind=self.dfHSC.loc[[x == index+1 for x in self.dfHSC["intIndex"]],"kind"][0]
                    nextTech=self.dfHSC.loc[[x == index+1 for x in self.dfHSC["intIndex"]],"technology"][0]
                    i=1
                    # walk downstream past placeholder "None" steps
                    while nextTech=="None":
                        i+=1
                        nextKind=self.dfHSC.loc[[x == index+i for x in self.dfHSC["intIndex"]],"kind"][0]
                        nextTech=self.dfHSC.loc[[x == index+i for x in self.dfHSC["intIndex"]],"technology"][0]

                    try:
                        pressureOut=self.dfTable[nextKind].loc["pressureIn",nextTech]
                    except:
                        #Input pressure for Liquefaction as well as Hydrogenation as set to 30 bar
                        pressureOut=30.
                    #if pressure in > pressure Out --> No compression necessary
                    pressureOut=max(pressureIn, pressureOut)



                if self.preStorage:
                    self.hscClasses[key]=hscClasses.Connector(capacity,
                                                              technology,
                                                              self.dfTable,
                                                              costH2In=cumCost,
                                                              pressureIn=pressureIn,
                                                              pressureOut=pressureOut,
                                                              nextStep=nextTech,
                                                              storagePartition=storagePartition)
                else:
                    self.hscClasses[key]=hscClasses.Connector2(capacity,
                                                               technology,
                                                               self.dfTable,
                                                               costH2In=cumCost,
                                                               pressureIn=pressureIn,
                                                               pressureOut=pressureOut,
                                                               nextStep=nextTech,
                                                               storagePartition=storagePartition)




            # accumulate the running H2 cost and collect per-step results
            cumCost=self.hscClasses[key].getTOTEX()+cumCost
            self.TOTEX[key]=self.hscClasses[key].TOTEX
            self.cumCost[key]=cumCost
#            self.dfHSC.loc[key, "invest"]=self.hscClasses[key].invest
#
#            self.dfHSC.loc[key, "CAPEX"]=self.hscClasses[key].CAPEX
#            self.dfHSC.loc[key, "fixOPEX"]=self.hscClasses[key].fixOPEX
#            self.dfHSC.loc[key, "varOPEX"]=self.hscClasses[key].varOPEX
#            self.dfHSC.loc[key, "nTrucks"]=self.hscClasses[key].numberOfTrucks
#
            self.dfHSC.loc[key, "additionalEmissions"]=self.hscClasses[key].CO2Emissions
            self.dfHSC.loc[key, "additionalPE"]=self.hscClasses[key].primary

            # broadcast each demand entry onto a 2D array shape
            self.energyDemand[key]=[np.array([[0]])+x for x in self.hscClasses[key].getDemandAbstract()]
        # #Add the energy demand to the results
        self.dfHSC["TOTEX"]=pd.Series(self.TOTEX)
        self.dfHSC=pd.concat([self.dfHSC, self.energyDemand.T],axis=1)

#        ##Test!!!
#        self.dfHSC.loc[self.dfHSC.index[0],"TOTEX"]=self.dfHSC.loc[self.dfHSC.index[0],"TOTEX"]+self.startCost
#
#        #Exclude everything in the same way as for H2Mobility
        self.dfHSCRes=self.dfHSC.loc[:,["TOTEX",
                                        "CAPEX",
                                        "fixOPEX",
                                        "varOPEX",
                                        "invest",
                                        "cumCost",
                                        "technology",
                                        "nTrucks",
                                        "pipeLength",
                                        "Loss",
                                        "electricityRES",
                                        "electricityGrid",
                                        "NaturalGas",
                                        "Diesel"]]
        self.calcLossesRecursive()
        self.calcEmissions()
        self.calcPrimaryDemand()
        # self.dfHSCRes=self.dfHSCRes.round(4)
305 | #%%
306 | def createResFolders(self, pathResults, savePlot=False, saveResults=False):
307 | '''
308 | creating the result folders for storing the results
309 | '''
310 | self.savePlot=savePlot
311 | self.saveResults=saveResults
312 | self.pathResults=os.path.join(pathResults, self.name)
313 | if savePlot:
314 | self.pathPlot=os.path.join(self.pathResults, "Graphics")
315 | os.makedirs(self.pathPlot)
316 | if saveResults:
317 | self.pathRes=os.path.join(self.pathResults, "data")
318 | os.makedirs(self.pathRes)
319 |
320 | def calcEmissions(self, useOverCapacity=True):
321 | '''
322 | calculate emissions based on input Values in dfTable
323 | '''
324 | if useOverCapacity:
325 | factor=self.dfHSCRes["overCapacity"]
326 | else:
327 | factor=1
328 | emissions=self.dfTable["General"].loc[["emissionDiesel",
329 | "emissionNG",
330 | "emissionGrid",
331 | "emissionRES"], "General"]
332 | emissionDiesel=emissions["emissionDiesel"]*self.dfHSC["Diesel"]
333 | emissionNG=emissions["emissionNG"]*self.dfHSC["NaturalGas"]
334 | emissionGrid=emissions["emissionGrid"]*self.dfHSC["electricityGrid"]
335 | emissionRES=emissions["emissionRES"]*self.dfHSC["electricityRES"]
336 | self.dfHSCRes["CO2Emissions[kg/kg]"]=factor*((emissionDiesel+emissionNG+emissionGrid+emissionRES)/1000+self.dfHSC["additionalEmissions"])
337 |
338 | def calcPrimaryDemand(self, useOverCapacity=True):
339 | '''
340 | calculate emissions based on input Values in dfTable
341 | '''
342 | if useOverCapacity:
343 | factor=self.dfHSCRes["overCapacity"]
344 | else:
345 | factor=1
346 | primaryDemand=self.dfTable["General"].loc[["primaryDiesel",
347 | "primaryNG",
348 | "primaryGrid",
349 | "primaryRES"], "General"]
350 | primaryDiesel=primaryDemand["primaryDiesel"]*self.dfHSC["Diesel"]
351 | primaryNG=primaryDemand["primaryNG"]*self.dfHSC["NaturalGas"]
352 | primaryGrid=primaryDemand["primaryGrid"]*self.dfHSC["electricityGrid"]
353 | primaryRES=primaryDemand["primaryRES"]*self.dfHSC["electricityRES"]
354 | self.dfHSCRes["primaryEnergy[MJ/kg]"]=factor*(primaryDiesel+primaryNG+primaryGrid+primaryRES+self.dfHSC["additionalPE"])
355 | #%%
356 | def calcLossesRecursive(self):
357 | '''
358 | recursive calculate losses back to get an overcapacity that is necessary
359 | '''
360 | loss=[1]
361 | lossesSingle=self.dfHSCRes["Loss"]
362 | for i in range(lossesSingle.size-1):
363 | loss.append(lossesSingle[-1*(i+1)]*loss[i])
364 | loss.reverse()
365 | self.dfHSCRes["overCapacity"]=loss
366 | #%%
367 | '''
368 | saves the Hydrogen supply chain results to the given excel-file
369 | '''
370 | def saveHSC(self, pathData, i, name="HSCRes.xlsx"):
371 | self.filePath=os.path.join(pathData,name)
372 | self.sheetName=str(i)
373 | if path.isfile(self.filePath):
374 | book = load_workbook(self.filePath)
375 | if self.sheetName in book.sheetnames:
376 | std=book.get_sheet_by_name(self.sheetName)
377 | book.remove_sheet(std)
378 | writer=pd.ExcelWriter(self.filePath, engine = 'openpyxl')
379 | writer.book=book
380 | self.dfHSCRes.to_excel(writer,
381 | sheet_name=self.sheetName)
382 | writer.save()
383 | writer.close()
384 | else:
385 | self.dfHSCRes.to_excel(self.filePath, sheet_name=self.sheetName)
386 |
387 |
388 | #%%
    def plotHSC(self,
                figsize=(14,10),
                background=None,
                source=None,
                hub=None,
                sink=None,
                truckVar="capacity",
                pipeVar="capacity",
                zorder=3,
                alpha=1,
                savePlot=False,
                pathPlot=None,
                show=True):
        '''
        plot the scenario results onto a geospatial map

        Parameters
        ----------
        figsize : tuple
            Size of the matplotlib figure.
        background, source, hub, sink : geopandas.GeoDataFrame or None
            Map layers; each is drawn only if it is a GeoDataFrame.
        truckVar, pipeVar : str
            Column names used to scale truck / pipeline line widths.
        zorder : int
            Base drawing order for the transport layers.
        alpha : float
            Transparency of the transport layers.
        savePlot : bool
            If True, save the figure to <pathPlot>/<self.name>.
        pathPlot : str or None
            Target folder for saving (required when savePlot is True).
        show : bool
            If True, call plt.show().

        NOTE(review): uses self.industryDemand, self.pipelineGPD and
        self.truckGPD, which are not assigned in this class's visible
        __init__ — confirm callers set them before plotting.
        '''
        bg_area=(87/255, 133/255, 147/255)
        bg_lines=(99/255, 150/255, 167/255)

        self.fig,self.ax=plt.subplots(figsize=figsize)
        self.ax.set_aspect('equal')
        self.ax.axis("off")
        rangeTransport=len(self.transportType)
        if isinstance(background, gpd.GeoDataFrame):
            pFun.plot_polygon_collection(ax=self.ax,
                                         geoms=background.geometry,
                                         colors_or_values=[bg_area for ix in background.index],
                                         plot_values=False,
                                         vmin=None,
                                         vmax=None,
                                         cmap=None,
                                         edgecolor=bg_lines,
                                         alpha=1,
                                         label="Administrative Area")
        #Sinks
        if isinstance(sink, gpd.GeoDataFrame):
            pFun.plotGPDPoints(sink, self.ax,
                               color="black",
                               label="Fueling Stations")

        #Source
        # marker sizes are scaled linearly between these bounds
        sourceMarkerMax=100
        sourceMarkerMin=1
        if isinstance(source, gpd.GeoDataFrame):
            if self.transportType[0]=="Pipeline":
                source["markersize"]=((source["pipe_kt_a"]-source["pipe_kt_a"].min())/source["pipe_kt_a"].max()*(sourceMarkerMax-sourceMarkerMin)+sourceMarkerMin)
            else:
                source["markersize"]=((source["truck_kt_a"]-source["truck_kt_a"].min())/source["truck_kt_a"].max()*(sourceMarkerMax-sourceMarkerMin)+sourceMarkerMin)
            pFun.plotGPDPoints(source,self.ax,
                               colName="markersize",
                               color=(178/255, 223/255, 138/255),
                               zorder=zorder+1,
                               marker="D",
                               label="Electrolyzer\n %.1f" % source["pipe_kt_a"].min() + " - %.1f" % source["pipe_kt_a"].max() + "kt/a")

        if self.industryDemand>0:
            indMarkerSizeMax=100
            indMarkerSizeMin=0
            if isinstance(hub, gpd.GeoDataFrame):
                hub["markersize"]=((hub["H2Ind_kt"]-hub["H2Ind_kt"].min())/hub["H2Ind_kt"].max()*(indMarkerSizeMax-indMarkerSizeMin)+indMarkerSizeMin)
                pFun.plotGPDPoints(hub,self.ax,
                                   colName="markersize",
                                   color="blue",
                                   zorder=zorder+1,
                                   marker="^",
                                   label="Industrial Demand\n %.1f" % hub["H2Ind_kt"].min() + " - %.1f" % hub["H2Ind_kt"].max() + "kt/a")

        #Transport
        # per-layer style settings: index 0 = transmission, 1 = distribution
        lineWidthMax=[5,0.5]
        lineWidthMin=[0.5,0.5]
        colorStyle=["redToWhite", "black"]
        maxRange=[5,1]
        pipeLabel=["Transmission Pipeline", "Distribution Pipeline"]
        truckLabel=["Truck Routes", "Truck Routes (Distribution)"]
        for i in range(rangeTransport):
            if self.transportType[i]=="Pipeline":
                pFun.plotGpdLinesVar(self.pipelineGPD[i],
                                     pipeVar,
                                     self.ax,
                                     zorder=zorder-i,
                                     alpha=alpha,
                                     name=colorStyle[i],
                                     rangeMax=maxRange[i],
                                     maxLineWidth=lineWidthMax[i],
                                     minLineWidth=lineWidthMin[i],
                                     label=pipeLabel[i])
            else:
                pFun.plotGpdLinesVar(self.truckGPD[i],
                                     truckVar,
                                     self.ax,
                                     zorder=zorder-i,
                                     alpha=alpha,
                                     name="black",
                                     rangeMax=maxRange[i],
                                     maxLineWidth=lineWidthMax[i],
                                     minLineWidth=lineWidthMin[i],
                                     label=truckLabel[i])


        plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
        if savePlot:
            plt.savefig(os.path.join(pathPlot,self.name), bbox_inches="tight")
        if show:
            plt.show()
493 |
494 |
--------------------------------------------------------------------------------
/HIM/hscTotal.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Wed Aug 23 11:21:00 2017
4 |
5 | @author: Markus
6 | """
7 |
8 |
9 | from HIM.utils import *
10 | from HIM import hscClasses
11 | from HIM import plotFunctions as pFun
12 | from HIM import dataHandling as sFun
13 | from openpyxl import load_workbook
14 |
15 | #%%
16 |
17 | class HSC(object):
18 | '''
19 | This class contains the automatic calculation of a predefined supply chain
20 | After initialization, there are 2 options:
21 | .calcHSC
22 | .saveHSC
23 | '''
    def __init__(self,
                 listHSC,
                 dfTable,
                 listCapacities,
                 totalH2Demand,
                 truckDistance,
                 pipelineDistance,
                 targetCars=100000,
                 beeline=[True, True],
                 industryDemand=0,
                 transportWeight="weightedDistance",
                 learning=True):
        '''
        initialize the hydrogen supply chain:
        Name everything
        prepare the storage partitions
        inputs:
            listHSC: list of hydrogen supply technologies = pathway
            dfTable: dataframe with techno-economic data
            listCapacities: list of hydrogen demand (kg/day) per technology
            totalH2Demand: Total hydrogen demand per year (kg)
            truckDistance: per-leg truck route data (copied to self.truckGPD;
                presumably GeoDataFrames with the columns used in calcHSC —
                verify against callers)
            pipelineDistance: per-leg pipeline network data (copied to
                self.pipelineGPD)
            targetCars: target number of supplied cars (default 100000)
            beeline: Boolean of beeline or truck routing is used, per leg
            industryDemand: additional industrial H2 demand added to the
                transmission-leg total (default 0)
            transportWeight: column name used as the distance weight
                (default "weightedDistance")
            learning: apply the station learning curve in calcFSCost
        NOTE(review): beeline=[True, True] is a mutable default argument; it
        is only read here, but replacing it with a None sentinel would be
        safer.
        '''


        #####################################################################
        #Step 1: Initialize the supply chain for the given technologies
        self.dfTable=dfTable
        # technology name -> table key, derived from the imported tables
        self.technologies=self.getAttrDict()
        self.learning=learning
        self.dfHSC=pd.DataFrame(index=listHSC)
        self.dfHSC["kind"]=[self.technologies[key] for key in self.dfHSC.index]
        self.dfHSC["form"]=[dfTable[name].loc["form", key] for (key, name) in zip(self.dfHSC.index, self.dfHSC["kind"])]
        self.dfHSC["capacity"]=listCapacities
        # unique per-step names become the dataframe index
        self.getNames()
        self.dfHSC["technology"]=self.dfHSC.index
        self.dfHSC.index=self.dfHSC["name"].values
        self.dfHSC["intIndex"]=range(len(self.dfHSC.index))
        self.dfHSC["storePart"]=False
        # result columns, filled during calcHSC
        self.dfHSC["TOTEX"]=0
        self.dfHSC["invest"]=0
        self.dfHSC["varOPEX"]=0
        self.dfHSC["fixOPEX"]=0
        self.dfHSC["nTrucks"]=0
        self.dfHSC["pipeLength"]=0
        self.dfHSC["cumCost"]=0
        self.hscClasses={}
        self.industryDemand=industryDemand
        # index 0: transmission leg incl. industry; index 1: distribution leg
        self.totalH2Demand=[totalH2Demand+industryDemand, totalH2Demand]
        self.truckGPD=truckDistance.copy()
        self.pipelineGPD=pipelineDistance.copy()
        self.preStorage=False
        self.targetCars=targetCars
        self.geoms={}
        self.capacityFS=self.dfTable["General"].loc["targetStationSize","General"]
        self.energyDemand=pd.DataFrame(index=["Loss",
                                              "electricityRES",
                                              "electricityGrid",
                                              "NaturalGas",
                                              "Diesel"])
        self.beeline=beeline
        self.transportType=[]
        self.transportWeight=transportWeight
        # short chain label: first 4 chars (+2 after a "-") of each non-Connector step
        self.name=''.join((str(e).partition("-")[0][:4] + str(e).partition("-")[2][:2]) for e in self.dfHSC.loc[["Con" not in x for x in self.dfHSC.index], "technology"].values)
        #######################################################################
        #step 2: add important information
        if any(self.dfHSC["kind"]=="Storage"):
            self.preStorage=True
            # mark the steps directly before/after the (first) storage step
            # NOTE(review): storageIndex[0] uses the deprecated positional
            # fallback of Series.__getitem__; .iloc[0] would be explicit.
            self.storageIndex=self.dfHSC.loc[self.dfHSC["kind"]=="Storage","intIndex"]
            self.dfHSC.loc[self.dfHSC["intIndex"]==self.storageIndex[0]-1,"storePart"]=True
            self.dfHSC.loc[self.dfHSC["intIndex"]==self.storageIndex[0]+1,"storePart"]=True
100 |
101 | #get dictionary of attributes from the ImportTablesTechnology.xslx
102 | def getAttrDict(self):
103 | attr={}
104 | for key in self.dfTable:
105 | if "lr6" in key:
106 | continue
107 | for tech in self.dfTable[key].columns:
108 | attr[tech]=key
109 |
110 | return attr
111 |
112 | def getNames(self):
113 | names=[]
114 | for (key, name) in zip(self.dfHSC.index, self.dfHSC["kind"]):
115 | count=len(self.dfHSC[self.dfHSC["kind"]==name]["kind"])
116 | if count<=1:
117 | names.append(name)
118 | else:
119 | x=1
120 | while name + str(x) in names:
121 | x+=1
122 | names.append(name+str(x))
123 | self.dfHSC["name"]=names
124 |
125 | def calcFSCost(self ,targetNumber, learning = False):
126 | '''
127 | calculate Fuelstation cost based on learning rate and scaleup
128 | '''
129 | baseCost=self.dfTable["General"].loc["baseStationCost","General"]
130 | baseSize=self.dfTable["General"].loc["baseStationSize","General"]
131 | baseNumber=self.dfTable["General"].loc["baseStationNumber","General"]
132 | learningRate=self.dfTable["General"].loc["learningRate","General"]
133 |
134 | if learning:
135 | V0=baseSize*baseNumber
136 | V1=self.capacityFS*targetNumber
137 | beta=np.log2(1-learningRate)
138 | learningFactor=((V1/V0)**beta)/(1+beta)
139 | else:
140 | learningFactor=1
141 |
142 | self.dfTable["Station"].loc["stationInvest",:]=baseCost*self.dfTable["Station"].loc["stationMult",:]*(self.capacityFS/baseSize)**self.dfTable["Station"].loc["stationScaling",:]*learningFactor
143 |
    def calcHSC(self,
                cumCost=0,
                abstract=False):
        '''
        calculates the hydrogen supply chain:
            1. initialize the chain parts
            2. calculate the results
            3. extract the results
        Resulting dataframe: HSC.dfHSCRes

        cumCost: hydrogen cost accumulated upstream of this chain; passed to
            each step as costH2In and grown by each step's mean TOTEX.
        abstract: NOTE(review) - never read inside this method; verify
            whether it can be removed.
        '''
        self.startCost=cumCost
        # iTrans counts Transport steps seen so far; it indexes the paired
        # per-leg lists (totalH2Demand, truckGPD, pipelineGPD, beeline):
        # 0 = transmission, 1 = distribution.
        iTrans=0
        for (key, (kind, technology, storagePartition, capacity, index)) in self.dfHSC.loc[:,[ "kind","technology","storePart", "capacity", "intIndex"]].iterrows():
            if kind=="Production":
                self.hscClasses[key]=hscClasses.Production(capacity,
                                                           technology,
                                                           self.dfTable)
            elif kind=="Import":
                self.hscClasses[key]=hscClasses.Import(capacity,
                                                       technology,
                                                       self.dfTable)
            elif kind=="Storage":
                self.hscClasses[key]=hscClasses.Storage(capacity,
                                                        technology,
                                                        self.dfTable,
                                                        costH2In=cumCost)
                # everything after the storage step is no longer "pre-storage"
                self.preStorage=False
            elif kind=="Transport":
                #check if pipeline or Truck
                if technology=="Pipeline":
                    # lineCost is summed in million units, hence the 1e6
                    totalLineCost=self.pipelineGPD[iTrans]["lineCost"].sum()*1e6
                    self.dfHSC.loc[key,"pipeLength"]=self.pipelineGPD[iTrans][self.transportWeight].sum()
                    #self.geoms[key]=self.pipelineDistance[iTrans].geometry
                    self.hscClasses[key]=hscClasses.Pipeline(self.totalH2Demand[iTrans],
                                                             totalLineCost,
                                                             self.pipelineGPD[iTrans]["weightedDistance"].sum()*1e3,
                                                             self.dfTable,
                                                             costH2In=cumCost)
                    self.transportType.append("Pipeline")
                else:
                    #self.geoms[key]=self.truckDistance[iTrans].geometry
                    self.hscClasses[key]=hscClasses.Truck(capacity,
                                                          self.truckGPD[iTrans][self.transportWeight],
                                                          technology,
                                                          self.dfTable,
                                                          costH2In=cumCost,
                                                          beeline=self.beeline[iTrans],
                                                          end=self.truckGPD[iTrans]["edge"],
                                                          time=self.truckGPD[iTrans]["time"],
                                                          totalH2Demand=self.totalH2Demand[iTrans])
                    # store per-route results on the route GeoDataFrame
                    self.truckGPD[iTrans]["TOTEX"]=self.hscClasses[key].getTOTEX()
                    self.truckGPD[iTrans]["nTruckPerDay"]=self.hscClasses[key].nTruckPerDay

                    self.transportType.append("Truck")

                iTrans+=1

            elif kind=="Station":
                # refresh the station invest for the current scale-up first
                if "learningRate" in self.dfTable["General"].index:
                    self.calcFSCost(len(capacity), learning = self.learning)
                self.hscClasses[key]=hscClasses.Station(capacity,
                                                        technology,
                                                        self.dfTable,
                                                        costH2In=cumCost)




            elif kind=="Connector":
                #The conversion technology needs additional information about the
                #inlet and outlet pressure, in case it is an compressor
                pressureIn=0
                pressureOut=0
                nextTech=0
                if technology=="Compressor":
                    #previous Technology --> elaborating starting pressure
                    # NOTE(review): positional [0] on a label-indexed Series
                    # relies on a deprecated pandas fallback; .iloc[0] would
                    # be the explicit spelling.
                    prevKind=self.dfHSC.loc[[x == index-1 for x in self.dfHSC["intIndex"]],"kind"][0]
                    prevTech=self.dfHSC.loc[[x == index-1 for x in self.dfHSC["intIndex"]],"technology"][0]
                    i=1
                    # walk upstream past placeholder "None" steps
                    while prevTech=="None" and index-i>0:
                        i+=1
                        prevKind=self.dfHSC.loc[[x == index-i for x in self.dfHSC["intIndex"]],"kind"][0]
                        prevTech=self.dfHSC.loc[[x == index-i for x in self.dfHSC["intIndex"]],"technology"][0]
                    #since conversion pressure are not fixed: try method
                    # NOTE(review): bare except also hides typos/KeyErrors;
                    # consider narrowing to KeyError.
                    try:
                        pressureIn=self.dfTable[prevKind].loc["pressureOut",prevTech]
                    except:
                        if prevTech=="Dehydrogenation":
                            pressureIn=2.
                        else: pressureIn=999.
                    #following Technology --> elaborating starting pressure
                    nextKind=self.dfHSC.loc[[x == index+1 for x in self.dfHSC["intIndex"]],"kind"][0]
                    nextTech=self.dfHSC.loc[[x == index+1 for x in self.dfHSC["intIndex"]],"technology"][0]
                    i=1
                    # walk downstream past placeholder "None" steps
                    while nextTech=="None":
                        i+=1
                        nextKind=self.dfHSC.loc[[x == index+i for x in self.dfHSC["intIndex"]],"kind"][0]
                        nextTech=self.dfHSC.loc[[x == index+i for x in self.dfHSC["intIndex"]],"technology"][0]

                    try:
                        pressureOut=self.dfTable[nextKind].loc["pressureIn",nextTech]
                    except:
                        #Input pressure for Liquefaction as well as Hydrogenation as set to 30 bar
                        pressureOut=30.
                    #if pressure in > pressure Out --> No compression necessary
                    pressureOut=max(pressureIn, pressureOut)



                if self.preStorage:
                    self.hscClasses[key]=hscClasses.Connector(capacity,
                                                              technology,
                                                              self.dfTable,
                                                              costH2In=cumCost,
                                                              pressureIn=pressureIn,
                                                              pressureOut=pressureOut,
                                                              nextStep=nextTech,
                                                              storagePartition=storagePartition)
                else:
                    self.hscClasses[key]=hscClasses.Connector2(capacity,
                                                               technology,
                                                               self.dfTable,
                                                               costH2In=cumCost,
                                                               pressureIn=pressureIn,
                                                               pressureOut=pressureOut,
                                                               nextStep=nextTech,
                                                               storagePartition=storagePartition)




            # accumulate the running H2 cost and collect per-step results
            cumCost+=self.hscClasses[key].getMeanTOTEX()
            self.dfHSC.loc[key, "TOTEX"]=self.hscClasses[key].getMeanTOTEX()
            self.dfHSC.loc[key, "invest"]=self.hscClasses[key].getTotalInvest()

            self.dfHSC.loc[key, "CAPEX"]=self.hscClasses[key].getMeanValue(self.hscClasses[key].CAPEX)
            self.dfHSC.loc[key, "fixOPEX"]=self.hscClasses[key].getMeanValue(self.hscClasses[key].fixOPEX)
            self.dfHSC.loc[key, "varOPEX"]=self.hscClasses[key].getMeanValue(self.hscClasses[key].varOPEX)
            self.dfHSC.loc[key, "nTrucks"]=self.hscClasses[key].numberOfTrucks
            self.dfHSC.loc[key, "cumCost"]=cumCost
            self.dfHSC.loc[key, "additionalEmissions"]=self.hscClasses[key].CO2Emissions
            self.dfHSC.loc[key, "additionalPE"]=self.hscClasses[key].primary
            self.energyDemand[key]=self.hscClasses[key].getDemand()
        #Add the energy demand to the results
        self.dfHSC=pd.concat([self.dfHSC, self.energyDemand.T],axis=1)

        ##Test!!!
        # upstream start cost is booked onto the first chain step
        self.dfHSC.loc[self.dfHSC.index[0],"TOTEX"]=self.dfHSC.loc[self.dfHSC.index[0],"TOTEX"]+self.startCost

        #Exclude everything in the same way as for H2Mobility
        self.dfHSCRes=self.dfHSC.loc[:,["TOTEX",
                                        "CAPEX",
                                        "fixOPEX",
                                        "varOPEX",
                                        "invest",
                                        "cumCost",
                                        "technology",
                                        "nTrucks",
                                        "pipeLength",
                                        "Loss",
                                        "electricityRES",
                                        "electricityGrid",
                                        "NaturalGas",
                                        "Diesel"]]
        self.calcLossesRecursive()
        self.calcEmissions()
        self.calcPrimaryDemand()
        self.dfHSCRes=self.dfHSCRes.round(4)
312 | #%%
313 | def createResFolders(self, pathResults, savePlot=False, saveResults=False):
314 | self.savePlot=savePlot
315 | self.saveResults=saveResults
316 | self.pathResults=os.path.join(pathResults, self.name)
317 | if savePlot:
318 | self.pathPlot=os.path.join(self.pathResults, "Graphics")
319 | os.makedirs(self.pathPlot)
320 | if saveResults:
321 | self.pathRes=os.path.join(self.pathResults, "data")
322 | os.makedirs(self.pathRes)
323 |
324 | def calcEmissions(self, useOverCapacity=True):
325 | '''
326 | calculate emissions based on input Values in dfTable
327 | '''
328 | if useOverCapacity:
329 | factor=self.dfHSCRes["overCapacity"]
330 | else:
331 | factor=1
332 | emissions=self.dfTable["General"].loc[["emissionDiesel",
333 | "emissionNG",
334 | "emissionGrid",
335 | "emissionRES"], "General"]
336 | emissionDiesel=emissions["emissionDiesel"]*self.dfHSC["Diesel"]
337 | emissionNG=emissions["emissionNG"]*self.dfHSC["NaturalGas"]
338 | emissionGrid=emissions["emissionGrid"]*self.dfHSC["electricityGrid"]
339 | emissionRES=emissions["emissionRES"]*self.dfHSC["electricityRES"]
340 | self.dfHSCRes["CO2Emissions[kg/kg]"]=factor*((emissionDiesel+emissionNG+emissionGrid+emissionRES)/1000+self.dfHSC["additionalEmissions"])
341 |
342 | def calcPrimaryDemand(self, useOverCapacity=True):
343 | '''
344 | calculate emissions based on input Values in dfTable
345 | '''
346 | if useOverCapacity:
347 | factor=self.dfHSCRes["overCapacity"]
348 | else:
349 | factor=1
350 | primaryDemand=self.dfTable["General"].loc[["primaryDiesel",
351 | "primaryNG",
352 | "primaryGrid",
353 | "primaryRES"], "General"]
354 | primaryDiesel=primaryDemand["primaryDiesel"]*self.dfHSC["Diesel"]
355 | primaryNG=primaryDemand["primaryNG"]*self.dfHSC["NaturalGas"]
356 | primaryGrid=primaryDemand["primaryGrid"]*self.dfHSC["electricityGrid"]
357 | primaryRES=primaryDemand["primaryRES"]*self.dfHSC["electricityRES"]
358 | self.dfHSCRes["primaryEnergy[MJ/kg]"]=factor*(primaryDiesel+primaryNG+primaryGrid+primaryRES+self.dfHSC["additionalPE"])
359 | #%%
360 | def calcLossesRecursive(self):
361 | '''
362 | recursive calculate losses back to get an overcapacity that is necessary
363 | '''
364 | loss=[1]
365 | lossesSingle=self.dfHSCRes["Loss"]
366 | for i in range(lossesSingle.size-1):
367 | loss.append(lossesSingle[-1*(i+1)]*loss[i])
368 | loss.reverse()
369 | self.dfHSCRes["overCapacity"]=loss
370 | #%%
371 | '''
372 | saves the Hydrogen supply chain results to the given excel-file
373 | '''
374 | def saveHSC(self, pathData, i, name="HSCRes.xlsx"):
375 | self.filePath=os.path.join(pathData,name)
376 | self.sheetName=str(i)
377 | if path.isfile(self.filePath):
378 | book = load_workbook(self.filePath)
379 | if self.sheetName in book.sheetnames:
380 | std=book.get_sheet_by_name(self.sheetName)
381 | book.remove_sheet(std)
382 | writer=pd.ExcelWriter(self.filePath, engine = 'openpyxl')
383 | writer.book=book
384 | self.dfHSCRes.to_excel(writer,
385 | sheet_name=self.sheetName)
386 | writer.save()
387 | writer.close()
388 | else:
389 | self.dfHSCRes.to_excel(self.filePath, sheet_name=self.sheetName)
390 |
391 |
392 | #%%
    def plotHSC(self,
                figsize=(14,10),
                background=None,
                source=None,
                hub=None,
                sink=None,
                truckVar="capacity",
                pipeVar="capacity",
                zorder=3,
                alpha=1,
                savePlot=False,
                pathPlot=None,
                show=True):
        '''
        plot the scenario as a map of the investigated country

        Parameters
        ----------
        figsize : tuple
            Size of the matplotlib figure.
        background, source, hub, sink : geopandas.GeoDataFrame or None
            Map layers; each is drawn only if it is a GeoDataFrame.
        truckVar, pipeVar : str
            Column names used to scale truck / pipeline line widths.
        zorder : int
            Base drawing order for the transport layers.
        alpha : float
            Transparency of the transport layers.
        savePlot : bool
            If True, save the figure to <pathPlot>/<self.name>.
        pathPlot : str or None
            Target folder for saving (required when savePlot is True).
        show : bool
            If True, call plt.show().
        '''
        bg_area=(87/255, 133/255, 147/255)
        bg_lines=(99/255, 150/255, 167/255)

        self.fig,self.ax=plt.subplots(figsize=figsize)
        self.ax.set_aspect('equal')
        self.ax.axis("off")
        rangeTransport=len(self.transportType)
        if isinstance(background, gpd.GeoDataFrame):
            pFun.plot_polygon_collection(ax=self.ax,
                                         geoms=background.geometry,
                                         colors_or_values=[bg_area for ix in background.index],
                                         plot_values=False,
                                         vmin=None,
                                         vmax=None,
                                         cmap=None,
                                         edgecolor=bg_lines,
                                         alpha=1,
                                         label="Administrative Area")
        #Sinks
        if isinstance(sink, gpd.GeoDataFrame):
            pFun.plotGPDPoints(sink, self.ax,
                               color="black",
                               label="Fueling Stations")

        #Source
        # marker sizes are scaled linearly between these bounds
        sourceMarkerMax=100
        sourceMarkerMin=1
        if isinstance(source, gpd.GeoDataFrame):
            if self.transportType[0]=="Pipeline":
                source["markersize"]=((source["pipe_kt_a"]-source["pipe_kt_a"].min())/source["pipe_kt_a"].max()*(sourceMarkerMax-sourceMarkerMin)+sourceMarkerMin)
            else:
                source["markersize"]=((source["truck_kt_a"]-source["truck_kt_a"].min())/source["truck_kt_a"].max()*(sourceMarkerMax-sourceMarkerMin)+sourceMarkerMin)
            pFun.plotGPDPoints(source,self.ax,
                               colName="markersize",
                               color=(178/255, 223/255, 138/255),
                               zorder=zorder+1,
                               marker="D",
                               label="Electrolyzer\n %.1f" % source["pipe_kt_a"].min() + " - %.1f" % source["pipe_kt_a"].max() + "kt/a")

        if self.industryDemand>0:
            indMarkerSizeMax=100
            indMarkerSizeMin=0
            if isinstance(hub, gpd.GeoDataFrame):
                hub["markersize"]=((hub["H2Ind_kt"]-hub["H2Ind_kt"].min())/hub["H2Ind_kt"].max()*(indMarkerSizeMax-indMarkerSizeMin)+indMarkerSizeMin)
                pFun.plotGPDPoints(hub,self.ax,
                                   colName="markersize",
                                   color="blue",
                                   zorder=zorder+1,
                                   marker="^",
                                   label="Industrial Demand\n %.1f" % hub["H2Ind_kt"].min() + " - %.1f" % hub["H2Ind_kt"].max() + "kt/a")

        #Transport
        # per-layer style settings: index 0 = transmission, 1 = distribution
        lineWidthMax=[5,0.5]
        lineWidthMin=[0.5,0.5]
        colorStyle=["redToWhite", "black"]
        maxRange=[5,1]
        pipeLabel=["Transmission Pipeline", "Distribution Pipeline"]
        truckLabel=["Truck Routes", "Truck Routes (Distribution)"]
        for i in range(rangeTransport):
            if self.transportType[i]=="Pipeline":
                pFun.plotGpdLinesVar(self.pipelineGPD[i],
                                     pipeVar,
                                     self.ax,
                                     zorder=zorder-i,
                                     alpha=alpha,
                                     name=colorStyle[i],
                                     rangeMax=maxRange[i],
                                     maxLineWidth=lineWidthMax[i],
                                     minLineWidth=lineWidthMin[i],
                                     label=pipeLabel[i])
            else:
                pFun.plotGpdLinesVar(self.truckGPD[i],
                                     truckVar,
                                     self.ax,
                                     zorder=zorder-i,
                                     alpha=alpha,
                                     name="black",
                                     rangeMax=maxRange[i],
                                     maxLineWidth=lineWidthMax[i],
                                     minLineWidth=lineWidthMin[i],
                                     label=truckLabel[i])


        plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
        if savePlot:
            plt.savefig(os.path.join(pathPlot,self.name), bbox_inches="tight")
        if show:
            plt.show()
497 |
498 |
--------------------------------------------------------------------------------
/data/matplotlibrcEES.mplstyle:
--------------------------------------------------------------------------------
1 | ### MATPLOTLIBRC FORMAT
2 |
3 | # This is a sample matplotlib configuration file - you can find a copy
4 | # of it on your system in
5 | # site-packages/matplotlib/mpl-data/matplotlibrc. If you edit it
6 | # there, please note that it will be overwritten in your next install.
7 | # If you want to keep a permanent local copy that will not be
8 | # overwritten, place it in the following location:
9 | # unix/linux:
10 | # $HOME/.config/matplotlib/matplotlibrc or
11 | # $XDG_CONFIG_HOME/matplotlib/matplotlibrc (if $XDG_CONFIG_HOME is set)
12 | # other platforms:
13 | # $HOME/.matplotlib/matplotlibrc
14 | #
15 | # See http://matplotlib.org/users/customizing.html#the-matplotlibrc-file for
16 | # more details on the paths which are checked for the configuration file.
17 | #
18 | # This file is best viewed in an editor which supports python mode
19 | # syntax highlighting. Blank lines, or lines starting with a comment
20 | # symbol, are ignored, as are trailing comments. Other lines must
21 | # have the format
22 | # key : val # optional comment
23 | #
24 | # Colors: for the color values below, you can either use - a
25 | # matplotlib color string, such as r, k, or b - an rgb tuple, such as
26 | # (1.0, 0.5, 0.0) - a hex string, such as ff00ff or #ff00ff - a scalar
27 | # grayscale intensity such as 0.75 - a legal html color name, e.g., red,
28 | # blue, darkslategray
29 |
30 | #### CONFIGURATION BEGINS HERE
31 |
32 | # The default backend; one of GTK GTKAgg GTKCairo GTK3Agg GTK3Cairo
33 | # CocoaAgg MacOSX Qt4Agg Qt5Agg TkAgg WX WXAgg Agg Cairo GDK PS PDF SVG
34 | # Template.
35 | # You can also deploy your own backend outside of matplotlib by
36 | # referring to the module name (which must be in the PYTHONPATH) as
37 | # 'module://my_backend'.
38 | backend : TkAgg
39 |
40 | # If you are using the Qt4Agg backend, you can choose here
41 | # to use the PyQt4 bindings or the newer PySide bindings to
42 | # the underlying Qt4 toolkit.
43 | #backend.qt4 : PyQt4 # PyQt4 | PySide
44 |
45 | # Note that this can be overridden by the environment variable
46 | # QT_API used by Enthought Tool Suite (ETS); valid values are
47 | # "pyqt" and "pyside". The "pyqt" setting has the side effect of
48 | # forcing the use of Version 2 API for QString and QVariant.
49 |
50 | # The port to use for the web server in the WebAgg backend.
51 | # webagg.port : 8888
52 |
53 | # If webagg.port is unavailable, a number of other random ports will
54 | # be tried until one that is available is found.
55 | # webagg.port_retries : 50
56 |
57 | # When True, open the webbrowser to the plot that is shown
58 | # webagg.open_in_browser : True
59 |
60 | # When True, the figures rendered in the nbagg backend are created with
61 | # a transparent background.
62 | nbagg.transparent : True
63 |
64 | # if you are running pyplot inside a GUI and your backend choice
65 | # conflicts, we will automatically try to find a compatible one for
66 | # you if backend_fallback is True
67 | #backend_fallback: True
68 |
69 | #interactive : False
70 | #toolbar : toolbar2 # None | toolbar2 ("classic" is deprecated)
71 | #timezone : UTC # a pytz timezone string, e.g., US/Central or Europe/Paris
72 |
73 | # Where your matplotlib data lives if you installed to a non-default
74 | # location. This is where the matplotlib fonts, bitmaps, etc reside
75 | #datapath : /home/jdhunter/mpldata
76 |
77 |
78 | ### LINES
79 | # See http://matplotlib.org/api/artist_api.html#module-matplotlib.lines for more
80 | # information on line properties.
81 | lines.linewidth : 2.0 # line width in points
82 | #lines.linestyle : - # solid line
83 | lines.color : 0.,0.3176, 0.5098 # has no effect on plot(); see axes.prop_cycle
84 | #lines.marker : None # the default marker
85 | #lines.markeredgewidth : 0.5 # the line width around the marker symbol
86 | #lines.markersize : 6 # markersize, in points
87 | #lines.dash_joinstyle : miter # miter|round|bevel
88 | #lines.dash_capstyle : butt # butt|round|projecting
89 | #lines.solid_joinstyle : miter # miter|round|bevel
90 | #lines.solid_capstyle : projecting # butt|round|projecting
91 | #lines.antialiased : True # render lines antialiased (no jaggies)
92 |
93 | #markers.fillstyle: full # full|left|right|bottom|top|none
94 |
95 | ### PATCHES
96 | # Patches are graphical objects that fill 2D space, like polygons or
97 | # circles. See
98 | # http://matplotlib.org/api/artist_api.html#module-matplotlib.patches
99 | # information on patch properties
100 | #patch.linewidth : 0.0 # edge width in points
101 | #patch.facecolor : blue
102 | #patch.edgecolor : black
103 | #patch.antialiased : True # render patches antialiased (no jaggies)
104 |
105 | ### FONT
106 | #
107 | # font properties used by text.Text. See
108 | # http://matplotlib.org/api/font_manager_api.html for more
109 | # information on font properties. The 6 font properties used for font
110 | # matching are given below with their default values.
111 | #
112 | # The font.family property has five values: 'serif' (e.g., Times),
113 | # 'sans-serif' (e.g., Helvetica), 'cursive' (e.g., Zapf-Chancery),
114 | # 'fantasy' (e.g., Western), and 'monospace' (e.g., Courier). Each of
115 | # these font families has a default list of font names in decreasing
116 | # order of priority associated with them. When text.usetex is False,
117 | # font.family may also be one or more concrete font names.
118 | #
119 | # The font.style property has three values: normal (or roman), italic
120 | # or oblique. The oblique style will be used for italic, if it is not
121 | # present.
122 | #
123 | # The font.variant property has two values: normal or small-caps. For
124 | # TrueType fonts, which are scalable fonts, small-caps is equivalent
125 | # to using a font size of 'smaller', or about 83% of the current font
126 | # size.
127 | #
128 | # The font.weight property has effectively 13 values: normal, bold,
129 | # bolder, lighter, 100, 200, 300, ..., 900. Normal is the same as
130 | # 400, and bold is 700. bolder and lighter are relative values with
131 | # respect to the current weight.
132 | #
133 | # The font.stretch property has 11 values: ultra-condensed,
134 | # extra-condensed, condensed, semi-condensed, normal, semi-expanded,
135 | # expanded, extra-expanded, ultra-expanded, wider, and narrower. This
136 | # property is not currently implemented.
137 | #
138 | # The font.size property is the default font size for text, given in pts.
139 | # 12pt is the standard value.
140 | #
141 | font.family : Arial
142 | font.style : normal
143 | #font.variant : normal
144 | #font.weight : medium
145 | #font.stretch : normal
146 | # note that font.size controls default text sizes. To configure
147 | # special text sizes tick labels, axes, labels, title, etc, see the rc
148 | # settings for axes and ticks. Special text sizes can be defined
149 | # relative to font.size, using the following values: xx-small, x-small,
150 | # small, medium, large, x-large, xx-large, larger, or smaller
151 | font.size : 7
152 | font.serif : Bitstream Vera Serif, New Century Schoolbook, Century Schoolbook L, Utopia, ITC Bookman, Bookman, Nimbus Roman No9 L, Times New Roman, Times, Palatino, Charter, serif
153 | font.sans-serif : Bitstream Vera Sans, Lucida Grande, Verdana, Geneva, Lucid, Arial, Helvetica, Avant Garde, sans-serif
154 | #font.cursive : Apple Chancery, Textile, Zapf Chancery, Sand, Script MT, Felipa, cursive
155 | #font.fantasy : Comic Sans MS, Chicago, Charcoal, Impact, Western, Humor Sans, fantasy
156 | #font.monospace : Bitstream Vera Sans Mono, Andale Mono, Nimbus Mono L, Courier New, Courier, Fixed, Terminal, monospace
157 |
158 | ### TEXT
159 | # text properties used by text.Text. See
160 | # http://matplotlib.org/api/artist_api.html#module-matplotlib.text for more
161 | # information on text properties
162 |
163 | text.color : black
164 |
165 | ### LaTeX customizations. See http://wiki.scipy.org/Cookbook/Matplotlib/UsingTex
166 | text.usetex : False # use latex for all text handling. The following fonts
167 | # are supported through the usual rc parameter settings:
168 | # new century schoolbook, bookman, times, palatino,
169 | # zapf chancery, charter, serif, sans-serif, helvetica,
170 | # avant garde, courier, monospace, computer modern roman,
171 | # computer modern sans serif, computer modern typewriter
172 | # If another font is desired which can be loaded using the
173 | # LaTeX \usepackage command, please inquire at the
174 | # matplotlib mailing list
175 | text.latex.unicode : False # use "ucs" and "inputenc" LaTeX packages for handling
176 | # unicode strings.
177 | #text.latex.preamble : # IMPROPER USE OF THIS FEATURE WILL LEAD TO LATEX FAILURES
178 | # AND IS THEREFORE UNSUPPORTED. PLEASE DO NOT ASK FOR HELP
179 | # IF THIS FEATURE DOES NOT DO WHAT YOU EXPECT IT TO.
180 | # preamble is a comma separated list of LaTeX statements
181 | # that are included in the LaTeX document preamble.
182 | # An example:
183 | # text.latex.preamble : \usepackage{bm},\usepackage{euler}
184 | # The following packages are always loaded with usetex, so
185 | # beware of package collisions: color, geometry, graphicx,
186 | # type1cm, textcomp. Adobe Postscript (PSSNFS) font packages
187 | # may also be loaded, depending on your font settings
188 |
189 | #text.dvipnghack : None # some versions of dvipng don't handle alpha
190 | # channel properly. Use True to correct
191 | # and flush ~/.matplotlib/tex.cache
192 | # before testing and False to force
193 | # correction off. None will try and
194 | # guess based on your dvipng version
195 |
196 | #text.hinting : auto # May be one of the following:
197 | # 'none': Perform no hinting
198 | # 'auto': Use freetype's autohinter
199 | # 'native': Use the hinting information in the
200 | # font file, if available, and if your
201 | # freetype library supports it
202 | # 'either': Use the native hinting information,
203 | # or the autohinter if none is available.
204 | # For backward compatibility, this value may also be
205 | # True === 'auto' or False === 'none'.
206 | #text.hinting_factor : 8 # Specifies the amount of softness for hinting in the
207 | # horizontal direction. A value of 1 will hint to full
208 | # pixels. A value of 2 will hint to half pixels etc.
209 |
210 | #text.antialiased : True # If True (default), the text will be antialiased.
211 | # This only affects the Agg backend.
212 |
213 | # The following settings allow you to select the fonts in math mode.
214 | # They map from a TeX font name to a fontconfig font pattern.
215 | # These settings are only used if mathtext.fontset is 'custom'.
216 | # Note that this "custom" mode is unsupported and may go away in the
217 | # future.
218 | #mathtext.cal : cursive
219 | #mathtext.rm : serif
220 | #mathtext.tt : monospace
221 | #mathtext.it : serif:italic
222 | #mathtext.bf : serif:bold
223 | #mathtext.sf : sans
224 | #mathtext.fontset : cm # Should be 'cm' (Computer Modern), 'stix',
225 | # 'stixsans' or 'custom'
226 | #mathtext.fallback_to_cm : True # When True, use symbols from the Computer Modern
227 | # fonts when a symbol can not be found in one of
228 | # the custom math fonts.
229 |
230 | #mathtext.default : it # The default font to use for math.
231 | # Can be any of the LaTeX font names, including
232 | # the special name "regular" for the same font
233 | # used in regular text.
234 |
235 | ### AXES
236 | # default face and edge color, default tick sizes,
237 | # default fontsizes for ticklabels, and so on. See
238 | # http://matplotlib.org/api/axes_api.html#module-matplotlib.axes
239 | axes.hold : True # whether to clear the axes by default on
240 | axes.facecolor : white # axes background color
241 | axes.edgecolor : 0.3, 0.3, 0.3 # axes edge color
242 | axes.linewidth : 1.0 # edge linewidth
243 | axes.grid : True # display grid or not
244 | axes.titlesize : 11 # fontsize of the axes title
245 | axes.labelsize : 11 # fontsize of the x and y labels
246 | axes.labelpad : 5 # space between label and axis
247 | #axes.labelweight : normal # weight of the x and y labels
248 | axes.labelcolor : k
249 | axes.axisbelow : True # whether axis gridlines and ticks are below
250 | # the axes elements (lines, text, etc)
251 |
252 | #axes.formatter.limits : -7, 7 # use scientific notation if log10
253 | # of the axis range is smaller than the
254 | # first or larger than the second
255 | #axes.formatter.use_locale : False # When True, format tick labels
256 | # according to the user's locale.
257 | # For example, use ',' as a decimal
258 | # separator in the fr_FR locale.
259 | #axes.formatter.use_mathtext : False # When True, use mathtext for scientific
260 | # notation.
261 | #axes.formatter.useoffset : True # If True, the tick label formatter
262 | # will default to labeling ticks relative
263 | # to an offset when the data range is very
264 | # small compared to the minimum absolute
265 | # value of the data.
266 |
267 | #axes.unicode_minus : True # use unicode for the minus symbol
268 | # rather than hyphen. See
269 | # http://en.wikipedia.org/wiki/Plus_and_minus_signs#Character_codes
270 | axes.prop_cycle : (cycler('color', ['005382','81a436','6785ff','67d1ff','808080','67ff95']))
271 |
272 | # color cycle for plot lines
273 | # as list of string colorspecs:
274 | # single letter, long name, or
275 | # web-style hex
276 | axes.xmargin : 0 # x margin. See `axes.Axes.margins`
277 | axes.ymargin : 0 # y margin See `axes.Axes.margins`
278 |
279 | #polaraxes.grid : True # display grid on polar axes
280 | #axes3d.grid : True # display grid on 3d axes
281 |
282 | ### TICKS
283 | # see http://matplotlib.org/api/axis_api.html#matplotlib.axis.Tick
284 | xtick.major.size : 2 # major tick size in points
285 | xtick.minor.size : 1 # minor tick size in points
286 | xtick.major.width : 0.5 # major tick width in points
287 | xtick.minor.width : 0.5 # minor tick width in points
288 | xtick.major.pad : 4 # distance to major tick label in points
289 | xtick.minor.pad : 4 # distance to the minor tick label in points
290 | xtick.color : k # color of the tick labels
291 | xtick.labelsize : 11 # fontsize of the tick labels
292 | xtick.direction : in # direction: in, out, or inout
293 |
294 | ytick.major.size : 2 # major tick size in points
295 | ytick.minor.size : 1 # minor tick size in points
296 | ytick.major.width : 0.5 # major tick width in points
297 | ytick.minor.width : 0.5 # minor tick width in points
298 | ytick.major.pad : 4 # distance to major tick label in points
299 | ytick.minor.pad : 4 # distance to the minor tick label in points
300 | ytick.color : k # color of the tick labels
301 | ytick.labelsize : 11 # fontsize of the tick labels
302 | ytick.direction : in # direction: in, out, or inout
303 |
304 |
305 | ### GRIDS
306 | grid.color : 0.3,0.3,0.3 # grid color
307 | grid.linestyle : - # solid line
308 | #grid.linewidth : 0.5 # in points
309 | #grid.alpha : 1.0 # transparency, between 0.0 and 1.0
310 |
311 | ### Legend
312 | #legend.fancybox : False # if True, use a rounded box for the
313 | # legend, else a rectangle
314 | #legend.isaxes : True
315 | #legend.numpoints : 2 # the number of points in the legend line
316 | legend.fontsize : 11.
317 | #legend.borderpad : 0.5 # border whitespace in fontsize units
318 | #legend.markerscale : 1.0 # the relative size of legend markers vs. original
319 | # the following dimensions are in axes coords
320 | #legend.labelspacing : 0.5 # the vertical space between the legend entries in fraction of fontsize
321 | #legend.handlelength : 2. # the length of the legend lines in fraction of fontsize
322 | #legend.handleheight : 0.7 # the height of the legend handle in fraction of fontsize
323 | #legend.handletextpad : 0.8 # the space between the legend line and legend text in fraction of fontsize
324 | #legend.borderaxespad : 0.5 # the border between the axes and legend edge in fraction of fontsize
325 | #legend.columnspacing : 2. # the spacing between columns in fraction of fontsize
326 | #legend.shadow : False
327 | legend.frameon : True # whether or not to draw a frame around legend
328 | #legend.framealpha : None # opacity of legend frame
329 | #legend.scatterpoints : 3 # number of scatter points
330 |
331 | ### FIGURE
332 | # See http://matplotlib.org/api/figure_api.html#matplotlib.figure.Figure
333 | #figure.titlesize : large # size of the figure title
334 | #figure.titleweight : normal # weight of the figure title
335 | figure.figsize : 7, 4 # figure size in inches #half page: 3.4, 3; double page: 7,4
336 | figure.dpi : 300 # figure dots per inch
337 | figure.facecolor : 1 # figure facecolor; 0.75 is scalar gray
338 | #figure.edgecolor : white # figure edgecolor
339 | figure.autolayout : false # When True, automatically adjust subplot
340 | # parameters to make the plot fit the figure
341 | figure.max_open_warning : 50 # The maximum number of figures to open through
342 | # the pyplot interface before emitting a warning.
343 | # If less than one this feature is disabled.
344 |
345 | # The figure subplot parameters. All dimensions are a fraction of the
346 | # figure width or height
347 | #figure.subplot.left : 0.125 # the left side of the subplots of the figure
348 | #figure.subplot.right : 0.9 # the right side of the subplots of the figure
349 | #figure.subplot.bottom : 0.1 # the bottom of the subplots of the figure
350 | #figure.subplot.top : 0.9 # the top of the subplots of the figure
351 | #figure.subplot.wspace : 0.2 # the amount of width reserved for blank space between subplots
352 | #figure.subplot.hspace : 0.2 # the amount of height reserved for white space between subplots
353 |
354 | ### IMAGES
355 | #image.aspect : equal # equal | auto | a number
356 | #image.interpolation : bilinear # see help(imshow) for options
357 | #image.cmap : jet # gray | jet etc...
358 | #image.lut : 256 # the size of the colormap lookup table
359 | #image.origin : upper # lower | upper
360 | #image.resample : False
361 | #image.composite_image : True # When True, all the images on a set of axes are
362 | # combined into a single composite image before
363 | # saving a figure as a vector graphics file,
364 | # such as a PDF.
365 |
366 | ### CONTOUR PLOTS
367 | #contour.negative_linestyle : dashed # dashed | solid
368 | #contour.corner_mask : True # True | False | legacy
369 |
370 | ### ERRORBAR PLOTS
371 | #errorbar.capsize : 3 # length of end cap on error bars in pixels
372 |
373 | ### Agg rendering
374 | ### Warning: experimental, 2008/10/10
375 | #agg.path.chunksize : 0 # 0 to disable; values in the range
376 | # 10000 to 100000 can improve speed slightly
377 | # and prevent an Agg rendering failure
378 | # when plotting very large data sets,
379 | # especially if they are very gappy.
380 | # It may cause minor artifacts, though.
381 | # A value of 20000 is probably a good
382 | # starting point.
383 | ### SAVING FIGURES
384 | #path.simplify : True # When True, simplify paths by removing "invisible"
385 | # points to reduce file size and increase rendering
386 | # speed
387 | #path.simplify_threshold : 0.1 # The threshold of similarity below which
388 | # vertices will be removed in the simplification
389 | # process
390 | #path.snap : True # When True, rectilinear axis-aligned paths will be snapped to
391 | # the nearest pixel when certain criteria are met. When False,
392 | # paths will never be snapped.
393 | #path.sketch : None # May be none, or a 3-tuple of the form (scale, length,
394 | # randomness).
395 | # *scale* is the amplitude of the wiggle
396 | # perpendicular to the line (in pixels). *length*
397 | # is the length of the wiggle along the line (in
398 | # pixels). *randomness* is the factor by which
399 | # the length is randomly scaled.
400 |
401 | # the default savefig params can be different from the display params
402 | # e.g., you may want a higher resolution, or to make the figure
403 | # background white
404 | savefig.dpi : 600 # figure dots per inch
405 | #savefig.facecolor : white # figure facecolor when saving
406 | #savefig.edgecolor : white # figure edgecolor when saving
407 | savefig.format : png # png, ps, pdf, svg
408 | savefig.bbox : tight # 'tight' or 'standard'.
409 | # 'tight' is incompatible with pipe-based animation
410 | # backends but will work with temporary file based ones:
411 | # e.g. setting animation.writer to ffmpeg will not work,
412 | # use ffmpeg_file instead
413 | #savefig.pad_inches : 0.2 # Padding to be used when bbox is set to 'tight'
414 | #savefig.jpeg_quality: 95 # when a jpeg is saved, the default quality parameter.
415 | #savefig.directory : ~ # default directory in savefig dialog box,
416 | # leave empty to always use current working directory
417 | savefig.transparent : False # setting that controls whether figures are saved with a
418 | # transparent background by default
419 |
420 | # tk backend params
421 | #tk.window_focus : False # Maintain shell focus for TkAgg
422 |
423 | # ps backend params
424 | #ps.papersize : letter # auto, letter, legal, ledger, A0-A10, B0-B10
425 | #ps.useafm : False # use of afm fonts, results in small files
426 | #ps.usedistiller : False # can be: None, ghostscript or xpdf
427 | # Experimental: may produce smaller files.
428 | # xpdf intended for production of publication quality files,
429 | # but requires ghostscript, xpdf and ps2eps
430 | #ps.distiller.res : 6000 # dpi
431 | #ps.fonttype : 3 # Output Type 3 (Type3) or Type 42 (TrueType)
432 |
433 | # pdf backend params
434 | #pdf.compression : 6 # integer from 0 to 9
435 | # 0 disables compression (good for debugging)
436 | #pdf.fonttype : 3 # Output Type 3 (Type3) or Type 42 (TrueType)
437 |
438 | # svg backend params
439 | #svg.image_inline : True # write raster image data directly into the svg file
440 | #svg.image_noscale : False # suppress scaling of raster data embedded in SVG
441 | #svg.fonttype : 'path' # How to handle SVG fonts:
442 | # 'none': Assume fonts are installed on the machine where the SVG will be viewed.
443 | # 'path': Embed characters as paths -- supported by most SVG renderers
444 | # 'svgfont': Embed characters as SVG fonts -- supported only by Chrome,
445 | # Opera and Safari
446 |
447 | # docstring params
448 | #docstring.hardcopy = False # set this when you want to generate hardcopy docstring
449 |
450 | # Set the verbose flags. This controls how much information
451 | # matplotlib gives you at runtime and where it goes. The verbosity
452 | # levels are: silent, helpful, debug, debug-annoying. Any level is
453 | # inclusive of all the levels below it. If your setting is "debug",
454 | # you'll get all the debug and helpful messages. When submitting
455 | # problems to the mailing-list, please set verbose to "helpful" or "debug"
456 | # and paste the output into your report.
457 | #
458 | # The "fileo" gives the destination for any calls to verbose.report.
459 | # These objects can a filename, or a filehandle like sys.stdout.
460 | #
461 | # You can override the rc default verbosity from the command line by
462 | # giving the flags --verbose-LEVEL where LEVEL is one of the legal
463 | # levels, e.g., --verbose-helpful.
464 | #
465 | # You can access the verbose instance in your code
466 | # from matplotlib import verbose.
467 | #verbose.level : silent # one of silent, helpful, debug, debug-annoying
468 | #verbose.fileo : sys.stdout # a log filename, sys.stdout or sys.stderr
469 |
470 | # Event keys to interact with figures/plots via keyboard.
471 | # Customize these settings according to your needs.
472 | # Leave the field(s) empty if you don't need a key-map. (i.e., fullscreen : '')
473 |
474 | #keymap.fullscreen : f # toggling
475 | #keymap.home : h, r, home # home or reset mnemonic
476 | #keymap.back : left, c, backspace # forward / backward keys to enable
477 | #keymap.forward : right, v # left handed quick navigation
478 | #keymap.pan : p # pan mnemonic
479 | #keymap.zoom : o # zoom mnemonic
480 | #keymap.save : s # saving current figure
481 | #keymap.quit : ctrl+w, cmd+w # close the current figure
482 | #keymap.grid : g # switching on/off a grid in current axes
483 | #keymap.yscale : l # toggle scaling of y-axes ('log'/'linear')
484 | #keymap.xscale : L, k # toggle scaling of x-axes ('log'/'linear')
485 | #keymap.all_axes : a # enable all axes
486 |
487 | # Control location of examples data files
488 | #examples.directory : '' # directory to look in for custom installation
489 |
490 | ###ANIMATION settings
491 | #animation.html : 'none' # How to display the animation as HTML in
492 | # the IPython notebook. 'html5' uses
493 | # HTML5 video tag.
494 | #animation.writer : ffmpeg # MovieWriter 'backend' to use
495 | #animation.codec : mpeg4 # Codec to use for writing movie
496 | #animation.bitrate: -1 # Controls size/quality tradeoff for movie.
497 | # -1 implies let utility auto-determine
498 | #animation.frame_format: 'png' # Controls frame format used by temp files
499 | #animation.ffmpeg_path: 'ffmpeg' # Path to ffmpeg binary. Without full path
500 | # $PATH is searched
501 | #animation.ffmpeg_args: '' # Additional arguments to pass to ffmpeg
502 | #animation.avconv_path: 'avconv' # Path to avconv binary. Without full path
503 | # $PATH is searched
504 | #animation.avconv_args: '' # Additional arguments to pass to avconv
505 | #animation.mencoder_path: 'mencoder'
506 | # Path to mencoder binary. Without full path
507 | # $PATH is searched
508 | #animation.mencoder_args: '' # Additional arguments to pass to mencoder
509 | #animation.convert_path: 'convert' # Path to ImageMagick's convert binary.
510 | # On Windows use the full path since convert
511 | # is also the name of a system tool.
512 |
--------------------------------------------------------------------------------