├── data └── .gitkeep ├── config ├── .gitkeep ├── euler │ ├── config.yaml │ └── cluster-config.yaml ├── schema.yaml └── default.yaml ├── VERSION ├── lib ├── pytest.ini ├── test-requirements.yaml ├── renewablepotentialslib │ ├── __init__.py │ ├── eligibility.py │ ├── conversion.py │ └── shape_utils.py ├── README.md ├── setup.py └── tests │ ├── test_technically_eligible_land.py │ ├── test_shape_utils.py │ └── test_conversion.py ├── .syncignore-build ├── .flake8 ├── envs ├── metadata.yaml └── default.yaml ├── environment.yaml ├── .syncignore ├── .editorconfig ├── CITATION.md ├── rules ├── sync.smk ├── sonnendach.smk ├── ninja-input.smk ├── capacityfactors.smk ├── potential.smk └── data-preprocessing.smk ├── .github └── workflows │ ├── pythonpackage.yaml │ └── schemavalidation.yaml ├── tests ├── test_administrative_borders.py ├── validate_schema.py ├── test_potential_of_layers.py └── test_sonnendach_reference.py ├── .gitignore ├── scripts ├── metadata.py ├── land_cover_stats_to_csv.py ├── lau.py ├── capacityfactors │ ├── timeseries.py │ ├── averages_map.py │ ├── ninja_input_wind.py │ ├── ninja_input_pv.py │ └── id_map.py ├── swiss_building_footprints.py ├── built_up_area.py ├── estimate_protected_shapes.py ├── administrative_borders.py ├── technically_eligible_area.py ├── technical_eligibility.py ├── technically_eligible_electricity_yield.py ├── shared_coast.py ├── units.py ├── technically_eligible_capacity.py ├── technically_eligible_capacityfactor.py ├── areas.py └── potentials.py ├── LICENSE.md ├── Snakefile ├── README.md └── notebooks ├── wind-turbines-in-europe.ipynb └── handling-missing-shapes-of-protected-areas.ipynb /data/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /config/.gitkeep: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | 1.0.0-dev 2 | -------------------------------------------------------------------------------- /lib/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | -------------------------------------------------------------------------------- /.syncignore-build: -------------------------------------------------------------------------------- 1 | # build files to ignore when receiving from Euler 2 | raw-wdpa-*/ 3 | capacityfactors/ 4 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 119 3 | ignore = E261, W503 4 | exclude = Snakefile, *.smk 5 | builtins = snakemake # for using the snakemake injection in scripts 6 | -------------------------------------------------------------------------------- /envs/metadata.yaml: -------------------------------------------------------------------------------- 1 | name: metadata 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - python=3.6 6 | - pyyaml=5.1 7 | - pip: 8 | - -e ../../lib 9 | -------------------------------------------------------------------------------- /environment.yaml: -------------------------------------------------------------------------------- 1 | name: solar-and-wind-potentials 2 | channels: 3 | - conda-forge 4 | - bioconda 5 | dependencies: 6 | - python=3.6 7 | - snakemake-minimal=5.4 8 | - pycountry=20.7.3 9 | -------------------------------------------------------------------------------- /.syncignore: -------------------------------------------------------------------------------- 1 | # files to ignore when sending to Euler 2 | .snakemake 3 | .git 4 | .pytest_cache 5 | __pycache__ 6 | *.pyc 7 | *.pyo 8 | .vscode 9 | 
.DS_Store 10 | build 11 | data/automatic 12 | data/sonnendach-old 13 | -------------------------------------------------------------------------------- /config/euler/config.yaml: -------------------------------------------------------------------------------- 1 | cluster: bsub -oo {cluster.output} -W {cluster.runtime} -n {cluster.cores} -R "rusage[mem={cluster.memory}]" < 2 | jobs: 999 3 | cluster-config: config/euler/cluster-config.yaml 4 | local-cores: 1 5 | latency-wait: 60 6 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # http://editorconfig.org 2 | 3 | root = true 4 | 5 | [*] 6 | indent_style = space 7 | indent_size = 4 8 | trim_trailing_whitespace = true 9 | insert_final_newline = true 10 | charset = utf-8 11 | end_of_line = lf 12 | 13 | [LICENSE] 14 | insert_final_newline = false 15 | -------------------------------------------------------------------------------- /CITATION.md: -------------------------------------------------------------------------------- 1 | If you use this code or data in an academic publication, please cite the following article: 2 | 3 | Tröndle, T., Pfenninger, S., Lilliestam, J., 2019. Home-made or imported: on the possibility for renewable electricity autarky on all scales in Europe. Energy Strategy Reviews 26. https://doi.org/10.1016/j.esr.2019.100388 4 | -------------------------------------------------------------------------------- /lib/test-requirements.yaml: -------------------------------------------------------------------------------- 1 | name: 2 | test-renewablepotentialslib 3 | channels: 4 | - conda-forge 5 | dependencies: 6 | - python=3.6 7 | - pip=19.1.1 8 | - pytest=3.2.3 9 | - pycountry=20.7.3 10 | - pyproj=2.6.1 11 | - geopandas=0.8.1 12 | - numpy=1.14.6 13 | - pandas=0.23.2 14 | - shapely=1.7.1 15 | - pip: 16 | -e . 
"""Top-level package of renewablepotentialslib."""

__version__ = "0.1.0" # Additionally defined in setup.py.

# European equal-area LAEA projection; used for area computations in meters.
# from https://epsg.io/3035
EPSG_3035 = "EPSG:3035"
EPSG_3035_PROJ4 = "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 +ellps=GRS80 +units=m +no_defs "
# Geographic longitude/latitude coordinates.
# from https://epsg.io/4326
WGS84 = "EPSG:4326"
WGS84_PROJ4 = "+proj=longlat +datum=WGS84 +no_defs "
--exclude-from=.syncignore {EULER_URL}:{EULER_BASE_DIR}" 13 | 14 | 15 | rule receive: 16 | message: "Receive build changes from Euler" 17 | shell: 18 | "rsync -avzh --progress --delete -r --max-size=1g --exclude-from=.syncignore-build {EULER_URL}:{EULER_BUILD_DIR} {LOCAL_EULER_RESULTS}" 19 | -------------------------------------------------------------------------------- /config/euler/cluster-config.yaml: -------------------------------------------------------------------------------- 1 | __default__: 2 | runtime: 10 3 | cores: 1 4 | memory: 16000 5 | output: "build/logs/{rule}.{wildcards}.log" 6 | protected_areas_in_europe: 7 | runtime: 239 8 | memory: 64000 9 | settlements: 10 | cores: 4 11 | runtime: 240 12 | shared_coast: 13 | cores: 4 14 | runtime: 60 15 | slope_in_europe: 16 | cores: 4 17 | elevation_in_europe: 18 | cores: 4 19 | eez_eligibility: 20 | cores: 4 21 | sensitivities: 22 | cores: 4 23 | capacityfactor_timeseries: 24 | memory: 128000 25 | runtime: 180 26 | time_average_capacityfactor_map: 27 | memory: 32000 28 | national_boundaries: 29 | runtime: 60 30 | administrative_borders: 31 | runtime: 240 32 | -------------------------------------------------------------------------------- /.github/workflows/pythonpackage.yaml: -------------------------------------------------------------------------------- 1 | name: Tests of renewablepotentialslib 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | test: 7 | name: Run tests 8 | runs-on: ubuntu-latest 9 | defaults: 10 | run: 11 | shell: bash -l {0} 12 | steps: 13 | - uses: actions/checkout@v2 14 | - name: Setup conda environment 15 | uses: conda-incubator/setup-miniconda@v2 16 | with: 17 | auto-update-conda: true 18 | python-version: 3.6 19 | activate-environment: test-renewablepotentialslib 20 | environment-file: lib/test-requirements.yaml 21 | - name: Test with pytest 22 | run: pytest ./lib 23 | -------------------------------------------------------------------------------- 
"""Test that the consolidated administrative borders contain all configured layers."""
from pathlib import Path
import os

import pytest
import fiona
import yaml

ROOT_DIR = Path(os.path.abspath(__file__)).parent.parent

# FIX: resolve the config path relative to the repository root instead of the
# current working directory, so the test also works when pytest is invoked
# from elsewhere (PATH_TO_BORDERS below already uses ROOT_DIR).
with open(ROOT_DIR / "config" / "default.yaml", "r") as src:
    CONFIG_DEFAULT = yaml.safe_load(src)

PATH_TO_BORDERS = ROOT_DIR / "build" / "administrative-borders.gpkg"


@pytest.mark.skipif(not PATH_TO_BORDERS.exists(), reason="Consolidated administrative border shapefile not available.")
def test_administrative_border_layers():
    """Every layer declared in the default config must exist in the geopackage."""
    layers = fiona.listlayers(str(PATH_TO_BORDERS))
    default_layers = set(
        v for countries in CONFIG_DEFAULT['layers'].values() for k, v in countries.items()
    )
    # No configured layer may be missing from the built geopackage.
    assert len(default_layers.difference(layers)) == 0
/lib/README.md: -------------------------------------------------------------------------------- 1 | # renewablepotentialslib 2 | 3 | Library code of the solar-and-wind-potentials workflow. 4 | 5 | The library code contains general-purpose functions and routines that we expect to change at a slow pace -- in contrast to scripts. If you alter library code be aware that this will not trigger reruns of workflow rules. Think of `renewablepotentialslib` as any other dependency of this workflow (like NumPy): changing the version of any dependency will not rerun worfklow rules. When you change library code, you will have to rerun rules manually where needed. 6 | 7 | ## Developer Guide 8 | 9 | ### Installation 10 | 11 | Best install `renewablepotentialslib` in editable mode: 12 | 13 | $ conda env create -f requirements-test.yaml 14 | $ conda activate renewablepotentialslib-test 15 | 16 | ### Run the test suite 17 | 18 | Run the test suite with py.test: 19 | 20 | $ py.test 21 | -------------------------------------------------------------------------------- /.github/workflows/schemavalidation.yaml: -------------------------------------------------------------------------------- 1 | name: Tests of YAML configuration and schema 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | validate: 7 | name: Validate configuration against a schema and validate the schema itself 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v2 11 | - name: Set up Python 12 | uses: actions/setup-python@v2 13 | with: 14 | python-version: 3.6 15 | - name: Install dependencies 16 | run: | 17 | python -m pip install --upgrade pip 18 | pip install jsonschema PyYAML 19 | - name: Validate default config 20 | run: python ./tests/validate_schema.py ./config/schema.yaml --config ./config/default.yaml 21 | - name: Validate schema itself 22 | run: python ./tests/validate_schema.py ./config/schema.yaml 23 | -------------------------------------------------------------------------------- 
"""Validate a configuration file against a JSON schema, or the schema itself."""
import argparse

import jsonschema
import yaml


def _load_yaml(path):
    # Both the schema and the config are plain YAML documents.
    with open(path, "r") as stream:
        return yaml.safe_load(stream)


parser = argparse.ArgumentParser()
parser.add_argument("schema", help="JSON schema in YAML format")
parser.add_argument(
    "--config",
    help=(
        "configuration file to validate. "
        "If not given, schema itself will be validated against JSON schema Draft 7"
    )
)
args = parser.parse_args()

schema = _load_yaml(args.schema)

if not args.config:
    # We set the metaschema 'additionalProperties' to False to create a 'strict' schema checker,
    # which will fail on typos
    jsonschema.Draft7Validator.META_SCHEMA['additionalProperties'] = False
    jsonschema.Draft7Validator.check_schema(schema)
else:
    jsonschema.validate(_load_yaml(args.config), schema)
"""Script to extract attributes of features and write to csv."""
import geopandas as gpd
from rasterstats import zonal_stats
import pandas as pd


def land_cover_stats_to_csv(path_to_units, path_to_land_cover, attributes, path_to_results_csv):
    """Count land-cover categories per spatial unit and write the result to csv.

    Parameters:
        path_to_units: vector file of spatial units carrying an "id" property
        path_to_land_cover: categorical land-cover raster
        attributes: attribute names of the form "<prefix>_<category-int>..." --
            the integer after the first underscore selects the raster category
        path_to_results_csv: path of the resulting csv file
    """
    # FIX: removed leftover debug statement (`#ipdb.set_trace()`).
    units = gpd.read_file(path_to_units).set_index('id')  # read only to recover unit ids

    # Map raster category value -> output column name.
    cmap = {int(attribute.split("_")[1]): attribute for attribute in attributes}
    stats = zonal_stats(path_to_units, path_to_land_cover, categorical=True, category_map=cmap)
    # zonal_stats preserves feature order, so the stats align with the unit index.
    stat_df = pd.DataFrame(data=stats, index=units.index)
    stat_df.to_csv(path_to_results_csv)


if __name__ == "__main__":
    land_cover_stats_to_csv(
        path_to_units=snakemake.input.units,
        path_to_land_cover=snakemake.input.land_cover,
        attributes=snakemake.params.attributes,
        path_to_results_csv=snakemake.output[0]
    )
21 | -------------------------------------------------------------------------------- /rules/sonnendach.smk: -------------------------------------------------------------------------------- 1 | """Retrieve statistics of the sonnendach.ch dataset.""" 2 | 3 | localrules: total_size_swiss_rooftops_according_to_sonnendach_data, 4 | total_swiss_yield_according_to_sonnendach_data, sonnendach_statistics 5 | 6 | 7 | rule total_size_swiss_rooftops_according_to_sonnendach_data: 8 | message: "Download the size of rooftops from Sonnendach data." 9 | params: url = config["data-sources"]["sonnendach_statistics"] 10 | output: "data/automatic/sonnendach/total-rooftop-area-km2.txt" 11 | shell: "curl -sLo {output} '{params.url}'" 12 | 13 | 14 | rule total_swiss_yield_according_to_sonnendach_data: 15 | message: "Download the yield of all available rooftops from Sonnendach data." 16 | params: url = config["data-sources"]["sonnendach_total_size"] 17 | output: "data/automatic/sonnendach/total-yield-twh.txt" 18 | shell: "curl -sLo {output} '{params.url}'" 19 | 20 | 21 | rule sonnendach_statistics: 22 | message: "Download statistics of roofs in Switzerland." 
23 | params: url = config["data-sources"]["sonnendach_total_yield"] 24 | output: "data/automatic/sonnendach/roof-statistics.csv", 25 | shell: "curl -sLo {output} '{params.url}'" 26 | -------------------------------------------------------------------------------- /scripts/lau.py: -------------------------------------------------------------------------------- 1 | """Preprocessing of raw LAU2 data to bring it into normalised form.""" 2 | import geopandas as gpd 3 | import pandas as pd 4 | 5 | from renewablepotentialslib.shape_utils import to_multi_polygon 6 | 7 | OUTPUT_DRIVER = "GeoJSON" 8 | KOSOVO_MUNICIPALITIES = [f"RS{x:02d}" for x in range(1, 38)] 9 | 10 | 11 | def merge_lau(path_to_shapes, path_to_attributes, path_to_output): 12 | """Merge LAU shapes with attributes.""" 13 | shapes = gpd.read_file(path_to_shapes) 14 | shapes.geometry = shapes.geometry.map(to_multi_polygon) 15 | attributes = gpd.read_file(path_to_attributes) 16 | attributes = pd.DataFrame(attributes) # to be able to remove the geo information 17 | del attributes["geometry"] 18 | all_shapes = shapes.merge(attributes, on="COMM_ID", how="left") 19 | all_shapes_no_kosovo = _remove_kosovo(all_shapes) 20 | all_shapes_no_kosovo.to_file(path_to_output, driver=OUTPUT_DRIVER) 21 | 22 | 23 | def _remove_kosovo(shapes): 24 | """Identify and remove municipalities in Kosovo. 25 | 26 | Those municipalities must be removed as we do not have load data and pycountry 27 | cannot handle them at the moment (as of 2018, Kosovo does not have a standardised 28 | country code). 
"""Create index capacity factor timeseries of renewables."""
import xarray as xr

# Dimension along which the individual renewables.ninja simulations are stacked.
SIM_ID_DIMENSION = "site"
# Variable mapping each simulation to the spatial site it belongs to.
SITE_ID_VAR = "site_id"
LAT_VAR = "lat"
LON_VAR = "lon"
# Weight of one simulation within its site (e.g. share of a roof category).
WEIGHT_VAR = "weight"
CAPACITY_FACTOR_VAR = "electricity"
WEIGHTED_CAPACITY_FACTOR_VAR = "weighted_electricity"
ORIENTATION_VAR = "_orientation"
FLAT_SURFACE = "flat"
FILE_SUFFIX = "nc"


def timeseries(path_to_input, path_to_output):
    """Create index capacity factor timeseries of renewables from separate renewables.ninja runs.

    The technology is inferred from the input path: open-field pv keeps only
    flat-surface simulations; rooftop pv weighs each roof-configuration
    simulation before aggregation. Result is written as netCDF.
    """
    ds = xr.open_dataset(path_to_input)
    if "open-field-pv" in path_to_input:
        ds = select_flat_surfaces_only(ds)
    elif "rooftop-pv" in path_to_input:
        # Weighting must happen before the per-site summation in groupby_sites.
        ds = weigh_capacity_factors(ds)
    ds = groupby_sites(ds)
    ds.to_netcdf(path_to_output, "w")


def groupby_sites(ds):
    """Aggregate simulations to one capacity factor timeseries per site.

    Sums capacity factors of all simulations belonging to a site.
    NOTE(review): for rooftop pv this yields a weighted average only if the
    per-site weights sum to 1 -- confirm against the simulation input.
    """
    cp = ds[[CAPACITY_FACTOR_VAR, SITE_ID_VAR]].groupby(SITE_ID_VAR).sum(dim=SIM_ID_DIMENSION)
    # Coordinates are identical for all simulations of a site; take the first.
    coords = ds[[LAT_VAR, LON_VAR, SITE_ID_VAR]].groupby(SITE_ID_VAR).first()
    return xr.merge([cp, coords])


def select_flat_surfaces_only(ds):
    """Keep only simulations of flat (non-tilted) surfaces."""
    return ds.sel({SIM_ID_DIMENSION: ds[ORIENTATION_VAR] == FLAT_SURFACE})


def weigh_capacity_factors(ds):
    """Scale each simulation's capacity factors by its within-site weight (in place)."""
    ds[CAPACITY_FACTOR_VAR] = ds[CAPACITY_FACTOR_VAR] * ds[WEIGHT_VAR]
    return ds
"""Create maps of time averaged capacity factors of renewables."""
import numpy as np
import rasterio
import xarray as xr

# Variable holding the capacity factor timeseries in the input dataset.
CAPACITY_FACTOR_VAR = "electricity"

DTYPE = np.float32
NODATA = -1  # nodata value written to the output raster


def averages_map(path_to_id_map, path_to_timeseries, path_to_output):
    """Create maps of time averaged capacity factors of renewables.

    Parameters:
        path_to_id_map: raster whose pixel values are site ids (or nodata)
        path_to_timeseries: netCDF capacity factor timeseries per site
        path_to_output: path of the resulting raster map
    """
    with rasterio.open(path_to_id_map, "r") as f_ids:
        ids = f_ids.read(1)
        meta = f_ids.meta
    averages = map_id_to_average_capacity_factor(ids, path_to_timeseries, meta["nodata"])
    # The output inherits the id map's georeference but stores float averages.
    meta["dtype"] = DTYPE
    meta["nodata"] = NODATA
    with rasterio.open(path_to_output, "w", **meta) as f_avg:
        f_avg.write(averages, 1)


def map_id_to_average_capacity_factor(ids, path_to_timeseries, nodata_id):
    """Replace every site id in `ids` with that site's time-averaged capacity factor."""
    average_capacity_factors = xr.open_dataset(path_to_timeseries).mean("time")[CAPACITY_FACTOR_VAR].to_dataframe()
    # Index must match the integer pixel values of the id map.
    # NOTE(review): assumes all site ids fit into int32 -- confirm against id map creation.
    average_capacity_factors.index = average_capacity_factors.index.astype(np.int32)
    average_capacity_factors = average_capacity_factors.to_dict()[CAPACITY_FACTOR_VAR]
    # Nodata pixels of the id map are mapped to the output nodata value.
    average_capacity_factors[nodata_id] = NODATA
    mapping_function = np.vectorize(
        lambda site_id: average_capacity_factors[site_id],
        otypes=[DTYPE]
    )
    return mapping_function(ids)


if __name__ == "__main__":
    averages_map(
        path_to_id_map=snakemake.input.id_map,
        path_to_timeseries=snakemake.input.timeseries,
        path_to_output=snakemake.output[0]
    )
def swiss_building_footprint(path_to_building_footprint, path_to_eligibility,
                             path_to_countries, path_to_output):
    """Write the total Swiss building footprint in km² to a text file.

    Parameters:
        path_to_building_footprint: raster of per-pixel building share
            (presumably a 0..1 fraction from ESM data -- TODO confirm)
        path_to_eligibility: raster of Eligibility categories, same grid
        path_to_countries: vector file with a "nuts0" layer and "id" property
        path_to_output: path of the resulting single-number text file
    """
    with rasterio.open(path_to_eligibility, "r") as f_eligibility:
        eligibility = f_eligibility.read(1)
    with rasterio.open(path_to_building_footprint, "r") as f_building_share:
        building_share = f_building_share.read(1)
        transform = f_building_share.transform
    # Only pixels eligible for rooftop PV count towards the building footprint.
    building_share[eligibility != Eligibility.ROOFTOP_PV] = 0

    with fiona.open(path_to_countries, "r", layer="nuts0") as src:
        # Mean building share per country polygon.
        zs = zonal_stats(
            vectors=src,
            raster=building_share,
            affine=transform,
            stats="mean",
            nodata=-999
        )
        building_share = pd.Series(
            index=[feat["properties"]["id"] for feat in src],
            data=[stat["mean"] for stat in zs]
        )
    # Country area [m²] -> [km²], scaled by the mean building share.
    building_footprint_km2 = (
        area_in_squaremeters(gpd.read_file(path_to_countries).set_index("id"))
        .div(1e6)
        .mul(building_share)
    )
    swiss_building_footprint = building_footprint_km2.loc["CHE"]
    with open(path_to_output, "w") as f_out:
        f_out.write(f"{swiss_building_footprint}")


if __name__ == "__main__":
    swiss_building_footprint(
        path_to_building_footprint=snakemake.input.building_footprints,
        path_to_eligibility=snakemake.input.eligibility,
        path_to_countries=snakemake.input.countries,
        path_to_output=snakemake.output[0]
    )
def built_up_areas(path_to_built_up_share, path_to_units, path_to_result):
    """Determine the built up area in administrative units.

    Writes a csv indexed by unit id with columns built_up_km2,
    non_built_up_km2 and built_up_share.

    Parameters:
        path_to_built_up_share: raster of built-up share per pixel
            (presumably a 0..1 fraction -- TODO confirm against data source)
        path_to_units: vector file of units carrying an "id" property
        path_to_result: path of the resulting csv file
    """
    with rasterio.open(path_to_built_up_share) as src:
        built_up_share = src.read(1)
        crs = src.crs
        transform = src.transform
        bounds = src.bounds
        resolution = src.res[0]
    with fiona.open(path_to_units, "r") as src:
        unit_ids = [feature["properties"]["id"] for feature in src]
        unit_geometries = [feature["geometry"] for feature in src]

    # Pixel areas vary across the raster (e.g. with latitude); compute once.
    # NOTE(review): assumes determine_pixel_areas returns km² given the
    # "*_km2" column names below -- confirm in shape_utils.
    pixel_area = determine_pixel_areas(crs, bounds, resolution)
    built_up_stats = pd.DataFrame(
        index=unit_ids,
        data={
            "built_up_km2": _stats(unit_geometries, built_up_share * pixel_area, transform),
            "non_built_up_km2": _stats(unit_geometries, (1 - built_up_share) * pixel_area, transform)
        }
    )
    built_up_stats["built_up_share"] = (built_up_stats["built_up_km2"] /
                                        (built_up_stats["built_up_km2"] + built_up_stats["non_built_up_km2"]))
    built_up_stats.index.name = "id"
    built_up_stats.to_csv(
        path_to_result,
        header=True,
        index=True
    )


def _stats(unit_geometries, raster_area, transform):
    # Sum the per-pixel areas falling inside each unit geometry.
    area = zonal_stats(
        unit_geometries,
        raster_area,
        affine=transform,
        stats="sum",
        nodata=-999
    )
    return [stat["sum"] for stat in area]


if __name__ == "__main__":
    built_up_areas(
        path_to_built_up_share=snakemake.input.built_up_area,
        path_to_units=snakemake.input.units,
        path_to_result=snakemake.output[0]
    )
configfile: "config/default.yaml"

include: "../Snakefile"
include: "sonnendach.smk"
# Script paths must be prefixed when the workflow runs from a different root.
root_dir = config["root-directory"] + "/" if config["root-directory"] not in ["", "."] else ""
script_dir = f"{root_dir}scripts/"


rule ninja_simulation_input:
    # Aggregate target rule collecting all renewables.ninja input files.
    message: "Create input files for renewable.ninja simulations."
    input:
        "build/capacityfactors/ninja-input-pv.csv",
        "build/capacityfactors/ninja-input-wind-onshore.csv",
        "build/capacityfactors/ninja-input-wind-offshore.csv"


rule pv_simulation_points:
    message: "Create locations and parameters of pv simulations for renewables.ninja."
    input:
        script = script_dir + "capacityfactors/ninja_input_pv.py",
        units = "build/continental/units.geojson",
        roof_categories = rules.sonnendach_statistics.output[0]
    params:
        bounds = config["scope"]["bounds"],
        ninja = config["parameters"]["ninja"],
        maximum_power_density = config["parameters"]["maximum-installable-power-density"]
    output:
        points = "build/capacityfactors/ninja-input-pv.csv"
    conda: "../envs/default.yaml"
    script: "../scripts/capacityfactors/ninja_input_pv.py"


rule wind_simulation_points:
    message: "Create locations and parameters of wind simulations for renewables.ninja."
    input:
        script = script_dir + "capacityfactors/ninja_input_wind.py",
        units = "build/continental/units.geojson",
        eez = "build/eez-in-europe.geojson"
    params:
        bounds = config["scope"]["bounds"],
        ninja = config["parameters"]["ninja"],
    output:
        points_onshore = "build/capacityfactors/ninja-input-wind-onshore.csv",
        # NOTE(review): "points_offhore" is a typo for "points_offshore"; left
        # unchanged because ninja_input_wind.py likely references this key --
        # rename both together.
        points_offhore = "build/capacityfactors/ninja-input-wind-offshore.csv",
    conda: "../envs/default.yaml"
    script: "../scripts/capacityfactors/ninja_input_wind.py"
6 | """ 7 | import math 8 | 9 | import geopandas as gpd 10 | import pycountry 11 | 12 | from renewablepotentialslib import EPSG_3035_PROJ4 13 | 14 | 15 | def estimate_shapes(path_to_input, scope_config, path_to_output): 16 | """Estimates the shap of protected areas for which only centroids are known.""" 17 | points = gpd.read_file(path_to_input) 18 | points_in_scope = filter_points(points, scope_config) 19 | original_crs = points_in_scope.crs 20 | # convert points to circles 21 | points_in_scope = points_in_scope.to_crs(EPSG_3035_PROJ4) 22 | points_in_scope.geometry = [rec[1].geometry.buffer(radius_meter(rec[1]["REP_AREA"])) 23 | for rec in points_in_scope.iterrows()] 24 | test_area_size(points_in_scope) 25 | points_in_scope.to_crs(original_crs).to_file(path_to_output, driver="GeoJSON") 26 | 27 | 28 | def filter_points(points, scope_config): 29 | x_min, x_max, y_min, y_max = [scope_config["bounds"][z] 30 | for z in ["x_min", "x_max", "y_min", "y_max"]] 31 | countries = [pycountry.countries.lookup(country).alpha_3 32 | for country in scope_config["countries"]] 33 | return points.cx[x_min:x_max, y_min:y_max].loc[ 34 | (points.ISO3.isin(countries)) & 35 | (points.REP_AREA > 0) 36 | ].copy() 37 | 38 | 39 | def radius_meter(area_squarekilometer): 40 | area_squaremeter = area_squarekilometer * 1e6 41 | return math.sqrt(area_squaremeter / math.pi) 42 | 43 | 44 | def test_area_size(points): 45 | area_size_calculated = points.area.sum() / 1e6 46 | area_size_reported = points.REP_AREA.sum() 47 | assert abs(area_size_calculated - area_size_reported) < (area_size_reported / 100) 48 | 49 | 50 | if __name__ == "__main__": 51 | estimate_shapes( 52 | path_to_input=snakemake.input.protected_areas, 53 | scope_config=snakemake.params.scope, 54 | path_to_output=snakemake.output[0] 55 | ) 56 | -------------------------------------------------------------------------------- /scripts/administrative_borders.py: 
--------------------------------------------------------------------------------
import geopandas as gpd

from renewablepotentialslib.shape_utils import (
    buffer_if_necessary,
    to_multi_polygon,
    drop_countries,
    drop_geoms_completely_outside_study_area,
    drop_parts_of_geoms_completely_outside_study_area,
    update_features
)

OUTPUT_DRIVER = 'GPKG'
SCHEMA = {
    "properties": {
        "country_code": "str", # id of the country to which the unit belongs
        "id": "str", # a unique id of this unit
        "name": "str", # the name of the unit, not necessarily unique
        "type": "str", # the type of the unit
        "proper": "bool" # flag indicating proper administrative unit (not the case for water bodies e.g.)
    },
    "geometry": "MultiPolygon"
}


def normalise_admin_borders(path_to_nuts, path_to_gadm, path_to_lau, crs, scope_config, path_to_output):
    """Normalises raw administrative boundary data and places it in one, layered geodatapackage."""

    for _src, _path in {
        'nuts': path_to_nuts, 'gadm': path_to_gadm, 'lau': path_to_lau
    }.items():
        gdf = gpd.read_file(_path)
        gdf = gdf.to_crs(crs)
        # Repair invalid geometries and normalise everything to MultiPolygon
        # so all layers conform to SCHEMA.
        gdf.geometry = gdf.geometry.map(buffer_if_necessary).map(to_multi_polygon)
        gdf = update_features(gdf, _src)
        gdf = drop_countries(gdf, scope_config)
        gdf = drop_geoms_completely_outside_study_area(gdf, scope_config)
        gdf = drop_parts_of_geoms_completely_outside_study_area(gdf, scope_config)

        # Unit ids must be unique within one source dataset.
        assert gdf.id.duplicated().sum() == 0

        allowed_cols = list(SCHEMA["properties"].keys()) + ['geometry']

        # One geopackage layer per administrative level.
        # NOTE(review): the "level" column is presumably added by update_features — confirm.
        for lvl in gdf.level.unique():
            gdf.loc[gdf.level == lvl, allowed_cols].to_file(
                path_to_output, schema=SCHEMA, layer=lvl, driver=OUTPUT_DRIVER
            )


if __name__ == "__main__":
    normalise_admin_borders(
        path_to_nuts=snakemake.input.nuts_geojson,
        path_to_gadm=snakemake.input.gadm_gpkg,
        path_to_lau=snakemake.input.lau_gpkg,
        crs=snakemake.params.crs,
        scope_config=snakemake.params.scope,
        path_to_output=snakemake.output[0]
    )
--------------------------------------------------------------------------------
/scripts/capacityfactors/ninja_input_wind.py:
--------------------------------------------------------------------------------
"""Create wind simulation input for renewables.ninja."""
import pandas as pd
import geopandas as gpd

from renewablepotentialslib.shape_utils import point_raster_on_shapes


def wind(path_to_shapes_of_land_surface, path_to_shapes_of_water_surface, bounds, ninja,
         path_to_onshore_output, path_to_offshore_output):
    """Create wind on- and offshore simulation input for renewables.ninja."""
    # Onshore points are rastered over land units, offshore points over EEZ water shapes;
    # both share the same grid resolution but differ in hub height and turbine model.
    write_parameters(
        bounds=bounds,
        resolution=ninja["resolution-grid"],
        path_to_shapes=path_to_shapes_of_land_surface,
        hub_height=ninja["hub-height"]["onshore"],
        turbine=ninja["turbine"]["onshore"],
        path_to_output=path_to_onshore_output
    )
    write_parameters(
        bounds=bounds,
        resolution=ninja["resolution-grid"],
        path_to_shapes=path_to_shapes_of_water_surface,
        hub_height=ninja["hub-height"]["offshore"],
        turbine=ninja["turbine"]["offshore"],
        path_to_output=path_to_offshore_output
    )


def write_parameters(bounds, resolution, path_to_shapes, hub_height, turbine, path_to_output):
    # Lay a regular point raster over the shapes; each point becomes one
    # renewables.ninja simulation site with identical weight.
    points = point_raster_on_shapes(
        bounds_wgs84=bounds,
        shapes=gpd.read_file(path_to_shapes),
        resolution_km2=resolution
    )
    parameters = pd.DataFrame(
        data={
            "lat": [point.y for point in points.geometry],
            "long": [point.x for point in points.geometry],
            "hub_height": hub_height,
            "turbine": turbine
        }
    )
    parameters["sim_id"] = parameters.index
    parameters["site_id"] = parameters.index
    parameters["weight"] = 1
    parameters[["sim_id", "weight", "site_id",
"lat", "long", "hub_height", "turbine"]].to_csv( 47 | path_to_output, 48 | header=True, 49 | index=False 50 | ) 51 | 52 | 53 | if __name__ == "__main__": 54 | wind( 55 | path_to_shapes_of_land_surface=snakemake.input.units, 56 | path_to_shapes_of_water_surface=snakemake.input.eez, 57 | bounds=snakemake.params.bounds, 58 | ninja=snakemake.params.ninja, 59 | path_to_onshore_output=snakemake.output.points_onshore, 60 | path_to_offshore_output=snakemake.output.points_offhore 61 | ) 62 | -------------------------------------------------------------------------------- /tests/test_potential_of_layers.py: -------------------------------------------------------------------------------- 1 | """Test whether potential estimations between layers are similar.""" 2 | import os 3 | from pathlib import Path 4 | 5 | import pytest 6 | import pandas as pd 7 | 8 | from renewablepotentialslib.eligibility import Potential 9 | 10 | TOLERANCE = 0.005 # 0.5% 11 | 12 | BUILD_DIR = Path(os.path.abspath(__file__)).parent.parent / "build" 13 | PATH_TO_CONTINENTAL_POTENTIALS = BUILD_DIR / "continental" / "technical-potential" / "potentials.csv" 14 | PATH_TO_NATIONAL_POTENTIALS = BUILD_DIR / "national" / "technical-potential" / "potentials.csv" 15 | PATH_TO_REGIONAL_POTENTIALS = BUILD_DIR / "regional" / "technical-potential" / "potentials.csv" 16 | PATH_TO_MUNICIPAL_POTENTIALS = BUILD_DIR / "municipal" / "technical-potential" / "potentials.csv" 17 | 18 | 19 | @pytest.mark.skipif(not PATH_TO_CONTINENTAL_POTENTIALS.exists(), reason="Continental potentials not available.") 20 | @pytest.mark.skipif(not PATH_TO_NATIONAL_POTENTIALS.exists(), reason="National potentials not available.") 21 | @pytest.mark.parametrize( 22 | "potential", Potential 23 | ) 24 | def test_continental_to_national(potential): 25 | continental = pd.read_csv(PATH_TO_CONTINENTAL_POTENTIALS, index_col=0).sum() 26 | national = pd.read_csv(PATH_TO_NATIONAL_POTENTIALS, index_col=0).sum() 27 | 28 | assert continental[str(potential)] == 
pytest.approx(national[str(potential)], TOLERANCE) 29 | 30 | 31 | @pytest.mark.skipif(not PATH_TO_CONTINENTAL_POTENTIALS.exists(), reason="Continental potentials not available.") 32 | @pytest.mark.skipif(not PATH_TO_REGIONAL_POTENTIALS.exists(), reason="Regional potentials not available.") 33 | @pytest.mark.parametrize( 34 | "potential", Potential 35 | ) 36 | def test_continental_to_regional(potential): 37 | continental = pd.read_csv(PATH_TO_CONTINENTAL_POTENTIALS, index_col=0).sum() 38 | regional = pd.read_csv(PATH_TO_REGIONAL_POTENTIALS, index_col=0).sum() 39 | 40 | assert continental[str(potential)] == pytest.approx(regional[str(potential)], TOLERANCE) 41 | 42 | 43 | @pytest.mark.skipif(not PATH_TO_CONTINENTAL_POTENTIALS.exists(), reason="Continental potentials not available.") 44 | @pytest.mark.skipif(not PATH_TO_MUNICIPAL_POTENTIALS.exists(), reason="Municipal potentials not available.") 45 | @pytest.mark.parametrize( 46 | "potential", Potential 47 | ) 48 | def test_continental_to_municipal(potential): 49 | continental = pd.read_csv(PATH_TO_CONTINENTAL_POTENTIALS, index_col=0).sum() 50 | municipal = pd.read_csv(PATH_TO_MUNICIPAL_POTENTIALS, index_col=0).sum() 51 | 52 | assert continental[str(potential)] == pytest.approx(municipal[str(potential)], TOLERANCE) 53 | -------------------------------------------------------------------------------- /scripts/technically_eligible_area.py: -------------------------------------------------------------------------------- 1 | """Determines area of technically eligible land for renewables.""" 2 | import numpy as np 3 | import rasterio 4 | 5 | from renewablepotentialslib.shape_utils import determine_pixel_areas 6 | from renewablepotentialslib.eligibility import Eligibility 7 | 8 | DATATYPE = np.float32 9 | 10 | 11 | def determine_area(path_to_eligibility_categories, path_to_building_share, 12 | path_to_rooftop_correction_factor, path_to_result): 13 | """Determines area of technically eligible land for renewables. 

    For all eligibility categories other than rooftop PV, this is simply the pixel/cell size.
    For rooftop PV, we reduce the area to the building footprints, and we furthermore apply a
    correction factor to map from building footprint to available rooftop space.
    """
    with rasterio.open(path_to_eligibility_categories) as src:
        eligibility_categories = src.read(1)
        meta = src.meta
        bounds = src.bounds
        resolution = src.res[0]
    pixel_area = determine_pixel_areas(meta["crs"], bounds, resolution).astype(DATATYPE)
    areas_of_eligibility = pixel_area.copy()
    rooftop_area = determine_rooftop_areas(pixel_area, path_to_building_share, path_to_rooftop_correction_factor)
    # Only pixels classified as rooftop PV get the reduced rooftop area; all
    # other pixels keep the full pixel area.
    pv_rooftop_mask = eligibility_categories == Eligibility.ROOFTOP_PV
    areas_of_eligibility[pv_rooftop_mask] = rooftop_area[pv_rooftop_mask]
    write_to_file(areas_of_eligibility, path_to_result, meta)


def determine_rooftop_areas(pixel_areas, path_to_building_share, path_to_rooftop_correction_factor):
    """Returns a raster in which the value corresponds to the rooftop area in the pixel."""
    # The correction factor file contains a single float on its first line,
    # mapping from building footprint to available rooftop space.
    with rasterio.open(path_to_building_share) as f_building_share, \
            open(path_to_rooftop_correction_factor, "r") as f_factor:
        factor = float(f_factor.readline())
        building_share = f_building_share.read(1)
    return pixel_areas * building_share * factor


def write_to_file(areas_of_eligibility, path_to_result, meta):
    # Reuse the category raster's metadata, only switching the dtype.
    meta.update(dtype=DATATYPE)
    with rasterio.open(path_to_result, 'w', **meta) as new_geotiff:
        new_geotiff.write(areas_of_eligibility, 1)


if __name__ == "__main__":
    determine_area(
        path_to_eligibility_categories=snakemake.input.eligibility_categories,
        path_to_building_share=snakemake.input.building_share,
        path_to_rooftop_correction_factor=snakemake.input.rooftop_correction_factor,
        path_to_result=snakemake.output[0]
    )
--------------------------------------------------------------------------------
/scripts/technical_eligibility.py:
--------------------------------------------------------------------------------
"""This module determines an upper bound of land eligibility for renewable generation based on geospatial data.

In here, we only exclude areas based on technical restrictions.
"""

import numpy as np
import rasterio

from renewablepotentialslib.eligibility import eligibility_land_mask, DATATYPE


def determine_eligibility(path_to_land_cover, path_to_slope, path_to_bathymetry,
                          path_to_building_share, path_to_urban_green_share, path_to_result,
                          max_slope, max_building_share, max_urban_green_share, max_depth_offshore):
    """Determines eligibility of land for renewables."""
    # All input rasters are expected to share the land cover raster's grid;
    # its transform and crs are reused for the output.
    with rasterio.open(path_to_land_cover) as src:
        transform = src.transform
        land_cover = src.read(1)
        crs = src.crs
    with rasterio.open(path_to_slope) as src:
        slope = src.read(1)
    with rasterio.open(path_to_bathymetry) as src:
        bathymetry = src.read(1)
    with rasterio.open(path_to_building_share) as src:
        building_share = src.read(1)
    with rasterio.open(path_to_urban_green_share) as src:
        urban_green_share = src.read(1)
    # The actual classification logic lives in renewablepotentialslib.
    eligibility = eligibility_land_mask(
        land_cover=land_cover,
        slope=slope,
        bathymetry=bathymetry,
        building_share=building_share,
        urban_green_share=urban_green_share,
        max_slope=max_slope,
        max_building_share=max_building_share,
        max_urban_green_share=max_urban_green_share,
        max_depth_offshore=max_depth_offshore
    )
    with rasterio.open(path_to_result, 'w', driver='GTiff', height=eligibility.shape[0],
                       width=eligibility.shape[1], count=1, dtype=DATATYPE,
                       crs=crs, transform=transform) as new_geotiff:
        new_geotiff.write(eligibility, 1)


if __name__ == "__main__":
    determine_eligibility(
        path_to_land_cover=snakemake.input.land_cover,
        path_to_slope=snakemake.input.slope,
        path_to_bathymetry=snakemake.input.bathymetry,
        path_to_building_share=snakemake.input.building_share,
        path_to_urban_green_share=snakemake.input.urban_green_share,
        max_slope=snakemake.params.max_slope,
        max_building_share=snakemake.params.max_building_share,
        max_urban_green_share=snakemake.params.max_urban_green_share,
        max_depth_offshore=snakemake.params.max_depth_offshore,
        path_to_result=snakemake.output[0]
    )
--------------------------------------------------------------------------------
/rules/capacityfactors.smk:
--------------------------------------------------------------------------------
"""Workflow to create spatially disaggregated capacity factors from renewables.ninja simulations.

This is based on simulations run based on input generated in the workflow `ninja-input.smk`. Beware
that renewables.ninja simulations are not in the loop, i.e. they are not run automatically but must
be run manually if they need to be altered.
"""

configfile: "./config/default.yaml"
root_dir = config["root-directory"] + "/" if config["root-directory"] not in ["", "."] else ""
script_dir = f"{root_dir}scripts/"


rule capacityfactor_timeseries:
    message: "Create index capacity factor timeseries of {wildcards.technology}."
    input:
        script = script_dir + "capacityfactors/timeseries.py",
        capacityfactor = lambda wildcards: config["data-sources"]["raw-capacity-factors"].format(technology=wildcards.technology)
    output:
        "build/capacityfactors/{technology}-timeseries.nc"
    conda: "../envs/default.yaml"
    script: "../scripts/capacityfactors/timeseries.py"


rule capacityfactor_id_map:
    message: "Create raster map of indices to time series for {wildcards.technology}."
    input:
        script = script_dir + "capacityfactors/id_map.py",
        timeseries = rules.capacityfactor_timeseries.output[0]
    output:
        temp("build/{technology}-ids-lowres.tif")
    shadow: "full"
    params:
        resolution = config["parameters"]["ninja"]["resolution-grid"]
    conda: "../envs/default.yaml"
    script: "../scripts/capacityfactors/id_map.py"


rule capacityfactor_id_map_warped_to_land_cover:
    message: "Warp raster map of indices for {wildcards.technology} to land cover map resolution."
    input:
        id_map = rules.capacityfactor_id_map.output[0],
        reference = "build/land-cover-europe.tif"
    output:
        "build/capacityfactors/{technology}-ids.tif"
    shadow: "full"
    conda: "../envs/default.yaml"
    # Nearest-neighbour resampling preserves the integer site ids while
    # matching the land cover raster's grid.
    shell: "rio warp {input.id_map} {output} --like {input.reference} --resampling nearest"


rule time_average_capacityfactor_map:
    message: "Create raster map of average capacity factors for {wildcards.technology}."
    input:
        # NOTE(review): "scripts" (plural) deviates from the "script" key used
        # by the other rules in this file — presumably unintentional; confirm
        # nothing references input.scripts before renaming.
        scripts = script_dir + "capacityfactors/averages_map.py",
        id_map = rules.capacityfactor_id_map_warped_to_land_cover.output[0],
        timeseries = rules.capacityfactor_timeseries.output[0]
    output:
        "build/capacityfactors/{technology}-time-average.tif"
    conda: "../envs/default.yaml"
    script: "../scripts/capacityfactors/averages_map.py"
--------------------------------------------------------------------------------
/lib/tests/test_technically_eligible_land.py:
--------------------------------------------------------------------------------
import pytest
import numpy as np

from renewablepotentialslib.eligibility import eligibility_land_mask, Eligibility, GlobCover

@pytest.fixture
def config():
    # Thresholds mirror the structure of the workflow configuration.
    return {
        "max-slope": {
            "pv": 3,
            "wind": 20
        },
        "max-depth-offshore": -50,
        "max-building-share": 0.1,
        "max-urban-green-share": 0.1
    }


@pytest.mark.parametrize(
    "land_cover,slope,bathymetry,building_share,urban_green_share,expected", [
        (GlobCover.RAINFED_CROPLANDS, 0, 0, 0, 0, Eligibility.ONSHORE_WIND_AND_PV),
        (GlobCover.RAINFED_CROPLANDS, 4, 0, 0, 0, Eligibility.ONSHORE_WIND),
        (GlobCover.RAINFED_CROPLANDS, 21, 0, 0, 0, Eligibility.NOT_ELIGIBLE),
        (GlobCover.RAINFED_CROPLANDS, 0, 0, 0.11, 0, Eligibility.ROOFTOP_PV),
        (GlobCover.RAINFED_CROPLANDS, 0, 0, 0, 0.11, Eligibility.ROOFTOP_PV),
        (GlobCover.RAINFED_CROPLANDS, 0, 0, 0, 0, Eligibility.ONSHORE_WIND_AND_PV),
        (GlobCover.MOSAIC_FOREST, 0, 0, 0, 0, Eligibility.ONSHORE_WIND),
        (GlobCover.MOSAIC_FOREST, 21, 0, 0, 0, Eligibility.NOT_ELIGIBLE),
        (GlobCover.MOSAIC_FOREST, 0, 0, 0.11, 0, Eligibility.ROOFTOP_PV),
        (GlobCover.MOSAIC_FOREST, 0, 0, 0, 0.11, Eligibility.ROOFTOP_PV),
        (GlobCover.MOSAIC_FOREST, 0, 0, 0, 0, Eligibility.ONSHORE_WIND),
        (GlobCover.MOSAIC_GRASSLAND, 0, 0, 0, 0, Eligibility.ONSHORE_WIND_AND_PV),
        (GlobCover.MOSAIC_GRASSLAND, 4,
         0, 0, 0, Eligibility.ONSHORE_WIND),
        (GlobCover.WATER_BODIES, 0, 0, 0, 0, Eligibility.OFFSHORE_WIND),
        (GlobCover.WATER_BODIES, 0, -51, 0, 0, Eligibility.NOT_ELIGIBLE),
        (GlobCover.WATER_BODIES, 0, 0, 0, 0, Eligibility.OFFSHORE_WIND),
        (GlobCover.ARTIFICAL_SURFACES_AND_URBAN_AREAS, 0, 0, 0, 0, Eligibility.NOT_ELIGIBLE)

    ]
)
def test_eligibility(land_cover, slope, bathymetry, building_share, urban_green_share,
                     expected, config):
    # Single-pixel arrays exercise one classification decision per case.
    max_slope = config["max-slope"]
    max_depth_offshore = config["max-depth-offshore"]
    max_building_share = config["max-building-share"]
    max_urban_green_share = config["max-urban-green-share"]
    result = eligibility_land_mask(
        land_cover=np.array([land_cover]),
        slope=np.array([slope]),
        bathymetry=np.array([bathymetry]),
        building_share=np.array([building_share]),
        urban_green_share=np.array([urban_green_share]),
        max_slope=max_slope,
        max_depth_offshore=max_depth_offshore,
        max_building_share=max_building_share,
        max_urban_green_share=max_urban_green_share
    )
    assert Eligibility(result[0]) == expected
--------------------------------------------------------------------------------
/scripts/technically_eligible_electricity_yield.py:
--------------------------------------------------------------------------------
"""Determines maximal electricity yield for renewables."""
from datetime import timedelta

import numpy as np
import rasterio

from renewablepotentialslib.conversion import watt_to_watthours


def determine_electricity_yield(path_to_eligibility_categories, path_to_capacities_pv_prio,
                                path_to_capacities_wind_prio, path_to_cf_pv_prio,
                                path_to_cf_wind_prio,
                                path_to_pv_prio_result, path_to_wind_prio_result):
    """Determines maximal electricity yield for renewables."""
    with rasterio.open(path_to_capacities_pv_prio) as src:
        meta = src.meta
        capacity_pv_prio_mw = src.read(1)
    with rasterio.open(path_to_capacities_wind_prio) as src:
        capacity_wind_prio_mw = src.read(1)
    with rasterio.open(path_to_eligibility_categories) as src:
        eligibility_categories = src.read(1)
    with rasterio.open(path_to_cf_pv_prio) as src:
        no_cf = src.nodata
        cf_pv_prio = src.read(1)
    with rasterio.open(path_to_cf_wind_prio) as src:
        cf_wind_prio = src.read(1)
    electricity_yield_pv_prio = _determine_electricity_yield(
        capacity_mw=capacity_pv_prio_mw,
        eligibility_category=eligibility_categories,
        cf=cf_pv_prio,
        data_mask=cf_pv_prio != no_cf
    )
    electricity_yield_wind_prio = _determine_electricity_yield(
        capacity_mw=capacity_wind_prio_mw,
        eligibility_category=eligibility_categories,
        cf=cf_wind_prio,
        data_mask=cf_wind_prio != no_cf
    )
    _write_to_file(path_to_pv_prio_result, electricity_yield_pv_prio, meta)
    _write_to_file(path_to_wind_prio_result, electricity_yield_wind_prio, meta)


def _determine_electricity_yield(capacity_mw, eligibility_category, cf, data_mask):
    # Yield [TWh/a] = capacity [MW] * annual full-load hours / 1e6.
    # NOTE(review): `cf` is mutated in place here (capacity factors are scaled
    # to annual watt-hours). Harmless for the single-use callers above, but a
    # surprise for any new caller reusing the array — consider copying.
    electricity_yield_twh = np.zeros_like(capacity_mw)
    cf[data_mask] = watt_to_watthours(cf[data_mask], duration=timedelta(days=365))
    electricity_yield_twh[data_mask] = (capacity_mw * cf)[data_mask] / 1e6
    return electricity_yield_twh


def _write_to_file(path_to_file, electricity_yield, meta):
    with rasterio.open(path_to_file, 'w', **meta) as new_geotiff:
        new_geotiff.write(electricity_yield, 1)


if __name__ == "__main__":
    determine_electricity_yield(
        path_to_eligibility_categories=snakemake.input.eligibility_categories,
        path_to_capacities_pv_prio=snakemake.input.capacities_pv_prio,
        path_to_capacities_wind_prio=snakemake.input.capacities_wind_prio,
        path_to_cf_pv_prio=snakemake.input.cf_pv_prio,
        path_to_cf_wind_prio=snakemake.input.cf_wind_prio,
        path_to_pv_prio_result=snakemake.output.pv,
        path_to_wind_prio_result=snakemake.output.wind
    )
--------------------------------------------------------------------------------
/scripts/shared_coast.py:
--------------------------------------------------------------------------------
"""Module to Determine share of shared coast between eez and administrative units."""
from textwrap import dedent
from multiprocessing import Pool
from itertools import cycle

import numpy as np
import pandas as pd
import geopandas as gpd
from shapely.prepared import prep

from renewablepotentialslib.shape_utils import buffer_if_necessary

DRIVER = "GeoJSON"


def allocate_eezs(path_to_units, path_to_eezs, path_to_output, threads):
    """Determine share of shared coast between eez and administrative units."""
    units = gpd.read_file(path_to_units)
    units.set_index("id", inplace=True)
    eezs = gpd.read_file(path_to_eezs)
    eezs.geometry = eezs.geometry.map(buffer_if_necessary)
    # One worker task per EEZ; `cycle([units])` pairs every EEZ with the same
    # units GeoDataFrame.
    with Pool(int(threads)) as pool:
        share_of_coast_length = pool.map(
            _share_of_coast_length,
            zip((eez[1] for eez in eezs.iterrows()), cycle([units]))
        )
    share = pd.DataFrame(
        index=units.index,
        data=dict(zip(eezs["MRGID"].values, share_of_coast_length))
    )
    # Shares of each EEZ must sum to ~1 (allocated) or exactly 0 (no match).
    assert (
        ((share.sum() > 0.99) & (share.sum() < 1.01)) |
        (share.sum() == 0.0)
    ).all(), share.sum()
    share.to_csv(path_to_output, header=True)


def _share_of_coast_length(args):
    # How to determine the length of the shared coast?
    # I intersect eez with the unit and determine the length of the resulting polygon.
    # This approach is fairly rough, but accurate enough for this analysis.
    eez = args[0]
    units = args[1]
    length_of_shared_coast = pd.Series(data=0.0, index=units.index, dtype=np.float32)
    prep_eez = prep(eez["geometry"]) # increase performance
    # Candidate units: same country as the EEZ (or the Europe-wide "EUR" unit)
    # and geometrically intersecting.
    intersection_mask = ((units["country_code"].isin([eez["ISO_Ter1"], "EUR"])) &
                         (units["geometry"].map(lambda unit: prep_eez.intersects(unit))))
    if intersection_mask.sum() == 0:
        # NOTE(review): the first message line is unindented, so dedent() finds
        # no common prefix and is a no-op here — the second line keeps its
        # leading spaces in the printed output.
        msg = dedent("""No shared coast found for {}.
                     Ignoring eez with area {} km^2.""".format(
            eez["GeoName"],
            eez["Area_km2"]
        ))
        print(msg)
        share = length_of_shared_coast.copy()
    elif intersection_mask.sum() == 1:
        # performance improvement in cases where only one unit matches
        share = length_of_shared_coast.copy()
        share[intersection_mask] = 1
    else:
        length_of_shared_coast[intersection_mask] = units.loc[intersection_mask, "geometry"].map(
            lambda unit: eez["geometry"].intersection(unit).length
        )
        share = length_of_shared_coast / length_of_shared_coast.sum()
    return share


if __name__ == "__main__":
    allocate_eezs(
        path_to_units=snakemake.input.units,
        path_to_eezs=snakemake.input.eez,
        path_to_output=snakemake.output[0],
        threads=snakemake.threads
    )
--------------------------------------------------------------------------------
/scripts/capacityfactors/ninja_input_pv.py:
--------------------------------------------------------------------------------
"""Create PV simulation input for renewables.ninja."""
import pandas as pd
import geopandas as gpd

from renewablepotentialslib.shape_utils import point_raster_on_shapes
from renewablepotentialslib.conversion import area_to_capacity, orientation_to_azimuth


def pv_simulation_parameters(path_to_shapes_of_land_surface, path_to_roof_categories,
                             bounds, ninja, maximum_power_density, path_to_output):
    """Create PV simulation input for renewables.ninja."""
    points = point_raster_on_shapes(
        bounds_wgs84=bounds,
        shapes=gpd.read_file(path_to_shapes_of_land_surface),
        resolution_km2=ninja["resolution-grid"]
    )

    roof_categories = pd.read_csv(path_to_roof_categories, index_col=[0, 1])
    roof_categories = area_to_capacity(
        roof_categories,
        power_density_flat=maximum_power_density["pv-on-flat-areas"],
        power_density_tilted=maximum_power_density["pv-on-tilted-roofs"]
    ).reset_index()
    lat_long = pd.DataFrame(
        data={
            "lat": [point.y for point in points.geometry],
            "long": [point.x for point in points.geometry]
        }
    )

    # Cartesian product: one simulation per (raster point, roof category).
    index = pd.MultiIndex.from_product((points.index, roof_categories.index), names=["id", "roof_cat_id"])
    data = pd.DataFrame(index=index).reset_index()
    data = data.merge(roof_categories, left_on="roof_cat_id", right_index=True).rename(
        columns={"share_of_roof_areas": "weight"}
    )
    data = data.merge(lat_long, left_on="id", right_index=True)
    data["azim"] = data["orientation"].map(orientation_to_azimuth)
    data["site_id"] = data.id
    # NOTE(review): sim_id embeds the average tilt BEFORE the flat-roof
    # substitution below, so for flat roofs the id does not reflect the tilt
    # actually simulated — confirm this is intended (e.g. for stable ids).
    data["sim_id"] = data.apply(
        lambda row: "{}_{}_{}".format(row.id, row.orientation, round(row.average_tilt)),
        axis=1
    )
    # Flat roofs are simulated at the latitude-dependent optimal tilt.
    flat_mask = data["orientation"] == "flat"
    data.loc[flat_mask, "average_tilt"] = data.loc[flat_mask, "lat"].map(optimal_tilt)
    data["pr"] = ninja["pv-performance-ratio"]
    data[
        ["sim_id", "weight", "site_id", "lat", "long", "average_tilt",
         "orientation", "azim", "pr"]
    ].sort_index().to_csv(
        path_to_output,
        header=True,
        index=False
    )


def optimal_tilt(latitude):
    # based on @Jacobson:2018
    # Cubic fit of optimal panel tilt as a function of latitude [degrees].
    optimal_tilt = 1.3793 + latitude * (1.2011 + latitude * (-0.014404 + latitude * 0.000080509))
    assert 90 > optimal_tilt >= 0
    return optimal_tilt


if __name__ == "__main__":
    pv_simulation_parameters(
        path_to_shapes_of_land_surface=snakemake.input.units,
        path_to_roof_categories=snakemake.input.roof_categories,
        bounds=snakemake.params.bounds,
        ninja=snakemake.params.ninja,
        maximum_power_density=snakemake.params.maximum_power_density,
        path_to_output=snakemake.output.points
    )
--------------------------------------------------------------------------------
/scripts/capacityfactors/id_map.py:
--------------------------------------------------------------------------------
"""Create maps of ids to capacity factor timeseries of renewables."""
import math

import numpy as np
import geopandas as gpd
import shapely
import rasterio
from rasterio.transform import from_origin
import xarray as xr

# uint16 raster of site ids; 64001 (< uint16 max 65535) marks pixels without a site.
DTYPE = np.uint16
NO_DATA_VALUE = 64001
INDEX_EPSILON = 10e-3

from renewablepotentialslib import EPSG_3035, EPSG_3035_PROJ4, WGS84, WGS84_PROJ4


def id_map(path_to_timeseries, path_to_map, resolution_km):
    """Create maps of ids to capacity factor timeseries of renewables.

    Each point on the map links to a timeseries of capacity factors of renewables. Together with the
    timeseries, both files form the spatio-temporal data format used in this study.
23 | """ 24 | ds = xr.open_dataset(path_to_timeseries) 25 | pv_config = ds[["lat", "lon"]].to_dataframe() 26 | points = gpd.GeoDataFrame( 27 | geometry=[shapely.geometry.Point(row.lon, row.lat) for _, row in pv_config.iterrows()], 28 | data={ 29 | "site_id": pv_config.index 30 | }, 31 | crs=WGS84_PROJ4 32 | ).to_crs(EPSG_3035_PROJ4) 33 | x_min = min([point.x for point in points.geometry]) 34 | x_max = max([point.x for point in points.geometry]) 35 | y_min = min([point.y for point in points.geometry]) 36 | y_max = max([point.y for point in points.geometry]) 37 | resolution_m = resolution_km * 1000 38 | width = (x_max - x_min) / resolution_m + 1 39 | height = (y_max - y_min) / resolution_m + 1 40 | assert isclose(round(width), width) # diff is purely numerics 41 | assert isclose(round(height), height) # diff is purely numerics 42 | width = round(width) 43 | height = round(height) 44 | raster = np.ones(shape=(height, width), dtype=DTYPE) * NO_DATA_VALUE 45 | for _, point in points.iterrows(): 46 | index_x = (point.geometry.x - x_min) / resolution_m 47 | index_y = (y_max - point.geometry.y) / resolution_m 48 | assert isclose(round(index_x), index_x) # diff is purely numerics 49 | assert isclose(round(index_y), index_y) # diff is purely numerics 50 | int_index_x = round(index_x) 51 | int_index_y = round(index_y) 52 | raster[int_index_y, int_index_x] = point.site_id 53 | transform = from_origin( 54 | west=x_min - resolution_m / 2, 55 | north=y_max + resolution_m / 2, 56 | xsize=resolution_m, 57 | ysize=resolution_m 58 | ) 59 | with rasterio.open(path_to_map, 'w', driver='GTiff', height=height, width=width, 60 | count=1, dtype=DTYPE, crs=EPSG_3035, transform=transform, 61 | nodata=NO_DATA_VALUE) as f_map: 62 | f_map.write(raster, 1) 63 | 64 | 65 | def isclose(a, b): 66 | return math.isclose(a, b, abs_tol=INDEX_EPSILON, rel_tol=0) 67 | 68 | 69 | if __name__ == "__main__": 70 | id_map( 71 | path_to_timeseries=snakemake.input.timeseries, 72 | 
"""Remix administrative border layers into the units of the analysis."""
import pandas as pd
import geopandas as gpd
import pycountry

DRIVER = "GeoJSON"


def remix_units(path_to_borders, layer_name, layer_config, countries, path_to_output):
    """Remixes NUTS, LAU, and GADM data to form the units of the analysis.

    Parameters:
        path_to_borders: path to a multi-layer vector file of administrative borders
        layer_name: name of the layer to build (the name "continental" is special-cased)
        layer_config: mapping from country name to the source layer to take it from
        countries: names of all countries that must appear in the resulting layer
        path_to_output: path of the GeoJSON file to write
    """
    sources = _read_source_layers(path_to_borders, layer_config)
    _validate_source_layers(sources)
    _validate_layer_config(layer_config, layer_name, countries)
    layer = _build_layer(layer_config, sources)
    _validate_layer(layer, layer_name, countries)
    if layer_name == "continental":  # treat special case
        layer = _continental_layer(layer)
    _write_layer(layer, path_to_output)


def _read_source_layers(path_to_borders, layers):
    # Read each distinct source layer exactly once.
    return {
        name: gpd.read_file(path_to_borders, layer=name)
        for name in set(layers.values())
    }


def _validate_source_layers(source_layers):
    # All source layers must share one coordinate reference system.
    all_crs = [gdf.crs for gdf in source_layers.values()]
    consistent = all(crs == all_crs[0] for crs in all_crs) if all_crs else True
    assert consistent, "Source layers have different crs. They must match."


def _validate_layer_config(layer_config, layer_name, countries):
    # Every requested country needs an entry in the layer definition.
    missing = [country for country in countries if country not in layer_config.keys()]
    assert not missing, ("Layer {} is not correctly "
                         "defined.".format(layer_name))


def _build_layer(country_to_source_map, source_layers):
    # Pick each country's units from its configured source layer and stack them.
    crs = next(iter(source_layers.values())).crs
    parts = []
    for country, source_name in country_to_source_map.items():
        source = source_layers[source_name]
        parts.append(source[source.country_code == _iso3(country)])
    layer = pd.concat(parts)
    assert isinstance(layer, pd.DataFrame)
    return gpd.GeoDataFrame(layer, crs=crs)


def _validate_layer(layer, layer_name, countries):
    # Every requested country must actually appear in the built layer.
    present = layer.country_code.unique()
    assert all(_iso3(country) in present
               for country in countries), (f"Countries are missing in layer {layer_name}.")


def _iso3(country_name):
    """Return the ISO 3166-1 alpha-3 code for a country name."""
    return pycountry.countries.lookup(country_name).alpha_3


def _continental_layer(layer):
    # Special case all Europe: dissolve every unit into a single shape.
    layer = layer.dissolve(by=[1 for _ in layer.index])
    idx = layer.index[0]
    for column, value in [("id", "EUR"),
                          ("country_code", "EUR"),
                          ("name", "Europe"),
                          ("type", "continent"),
                          ("proper", 1)]:
        layer.loc[idx, column] = value
    return layer


def _write_layer(gdf, path_to_file):
    """Write the layer to file using the module-wide driver."""
    gdf.to_file(path_to_file, driver=DRIVER)


if __name__ == "__main__":
    remix_units(
        path_to_borders=snakemake.input.administrative_borders,
        layer_name=snakemake.params.layer_name,
        layer_config=snakemake.params.layer_config,
        countries=snakemake.params.countries,
        path_to_output=snakemake.output[0]
    )
"config/default.yaml" 4 | validate(config, "config/schema.yaml") 5 | 6 | include: "rules/data-preprocessing.smk" 7 | include: "rules/sonnendach.smk" 8 | include: "rules/capacityfactors.smk" 9 | include: "rules/potential.smk" 10 | include: "rules/sync.smk" 11 | 12 | localrules: all, clean 13 | root_dir = config["root-directory"] + "/" if config["root-directory"] not in ["", "."] else "" 14 | __version__ = open(f"{root_dir}VERSION").readlines()[0].strip() 15 | script_dir = f"{root_dir}scripts/" 16 | 17 | wildcard_constraints: 18 | layer = "({layer_list})".format(layer_list="|".join((f"({layer})" for layer in config["layers"]))), 19 | scenario = "({scenario_list})".format(scenario_list="|".join((f"({scenario})" for scenario in config["scenarios"]))) 20 | 21 | onstart: 22 | shell("mkdir -p build/logs") 23 | onsuccess: 24 | if "pushcut_secret" in config.keys(): 25 | trigger_pushcut(event_name="snakemake_succeeded", secret=config["pushcut_secret"]) 26 | onerror: 27 | if "pushcut_secret" in config.keys(): 28 | trigger_pushcut(event_name="snakemake_failed", secret=config["pushcut_secret"]) 29 | 30 | 31 | rule all: 32 | message: "Run entire analysis and compile report." 33 | input: 34 | "build/logs/test-report.html", 35 | expand( 36 | "build/{layer}/{scenario}/potentials.csv", 37 | scenario=config["scenarios"], 38 | layer=config["layers"] 39 | ), 40 | expand( 41 | "build/{layer}/{scenario}/capacities.csv", 42 | scenario=config["scenarios"], 43 | layer=config["layers"] 44 | ), 45 | expand( 46 | "build/{layer}/{scenario}/areas.csv", 47 | scenario=config["scenarios"], 48 | layer=config["layers"] 49 | ) 50 | 51 | 52 | rule clean: # removes all generated results 53 | shell: 54 | """ 55 | rm -r ./build/* 56 | echo "Data downloaded to data/ has not been cleaned." 57 | """ 58 | 59 | 60 | rule test: 61 | message: "Run tests." 
"""Determines maximal capacities for renewables."""
import numpy as np
import pandas as pd
import rasterio

from renewablepotentialslib.eligibility import Eligibility


def determine_capacities(
    path_to_eligibility_categories, path_to_eligible_areas, path_to_statistical_roof_model,
    maximum_installable_power_density, path_to_pv_prio_result, path_to_wind_prio_result
):
    """Determines maximal capacities for renewables.

    Reads rasters of eligible area and eligibility category, converts area into
    installable capacity using technology-specific power densities, and writes
    one result raster per prioritisation (PV first, then wind first).
    """
    with rasterio.open(path_to_eligible_areas) as src:
        meta = src.meta
        areas = src.read(1)
    with rasterio.open(path_to_eligibility_categories) as src:
        eligibility_categories = src.read(1)
    # Share of flat roofs taken from the statistical roof model.
    roof_model = pd.read_csv(path_to_statistical_roof_model).set_index("orientation")
    flat_roof_share = roof_model.loc["flat", "share_of_roof_areas"]
    for pv_prio, path_to_result in [(True, path_to_pv_prio_result),
                                    (False, path_to_wind_prio_result)]:
        capacities = _determine_capacities(
            areas, eligibility_categories, maximum_installable_power_density,
            flat_roof_share, pv_prio=pv_prio
        )
        _write_to_file(path_to_result, capacities, meta)


def _determine_capacities(areas, eligibility_categories, maximum_installable_power_density,
                          flat_roof_share, pv_prio):
    """Translate eligible area into capacity, pixel by pixel, per eligibility category."""
    capacities = np.zeros_like(areas)
    for eligibility in Eligibility:
        density = _power_density_mw_per_km2(
            eligibility=eligibility,
            pv_prio=pv_prio,
            flat_roof_share=flat_roof_share,
            maximum_installable_power_density=maximum_installable_power_density
        )
        selected = eligibility_categories == eligibility
        capacities[selected] = areas[selected] * density
    return capacities


def _power_density_mw_per_km2(eligibility, pv_prio, flat_roof_share, maximum_installable_power_density):
    """Return the installable power density for one eligibility category."""
    densities = maximum_installable_power_density
    # Rooftop density blends flat and tilted roofs by their share.
    rooftop_pv = (densities["pv-on-flat-areas"] * flat_roof_share
                  + densities["pv-on-tilted-roofs"] * (1 - flat_roof_share))
    onshore = densities["onshore-wind"]
    offshore = densities["offshore-wind"]
    # Land eligible for both technologies gets the prioritised one.
    if pv_prio:
        shared = densities["pv-on-flat-areas"]
    else:
        shared = onshore
    return {
        Eligibility.NOT_ELIGIBLE: 0,
        Eligibility.ROOFTOP_PV: rooftop_pv,
        Eligibility.ONSHORE_WIND_AND_PV: shared,
        Eligibility.ONSHORE_WIND: onshore,
        Eligibility.OFFSHORE_WIND: offshore
    }[eligibility]


def _write_to_file(path_to_file, capacities, meta):
    """Write a single-band raster with the given metadata."""
    with rasterio.open(path_to_file, 'w', **meta) as new_geotiff:
        new_geotiff.write(capacities, 1)


if __name__ == "__main__":
    determine_capacities(
        path_to_eligibility_categories=snakemake.input.eligibility_categories,
        path_to_eligible_areas=snakemake.input.eligible_areas,
        path_to_statistical_roof_model=snakemake.input.statistical_roof_model,
        maximum_installable_power_density=snakemake.params.maximum_installable_power_density,
        path_to_pv_prio_result=snakemake.output.pv,
        path_to_wind_prio_result=snakemake.output.wind
    )
PATH_TO_SONNENDACH_AREA_ESTIMATE.exists(), reason="Sonnendach area estimation not available.") 27 | def test_switzerland_rooftop_area(): 28 | with open(PATH_TO_SONNENDACH_AREA_ESTIMATE, "r") as f_sonnendach_estimate: 29 | sonnendach_estimate = float(f_sonnendach_estimate.readline()) 30 | with fiona.open(PATH_TO_NUTS.as_posix(), "r", layer="nuts0") as shapefile: 31 | switzerland = [feature["geometry"] for feature in shapefile if feature["properties"]["country_code"] == "CHE"] 32 | assert len(switzerland) == 1 33 | with rasterio.open(PATH_TO_AREAS.as_posix()) as src: 34 | transform = src.transform 35 | areas = src.read(1) 36 | with rasterio.open(PATH_TO_CATEGORIES.as_posix()) as src: 37 | categories = src.read(1) 38 | areas[categories != Eligibility.ROOFTOP_PV] = 0 39 | zs = zonal_stats(switzerland, areas, affine=transform, stats="sum", nodata=-999) 40 | our_estimate = zs[0]["sum"] 41 | assert our_estimate == pytest.approx(sonnendach_estimate, 0.02) # 2% tolerance 42 | 43 | 44 | @pytest.mark.skipif(not PATH_TO_ENERGY_YIELD.exists(), reason="Eligible energy yield raster data not available.") 45 | @pytest.mark.skipif(not PATH_TO_NUTS.exists(), reason="Switzerland shape not available.") 46 | @pytest.mark.skipif( 47 | not PATH_TO_SONNENDACH_YIELD_ESTIMATE.exists(), 48 | reason="Sonnendach yield estimation not available.") 49 | def test_switzerland_energy_yield(): 50 | with open(PATH_TO_SONNENDACH_YIELD_ESTIMATE, "r") as f_sonnendach_estimate: 51 | sonnendach_estimate = float(f_sonnendach_estimate.readline()) 52 | with fiona.open(PATH_TO_NUTS.as_posix(), "r", layer="nuts0") as shapefile: 53 | switzerland = [feature["geometry"] for feature in shapefile if feature["properties"]["country_code"] == "CHE"] 54 | assert len(switzerland) == 1 55 | with rasterio.open(PATH_TO_ENERGY_YIELD.as_posix()) as src: 56 | transform = src.transform 57 | energy_yield = src.read(1) 58 | with rasterio.open(PATH_TO_CATEGORIES.as_posix()) as src: 59 | categories = src.read(1) 60 | 
"""Determines capacity factors for each eligibility category on a map."""
import numpy as np
import rasterio

from renewablepotentialslib.eligibility import Eligibility


def determine_capacityfactor(path_to_eligibility_categories, path_to_rooftop_pv_cf,
                             path_to_open_field_pv_cf, path_to_wind_onshore_cf,
                             path_to_wind_offshore_cf, availability,
                             path_to_output_pv_prio, path_to_output_wind_prio):
    """Determines capacity factors for each eligibility category on a map."""
    with rasterio.open(path_to_eligibility_categories) as src:
        eligibility_categories = src.read(1)
    with rasterio.open(path_to_rooftop_pv_cf) as src:
        meta = src.meta
        rooftop_pv_cf = src.read(1)
    with rasterio.open(path_to_open_field_pv_cf) as src:
        open_field_pv_cf = src.read(1)
    # Wind capacity factors are scaled by an availability factor; nodata pixels
    # are left untouched. NOTE(review): the nodata value comes from the rooftop
    # PV raster's metadata — assumes all capacity-factor rasters share it.
    with rasterio.open(path_to_wind_onshore_cf) as src:
        wind_onshore_cf = src.read(1)
    valid = wind_onshore_cf != meta["nodata"]
    wind_onshore_cf[valid] = wind_onshore_cf[valid] * availability["wind-onshore"]
    with rasterio.open(path_to_wind_offshore_cf) as src:
        wind_offshore_cf = src.read(1)
    valid = wind_offshore_cf != meta["nodata"]
    wind_offshore_cf[valid] = wind_offshore_cf[valid] * availability["wind-offshore"]
    for pv_prio, path_to_output in [(True, path_to_output_pv_prio),
                                    (False, path_to_output_wind_prio)]:
        cf_map = _determine_capacityfactor(
            eligibility_category=eligibility_categories,
            rooftop_pv_cf=rooftop_pv_cf,
            open_field_pv_cf=open_field_pv_cf,
            wind_onshore_cf=wind_onshore_cf,
            wind_offshore_cf=wind_offshore_cf,
            pv_prio=pv_prio,
            nodata=meta["nodata"]
        )
        _write_to_file(path_to_output, cf_map, meta)


def _determine_capacityfactor(eligibility_category, rooftop_pv_cf, open_field_pv_cf,
                              wind_onshore_cf, wind_offshore_cf, pv_prio, nodata):
    """Assemble one map by picking, per pixel, the capacity factor of the
    technology assigned to that pixel's eligibility category."""
    cf_map = np.zeros_like(rooftop_pv_cf)
    for eligibility in Eligibility:
        source = _capacity_factor(
            eligibility=eligibility,
            pv_prio=pv_prio,
            rooftop_pv_cf=rooftop_pv_cf,
            open_field_pv_cf=open_field_pv_cf,
            wind_onshore_cf=wind_onshore_cf,
            wind_offshore_cf=wind_offshore_cf,
            nodata=nodata
        )
        selected = eligibility_category == eligibility
        cf_map[selected] = source[selected]
    return cf_map


def _capacity_factor(eligibility, pv_prio, rooftop_pv_cf, open_field_pv_cf,
                     wind_onshore_cf, wind_offshore_cf, nodata):
    """Return the capacity-factor raster belonging to one eligibility category."""
    if eligibility == Eligibility.NOT_ELIGIBLE:
        return np.ones_like(rooftop_pv_cf) * nodata
    if eligibility == Eligibility.ROOFTOP_PV:
        return rooftop_pv_cf
    if eligibility == Eligibility.ONSHORE_WIND_AND_PV:
        # Land eligible for both technologies gets the prioritised one.
        return open_field_pv_cf if pv_prio else wind_onshore_cf
    if eligibility == Eligibility.ONSHORE_WIND:
        return wind_onshore_cf
    if eligibility == Eligibility.OFFSHORE_WIND:
        return wind_offshore_cf
    raise KeyError(eligibility)


def _write_to_file(path_to_file, data, meta):
    """Write a single-band raster with the given metadata."""
    with rasterio.open(path_to_file, 'w', **meta) as new_geotiff:
        new_geotiff.write(data, 1)


if __name__ == "__main__":
    determine_capacityfactor(
        path_to_eligibility_categories=snakemake.input.eligibility_categories,
        path_to_rooftop_pv_cf=snakemake.input.rooftop_pv_cf,
        path_to_open_field_pv_cf=snakemake.input.open_field_pv_cf,
        path_to_wind_onshore_cf=snakemake.input.wind_onshore_cf,
        path_to_wind_offshore_cf=snakemake.input.wind_offshore_cf,
        availability=snakemake.params.availability,
        path_to_output_pv_prio=snakemake.output.pv,
        path_to_output_wind_prio=snakemake.output.wind,
    )
https://doi.org/10.1016/j.esr.2019.100388 14 | 15 | ## Getting ready 16 | 17 | ### Installation 18 | 19 | The following dependencies are needed to set up an environment in which the analysis can be run and the paper be built: 20 | 21 | * [conda](https://conda.io/docs/index.html) 22 | 23 | When these dependencies are installed, you can create a conda environment from within which you can run the analysis: 24 | 25 | conda env create -f environment.yaml 26 | 27 | Don't forget to activate the environment. To see what you can do now, run: 28 | 29 | snakemake --list 30 | 31 | ### Data to be retrieved manually 32 | 33 | Whenever possible, data is downloaded automatically. As this is not always possible, you will need to retrieve the following data sets manually: 34 | 35 | * [European Settlement Map 2012, Release 2017, 100m](https://land.copernicus.eu/pan-european/GHSL/european-settlement-map), to be placed at `./data/esm-100m-2017/` 36 | * [World Exclusive Economic Zones v10](http://www.marineregions.org/downloads.php), to be placed in `./data/World_EEZ_v10_20180221` 37 | * capacity factors from renewable.ninja, to be placed in `./data/capacityfactors/{technology}.nc` for technology in ["wind-onshore", "wind-offshore", "rooftop-pv", "open-field-pv"] (where "open-field-pv" and "rooftop-pv" can be the same dataset and hence can be linked instead of copied) (to run simulations, see `Manual steps` below) 38 | 39 | ## Run the analysis 40 | 41 | snakemake --use-conda 42 | 43 | This will run all analysis steps to estimate potentials. 44 | 45 | You can also run certain parts only by using other `snakemake` rules; to get a list of all rules run `snakemake --list`. 46 | 47 | To generate a PDF of the dependency graph of all steps, run: 48 | 49 | snakemake --rulegraph | dot -Tpdf > dag.pdf 50 | 51 | (needs `dot`: `conda install graphviz`).
52 | 53 | ## Run on Euler cluster 54 | 55 | To run on Euler, use the following command: 56 | 57 | snakemake --use-conda --profile config/euler 58 | 59 | If you want to run on another cluster, read [snakemake's documentation on cluster execution](https://snakemake.readthedocs.io/en/stable/executable.html#cluster-execution) and take `config/euler` as a starting point. 60 | 61 | ## Manual steps 62 | 63 | At the moment, there is one manual step involved: running renewables.ninja simulations of wind and solar electricity. The simulation output is added to the automatic workflow as input data. Should you want to change the simulations, for example because you want to change parameters of the simulation (see `parameters.ninja` in the config), you can do that in three steps: 64 | 65 | 1) Create input files by first changing the config, then running `snakemake -s rules/ninja-input.smk`. 66 | 2) Run the simulations on renewables.ninja. 67 | 3) Update the data in `data/capacityfactors/{technology}`. 68 | 69 | ## Run the tests 70 | 71 | snakemake --use-conda test 72 | 73 | ## Repo structure 74 | 75 | * `scripts` and `lib`: contain the Python source code 76 | * `tests`: contains the test code 77 | * `config`: configurations used in the study 78 | * `rules`: additional Snakemake rules and workflows 79 | * `data`: place for raw data, whether retrieved manually or automatically 80 | * `build`: will contain all results (does not exist initially) 81 | 82 | ## License 83 | 84 | The code in this repo is MIT licensed, see `./LICENSE.md`.
85 | -------------------------------------------------------------------------------- /lib/tests/test_shape_utils.py: -------------------------------------------------------------------------------- 1 | import geopandas as gpd 2 | import pytest 3 | import shapely.geometry 4 | import pycountry 5 | 6 | from renewablepotentialslib.shape_utils import ( 7 | study_area, 8 | to_multi_polygon, 9 | drop_countries, 10 | drop_geoms_completely_outside_study_area, 11 | drop_parts_of_geoms_completely_outside_study_area, 12 | update_features 13 | ) 14 | 15 | # Loading the 60M file for its smaller size (1M used in actual workflow) 16 | URL_NUTS = "https://ec.europa.eu/eurostat/cache/GISCO/distribution/v2/nuts/geojson/NUTS_RG_60M_{}_4326.geojson" 17 | 18 | @pytest.mark.parametrize("year", [2006, 2010, 2013, 2016, 2021]) 19 | def test_nuts_years(year): 20 | gdf = gpd.read_file(URL_NUTS.format(year)) 21 | cols = ['id', 'FID', 'LEVL_CODE', 'NUTS_NAME'] 22 | assert all(i in gdf.columns for i in cols) 23 | 24 | class TestStudyArea(): 25 | def config(self, exclusions=0): 26 | _config = { 27 | "scope": { 28 | 'bounds': { 29 | 'x_min': 0, 30 | 'x_max': 1, 31 | 'y_min': 50, 32 | 'y_max': 51, 33 | } 34 | } 35 | } 36 | if exclusions > 0: 37 | _config["scope"]["exclusion_zones"] = { 38 | 'foo': { 39 | 'x_min': 0.5, 40 | 'x_max': 0.6, 41 | 'y_min': 50.2, 42 | 'y_max': 50.5, 43 | } 44 | } 45 | if exclusions == 2: 46 | _config["scope"]["exclusion_zones"]["bar"] = { 47 | 'x_min': 0, 48 | 'x_max': 0.1, 49 | 'y_min': 50, 50 | 'y_max': 50.1, 51 | } 52 | return _config 53 | 54 | def test_bounding_box(self): 55 | area = study_area(self.config()["scope"]) 56 | assert area.bounds == (0.0, 50.0, 1.0, 51.0) 57 | 58 | 59 | @pytest.mark.parametrize( 60 | "exclusions,not_in,is_in", 61 | [(1, (0.55, 50.3), (0.7, 50.7)), (2, (0.05, 50.05), (0.05, 50.3))] 62 | ) 63 | def test_exclusion_zone(self, exclusions, not_in, is_in): 64 | no_exclusions = study_area(self.config()["scope"]) 65 | with_exclusions = 
study_area(self.config(exclusions)["scope"]) 66 | 67 | assert no_exclusions.area > with_exclusions.area 68 | assert with_exclusions.bounds == no_exclusions.bounds 69 | 70 | assert not shapely.geometry.Point(not_in).within(with_exclusions) 71 | assert shapely.geometry.Point(is_in).within(with_exclusions) 72 | 73 | 74 | class TestGeomManipulation(): 75 | CONFIG = { 76 | 'bounds': { # IRE and GBR, not including shetland and some of cornwall 77 | 'x_min': -10, 78 | 'x_max': 1, 79 | 'y_min': 51.11, 80 | 'y_max': 60.1, 81 | } 82 | } 83 | NOT_DROPPED = ['IRL', 'North West (England)'] 84 | FULL_DROPPED = [ 85 | "Removing France (nuts0, country=FRA) as they are outside of study area", 86 | ] 87 | PARTIAL_DROPPED = [ 88 | "Removing parts of Highlands and Islands (nuts2, country=GBR)", 89 | "Removing parts of United Kingdom (nuts0, country=GBR)", 90 | ] 91 | 92 | @pytest.fixture 93 | def europe_gdf(self): 94 | gdf = gpd.read_file( 95 | URL_NUTS.format(2016) 96 | ) 97 | return update_features(gdf, "nuts") 98 | 99 | @pytest.mark.filterwarnings("ignore:Geometry is in a geographic CRS:UserWarning") 100 | def test_multipolygon(self, europe_gdf): 101 | assert not all([isinstance(i, shapely.geometry.Polygon) for i in europe_gdf.geometry]) 102 | 103 | polys = europe_gdf.geometry.map(to_multi_polygon) 104 | 105 | assert all([isinstance(i, shapely.geometry.MultiPolygon) for i in polys]) 106 | assert all(polys.area == europe_gdf.area) 107 | 108 | def test_drop_countries(self, europe_gdf): 109 | config = {"countries": ["Germany", "France", "Greece"]} 110 | 111 | assert set(europe_gdf.country_code) != set( 112 | [pycountry.countries.lookup(i).alpha_3 for i in config["countries"]] 113 | ) 114 | 115 | gdf = drop_countries(europe_gdf, config) 116 | 117 | assert set(gdf.country_code) == set( 118 | [pycountry.countries.lookup(i).alpha_3 for i in config["countries"]] 119 | ) 120 | 121 | def test_drop_geoms(self, capsys, europe_gdf): 122 | gdf = 
drop_geoms_completely_outside_study_area(europe_gdf, self.CONFIG) 123 | out, err = capsys.readouterr() 124 | 125 | for i in self.NOT_DROPPED + self.PARTIAL_DROPPED: 126 | assert i not in out 127 | 128 | for i in self.FULL_DROPPED: 129 | assert i in out 130 | assert sorted(gdf.country_code.unique()) == sorted(['IRL', 'GBR']) 131 | 132 | def test_drop_parts_of_geoms(self, capsys, europe_gdf): 133 | gdf = drop_parts_of_geoms_completely_outside_study_area(europe_gdf, self.CONFIG) 134 | out, err = capsys.readouterr() 135 | for i in self.NOT_DROPPED + self.FULL_DROPPED: 136 | assert i not in out 137 | for i in self.PARTIAL_DROPPED: 138 | assert i in out 139 | assert ( 140 | europe_gdf[europe_gdf.name == 'Highlands and Islands'].area 141 | > gdf[gdf.name == 'Highlands and Islands'].area 142 | ).all() 143 | -------------------------------------------------------------------------------- /config/schema.yaml: -------------------------------------------------------------------------------- 1 | $schema: "http://json-schema.org/draft-07/schema#" 2 | description: Default configuration schema 3 | properties: 4 | parameters: 5 | type: object 6 | properties: 7 | nuts-year: 8 | type: number 9 | enum: [2006, 2010, 2013, 2016, 2021] 10 | description: Indicates the reference NUTS year 11 | crs: 12 | type: string 13 | enum: ["EPSG:4326"] 14 | description: Coordinate reference system to which all datasets are reprojected during preprocessing 15 | scope: 16 | type: object 17 | properties: 18 | countries: 19 | type: array 20 | items: 21 | type: string 22 | enum: 23 | - "Austria" 24 | - "Belgium" 25 | - "Bulgaria" 26 | - "Croatia" 27 | - "Cyprus" 28 | - "Czech Republic" 29 | - "Denmark" 30 | - "Estonia" 31 | - "Finland" 32 | - "France" 33 | - "Germany" 34 | - "Greece" 35 | - "Hungary" 36 | - "Ireland" 37 | - "Italy" 38 | - "Latvia" 39 | - "Lithuania" 40 | - "Luxembourg" 41 | - "Netherlands" 42 | - "Poland" 43 | - "Portugal" 44 | - "Romania" 45 | - "Slovakia" 46 | - "Slovenia" 47 | - "Spain" 
48 | - "Sweden" 49 | - "United Kingdom" 50 | - "Albania" 51 | - "Bosnia and Herzegovina" 52 | - "North Macedonia" 53 | - "Montenegro" 54 | - "Norway" 55 | - "Serbia" 56 | - "Switzerland" 57 | - "Iceland" 58 | description: Countries to include in the model 59 | bounds: 60 | type: object 61 | properties: 62 | x_min: 63 | type: number 64 | minimum: -180 65 | maximum: 180 66 | description: Minimum longitude, in degrees east 67 | x_max: 68 | type: number 69 | minimum: -180 70 | maximum: 180 71 | description: Maximum longitude, in degrees east 72 | y_min: 73 | type: number 74 | minimum: -90 75 | maximum: 90 76 | description: Minimum Latitude, in degrees north 77 | y_max: 78 | type: number 79 | minimum: -90 80 | maximum: 90 81 | description: Maximum Latitude, in degrees north 82 | description: Total extent of system under study. Defaults to all of Europe 83 | exclusion_zones: 84 | type: object 85 | patternProperties: 86 | ^.*$: 87 | properties: 88 | x_min: 89 | type: number 90 | minimum: -180 91 | maximum: 180 92 | description: Minimum longitude, in degrees east 93 | x_max: 94 | type: number 95 | minimum: -180 96 | maximum: 180 97 | description: Maximum longitude, in degrees east 98 | y_min: 99 | type: number 100 | minimum: -90 101 | maximum: 90 102 | description: Minimum Latitude, in degrees north 103 | y_max: 104 | type: number 105 | minimum: -90 106 | maximum: 90 107 | description: Maximum Latitude, in degrees north 108 | description: Any number of bounding boxes defining exclusion zones, where spatial features within the total bounds are to be ignored. 109 | layers: 110 | type: object 111 | patternProperties: 112 | ^.*$: 113 | type: object 114 | properties: 115 | ^.*$: # ideally this would be 'oneof' the list of countries above (can this be done in a schema??) 
116 | type: string 117 | enum: 118 | - gadm0 119 | - gadm1 120 | - gadm2 121 | - gadm3 122 | - gadm4 123 | - gadm5 124 | - lau2 125 | - nuts0 126 | - nuts1 127 | - nuts2 128 | - nuts3 129 | 130 | -------------------------------------------------------------------------------- /notebooks/wind-turbines-in-europe.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Which wind turbines have recently been built the most?\n", 8 | "\n", 9 | "For the analysis we need simulations of wind power in-feed which we generate using renewables.ninja. We need to choose a particular wind turbine model, but there are plenty of them in use today. We thus choose a model that is built often recently. Using the wind farms database of [The Wind Power](https://www.thewindpower.net), we search for the models that have been built most often recently." 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 2, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "import pandas as pd" 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 3, 24 | "metadata": {}, 25 | "outputs": [], 26 | "source": [ 27 | "PATH_TO_DATA = \"../data/Windfarms_Europe_20180627.csv\"\n", 28 | "RECENT_YEAR = 2010" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": 5, 34 | "metadata": {}, 35 | "outputs": [], 36 | "source": [ 37 | "data = pd.read_csv(PATH_TO_DATA, quotechar=\"'\")" 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": 42, 43 | "metadata": {}, 44 | "outputs": [], 45 | "source": [ 46 | "data[\"year\"] = pd.to_numeric(\n", 47 | " data[\"Commissioning date (Format: yyyy or yyyymm)\"].str[:4],\n", 48 | " errors=\"coerce\"\n", 49 | ")\n", 50 | "data[\"number_turbines\"] = pd.to_numeric(\n", 51 | " data[\"Number of turbines\"],\n", 52 | " errors=\"coerce\"\n", 53 | ")\n", 54 | "data[\"hub_height\"] 
= pd.to_numeric(\n", 55 | " data[\"Hub height (m)\"],\n", 56 | " errors=\"coerce\"\n", 57 | ")\n", 58 | "recent = data[\"year\"] >= RECENT_YEAR\n", 59 | "offshore = data[\"Offshore - Shore distance (km)\"] != \"No\"" 60 | ] 61 | }, 62 | { 63 | "cell_type": "markdown", 64 | "metadata": {}, 65 | "source": [ 66 | "## Turbines" 67 | ] 68 | }, 69 | { 70 | "cell_type": "code", 71 | "execution_count": 40, 72 | "metadata": {}, 73 | "outputs": [ 74 | { 75 | "data": { 76 | "text/plain": [ 77 | "Turbine\n", 78 | "#ND 15.369783\n", 79 | "V90/2000 9.834090\n", 80 | "E82/2300 5.366971\n", 81 | "Name: number_turbines, dtype: float64" 82 | ] 83 | }, 84 | "execution_count": 40, 85 | "metadata": {}, 86 | "output_type": "execute_result" 87 | } 88 | ], 89 | "source": [ 90 | "(data[recent & ~offshore].groupby(\"Turbine\")[\"number_turbines\"]\n", 91 | " .sum()\n", 92 | " .transform(lambda x: x / x.sum() * 100)\n", 93 | " .sort_values(ascending=False)\n", 94 | " .iloc[:3]\n", 95 | ")" 96 | ] 97 | }, 98 | { 99 | "cell_type": "markdown", 100 | "metadata": {}, 101 | "source": [ 102 | "The onshore wind turbine that has been built most often since 2010 in Europe is the Vestas V90/2000. 10% of all installed wind turbines since then has been this model." 
103 | ] 104 | }, 105 | { 106 | "cell_type": "code", 107 | "execution_count": 41, 108 | "metadata": {}, 109 | "outputs": [ 110 | { 111 | "data": { 112 | "text/plain": [ 113 | "Turbine\n", 114 | "SWT-3.6-107 26.271186\n", 115 | "SWT-3.6-120 17.312349\n", 116 | "SWT-6.0-154 10.805085\n", 117 | "Name: number_turbines, dtype: float64" 118 | ] 119 | }, 120 | "execution_count": 41, 121 | "metadata": {}, 122 | "output_type": "execute_result" 123 | } 124 | ], 125 | "source": [ 126 | "(data[recent & offshore].groupby(\"Turbine\")[\"number_turbines\"]\n", 127 | " .sum()\n", 128 | " .transform(lambda x: x / x.sum() * 100)\n", 129 | " .sort_values(ascending=False)\n", 130 | " .iloc[:3]\n", 131 | ")" 132 | ] 133 | }, 134 | { 135 | "cell_type": "markdown", 136 | "metadata": {}, 137 | "source": [ 138 | "The offshore wind turbine that has been built most often since 2010 in Europe is the Siemens SWT-3.6-107. 26% of all installed wind turbines since then has been this model." 139 | ] 140 | }, 141 | { 142 | "cell_type": "markdown", 143 | "metadata": {}, 144 | "source": [ 145 | "## Hub Heights\n", 146 | "\n", 147 | "What are the median hub heights of these turbines?" 
from enum import IntEnum, Enum
import numpy as np

# Dtype of eligibility rasters; all category codes below fit into an unsigned byte.
DATATYPE = np.uint8


class Eligibility(IntEnum):
    """Categories defining land eligibility for renewable power.

    The integer values are the codes written into eligibility rasters.
    """
    NOT_ELIGIBLE = 0
    ROOFTOP_PV = 250
    ONSHORE_WIND_AND_PV = 180
    ONSHORE_WIND = 110
    OFFSHORE_WIND = 40

    @property
    def area_column_name(self):
        """Name of the tabular column holding the area [km2] of this category."""
        return "eligibility_{}_km2".format(self.name.lower())

    @staticmethod
    def onshore():
        """Returns all onshore eligibilities."""
        return [
            Eligibility.NOT_ELIGIBLE,
            Eligibility.ROOFTOP_PV,
            Eligibility.ONSHORE_WIND_AND_PV,
            Eligibility.ONSHORE_WIND,
        ]

    @staticmethod
    def offshore():
        """Returns all offshore eligibilities."""
        return [
            Eligibility.OFFSHORE_WIND
        ]


class GlobCover(IntEnum):
    """Original categories taken from GlobCover 2009 land cover."""
    POST_FLOODING = 11
    RAINFED_CROPLANDS = 14
    MOSAIC_CROPLAND = 20
    MOSAIC_VEGETATION = 30
    CLOSED_TO_OPEN_BROADLEAVED_FOREST = 40
    CLOSED_BROADLEAVED_FOREST = 50
    OPEN_BROADLEAVED_FOREST = 60
    CLOSED_NEEDLELEAVED_FOREST = 70
    OPEN_NEEDLELEAVED_FOREST = 90
    CLOSED_TO_OPEN_MIXED_FOREST = 100
    MOSAIC_FOREST = 110
    MOSAIC_GRASSLAND = 120
    CLOSED_TO_OPEN_SHRUBLAND = 130
    CLOSED_TO_OPEN_HERBS = 140
    SPARSE_VEGETATION = 150
    CLOSED_TO_OPEN_REGULARLY_FLOODED_FOREST = 160 # doesn't exist in Europe
    CLOSED_REGULARLY_FLOODED_FOREST = 170 # doesn't exist in Europe
    CLOSED_TO_OPEN_REGULARLY_FLOODED_GRASSLAND = 180 # roughly 2.3% of land in Europe
    ARTIFICAL_SURFACES_AND_URBAN_AREAS = 190
    BARE_AREAS = 200
    WATER_BODIES = 210
    PERMANENT_SNOW = 220
    NO_DATA = 230


# Groups of GlobCover codes used by the eligibility rules below.
FARM = [GlobCover.POST_FLOODING, GlobCover.RAINFED_CROPLANDS,
        GlobCover.MOSAIC_CROPLAND, GlobCover.MOSAIC_VEGETATION]
FOREST = [GlobCover.CLOSED_TO_OPEN_BROADLEAVED_FOREST, GlobCover.CLOSED_BROADLEAVED_FOREST,
          GlobCover.OPEN_BROADLEAVED_FOREST, GlobCover.CLOSED_NEEDLELEAVED_FOREST,
          GlobCover.OPEN_NEEDLELEAVED_FOREST, GlobCover.CLOSED_TO_OPEN_MIXED_FOREST,
          GlobCover.MOSAIC_FOREST, GlobCover.CLOSED_TO_OPEN_REGULARLY_FLOODED_FOREST,
          GlobCover.CLOSED_REGULARLY_FLOODED_FOREST]
VEGETATION = [GlobCover.MOSAIC_GRASSLAND, GlobCover.CLOSED_TO_OPEN_SHRUBLAND,
              GlobCover.CLOSED_TO_OPEN_HERBS, GlobCover.SPARSE_VEGETATION,
              GlobCover.CLOSED_TO_OPEN_REGULARLY_FLOODED_GRASSLAND]
BARE = [GlobCover.BARE_AREAS]
OTHER = VEGETATION + BARE
URBAN = [GlobCover.ARTIFICAL_SURFACES_AND_URBAN_AREAS]
WATER = [GlobCover.WATER_BODIES]


class ProtectedArea(IntEnum):
    """Derived from UNEP-WCMC data set."""
    PROTECTED = 255
    NOT_PROTECTED = 0


class Potential(Enum):
    """Classes of renewable electricity potentials.

    Each member maps an integer id to the eligibility categories on which
    the potential can be realised.
    """
    ROOFTOP_PV = (1, [Eligibility.ROOFTOP_PV])
    OPEN_FIELD_PV = (2, [Eligibility.ONSHORE_WIND_AND_PV])
    ONSHORE_WIND = (3, [Eligibility.ONSHORE_WIND_AND_PV, Eligibility.ONSHORE_WIND])
    OFFSHORE_WIND = (4, [Eligibility.OFFSHORE_WIND])

    def __init__(self, int_id, corresponding_eligibilities):
        self.int_id = int_id
        self.eligible_on = corresponding_eligibilities

    @property
    def area_name(self):
        return "{}_km2".format(self.name.lower())

    @property
    def capacity_name(self):
        return "{}_mw".format(self.name.lower())

    @property
    def electricity_yield_name(self):
        return "{}_twh_per_year".format(self.name.lower())

    @staticmethod
    def onshore():
        """Returns all onshore potentials."""
        return [
            Potential.ROOFTOP_PV,
            Potential.OPEN_FIELD_PV,
            Potential.ONSHORE_WIND,
        ]

    @staticmethod
    def offshore():
        """Returns all offshore potentials."""
        return [
            Potential.OFFSHORE_WIND
        ]

    def __repr__(self):
        return self.electricity_yield_name

    def __str__(self):
        return self.__repr__()


def eligibility_land_mask(
    land_cover, slope, bathymetry, building_share, urban_green_share,
    max_slope, max_building_share, max_urban_green_share, max_depth_offshore
):
    """Derive a raster of land eligibility categories from technical restrictions.

    Parameters:
        land_cover: raster of GlobCover category codes
        slope: raster of terrain slope (same unit as the max_slope thresholds)
        bathymetry: raster of water depth (negative values; same unit as max_depth_offshore)
        building_share: raster of the share of pixel area covered by buildings
        urban_green_share: raster of the share of pixel area covered by urban green
        max_slope: dict with keys "pv" and "wind" giving the maximal usable slope
        max_building_share: threshold above which a pixel counts as settlement
        max_urban_green_share: threshold above which a pixel counts as settlement
        max_depth_offshore: maximal (i.e. most negative) usable water depth

    Returns:
        Raster of Eligibility codes with dtype DATATYPE.
    """
    # parameters
    max_slope_pv = max_slope["pv"]
    max_slope_wind = max_slope["wind"]
    assert max_slope_pv <= max_slope_wind # wind can be built wherever pv can be built

    # prepare masks
    settlements = (building_share > max_building_share) | (urban_green_share > max_urban_green_share)
    farm = np.isin(land_cover, FARM)
    forest = np.isin(land_cover, FOREST)
    other = np.isin(land_cover, OTHER)
    water = np.isin(land_cover, WATER)
    pv = (slope <= max_slope_pv) & ~settlements & (farm | other)
    wind = (slope <= max_slope_wind) & ~settlements & (farm | forest | other)
    offshore = (bathymetry > max_depth_offshore) & ~settlements & water

    # allocate eligibility; full_like is clearer than ones_like * NOT_ELIGIBLE
    land = np.full_like(land_cover, fill_value=Eligibility.NOT_ELIGIBLE, dtype=DATATYPE)
    _add_eligibility(land, Eligibility.ROOFTOP_PV, settlements)
    _add_eligibility(land, Eligibility.ONSHORE_WIND_AND_PV, wind & pv)
    _add_eligibility(land, Eligibility.ONSHORE_WIND, wind & ~pv)
    _add_eligibility(land, Eligibility.OFFSHORE_WIND, offshore)
    return land


def _add_eligibility(land, eligibility, mask):
    """Set `eligibility` on all pixels of `land` selected by boolean `mask`, in place.

    Masks handed to this function must be mutually exclusive; the assert guards
    against accidentally overwriting a previously assigned category.
    """
    # Vectorised check instead of Python-level all(): same semantics, runs in C.
    assert (land[mask] == Eligibility.NOT_ELIGIBLE).all(), f"Overwriting other eligibility with {eligibility}."
    land[mask] = eligibility
"""Tests for renewablepotentialslib.conversion."""
from datetime import timedelta
import math
import io

import pytest
import pandas as pd
from pandas.testing import assert_frame_equal

from renewablepotentialslib.conversion import (
    watt_to_watthours,
    eu_country_code_to_iso3,
    coordinate_string_to_decimal,
    transform_coordinates,
    area_to_capacity
)


@pytest.mark.parametrize("watt,duration,expected_watthour", [
    (10, timedelta(minutes=60), 10),
    (6, timedelta(minutes=30), 3),
    (1 / 8760, timedelta(days=365), 1)
])
def test_watt_to_watthour_conversion(watt, duration, expected_watthour):
    assert watt_to_watthours(watt=watt, duration=duration) == expected_watthour


@pytest.mark.parametrize(
    "eu_country_code,iso3",
    [("DE", "DEU"),
     ("EL", "GRC"),
     ("UK", "GBR")]
)
def test_eu_country_code(eu_country_code, iso3):
    assert eu_country_code_to_iso3(eu_country_code) == iso3


@pytest.mark.parametrize(
    "arcminutes,expected_easting,expected_northing",
    [("""48°18'N 14°17'E""", 14.283333, 48.300000),
     ("""54°35'20.0"N 1°11'15.0"W""", -1.187500, 54.588889)]
)
def test_coordinates_to_decimal(arcminutes, expected_easting, expected_northing):
    easting, northing = coordinate_string_to_decimal(arcminutes)
    assert math.isclose(easting, expected_easting, abs_tol=0.00001)
    assert math.isclose(northing, expected_northing, abs_tol=0.00001)


@pytest.mark.parametrize(
    "arcminutes,expected_easting,expected_northing",
    [("""48°18'N 14°17'O""", 14.283333, 48.300000),
     ("""48°18'N 14°17' O""", 14.283333, 48.300000),
     ("""48°18'N, 14°17' O""", 14.283333, 48.300000),
     ("""48.300000 N, 14.283333O""", 14.283333, 48.300000),
     ("""48°18′N 14°17′E""", 14.283333, 48.300000),
     ("""48°18′0.0″N 14°17′0.0″E""", 14.283333, 48.300000)]
)
def test_coordinates_to_decimal_edgecases(arcminutes, expected_easting, expected_northing):
    easting, northing = coordinate_string_to_decimal(arcminutes)
    assert math.isclose(easting, expected_easting, abs_tol=0.00001)
    assert math.isclose(northing, expected_northing, abs_tol=0.00001)


@pytest.mark.parametrize(
    "from_epsg,from_x,from_y,to_x,to_y",
    [("EPSG:4326", 8.55, 47.36, 4211389.55, 2695117.37), # values from epsg.io
     ("EPSG:4326", 33.87, 89.44, 4347749.36, 7315609.95)] # values from epsg.io
)
def test_transform_coordinates_to_epsg3035(from_epsg, from_x, from_y, to_x, to_y):
    x, y = transform_coordinates(
        from_epsg=from_epsg,
        to_epsg="EPSG:3035",
        x=from_x,
        y=from_y
    )
    assert math.isclose(x, to_x, abs_tol=0.1) # tolerance 0.1m
    assert math.isclose(y, to_y, abs_tol=0.1)


@pytest.mark.parametrize(
    "from_epsg,from_x,from_y,to_x,to_y",
    [("EPSG:4326", 92.4, 32.8, 8315488.22495176, 3969803.31307849), # values from epsg.io
     ("EPSG:4326", 33.87, 89.44, 0.00000000, 9020047.84807365)] # values from epsg.io
)
@pytest.mark.xfail(reason="ESRI:54009 may not be the right identifier.")
def test_transform_coordinates_to_esri54009(from_epsg, from_x, from_y, to_x, to_y):
    x, y = transform_coordinates(
        from_epsg=from_epsg,
        to_epsg="ESRI:54009",
        x=from_x,
        y=from_y
    )
    assert math.isclose(x, to_x, abs_tol=0.1) # tolerance 0.1m
    assert math.isclose(y, to_y, abs_tol=0.1)


@pytest.mark.parametrize(
    "from_epsg,to_x,to_y,from_x,from_y",
    [("EPSG:3035", 8.55, 47.36, 4211389.55, 2695117.37), # values from epsg.io
     ("EPSG:3035", 33.87, 89.44, 4347749.36, 7315609.95)] # values from epsg.io
)
def test_transform_coordinates_to_epsg4326(from_epsg, to_x, to_y, from_x, from_y):
    x, y = transform_coordinates(
        from_epsg=from_epsg,
        to_epsg="EPSG:4326",
        x=from_x,
        y=from_y
    )
    assert math.isclose(x, to_x, abs_tol=0.01)
    assert math.isclose(y, to_y, abs_tol=0.01)


class TestAreaToCapacity:
    """Tests for area_to_capacity based on a small statistical roof model."""

    ROOF_MODEL = """
orientation,average_tilt,share_of_roof_areas
E, 18.155579, 0.049090
E, 25.863758, 0.039782
E, 32.876361, 0.036700
E, 43.523447, 0.040453
N, 17.312256, 0.048285
N, 24.879743, 0.041521
N, 32.361540, 0.046410
N, 43.655379, 0.045527
S, 18.063436, 0.055544
S, 25.412273, 0.036332
S, 32.368793, 0.047489
S, 43.059819, 0.042767
W, 18.107352, 0.051856
W, 25.376952, 0.034674
W, 32.340545, 0.041847
W, 43.504116, 0.039763
flat, 0.000000, 0.301960
"""

    @pytest.fixture()
    def roof_model_area_based(self):
        model = pd.read_csv(io.StringIO(self.ROOF_MODEL)).set_index(["orientation", "average_tilt"])
        assert model.sum().sum() == pytest.approx(1.0)
        return model

    def test_capacity_based_roof_model_sums_to_one(self, roof_model_area_based):
        roof_model_capacity_based = area_to_capacity(
            roof_model_area_based,
            power_density_flat=1,
            power_density_tilted=2
        )
        assert roof_model_capacity_based["share_of_roof_areas"].sum() == pytest.approx(1.0)

    def test_capacity_based_roof_model_equals_area_based_for_equal_power_density(self, roof_model_area_based):
        roof_model_capacity_based = area_to_capacity(
            roof_model_area_based,
            power_density_flat=1,
            power_density_tilted=1
        )
        assert_frame_equal(roof_model_capacity_based, roof_model_area_based)

    def test_weight_of_flat_reduced_for_lower_power_density(self, roof_model_area_based):
        roof_model_capacity_based = area_to_capacity(
            roof_model_area_based,
            power_density_flat=1,
            power_density_tilted=2
        )
        # Select the scalar explicitly: float() on a single-element Series is
        # deprecated in recent pandas versions.
        capacity_weight = roof_model_capacity_based.loc[("flat", 0.0), "share_of_roof_areas"]
        area_weight = roof_model_area_based.loc[("flat", 0.0), "share_of_roof_areas"]
        assert capacity_weight < area_weight

    def test_weight_of_flat_increased_for_higher_power_density(self, roof_model_area_based):
        roof_model_capacity_based = area_to_capacity(
            roof_model_area_based,
            power_density_flat=2,
            power_density_tilted=1
        )
        # Explicit scalar access, see comment in previous test.
        capacity_weight = roof_model_capacity_based.loc[("flat", 0.0), "share_of_roof_areas"]
        area_weight = roof_model_area_based.loc[("flat", 0.0), "share_of_roof_areas"]
        assert capacity_weight > area_weight
"""Functions to convert units."""
def _set_proj_lib():
    # FIXME This is fragile and should not be necessary.
    import os
    from pathlib import Path
    path_to_projlib = Path(os.environ['CONDA_PREFIX']) / "share" / "proj"
    if not path_to_projlib.exists():
        msg = "Could not locate proj library. Path works on Unix only."
        raise ImportError(msg)
    os.environ["PROJ_LIB"] = path_to_projlib.as_posix()
_set_proj_lib() # must run before pyproj is imported below
from functools import partial
from itertools import product

import pycountry
import shapely.geometry
import shapely.ops
import pyproj

from renewablepotentialslib import EPSG_3035_PROJ4


def watt_to_watthours(watt, duration):
    """Converts from [k|M|G|T]Watt to [k|M|G|T]WattHour."""
    return watt * duration.total_seconds() / 3600


def area_in_squaremeters(geodataframe):
    """Calculates the area sizes of a geo dataframe in square meters.

    Following https://gis.stackexchange.com/a/20056/77760 I am choosing equal-area projections
    to receive a most accurate determination of the size of polygons in the geo dataframe.
    Instead of Gall-Peters, as suggested in the answer, I am using EPSG_3035 which is
    particularly usefull for Europe.

    Returns a pandas series of area sizes in square meters.
    """
    return geodataframe.to_crs(EPSG_3035_PROJ4).area


def eu_country_code_to_iso3(eu_country_code):
    """Converts EU country code to ISO 3166 alpha 3.

    The European Union uses its own country codes, which often but not always match ISO 3166.
    """
    assert len(eu_country_code) == 2, "EU country codes are of length 2, yours is '{}'.".format(eu_country_code)
    # "EL" (Greece) and "UK" (United Kingdom) are the two deviations from ISO 3166-1 alpha-2.
    if eu_country_code.lower() == "el":
        iso2 = "gr"
    elif eu_country_code.lower() == "uk":
        iso2 = "gb"
    else:
        iso2 = eu_country_code
    return pycountry.countries.lookup(iso2).alpha_3


def coordinate_string_to_decimal(coordinate_string):
    """Converts a coordinate string to decimal coordinates in degrees (easting, northing).

    A coordinate string looks something like this "48°18'N 14°17'E" hence is given in degrees,
    arcminutes, and potentially arcseconds. It may as well be given in decimals.

    The function would return the coordinates in decimal degrees easting and northing, so for
    the example given above that would be (14.283333, 48.300000).
    """
    lat, long = _separate_lat_and_long(coordinate_string)
    easting = _to_decimal_degree(*_split_coordinate(long))
    northing = _to_decimal_degree(*_split_coordinate(lat))
    return easting, northing


def transform_coordinates(x, y, from_epsg, to_epsg):
    """Transforms coordinates from one coordinate reference system to the other."""
    point = shapely.geometry.Point(x, y)

    project = partial(
        pyproj.transform,
        pyproj.Proj(init=from_epsg),
        pyproj.Proj(init=to_epsg))

    transformed_point = shapely.ops.transform(project, point)
    return transformed_point.x, transformed_point.y


def transform_bounds(x_min, y_min, x_max, y_max, from_epsg, to_epsg):
    """Transforms bounds from one coordinate reference system to the other.

    Returns bounds as tuple: (x_min, y_min, x_max, y_max).
    """
    # transform all four corners, as the extremes may move under projection
    points = [transform_coordinates(x, y, from_epsg, to_epsg)
              for x, y in product([x_min, x_max], [y_min, y_max])]

    return (min([p[0] for p in points]), min([p[1] for p in points]),
            max([p[0] for p in points]), max([p[1] for p in points]),)


def _separate_lat_and_long(coordinate_string):
    """Split a coordinate string into its latitude and longitude parts."""
    coordinate_string = coordinate_string.replace(",", "")
    assert "N" in coordinate_string, coordinate_string
    length_of_lat = coordinate_string.find("N") + 1
    lat = coordinate_string[:length_of_lat].strip()
    long = coordinate_string[length_of_lat:].strip()
    assert lat[-1] == "N", lat
    # "O" is the German abbreviation for east ("Osten").
    assert long[-1] in ["E", "O", "W"], long
    return lat, long


def _split_coordinate(coordinate_string):
    """Split one coordinate into (degrees, arcminutes, arcseconds) floats."""
    # Normalise unicode prime/double prime to ASCII so both notations parse.
    # Previously only the single prime was normalised, which silently dropped
    # arcseconds written with "″" (they were parsed as 0).
    coordinate_string = coordinate_string.replace("′", "'").replace("″", '"')
    if "°" in coordinate_string: # coordinates are given in arcminutes
        degrees, residual = coordinate_string.split("°")
        if "'" in residual:
            arcminutes, residual = residual.split("'")
        else:
            arcminutes = 0.0
        if '"' in residual:
            arcseconds, residual = residual.split('"')
        else:
            arcseconds = 0.0
    else: # coordinates are given in decimals already
        degrees = coordinate_string[:-1]
        residual = coordinate_string[-1]
        arcminutes = 0.0
        arcseconds = 0.0
    degrees, arcminutes, arcseconds = [float(value) for value in (degrees, arcminutes, arcseconds)]
    if "W" in residual:
        # longitude given in westings instead of eastings
        degrees, arcminutes, arcseconds = [- value for value in (degrees, arcminutes, arcseconds)]
    return degrees, arcminutes, arcseconds


def _to_decimal_degree(degrees, arcminutes, arcseconds):
    """Combine degree/arcminute/arcsecond components into decimal degrees."""
    return degrees + arcminutes / 60 + arcseconds / 3600


def area_to_capacity(statistical_roof_model_area_based, power_density_flat, power_density_tilted):
    """Maps area shares to capacity shares of statistical roof model.

    The statistical roof model defines roof categories (e.g. south-facing with tilt 10°) and their
    shares in a population of roofs. This function maps areas shares to shares of installable pv
    capacity. It discriminates between flat and tilted roofs, i.e. the power density of flat roofs
    can be different than the one from tilted roofs.

    Parameters:
        * statistical_roof_model_area_based: model as described above, values are shares of total roof area
        * power_density_flat: power density of flat pv installations, unit must be consistent with next
        * power_density_tilted: power density of tilted pv installations, unit must be consistent with previous
    Returns:
        * statistical_roof_model_capacity_based: model as described above, values are shares of total
          installable capacity
    """
    cap_based = statistical_roof_model_area_based.copy()
    # level 0 of the index is the orientation; "flat" marks flat roofs
    flat_roofs = cap_based.index.get_level_values(0) == "flat"
    tilted_roofs = cap_based.index.get_level_values(0) != "flat"
    cap_based[flat_roofs] = cap_based[flat_roofs] * power_density_flat
    cap_based[tilted_roofs] = cap_based[tilted_roofs] * power_density_tilted
    # renormalise so shares sum to 1 again
    return cap_based / cap_based.sum()


def orientation_to_azimuth(orientation):
    """Maps a cardinal roof orientation to its azimuth in degrees.

    Flat roofs are treated as south-facing.
    """
    if orientation == "S":
        return 180
    elif orientation == "W":
        return -90
    elif orientation == "N":
        return 0
    elif orientation == "E":
        return 90
    elif orientation == "flat":
        return 180
    else:
        raise ValueError("Unknown orientation: {}.".format(orientation))
10 | """ 11 | import numpy as np 12 | import pandas as pd 13 | import rasterio 14 | from rasterstats import zonal_stats 15 | import fiona 16 | 17 | from renewablepotentialslib.eligibility import Eligibility, FOREST, FARM, OTHER, ProtectedArea 18 | 19 | 20 | def areas(path_to_units, path_to_eez, path_to_shared_coast, path_to_eligible_area, 21 | path_to_eligibility_categories, path_to_land_cover, path_to_protected_areas, 22 | scenario_config, path_to_result): 23 | """Determine available area of renewable electricity in each administrative unit. 24 | 25 | * Take the (only technically restricted) raster data potentials, 26 | * add restrictions based on scenario definitions, 27 | * allocate the onshore areas to the administrative units, 28 | * allocate the offshore areas to exclusive economic zones (EEZ), 29 | * allocate the offshore areas of EEZ to units based on the fraction of shared coast. 30 | """ 31 | with rasterio.open(path_to_eligibility_categories, "r") as src: 32 | category_map = src.read(1) 33 | with rasterio.open(path_to_eligible_area, "r") as src: 34 | transform = src.transform 35 | area_map = src.read(1) 36 | with rasterio.open(path_to_land_cover, "r") as src: 37 | land_cover = src.read(1) 38 | with rasterio.open(path_to_protected_areas, "r") as src: 39 | protected_areas = src.read(1) 40 | with fiona.open(path_to_units, "r") as src: 41 | unit_ids = [feature["properties"]["id"] for feature in src] 42 | unit_geometries = [feature["geometry"] for feature in src] 43 | with fiona.open(path_to_eez, "r") as src: 44 | eez_ids = [feature["properties"]["id"] for feature in src] 45 | eez_geometries = [feature["geometry"] for feature in src] 46 | shared_coasts = pd.read_csv(path_to_shared_coast, index_col=0) 47 | 48 | area_map = apply_scenario_config_to_areas( 49 | area_map=area_map, 50 | category_map=category_map, 51 | land_cover=land_cover, 52 | protected_areas=protected_areas, 53 | scenario_config=scenario_config 54 | ) 55 | category_map = 
apply_scenario_config_to_categories( 56 | category_map=category_map, 57 | land_cover=land_cover, 58 | protected_areas=protected_areas, 59 | scenario_config=scenario_config 60 | ) 61 | onshore_areas = pd.DataFrame( 62 | index=unit_ids, 63 | data={ 64 | eligibility_category.area_column_name: _area( 65 | eligibility_category=eligibility_category, 66 | area_map=area_map, 67 | category_map=category_map, 68 | shapes=unit_geometries, 69 | transform=transform 70 | ) 71 | for eligibility_category in Eligibility.onshore() 72 | } 73 | ) 74 | offshore_eez_areas = pd.DataFrame( 75 | index=eez_ids, 76 | data={ 77 | eligibility_category.area_column_name: _area( 78 | eligibility_category=eligibility_category, 79 | area_map=area_map, 80 | category_map=category_map, 81 | shapes=eez_geometries, 82 | transform=transform 83 | ) 84 | for eligibility_category in Eligibility.offshore() 85 | } 86 | ) 87 | offshore_areas = pd.DataFrame( 88 | data=shared_coasts.dot(offshore_eez_areas), 89 | columns=[cat.area_column_name for cat in Eligibility.offshore()] 90 | ) 91 | areas = pd.concat([onshore_areas, offshore_areas], axis=1) 92 | areas.index.name = "id" 93 | areas.to_csv( 94 | path_to_result, 95 | header=True, 96 | index=True 97 | ) 98 | 99 | 100 | def apply_scenario_config_to_areas(area_map, category_map, 101 | land_cover, protected_areas, scenario_config): 102 | """Limit eligible area of each pixel based on scenario config.""" 103 | 104 | # share-rooftops-used 105 | share_rooftops_used = scenario_config["share-rooftops-used"] 106 | mask = category_map == Eligibility.ROOFTOP_PV 107 | area_map[mask] = area_map[mask] * share_rooftops_used 108 | 109 | # share-forest-used-for-wind 110 | share_forest_used_for_wind = scenario_config["share-forest-used-for-wind"] 111 | mask = np.isin(land_cover, FOREST) & (category_map != Eligibility.ROOFTOP_PV) 112 | area_map[mask] = area_map[mask] * share_forest_used_for_wind 113 | 114 | # share-other-land-used 115 | share_other_land_used = 
scenario_config["share-other-land-used"] 116 | mask = np.isin(land_cover, OTHER) & (category_map != Eligibility.ROOFTOP_PV) 117 | area_map[mask] = area_map[mask] * share_other_land_used 118 | 119 | # share-farmland-used 120 | share_farmland_used = scenario_config["share-farmland-used"] 121 | mask = np.isin(land_cover, FARM) & (category_map != Eligibility.ROOFTOP_PV) 122 | area_map[mask] = area_map[mask] * share_farmland_used 123 | 124 | # share-offshore-used 125 | share_offshore_used = scenario_config["share-offshore-used"] 126 | mask = category_map == Eligibility.OFFSHORE_WIND 127 | area_map[mask] = area_map[mask] * share_offshore_used 128 | 129 | return area_map 130 | 131 | 132 | def apply_scenario_config_to_categories(category_map, land_cover, protected_areas, scenario_config): 133 | """Change categories of each pixel based on scenario config.""" 134 | 135 | # pv-on-farmland 136 | pv_on_farmland = scenario_config["pv-on-farmland"] 137 | if not pv_on_farmland: 138 | mask = np.isin(land_cover, FARM) & (category_map == Eligibility.ONSHORE_WIND_AND_PV) 139 | category_map[mask] = Eligibility.ONSHORE_WIND 140 | 141 | # protected-areas-used 142 | use_protected_areas = scenario_config["use-protected-areas"] 143 | if not use_protected_areas: 144 | mask = (protected_areas == ProtectedArea.PROTECTED) & (category_map != Eligibility.ROOFTOP_PV) 145 | category_map[mask] = Eligibility.NOT_ELIGIBLE 146 | 147 | return category_map 148 | 149 | 150 | def _area(eligibility_category, area_map, category_map, shapes, transform): 151 | """Determine eligible area of one eligibility category per shape.""" 152 | area_map = area_map.copy() 153 | area_map[category_map != eligibility_category] = 0 154 | potentials = zonal_stats( 155 | shapes, 156 | area_map, 157 | affine=transform, 158 | stats="sum", 159 | nodata=-999 160 | ) 161 | return [stat["sum"] for stat in potentials] 162 | 163 | 164 | if __name__ == "__main__": 165 | areas( 166 | path_to_units=snakemake.input.units, 167 | 
"""Module containing utilities for working with administrative and raster shapes."""
import math

import numpy as np
import geopandas as gpd
import shapely
import pycountry
import pyproj

from renewablepotentialslib.conversion import transform_bounds, eu_country_code_to_iso3
from renewablepotentialslib import EPSG_3035, EPSG_3035_PROJ4, WGS84, WGS84_PROJ4


def determine_pixel_areas(crs, bounds, resolution):
    """Returns a raster in which the value corresponds to the area [km2] of the pixel.

    This assumes the data comprises square pixels in WGS84.

    Parameters:
        crs: the coordinate reference system of the data (must be WGS84)
        bounds: an object with attributes left/right/top/bottom given in degrees
        resolution: the scalar resolution (remember: square pixels) given in degrees
    """
    # the following is based on https://gis.stackexchange.com/a/288034/77760
    # and assumes the data to be in WGS84
    assert pyproj.crs.CRS(crs) == pyproj.crs.CRS(WGS84)
    width = int((bounds.right - bounds.left) / resolution)
    height = int((bounds.top - bounds.bottom) / resolution)
    # Pixel area depends on latitude only, so compute one value per row ...
    latitudes = np.linspace(
        start=bounds.top,
        stop=bounds.bottom,
        num=height,
        endpoint=True,
        dtype=np.float64
    )
    varea_of_pixel = np.vectorize(lambda lat: _area_of_pixel(resolution, lat))
    pixel_area = varea_of_pixel(latitudes) # vector
    # ... then broadcast the row values across all columns.
    return pixel_area.repeat(width).reshape(height, width).astype(np.float64)


def _area_of_pixel(pixel_size, center_lat):
    """Calculate km^2 area of a wgs84 square pixel.

    Adapted from: https://gis.stackexchange.com/a/127327/2397

    Parameters:
        pixel_size (float): length of side of pixel in degrees.
        center_lat (float): latitude of the center of the pixel. Note this
            value +/- half the `pixel-size` must not exceed 90/-90 degrees
            latitude or an invalid area will be calculated.

    Returns:
        Area of square pixel of side length `pixel_size` centered at
        `center_lat` in km^2.

    """
    # WGS84 ellipsoid semi-major/semi-minor axes and eccentricity.
    a = 6378137 # meters
    b = 6356752.3142 # meters
    e = math.sqrt(1 - (b / a)**2)
    area_list = []
    # Evaluate the zone area formula at the pixel's top and bottom latitude.
    for f in [center_lat + pixel_size / 2, center_lat - pixel_size / 2]:
        zm = 1 - e * math.sin(math.radians(f))
        zp = 1 + e * math.sin(math.radians(f))
        area_list.append(
            math.pi * b**2 * (
                math.log(zp / zm) / (2 * e) +
                math.sin(math.radians(f)) / (zp * zm)))
    # Difference of the two zones, scaled to the pixel's longitude extent; m2 -> km2.
    return pixel_size / 360. * (area_list[0] - area_list[1]) / 1e6


def buffer_if_necessary(shape):
    """Fix shapes which are invalid.

    Following the advice given here:
    https://github.com/Toblerity/Shapely/issues/344
    """
    if shape.is_valid:
        return shape

    # buffer(0) is the canonical shapely trick to heal self-intersections
    new_shape = shape.buffer(0.0)
    assert new_shape.is_valid
    # healing must not change the area noticeably
    assert np.isclose(new_shape.area, shape.area, rtol=1e-5)

    return new_shape


def point_raster_on_shapes(bounds_wgs84, resolution_km2, shapes):
    """Creates a point raster with given resolution on a set of shapes.

    Extends (=buffers) the shapes, so that whenever a raster cell is touched by any shape,
    a point is created for that cell.

    Parameters:
        * bounds_wgs84: the bounds of the point raster, given in WGS84
        * resolution_km2: the resolution of the point raster, given in km2
        * shapes: GeoDataFrame containing the shapes
    Returns:
        * point raster in WGS84 with given resolution, filtered by the shapes
    """
    # Work in the equal-area CRS so the raster step is a plain metric distance.
    x_min, y_min, x_max, y_max = transform_bounds(
        bounds_wgs84["x_min"], bounds_wgs84["y_min"], bounds_wgs84["x_max"], bounds_wgs84["y_max"],
        from_epsg=WGS84,
        to_epsg=EPSG_3035
    )
    all_points = [
        shapely.geometry.Point(x, y)
        for x in np.arange(start=x_min, stop=x_max, step=resolution_km2 * 1000)
        for y in np.arange(start=y_min, stop=y_max, step=resolution_km2 * 1000)
    ]
    # Simplify before buffering to keep the intersection tests affordable; the
    # buffer is half the cell diagonal so any touched cell keeps its point.
    simplification_strength = resolution_km2 * 1000 / 20
    buffer_size = math.sqrt(resolution_km2 ** 2 + resolution_km2 ** 2) / 2 * 1000
    surface_areas = (shapes.to_crs(EPSG_3035_PROJ4)
                           .simplify(simplification_strength)
                           .buffer(buffer_size))
    # Prepared geometries speed up the repeated intersects() calls below.
    prepared_polygons = [shapely.prepared.prep(polygon) for polygon in surface_areas.geometry]
    return gpd.GeoSeries(
        list(filter(
            lambda point: any(polygon.intersects(point) for polygon in prepared_polygons),
            all_points
        )),
        crs=EPSG_3035_PROJ4
    ).to_crs(WGS84_PROJ4)


def update_features(gdf, src):
    """Harmonise the attribute columns of administrative shapes from different sources.

    Parameters:
        gdf: GeoDataFrame as read from the respective source dataset
        src: one of "nuts", "gadm", "lau"
    Returns:
        GeoDataFrame with harmonised columns id/name/country_code/type/proper/level.
    """
    if src == 'nuts':
        gdf["CNTR_CODE"] = gdf.CNTR_CODE.apply(eu_country_code_to_iso3)
        gdf = gdf.rename(columns={"NUTS_NAME": "name", "CNTR_CODE": "country_code"})
        gdf["type"] = gdf.LEVL_CODE.map({0: "country"})
        gdf["proper"] = True
        gdf = gdf.drop(columns=["FID"])

        # Country IDs should have three letters instead of two
        gdf.loc[gdf.LEVL_CODE == 0, "id"] = gdf.loc[gdf.LEVL_CODE == 0, "country_code"]
        # Many country names are broken or missing
        gdf.loc[gdf.LEVL_CODE == 0, "name"] = gdf.loc[gdf.LEVL_CODE == 0, "id"].apply(
            lambda x: pycountry.countries.lookup(x).name
        )

        gdf["level"] = 'nuts' + gdf.LEVL_CODE.astype(str)

    elif src == 'gadm':
        # GADM layer names end in the administrative level, e.g. "..._1".
        gdf["level"] = gdf.source_ds_lyr.str.rsplit('_', 1, expand=True)[1].astype(int)
        for lvl in gdf.level.unique():
            lvl_mask = gdf.level == lvl
            gdf.loc[lvl_mask, "country_code"] = gdf.loc[lvl_mask, "GID_0"]
            gdf.loc[lvl_mask, "id"] = gdf.loc[lvl_mask, f"GID_{lvl}"]
            gdf.loc[lvl_mask, "name"] = gdf.loc[lvl_mask, f"NAME_{lvl}"]
            if lvl > 0:
                gdf.loc[lvl_mask, "type"] = gdf.loc[lvl_mask, f"ENGTYPE_{lvl}"]
            else:
                gdf.loc[lvl_mask, "type"] = "country"
        gdf["proper"] = True
        gdf["level"] = 'gadm' + gdf.level.astype(str)

    elif src == 'lau':
        # LAU commune ids start with the two-letter EU country code.
        gdf["CNTR_CODE"] = gdf.COMM_ID.str[:2].apply(eu_country_code_to_iso3)
        gdf = gdf.rename(
            columns={"COMM_ID": "id", "NAME_LATN": "name", "CNTR_CODE": "country_code"}
        )
        gdf["type"] = "commune"
        gdf["proper"] = gdf["TRUE_COMM_"] == "T"
        gdf["level"] = "lau2"

    return gdf


def drop_countries(gdf, scope_config):
    """Drop all shapes whose country is not part of the configured scope."""
    countries = [pycountry.countries.lookup(i).alpha_3 for i in scope_config["countries"]]
    _not_in = set(gdf.country_code).difference(countries)
    print(f"Removing {_not_in} as they are outside of study area.")

    return gdf[gdf.country_code.isin(countries)]


def drop_geoms_completely_outside_study_area(gdf, scope_config):
    """Drop all shapes which do not intersect the study area at all."""
    # NOTE(review): `study_area` is defined elsewhere in this module (outside this chunk).
    _study_area = study_area(scope_config)
    completely_in = gdf.intersects(_study_area)
    for row_index, row in gdf[~completely_in].iterrows():
        print(
            "Removing {} ({}, country={}) as they are outside of study area."
            .format(*row[["name", "level", "country_code"]])
        )
    gdf = gdf[completely_in]

    return gdf


def drop_parts_of_geoms_completely_outside_study_area(gdf, scope_config):
    gdf = gdf.copy()
    _study_area = study_area(scope_config)
    # Explode multi-part geometries so parts can be tested individually.
    all_geoms = gdf.explode()
    geoms_within_study_area = all_geoms.within(_study_area)
    geoms_partially_out = ~geoms_within_study_area.all(level=0)

    # work only with geoms which have some polygons within the study area and some out
    geoms_to_update = geoms_within_study_area.mul(geoms_partially_out, level=0)
    if gdf.loc[geoms_to_update.any(level=0)].empty:
        return gdf

    for row_index, row in gdf.loc[geoms_to_update.any(level=0)].iterrows():
        print(
            "Removing parts of {} ({}, country={}) as they are outside of study area."
204 | .format(*row[["name", "level", "country_code"]]) 205 | ) 206 | # Unlike groupby, dissolve can only operate on columns, not multiindex levels 207 | 208 | new_geoms = ( 209 | all_geoms[geoms_to_update] 210 | .geometry 211 | .map(buffer_if_necessary) 212 | .groupby(level=0) 213 | .apply(lambda x: x.unary_union) 214 | .map(to_multi_polygon) 215 | ) 216 | gdf.loc[new_geoms.index, 'geometry'] = new_geoms 217 | 218 | return gdf 219 | 220 | 221 | def to_multi_polygon(geometry): 222 | if isinstance(geometry, dict): 223 | geometry = shapely.geometry.shape(geometry) 224 | if isinstance(geometry, shapely.geometry.polygon.Polygon): 225 | return shapely.geometry.MultiPolygon(polygons=[geometry]) 226 | else: 227 | return geometry 228 | 229 | 230 | def study_area(scope_config): 231 | """ 232 | Create a bounding box for the study area, and cut out holes for all defined 233 | exclusion zones. For plotting purposes, exclusion zones and the bounding box are 234 | defined in opposite orientations, see https://github.com/geopandas/geopandas/issues/951 235 | """ 236 | holes = [ 237 | ( 238 | (exclusion_zone["x_max"], exclusion_zone["y_min"]), 239 | (exclusion_zone["x_max"], exclusion_zone["y_max"]), 240 | (exclusion_zone["x_min"], exclusion_zone["y_max"]), 241 | (exclusion_zone["x_min"], exclusion_zone["y_min"]) 242 | ) 243 | for exclusion_zone in scope_config.get("exclusion_zones", {}).values() 244 | ] 245 | 246 | _study_area = shapely.geometry.Polygon( 247 | ((scope_config["bounds"]["x_min"], scope_config["bounds"]["y_min"]), 248 | (scope_config["bounds"]["x_min"], scope_config["bounds"]["y_max"]), 249 | (scope_config["bounds"]["x_max"], scope_config["bounds"]["y_max"]), 250 | (scope_config["bounds"]["x_max"], scope_config["bounds"]["y_min"])), 251 | holes=holes 252 | ) 253 | _study_area = buffer_if_necessary(_study_area) 254 | 255 | return _study_area 256 | -------------------------------------------------------------------------------- /config/default.yaml: 
-------------------------------------------------------------------------------- 1 | snakemake: 2 | max-threads: 4 3 | data-sources: 4 | load: https://data.open-power-system-data.org/time_series/2018-06-30/time_series_60min_stacked.csv 5 | nuts: https://ec.europa.eu/eurostat/cache/GISCO/distribution/v2/nuts/geojson/NUTS_RG_01M_{nuts_year}_4326.geojson 6 | lau: http://ec.europa.eu/eurostat/cache/GISCO/geodatafiles/COMM-01M-2013-SH.zip 7 | degurba: http://ec.europa.eu/eurostat/cache/GISCO/geodatafiles/DGURBA_2014_SH.zip 8 | land_cover: http://due.esrin.esa.int/files/Globcover2009_V2.3_Global_.zip 9 | protected_areas: https://www.protectedplanet.net/downloads/WDPA_Feb2019?type=shapefile 10 | cgiar_tile: http://srtm.csi.cgiar.org/wp-content/uploads/files/srtm_5x5/TIFF/ 11 | gmted_tile: https://edcintl.cr.usgs.gov/downloads/sciweb1/shared/topo/downloads/GMTED/Global_tiles_GMTED/075darcsec/mea/ 12 | gadm: https://biogeo.ucdavis.edu/data/gadm3.6/gpkg/gadm36_{country_code}_gpkg.zip 13 | bathymetric: https://www.ngdc.noaa.gov/mgg/global/relief/ETOPO1/data/bedrock/grid_registered/georeferenced_tiff/ETOPO1_Bed_g_geotiff.zip 14 | pop: http://cidportal.jrc.ec.europa.eu/ftp/jrc-opendata/GHSL/GHS_POP_GPW4_GLOBE_R2015A/GHS_POP_GPW42015_GLOBE_R2015A_54009_250/V1-0/GHS_POP_GPW42015_GLOBE_R2015A_54009_250_v1_0.zip 15 | settlement_data: data/esm-100m-2017/ESM_class{esm_class}_100m.tif 16 | eez_data: data/World_EEZ_v10_20180221/eez_v10.shp 17 | sonnendach_statistics: https://zenodo.org/record/4091033/files/roof-statistics.csv?download=1 18 | sonnendach_total_size: https://zenodo.org/record/4091033/files/total-rooftop-area-km2.txt?download=1 19 | sonnendach_total_yield: https://zenodo.org/record/4091033/files/total-yield-twh.txt?download=1 20 | raw-capacity-factors: data/capacityfactors/{technology}.nc 21 | root-directory: . 
# point to the root directory if working directory is not root directory 22 | crs: "EPSG:4326" 23 | scope: 24 | countries: 25 | - "Austria" 26 | - "Belgium" 27 | - "Bulgaria" 28 | - "Croatia" 29 | - "Cyprus" 30 | - "Czech Republic" 31 | - "Denmark" 32 | - "Estonia" 33 | - "Finland" 34 | - "France" 35 | - "Germany" 36 | - "Greece" 37 | - "Hungary" 38 | - "Ireland" 39 | - "Italy" 40 | - "Latvia" 41 | - "Lithuania" 42 | - "Luxembourg" 43 | - "Netherlands" 44 | - "Poland" 45 | - "Portugal" 46 | - "Romania" 47 | - "Slovakia" 48 | - "Slovenia" 49 | - "Spain" 50 | - "Sweden" 51 | - "United Kingdom" 52 | - "Albania" 53 | - "Bosnia and Herzegovina" 54 | - "North Macedonia" 55 | - "Montenegro" 56 | - "Norway" 57 | - "Serbia" 58 | - "Switzerland" 59 | - "Iceland" 60 | bounds: 61 | x_min: -30 # in degrees east 62 | x_max: 37 # in degrees east 63 | y_min: 30 # in degrees north 64 | y_max: 75 # in degrees north 65 | exclusion_zones: 66 | atlantic_islands: 67 | x_min: -30 # in degrees east 68 | x_max: -10 # in degrees east 69 | y_min: 30 # in degrees north 70 | y_max: 41 # in degrees north 71 | 72 | layers: 73 | continental: 74 | Austria: nuts0 75 | Belgium: nuts0 76 | Bulgaria: nuts0 77 | Croatia: nuts0 78 | Cyprus: nuts0 79 | Czech Republic: nuts0 80 | Denmark: nuts0 81 | Estonia: nuts0 82 | Finland: nuts0 83 | France: nuts0 84 | Germany: nuts0 85 | Greece: nuts0 86 | Hungary: nuts0 87 | Ireland: nuts0 88 | Italy: nuts0 89 | Latvia: nuts0 90 | Lithuania: nuts0 91 | Luxembourg: nuts0 92 | Netherlands: nuts0 93 | Poland: nuts0 94 | Portugal: nuts0 95 | Romania: nuts0 96 | Slovakia: nuts0 97 | Slovenia: nuts0 98 | Spain: nuts0 99 | Sweden: nuts0 100 | United Kingdom: nuts0 101 | Albania: gadm0 102 | Bosnia and Herzegovina: gadm0 103 | North Macedonia: gadm0 104 | Montenegro: gadm0 105 | Norway: nuts0 106 | Serbia: gadm0 107 | Switzerland: nuts0 108 | Iceland: nuts0 109 | national: 110 | Austria: nuts0 111 | Belgium: nuts0 112 | Bulgaria: nuts0 113 | Croatia: nuts0 114 | Cyprus: 
nuts0 115 | Czech Republic: nuts0 116 | Denmark: nuts0 117 | Estonia: nuts0 118 | Finland: nuts0 119 | France: nuts0 120 | Germany: nuts0 121 | Greece: nuts0 122 | Hungary: nuts0 123 | Ireland: nuts0 124 | Italy: nuts0 125 | Latvia: nuts0 126 | Lithuania: nuts0 127 | Luxembourg: nuts0 128 | Netherlands: nuts0 129 | Poland: nuts0 130 | Portugal: nuts0 131 | Romania: nuts0 132 | Slovakia: nuts0 133 | Slovenia: nuts0 134 | Spain: nuts0 135 | Sweden: nuts0 136 | United Kingdom: nuts0 137 | Albania: gadm0 138 | Bosnia and Herzegovina: gadm0 139 | North Macedonia: gadm0 140 | Montenegro: gadm0 141 | Norway: nuts0 142 | Serbia: gadm0 143 | Switzerland: nuts0 144 | Iceland: nuts0 145 | regional: 146 | # The link between NUTS and administrative units unfortunately is not obvious. 147 | # It's not documented anywhere -- at least I could not find any information. 148 | # Hence, I am using GADM here instead. 149 | # Validation source: https://en.wikipedia.org/wiki/List_of_administrative_divisions_by_country 150 | Austria: gadm1 # gadm1 and wiki match 9 151 | Belgium: gadm1 # match 3 152 | Bulgaria: gadm1 # match 28 153 | Croatia: gadm1 # match 21 154 | Cyprus: gadm1 # gadm1: 5 regions; wiki: 6 regions 155 | Czech Republic: gadm1 # match 14 156 | Denmark: gadm1 # match 5 157 | Estonia: gadm1 # gadm1 16 regions; wiki 15 regions 158 | Finland: gadm1 # gadm1 5 regions; wiki 19 regions 159 | France: gadm1 # match 13 160 | Germany: gadm1 # match 16 161 | Greece: gadm1 # gadm1 8 regions; wiki 13 (14) regions 162 | Hungary: gadm1 # match 20 163 | Ireland: gadm1 # match 26 164 | Italy: gadm1 # match 20 165 | Latvia: gadm1 # gadm1 5 regions; wiki 119 regions 166 | Lithuania: gadm1 # match 10 167 | Luxembourg: gadm2 # match 12 168 | Netherlands: gadm1 # gadm1 14 regions; wiki 12 regions 169 | Poland: gadm1 # match 16 170 | Portugal: gadm1 # match 18 171 | Romania: gadm1 # match 42 172 | Slovakia: gadm1 # match 8 173 | Slovenia: gadm1 # match 12 174 | Spain: gadm1 # match 17 175 | Sweden: 
gadm1 # match 21 176 | United Kingdom: gadm1 # match 4 177 | Albania: gadm1 # match 12 178 | Bosnia and Herzegovina: gadm1 # match 3 179 | North Macedonia: nuts3 # match 8 (statistical, non-administrative) 180 | Montenegro: gadm1 # match 21 181 | Norway: gadm1 # match 19 182 | Serbia: gadm1 # gadm1 25 regions; wiki 5 regions 183 | Switzerland: gadm1 # match 26 184 | Iceland: gadm1 185 | municipal: 186 | Austria: lau2 187 | Belgium: lau2 188 | Bulgaria: lau2 189 | Croatia: lau2 190 | Cyprus: lau2 191 | Czech Republic: lau2 192 | Denmark: lau2 193 | Estonia: lau2 194 | Finland: lau2 195 | France: lau2 196 | Germany: lau2 197 | Greece: lau2 198 | Hungary: lau2 199 | Ireland: lau2 200 | Italy: lau2 201 | Latvia: lau2 202 | Lithuania: lau2 203 | Luxembourg: lau2 # lau2 106, https://en.wikipedia.org/wiki/Communes_of_Luxembourg 102 204 | Netherlands: lau2 205 | Poland: lau2 206 | Portugal: lau2 207 | Romania: lau2 208 | Slovakia: lau2 209 | Slovenia: lau2 210 | Spain: lau2 211 | Sweden: lau2 212 | United Kingdom: lau2 213 | Albania: gadm3 214 | Bosnia and Herzegovina: gadm3 215 | North Macedonia: gadm1 # 85, instead of 1817 in LAU2; https://en.wikipedia.org/wiki/Municipalities_of_the_Republic_of_Macedonia 216 | Montenegro: gadm1 # same as regional 217 | Norway: lau2 218 | Serbia: lau2 219 | Switzerland: lau2 220 | Iceland: lau2 221 | parameters: 222 | maximum-installable-power-density: # this is not the yield, but the density of installed power 223 | pv-on-tilted-roofs: 160 # [W/m^2] from (Gagnon:2016, Klauser:2016), i.e. 
16% efficiency 224 | pv-on-flat-areas: 80 # [W/m^2] from (Gagnon:2016, Klauser:2016, Wirth:2017) 225 | onshore-wind: 8 # [W/m^2] from (European Environment Agency, 2009) 226 | offshore-wind: 15 # [W/m^2] from (European Environment Agency, 2009) 227 | availability: 228 | wind-onshore: 0.97 # [-] from (European Environment Agency, 2009) 229 | wind-offshore: 0.9 # [-] from (European Environment Agency, 2009) 230 | ninja: # change these with caution as the renewables.ninja simulations are not in the loop, see README 231 | resolution-grid: 50 # [km^2] corresponding to MERRA resolution 232 | pv-performance-ratio: 0.9 233 | hub-height: 234 | onshore: 105 # m, median hub height of V90/2000 in Europe between 2010 and 2018 235 | offshore: 87 # m, median hub height of SWT-3.6-107 in Europe between 2010 and 2018 236 | turbine: 237 | onshore: "vestas v90 2000" # most built between 2010 and 2018 in Europe 238 | offshore: "siemens swt 3.6 107" # most built between 2010 and 2018 in Europe 239 | max-slope: 240 | pv: 10 # [°] (AlGarni:2018) 241 | wind: 20 # [°] (McKenna:2014) 242 | max-depth-offshore: -50 243 | max-building-share: 0.01 # Above, a pixel cannot be used for energy farms. Equals roughly 900m2 244 | max-urban-green-share: 0.01 # Above, a pixel cannot be used for energy farms. Equals roughly 900m2. Removes e.g. Berlin's Tempelhofer Feld. 
245 | nuts-year: 2010 # Choice of NUTS data to use from [2006, 2010, 2013, 2016, 2021] 246 | scenarios: 247 | technical-potential: 248 | use-protected-areas: True 249 | pv-on-farmland: True 250 | share-farmland-used: 1.0 251 | share-forest-used-for-wind: 1.0 252 | share-other-land-used: 1.0 253 | share-offshore-used: 1.0 254 | share-rooftops-used: 1.0 255 | technical-social-potential: 256 | use-protected-areas: False 257 | pv-on-farmland: False 258 | share-farmland-used: 0.1 259 | share-forest-used-for-wind: 0.1 260 | share-other-land-used: 0.1 261 | share-offshore-used: 0.1 262 | share-rooftops-used: 1.0 263 | -------------------------------------------------------------------------------- /scripts/potentials.py: -------------------------------------------------------------------------------- 1 | """Determine potential of renewable electricity in each administrative unit. 2 | 3 | * Take the (only technically restricted) raster data potentials, 4 | * add restrictions based on scenario definitions, 5 | * allocate the onshore potentials to the administrative units, 6 | * allocate the offshore potentials to exclusive economic zones (EEZ), 7 | * allocate the offshore potential of EEZ to units based on the fraction of shared coast. 8 | 9 | This is in analogy to `areas.py` but for potentials [TWh/a] rather than areas [km2] . 
10 | """ 11 | 12 | import numpy as np 13 | import pandas as pd 14 | import rasterio 15 | from rasterstats import zonal_stats 16 | import fiona 17 | 18 | from renewablepotentialslib.eligibility import Eligibility, ProtectedArea, Potential, FOREST, FARM, OTHER 19 | 20 | 21 | def potentials(path_to_units, path_to_eez, path_to_shared_coast, 22 | path_to_capacities_pv_prio, path_to_capacities_wind_prio, 23 | path_to_electricity_yield_pv_prio, path_to_electricity_yield_wind_prio, 24 | path_to_eligibility_categories, path_to_land_cover, path_to_protected_areas, 25 | scenario_config, metric, path_to_result): 26 | """Determine potential of renewable electricity in each administrative unit. 27 | 28 | * Take the (only technically restricted) raster data potentials, 29 | * add restrictions based on scenario definitions, 30 | * allocate the onshore potentials to the administrative units, 31 | * allocate the offshore potentials to exclusive economic zones (EEZ), 32 | * allocate the offshore potential of EEZ to units based on the fraction of shared coast. 
33 | """ 34 | with rasterio.open(path_to_eligibility_categories, "r") as src: 35 | eligibility_categories = src.read(1) 36 | with rasterio.open(path_to_electricity_yield_pv_prio, "r") as src: 37 | transform = src.transform 38 | electricity_yield_pv_prio = src.read(1) 39 | with rasterio.open(path_to_electricity_yield_wind_prio, "r") as src: 40 | electricity_yield_wind_prio = src.read(1) 41 | with rasterio.open(path_to_land_cover, "r") as src: 42 | land_cover = src.read(1) 43 | with rasterio.open(path_to_protected_areas, "r") as src: 44 | protected_areas = src.read(1) 45 | with fiona.open(path_to_units, "r") as src: 46 | unit_ids = [feature["properties"]["id"] for feature in src] 47 | unit_geometries = [feature["geometry"] for feature in src] 48 | with fiona.open(path_to_eez, "r") as src: 49 | eez_ids = [feature["properties"]["id"] for feature in src] 50 | eez_geometries = [feature["geometry"] for feature in src] 51 | shared_coasts = pd.read_csv(path_to_shared_coast, index_col=0) 52 | electricity_yield_pv_prio, electricity_yield_wind_prio = apply_scenario_config( 53 | potential_pv_prio=electricity_yield_pv_prio, 54 | potential_wind_prio=electricity_yield_wind_prio, 55 | categories=eligibility_categories, 56 | land_cover=land_cover, 57 | protected_areas=protected_areas, 58 | scenario_config=scenario_config 59 | ) 60 | if metric == "capacity": 61 | with rasterio.open(path_to_capacities_pv_prio, "r") as src: 62 | transform = src.transform 63 | capacities_pv_prio = src.read(1) 64 | with rasterio.open(path_to_capacities_wind_prio, "r") as src: 65 | capacities_wind_prio = src.read(1) 66 | 67 | potential_pv_prio, potential_wind_prio = apply_scenario_config( 68 | potential_pv_prio=capacities_pv_prio, 69 | potential_wind_prio=capacities_wind_prio, 70 | categories=eligibility_categories, 71 | land_cover=land_cover, 72 | protected_areas=protected_areas, 73 | scenario_config=scenario_config 74 | ) 75 | elif metric == "electricity_yield": 76 | potential_pv_prio = 
electricity_yield_pv_prio 77 | potential_wind_prio = electricity_yield_wind_prio 78 | 79 | capacities_pv_prio, capacities_wind_prio = decide_between_pv_and_wind( 80 | potential_pv_prio=potential_pv_prio, 81 | potential_wind_prio=potential_wind_prio, 82 | electricity_yield_pv_prio=electricity_yield_pv_prio, 83 | electricity_yield_wind_prio=electricity_yield_wind_prio, 84 | eligibility_categories=eligibility_categories 85 | ) 86 | 87 | onshore_potentials = pd.DataFrame( 88 | index=unit_ids, 89 | data={ 90 | getattr(potential, f"{metric}_name"): potentials_per_shape( 91 | eligibilities=potential.eligible_on, 92 | potential_map=(capacities_pv_prio if "pv" in str(potential).lower() 93 | else capacities_wind_prio), 94 | eligibility_categories=eligibility_categories, 95 | shapes=unit_geometries, 96 | transform=transform 97 | ) 98 | for potential in Potential.onshore() 99 | } 100 | ) 101 | offshore_eez_potentials = pd.DataFrame( 102 | index=eez_ids, 103 | data={ 104 | getattr(potential, f"{metric}_name"): potentials_per_shape( 105 | eligibilities=potential.eligible_on, 106 | potential_map=(capacities_pv_prio if "pv" in str(potential).lower() 107 | else capacities_wind_prio), 108 | eligibility_categories=eligibility_categories, 109 | shapes=eez_geometries, 110 | transform=transform 111 | ) 112 | for potential in Potential.offshore() 113 | } 114 | ) 115 | offshore_potentials = pd.DataFrame( 116 | data=shared_coasts.dot(offshore_eez_potentials), 117 | columns=[getattr(potential, f"{metric}_name") for potential in Potential.offshore()] 118 | ) 119 | potentials = pd.concat([onshore_potentials, offshore_potentials], axis=1) 120 | potentials.index.name = "id" 121 | potentials.to_csv( 122 | path_to_result, 123 | header=True, 124 | index=True 125 | ) 126 | 127 | 128 | def apply_scenario_config(potential_pv_prio, potential_wind_prio, categories, 129 | land_cover, protected_areas, scenario_config): 130 | """Limit potential in each pixel based on scenario config.""" 131 | 132 | # 
share-rooftops-used 133 | share_rooftops_used = scenario_config["share-rooftops-used"] 134 | mask = categories == Eligibility.ROOFTOP_PV 135 | potential_pv_prio[mask] = potential_pv_prio[mask] * share_rooftops_used 136 | potential_wind_prio[mask] = potential_wind_prio[mask] * share_rooftops_used 137 | 138 | # share-forest-used-for-wind 139 | share_forest_used_for_wind = scenario_config["share-forest-used-for-wind"] 140 | mask = np.isin(land_cover, FOREST) & (categories != Eligibility.ROOFTOP_PV) 141 | potential_pv_prio[mask] = potential_pv_prio[mask] * share_forest_used_for_wind 142 | potential_wind_prio[mask] = potential_wind_prio[mask] * share_forest_used_for_wind 143 | 144 | # share-other-land-used 145 | share_other_land_used = scenario_config["share-other-land-used"] 146 | mask = np.isin(land_cover, OTHER) & (categories != Eligibility.ROOFTOP_PV) 147 | potential_pv_prio[mask] = potential_pv_prio[mask] * share_other_land_used 148 | potential_wind_prio[mask] = potential_wind_prio[mask] * share_other_land_used 149 | 150 | # share-farmland-used 151 | share_farmland_used = scenario_config["share-farmland-used"] 152 | mask = np.isin(land_cover, FARM) & (categories != Eligibility.ROOFTOP_PV) 153 | potential_pv_prio[mask] = potential_pv_prio[mask] * share_farmland_used 154 | potential_wind_prio[mask] = potential_wind_prio[mask] * share_farmland_used 155 | 156 | # share-offshore-used 157 | share_offshore_used = scenario_config["share-offshore-used"] 158 | mask = categories == Eligibility.OFFSHORE_WIND 159 | potential_pv_prio[mask] = potential_pv_prio[mask] * share_offshore_used 160 | potential_wind_prio[mask] = potential_wind_prio[mask] * share_offshore_used 161 | 162 | # pv-on-farmland 163 | pv_on_farmland = scenario_config["pv-on-farmland"] 164 | if not pv_on_farmland: 165 | mask = np.isin(land_cover, FARM) & (categories == Eligibility.ONSHORE_WIND_AND_PV) 166 | potential_pv_prio[mask] = 0 167 | 168 | # share-protected-areas-used 169 | use_protected_areas = 
scenario_config["use-protected-areas"] 170 | if not use_protected_areas: 171 | mask = (protected_areas == ProtectedArea.PROTECTED) & (categories != Eligibility.ROOFTOP_PV) 172 | potential_pv_prio[mask] = 0 173 | potential_wind_prio[mask] = 0 174 | 175 | return potential_pv_prio, potential_wind_prio 176 | 177 | 178 | def decide_between_pv_and_wind(potential_pv_prio, potential_wind_prio, 179 | electricity_yield_pv_prio, electricity_yield_wind_prio, 180 | eligibility_categories): 181 | """When both are possible, choose PV when its electricity yield is higher, or vice versa.""" 182 | pv_and_wind_possible = eligibility_categories == Eligibility.ONSHORE_WIND_AND_PV 183 | higher_wind_yield = electricity_yield_pv_prio <= electricity_yield_wind_prio 184 | 185 | potential_pv_prio[pv_and_wind_possible & higher_wind_yield] = 0 186 | potential_wind_prio[pv_and_wind_possible & ~higher_wind_yield] = 0 187 | 188 | return potential_pv_prio, potential_wind_prio 189 | 190 | 191 | def potentials_per_shape(eligibilities, potential_map, eligibility_categories, shapes, transform): 192 | """Determine potential of one eligibility category per shape.""" 193 | potential_map = potential_map.copy() 194 | potential_map[~np.isin(eligibility_categories, eligibilities)] = 0 195 | potentials = zonal_stats( 196 | shapes, 197 | potential_map, 198 | affine=transform, 199 | stats="sum", 200 | nodata=-999 201 | ) 202 | return [stat["sum"] for stat in potentials] 203 | 204 | 205 | if __name__ == "__main__": 206 | potentials( 207 | path_to_units=snakemake.input.units, 208 | path_to_eez=snakemake.input.eez, 209 | path_to_shared_coast=snakemake.input.shared_coast, 210 | path_to_capacities_pv_prio=snakemake.input.get("pv_capacity", None), 211 | path_to_capacities_wind_prio=snakemake.input.get("wind_capacity", None), 212 | path_to_electricity_yield_pv_prio=snakemake.input.pv_yield, 213 | path_to_electricity_yield_wind_prio=snakemake.input.wind_yield, 214 | 
path_to_eligibility_categories=snakemake.input.category, 215 | path_to_land_cover=snakemake.input.land_cover, 216 | path_to_protected_areas=snakemake.input.protected_areas, 217 | scenario_config=snakemake.params.scenario, 218 | metric=snakemake.params.potential_metric, 219 | path_to_result=snakemake.output[0] 220 | ) 221 | -------------------------------------------------------------------------------- /rules/potential.smk: -------------------------------------------------------------------------------- 1 | """Rules to determine the potential of renewables on a raster map and per administrative unit. 2 | 3 | (1) Determine technical potential on a raster map. 4 | (2) Define administrative units. 5 | (3) Based on scenarios, restrict the potential, and allocate it to administrative units. 6 | 7 | """ 8 | 9 | root_dir = config["root-directory"] + "/" if config["root-directory"] not in ["", "."] else "" 10 | script_dir = f"{root_dir}scripts/" 11 | 12 | rule category_of_technical_eligibility: 13 | message: 14 | "Determine upper bound surface eligibility for renewables based on land cover, slope, bathymetry, and settlements." 
15 | input: 16 | src = script_dir + "technical_eligibility.py", 17 | land_cover = rules.land_cover_in_europe.output[0], 18 | slope = rules.slope_in_europe.output[0], 19 | bathymetry = rules.bathymetry_in_europe.output[0], 20 | building_share = rules.settlements.output.buildings, 21 | urban_green_share = rules.settlements.output.urban_greens 22 | params: 23 | max_slope = config["parameters"]["max-slope"], 24 | max_building_share = config["parameters"]["max-building-share"], 25 | max_urban_green_share = config["parameters"]["max-urban-green-share"], 26 | max_depth_offshore = config["parameters"]["max-depth-offshore"] 27 | output: 28 | "build/technically-eligible-land.tif" 29 | conda: "../envs/default.yaml" 30 | script: "../scripts/technical_eligibility.py" 31 | 32 | 33 | rule total_size_swiss_building_footprints_according_to_settlement_data: 34 | message: "Sum the size of building footprints from settlement data." 35 | input: 36 | src = script_dir + "swiss_building_footprints.py", 37 | building_footprints = rules.settlements.output.buildings, 38 | eligibility = "build/technically-eligible-land.tif", 39 | countries = rules.administrative_borders.output[0] 40 | output: 41 | "build/building-footprints-according-to-settlement-data-km2.txt" 42 | conda: "../envs/default.yaml" 43 | script: "../scripts/swiss_building_footprints.py" 44 | 45 | 46 | rule correction_factor_building_footprint_to_available_rooftop: 47 | message: "Determine the factor that maps from building footprints to available rooftop area for CHE." 
48 | input: 49 | rooftops = rules.total_size_swiss_rooftops_according_to_sonnendach_data.output[0], 50 | building_footprints = rules.total_size_swiss_building_footprints_according_to_settlement_data.output[0] 51 | output: 52 | "build/ratio-esm-available.txt" 53 | run: 54 | with open(input.rooftops, "r") as f_in: 55 | rooftops = float(f_in.read()) 56 | with open(input.building_footprints, "r") as f_in: 57 | building_footprints = float(f_in.read()) 58 | ratio = rooftops / building_footprints 59 | with open(output[0], "w") as f_out: 60 | f_out.write(f"{ratio:.3f}") 61 | 62 | 63 | rule capacityfactor_of_technical_eligibility: 64 | message: 65 | "Determine capacityfactor of eligibility category." 66 | input: 67 | script = script_dir + "technically_eligible_capacityfactor.py", 68 | eligibility_categories = rules.category_of_technical_eligibility.output[0], 69 | rooftop_pv_cf = "build/capacityfactors/rooftop-pv-time-average.tif", 70 | open_field_pv_cf = "build/capacityfactors/open-field-pv-time-average.tif", 71 | wind_onshore_cf = "build/capacityfactors/wind-onshore-time-average.tif", 72 | wind_offshore_cf = "build/capacityfactors/wind-offshore-time-average.tif" 73 | params: availability = config["parameters"]["availability"] 74 | output: 75 | pv = "build/technically-eligible-capacityfactor-pv-prio.tif", 76 | wind = "build/technically-eligible-capacityfactor-wind-prio.tif" 77 | conda: "../envs/default.yaml" 78 | script: "../scripts/technically_eligible_capacityfactor.py" 79 | 80 | 81 | rule area_of_technical_eligibility: 82 | message: 83 | "Quantify the area that is technically eligible for renewables." 
84 | input: 85 | script = script_dir + "technically_eligible_area.py", 86 | eligibility_categories = rules.category_of_technical_eligibility.output[0], 87 | building_share = rules.settlements.output.buildings, 88 | rooftop_correction_factor = rules.correction_factor_building_footprint_to_available_rooftop.output[0] 89 | output: 90 | "build/technically-eligible-area-km2.tif" 91 | conda: "../envs/default.yaml" 92 | script: "../scripts/technically_eligible_area.py" 93 | 94 | 95 | rule capacity_of_technical_eligibility: 96 | message: 97 | "Quantify the capacity that is technically eligible for renewables." 98 | input: 99 | script = script_dir + "technically_eligible_capacity.py", 100 | eligibility_categories = rules.category_of_technical_eligibility.output[0], 101 | eligible_areas = rules.area_of_technical_eligibility.output[0], 102 | statistical_roof_model = rules.sonnendach_statistics.output[0] 103 | params: 104 | maximum_installable_power_density = config["parameters"]["maximum-installable-power-density"] 105 | output: 106 | pv = "build/technically-eligible-capacity-pv-prio-mw.tif", 107 | wind = "build/technically-eligible-capacity-wind-prio-mw.tif", 108 | conda: "../envs/default.yaml" 109 | script: "../scripts/technically_eligible_capacity.py" 110 | 111 | 112 | rule electricity_yield_of_technical_eligibility: 113 | message: 114 | "Quantify the max annual electricity yield that is technically eligible for renewables." 
115 | input: 116 | script = script_dir + "technically_eligible_electricity_yield.py", 117 | eligibility_categories = rules.category_of_technical_eligibility.output[0], 118 | capacities_pv_prio = rules.capacity_of_technical_eligibility.output.pv, 119 | capacities_wind_prio = rules.capacity_of_technical_eligibility.output.wind, 120 | cf_pv_prio = rules.capacityfactor_of_technical_eligibility.output.pv, 121 | cf_wind_prio = rules.capacityfactor_of_technical_eligibility.output.wind 122 | output: 123 | pv = "build/technically-eligible-electricity-yield-pv-prio-twh.tif", 124 | wind = "build/technically-eligible-electricity-yield-wind-prio-twh.tif", 125 | conda: "../envs/default.yaml" 126 | script: "../scripts/technically_eligible_electricity_yield.py" 127 | 128 | 129 | rule units: 130 | message: "Form units of layer {wildcards.layer} by remixing NUTS, LAU, and GADM." 131 | input: 132 | script = script_dir + "units.py", 133 | administrative_borders = rules.administrative_borders.output[0] 134 | params: 135 | layer_name = "{layer}", 136 | layer_config = lambda wildcards: config["layers"][wildcards.layer], 137 | countries = config["scope"]["countries"] 138 | output: 139 | "build/{layer}/units.geojson" 140 | conda: "../envs/default.yaml" 141 | script: "../scripts/units.py" 142 | 143 | 144 | rule local_land_cover: 145 | message: "Land cover statistics per unit of layer {wildcards.layer}." 
146 | input: 147 | script = script_dir + "land_cover_stats_to_csv.py", 148 | units = rules.units.output[0], 149 | land_cover = rules.land_cover_in_europe.output[0] 150 | params: 151 | attributes = [ 152 | "lc_11", "lc_14", "lc_20", "lc_30", "lc_40", "lc_50", 153 | "lc_60", "lc_70", "lc_90", "lc_100", "lc_110", "lc_120", 154 | "lc_130", "lc_140", "lc_150", "lc_160", "lc_170", "lc_180", 155 | "lc_190", "lc_200", "lc_210", "lc_220", "lc_230" 156 | ] 157 | output: 158 | "build/{layer}/land-cover.csv" 159 | conda: "../envs/default.yaml" 160 | script: "../scripts/land_cover_stats_to_csv.py" 161 | 162 | 163 | rule local_built_up_area: 164 | message: "Determine the built up area for administrative units in layer {wildcards.layer}." 165 | input: 166 | script = script_dir + "built_up_area.py", 167 | built_up_area = rules.settlements.output.built_up, 168 | units = rules.units.output[0] 169 | output: 170 | "build/{layer}/built-up-areas.csv" 171 | conda: "../envs/default.yaml" 172 | script: "../scripts/built_up_area.py" 173 | 174 | 175 | rule shared_coast: 176 | message: "Determine share of coast length between eez and units of layer {wildcards.layer} using {threads} threads." 177 | input: 178 | script = script_dir + "shared_coast.py", 179 | units = rules.units.output[0], 180 | eez = rules.eez_in_europe.output[0] 181 | output: 182 | "build/{layer}/shared-coast.csv" 183 | threads: config["snakemake"]["max-threads"] 184 | conda: "../envs/default.yaml" 185 | script: "../scripts/shared_coast.py" 186 | 187 | 188 | rule potentials: 189 | message: 190 | "Determine the constrained potentials for layer {wildcards.layer} in scenario {wildcards.scenario}." 
191 | input: 192 | script = script_dir + "potentials.py", 193 | units = rules.units.output[0], 194 | eez = rules.eez_in_europe.output[0], 195 | shared_coast = rules.shared_coast.output[0], 196 | pv_yield = rules.electricity_yield_of_technical_eligibility.output.pv, 197 | wind_yield = rules.electricity_yield_of_technical_eligibility.output.wind, 198 | category = rules.category_of_technical_eligibility.output[0], 199 | land_cover = rules.land_cover_in_europe.output[0], 200 | protected_areas = rules.protected_areas_in_europe.output[0] 201 | params: 202 | scenario = lambda wildcards: config["scenarios"][wildcards.scenario], 203 | potential_metric = "electricity_yield" 204 | output: 205 | "build/{layer}/{scenario}/potentials.csv" 206 | conda: "../envs/default.yaml" 207 | script: "../scripts/potentials.py" 208 | 209 | 210 | rule areas: 211 | message: 212 | "Determine eligible areas for layer {wildcards.layer} in scenario {wildcards.scenario}." 213 | input: 214 | script = script_dir + "areas.py", 215 | units = rules.units.output[0], 216 | eez = rules.eez_in_europe.output[0], 217 | shared_coast = rules.shared_coast.output[0], 218 | area = rules.area_of_technical_eligibility.output[0], 219 | category = rules.category_of_technical_eligibility.output[0], 220 | land_cover = rules.land_cover_in_europe.output[0], 221 | protected_areas = rules.protected_areas_in_europe.output[0] 222 | params: 223 | scenario = lambda wildcards: config["scenarios"][wildcards.scenario] 224 | output: 225 | "build/{layer}/{scenario}/areas.csv" 226 | conda: "../envs/default.yaml" 227 | script: "../scripts/areas.py" 228 | 229 | 230 | rule capacities: 231 | message: 232 | "Determine installable capacities for layer {wildcards.layer} in scenario {wildcards.scenario}." 
233 | input: 234 | script = script_dir + "potentials.py", 235 | units = rules.units.output[0], 236 | eez = rules.eez_in_europe.output[0], 237 | shared_coast = rules.shared_coast.output[0], 238 | pv_capacity = rules.capacity_of_technical_eligibility.output.pv, 239 | wind_capacity = rules.capacity_of_technical_eligibility.output.wind, 240 | pv_yield = rules.electricity_yield_of_technical_eligibility.output.pv, 241 | wind_yield = rules.electricity_yield_of_technical_eligibility.output.wind, 242 | category = rules.category_of_technical_eligibility.output[0], 243 | land_cover = rules.land_cover_in_europe.output[0], 244 | protected_areas = rules.protected_areas_in_europe.output[0] 245 | params: 246 | scenario = lambda wildcards: config["scenarios"][wildcards.scenario], 247 | potential_metric = "electricity_yield" 248 | output: 249 | "build/{layer}/{scenario}/capacities.csv" 250 | conda: "../envs/default.yaml" 251 | script: "../scripts/potentials.py" 252 | -------------------------------------------------------------------------------- /notebooks/handling-missing-shapes-of-protected-areas.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# The missing shapes of protected areas\n", 8 | "\n", 9 | "The WDPA reports all protected areas in Europe, but for some there is no information on their geographic shape. In here I want to assess whether those areas can be neglected, and if not, how to estimate them." 
10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 1, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "import math\n", 19 | "\n", 20 | "import fiona\n", 21 | "import geopandas as gpd\n", 22 | "import pycountry" 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "## The fraction of protected areas with missing shape\n", 30 | "\n", 31 | "First I am assessing the fraction of protected land, for which the shape is missing. The WDPA comes in two databases: one with polygons reporting the protected areas for which shapes are known, and another one with points of the areas for which shapes are not known. I am not too much interested in the count of areas, but the actual land they cover. Hence I will assess that in the following.\n", 32 | "\n", 33 | "The polygon database is nasty: the polygons are complex and operations often take long. Even worse: there is one polygon which seems invalid and `geopandas` fails reading it. I am hence using `fiona` in the following which allows for quick filtering of the data (the invalid one is not in Europe)." 
34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": 2, 39 | "metadata": {}, 40 | "outputs": [], 41 | "source": [ 42 | "COUNTRIES = [\n", 43 | " \"Austria\",\n", 44 | " \"Belgium\",\n", 45 | " \"Bulgaria\",\n", 46 | " \"Croatia\",\n", 47 | " \"Cyprus\",\n", 48 | " \"Czech Republic\",\n", 49 | " \"Denmark\",\n", 50 | " \"Estonia\",\n", 51 | " \"Finland\",\n", 52 | " \"France\",\n", 53 | " \"Germany\",\n", 54 | " \"Greece\",\n", 55 | " \"Hungary\",\n", 56 | " \"Ireland\",\n", 57 | " \"Italy\",\n", 58 | " \"Latvia\",\n", 59 | " \"Lithuania\",\n", 60 | " \"Luxembourg\",\n", 61 | " \"Malta\",\n", 62 | " \"Netherlands\",\n", 63 | " \"Poland\",\n", 64 | " \"Portugal\",\n", 65 | " \"Romania\",\n", 66 | " \"Slovakia\",\n", 67 | " \"Slovenia\",\n", 68 | " \"Spain\",\n", 69 | " \"Sweden\",\n", 70 | " \"United Kingdom\",\n", 71 | " \"Norway\",\n", 72 | " \"Switzerland\"\n", 73 | "]\n", 74 | "\n", 75 | "COUNTRIES_ISO_ALPHA_3 = [pycountry.countries.lookup(country).alpha_3 for country in COUNTRIES]\n", 76 | "BBOX_EUROPE = (-12, 20, 40, 79)" 77 | ] 78 | }, 79 | { 80 | "cell_type": "code", 81 | "execution_count": 3, 82 | "metadata": {}, 83 | "outputs": [], 84 | "source": [ 85 | "with fiona.open(\"../build/raw-wdpa-jan2018/WDPA_Jan2018-shapefile-points.shp\") as f_points:\n", 86 | " rep_area_points = sum([rec[1][\"properties\"][\"REP_AREA\"] \n", 87 | " for rec in f_points.items(bbox=BBOX_EUROPE)\n", 88 | " if rec[1][\"properties\"][\"ISO3\"] in COUNTRIES_ISO_ALPHA_3\n", 89 | " if rec[1][\"properties\"][\"STATUS\"] == 'Designated'\n", 90 | " ])\n", 91 | " rep_m_area_points = sum([rec[1][\"properties\"][\"REP_M_AREA\"] \n", 92 | " for rec in f_points.items(bbox=BBOX_EUROPE) \n", 93 | " if rec[1][\"properties\"][\"ISO3\"] in COUNTRIES_ISO_ALPHA_3\n", 94 | " if rec[1][\"properties\"][\"STATUS\"] == 'Designated'\n", 95 | " ])" 96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": 4, 101 | "metadata": {}, 102 | "outputs": [ 103 | { 104 | "name": 
"stdout", 105 | "output_type": "stream", 106 | "text": [ 107 | "Reported area of points: 176613.22 km^2\n", 108 | "Reported marine area of points: 241.76 km^2\n" 109 | ] 110 | } 111 | ], 112 | "source": [ 113 | "print(\"Reported area of points: {:.2f} km^2\".format(rep_area_points))\n", 114 | "print(\"Reported marine area of points: {:.2f} km^2\".format(rep_m_area_points))" 115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": 5, 120 | "metadata": {}, 121 | "outputs": [], 122 | "source": [ 123 | "with fiona.open(\"../build/raw-wdpa-jan2018/WDPA_Jan2018-shapefile-polygons.shp\") as f_polygon:\n", 124 | " rep_area_polygons = sum([rec[1][\"properties\"][\"REP_AREA\"] \n", 125 | " for rec in f_polygon.items(bbox=BBOX_EUROPE) \n", 126 | " if rec[1][\"properties\"][\"ISO3\"] in COUNTRIES_ISO_ALPHA_3\n", 127 | " if rec[1][\"properties\"][\"STATUS\"] == 'Designated'\n", 128 | " ])\n", 129 | " rep_m_area_polygons = sum([rec[1][\"properties\"][\"REP_M_AREA\"] \n", 130 | " for rec in f_polygon.items(bbox=BBOX_EUROPE)\n", 131 | " if rec[1][\"properties\"][\"ISO3\"] in COUNTRIES_ISO_ALPHA_3\n", 132 | " if rec[1][\"properties\"][\"STATUS\"] == 'Designated'\n", 133 | " ])" 134 | ] 135 | }, 136 | { 137 | "cell_type": "code", 138 | "execution_count": 6, 139 | "metadata": {}, 140 | "outputs": [ 141 | { 142 | "name": "stdout", 143 | "output_type": "stream", 144 | "text": [ 145 | "Reported area of polygons: 2376365.87 km^2\n", 146 | "Reported marine area of polygons: 876706.27 km^2\n" 147 | ] 148 | } 149 | ], 150 | "source": [ 151 | "print(\"Reported area of polygons: {:.2f} km^2\".format(rep_area_polygons))\n", 152 | "print(\"Reported marine area of polygons: {:.2f} km^2\".format(rep_m_area_polygons))" 153 | ] 154 | }, 155 | { 156 | "cell_type": "code", 157 | "execution_count": 7, 158 | "metadata": {}, 159 | "outputs": [ 160 | { 161 | "name": "stdout", 162 | "output_type": "stream", 163 | "text": [ 164 | "Fraction of area reported as points: 6.9%\n" 165 | ] 166 
| } 167 | ], 168 | "source": [ 169 | "fraction_points = rep_area_points / (rep_area_points + rep_area_polygons)\n", 170 | "print(\"Fraction of area reported as points: {:.1f}%\".format(fraction_points * 100))" 171 | ] 172 | }, 173 | { 174 | "cell_type": "code", 175 | "execution_count": 8, 176 | "metadata": {}, 177 | "outputs": [ 178 | { 179 | "name": "stdout", 180 | "output_type": "stream", 181 | "text": [ 182 | "Fraction of marine area reported as points: 0.0%\n" 183 | ] 184 | } 185 | ], 186 | "source": [ 187 | "fraction_m_points = rep_m_area_points / (rep_m_area_points + rep_m_area_polygons)\n", 188 | "print(\"Fraction of marine area reported as points: {:.1f}%\".format(fraction_m_points * 100))" 189 | ] 190 | }, 191 | { 192 | "cell_type": "markdown", 193 | "metadata": {}, 194 | "source": [ 195 | "If I was to ignore the point database, I'd neglect 7% of the protected land in Europe. That's roughly the area of half of the size of Germany. Almost all missing shapes span over land, not marine areas." 196 | ] 197 | }, 198 | { 199 | "cell_type": "markdown", 200 | "metadata": {}, 201 | "source": [ 202 | "## Estimating the shape of areas reported as points" 203 | ] 204 | }, 205 | { 206 | "cell_type": "markdown", 207 | "metadata": {}, 208 | "source": [ 209 | "Let's try to come up with an estimation of the shape of areas that are reported as points. Here's what the manual of WDPA Jan 2018 states about the points:\n", 210 | "\n", 211 | "> Where boundary data is unavailable, the latitude and longitude of the centermost point of the site is requested as a reference point for the protected area instead. Although this is strongly encouraged, data providers are not always able to submit such information. Therefore, it should not be assumed that all points in the WDPA represent a central point of a given site. 
If the protected area is made up of multiple parts, multi- points associated with the central locations of each part of the protected area may be stored instead (see Figure 2.2).\n", 212 | "\n", 213 | "So the points are centroids, or at least should be. A simple approach would be to draw a circle around that centroid with the reported area size and use that as an estimation of the real protected area. A difficulty here would be the areas that are represented by more than one point. Let's find out how many of those there are and how to handle them." 214 | ] 215 | }, 216 | { 217 | "cell_type": "code", 218 | "execution_count": 9, 219 | "metadata": {}, 220 | "outputs": [], 221 | "source": [ 222 | "with fiona.open(\"../build/raw-wdpa-jan2018/WDPA_Jan2018-shapefile-points.shp\") as f_points:\n", 223 | " points_in_europe = list(rec for rec in f_points.items(bbox=BBOX_EUROPE)\n", 224 | " if rec[1][\"properties\"][\"ISO3\"] in COUNTRIES_ISO_ALPHA_3\n", 225 | " )" 226 | ] 227 | }, 228 | { 229 | "cell_type": "code", 230 | "execution_count": 10, 231 | "metadata": {}, 232 | "outputs": [ 233 | { 234 | "name": "stdout", 235 | "output_type": "stream", 236 | "text": [ 237 | "{'MultiPoint'}\n" 238 | ] 239 | } 240 | ], 241 | "source": [ 242 | "types = set(point[1][\"geometry\"][\"type\"] for point in points_in_europe)\n", 243 | "print(types)" 244 | ] 245 | }, 246 | { 247 | "cell_type": "markdown", 248 | "metadata": {}, 249 | "source": [ 250 | "Technically, everything seems to be stored as a `MultiPoint`." 
251 | ] 252 | }, 253 | { 254 | "cell_type": "code", 255 | "execution_count": 11, 256 | "metadata": {}, 257 | "outputs": [ 258 | { 259 | "name": "stdout", 260 | "output_type": "stream", 261 | "text": [ 262 | "{1}\n" 263 | ] 264 | } 265 | ], 266 | "source": [ 267 | "n_coordinates = set(len(point[1][\"geometry\"][\"coordinates\"]) for point in points_in_europe)\n", 268 | "print(n_coordinates)" 269 | ] 270 | }, 271 | { 272 | "cell_type": "markdown", 273 | "metadata": {}, 274 | "source": [ 275 | "In any case, there are no entries in the database that contain more than one point." 276 | ] 277 | }, 278 | { 279 | "cell_type": "markdown", 280 | "metadata": {}, 281 | "source": [ 282 | "### Approach 1: Drawing circles around points" 283 | ] 284 | }, 285 | { 286 | "cell_type": "markdown", 287 | "metadata": {}, 288 | "source": [ 289 | "In the following, I will project to gall peters temporarily, because the coordinate system is equidistant. Every unit represents 1 meter." 290 | ] 291 | }, 292 | { 293 | "cell_type": "code", 294 | "execution_count": 12, 295 | "metadata": {}, 296 | "outputs": [], 297 | "source": [ 298 | "# from http://spatialreference.org/ref/sr-org/22/\n", 299 | "GALL_PETERS_PROJ4 = \"+proj=cea +lon_0=0 +lat_ts=45 +x_0=0 +y_0=0 +ellps=WGS84 +units=m +no_defs \"" 300 | ] 301 | }, 302 | { 303 | "cell_type": "code", 304 | "execution_count": 13, 305 | "metadata": {}, 306 | "outputs": [], 307 | "source": [ 308 | "points = gpd.read_file(\"../build/raw-wdpa-jan2018/WDPA_Jan2018-shapefile-points.shp\")\n", 309 | "points_in_europe = points.cx[-12:40, 20:79].loc[points.ISO3.isin(COUNTRIES_ISO_ALPHA_3)].copy()\n", 310 | "original_crs = points_in_europe.crs" 311 | ] 312 | }, 313 | { 314 | "cell_type": "code", 315 | "execution_count": 14, 316 | "metadata": {}, 317 | "outputs": [], 318 | "source": [ 319 | "# convert points to circles\n", 320 | "points_in_europe = points_in_europe.to_crs(GALL_PETERS_PROJ4)\n", 321 | "def radius_meter(area_squarekilometer):\n", 322 | " 
area_squaremeter = area_squarekilometer * 1e6\n", 323 | " return math.sqrt(area_squaremeter / math.pi)\n", 324 | "points_in_europe.geometry = [rec[1].geometry.buffer(radius_meter(rec[1][\"REP_AREA\"])) \n", 325 | " for rec in points_in_europe.iterrows()]" 326 | ] 327 | }, 328 | { 329 | "cell_type": "code", 330 | "execution_count": 15, 331 | "metadata": {}, 332 | "outputs": [], 333 | "source": [ 334 | "# test area size (error must be smaller 1%)\n", 335 | "area_size_calculated = points_in_europe.area.sum() / 1e6\n", 336 | "area_size_reported = points_in_europe.REP_AREA.sum()\n", 337 | "assert abs(area_size_calculated - area_size_reported) < area_size_reported / 100" 338 | ] 339 | }, 340 | { 341 | "cell_type": "code", 342 | "execution_count": 16, 343 | "metadata": {}, 344 | "outputs": [], 345 | "source": [ 346 | "# project back to original CRS\n", 347 | "points_in_europe = points_in_europe.to_crs(original_crs)" 348 | ] 349 | }, 350 | { 351 | "cell_type": "markdown", 352 | "metadata": {}, 353 | "source": [ 354 | "This approach is simple and pragmatic and its error in terms of total amount of land area is less than 1%." 
355 | ] 356 | }, 357 | { 358 | "cell_type": "code", 359 | "execution_count": null, 360 | "metadata": {}, 361 | "outputs": [], 362 | "source": [] 363 | } 364 | ], 365 | "metadata": { 366 | "kernelspec": { 367 | "display_name": "Python 3", 368 | "language": "python", 369 | "name": "python3" 370 | }, 371 | "language_info": { 372 | "codemirror_mode": { 373 | "name": "ipython", 374 | "version": 3 375 | }, 376 | "file_extension": ".py", 377 | "mimetype": "text/x-python", 378 | "name": "python", 379 | "nbconvert_exporter": "python", 380 | "pygments_lexer": "ipython3", 381 | "version": "3.6.3" 382 | } 383 | }, 384 | "nbformat": 4, 385 | "nbformat_minor": 2 386 | } 387 | -------------------------------------------------------------------------------- /rules/data-preprocessing.smk: -------------------------------------------------------------------------------- 1 | """This is a Snakemake file defining rules to retrieve raw data from online sources.""" 2 | import pycountry 3 | 4 | RESOLUTION_STUDY = (1 / 3600) * 10 # 10 arcseconds 5 | RESOLUTION_SLOPE = (1 / 3600) * 3 # 3 arcseconds 6 | 7 | SRTM_X_MIN = 34 # x coordinate of CGIAR tile raster 8 | SRTM_X_MAX = 44 9 | SRTM_Y_MIN = 1 # y coordinate of CGIAR tile raster 10 | SRTM_Y_MAX = 8 11 | GMTED_Y = ["50N", "70N"] 12 | GMTED_X = ["030W", "000E", "030E"] 13 | 14 | root_dir = config["root-directory"] + "/" if config["root-directory"] not in ["", "."] else "" 15 | script_dir = f"{root_dir}scripts/" 16 | 17 | localrules: raw_gadm_administrative_borders_zipped, raw_protected_areas_zipped, 18 | raw_lau_units_zipped, raw_land_cover_zipped, 19 | raw_land_cover, raw_protected_areas, raw_srtm_elevation_tile_zipped, raw_gmted_elevation_tile, 20 | raw_bathymetry_zipped, raw_bathymetry, raw_gadm_administrative_borders 21 | 22 | 23 | rule raw_gadm_administrative_borders_zipped: 24 | message: "Download administrative borders for {wildcards.country_code} as zip." 
25 | params: url = lambda wildcards: config["data-sources"]["gadm"].format(country_code=wildcards.country_code) 26 | output: protected("data/automatic/raw-gadm/{country_code}.zip") 27 | shell: "curl -sLo {output} '{params.url}'" 28 | 29 | 30 | rule raw_gadm_administrative_borders: 31 | message: "Unzip administrative borders of {wildcards.country_code}." 32 | input: "data/automatic/raw-gadm/{country_code}.zip" 33 | output: temp("build/raw-gadm/gadm36_{country_code}.gpkg") 34 | shell: "unzip -o {input} -d build/raw-gadm" 35 | 36 | 37 | rule all_gadm_administrative_borders: 38 | message: "Merge gadm administrative borders of all countries." 39 | input: 40 | ["build/raw-gadm/gadm36_{}.gpkg".format(country_code) 41 | for country_code in [pycountry.countries.lookup(country).alpha_3 42 | for country in config['scope']['countries']] 43 | ] 44 | output: temp("build/raw-gadm/gadm36.gpkg") 45 | params: crs = config["crs"] 46 | conda: '../envs/default.yaml' 47 | shell: "ogrmerge.py -o {output} -f gpkg -src_layer_field_content '{{LAYER_NAME}}' -t_srs {params.crs} -single {input}" 48 | 49 | 50 | rule raw_nuts_units: 51 | message: "Download NUTS units as GeoJSON." 52 | output: 53 | protected("data/automatic/raw-nuts{}-units.geojson".format(config["parameters"]["nuts-year"])) 54 | params: 55 | url = config["data-sources"]["nuts"].format(nuts_year=config["parameters"]["nuts-year"]) 56 | shell: 57 | "curl -sLo {output} '{params.url}'" 58 | 59 | 60 | rule raw_lau_units_zipped: 61 | message: "Download LAU units as zip." 62 | output: 63 | protected("data/automatic/raw-lau-units.zip") 64 | params: url = config["data-sources"]["lau"] 65 | shell: 66 | "curl -sLo {output} '{params.url}'" 67 | 68 | 69 | rule raw_lau_units_unzipped: 70 | message: "Unzip LAU units." 
71 | input: 72 | zip = rules.raw_lau_units_zipped.output 73 | output: 74 | shapes = "build/raw-lau-units/COMM_RG_01M_2013.shp", 75 | attributes = "build/raw-lau-units/COMM_AT_2013.dbf" 76 | shell: "unzip -j {input.zip} -d build/raw-lau-units" 77 | 78 | 79 | rule administrative_borders_lau: 80 | message: "Normalise LAU administrative borders." 81 | input: 82 | src = script_dir + "lau.py", 83 | shapes = rules.raw_lau_units_unzipped.output.shapes, 84 | attributes = rules.raw_lau_units_unzipped.output.attributes, 85 | output: 86 | temp("build/raw-lau.gpkg") 87 | shadow: "full" 88 | conda: "../envs/default.yaml" 89 | script: "../scripts/lau.py" 90 | 91 | 92 | rule administrative_borders: 93 | message: "Normalise all administrative borders." 94 | input: 95 | src = script_dir + "administrative_borders.py", 96 | nuts_geojson = rules.raw_nuts_units.output[0], 97 | gadm_gpkg = rules.all_gadm_administrative_borders.output[0], 98 | lau_gpkg = rules.administrative_borders_lau.output[0] 99 | params: 100 | crs = config["crs"], 101 | scope = config["scope"] 102 | output: 103 | "build/administrative-borders.gpkg" 104 | shadow: "full" 105 | conda: "../envs/default.yaml" 106 | script: "../scripts/administrative_borders.py" 107 | 108 | 109 | rule raw_land_cover_zipped: 110 | message: "Download land cover data as zip." 111 | output: protected("data/automatic/raw-globcover2009.zip") 112 | params: url = config["data-sources"]["land_cover"] 113 | shell: "curl -sLo {output} '{params.url}'" 114 | 115 | 116 | rule raw_land_cover: 117 | message: "Extract land cover data as zip." 118 | input: rules.raw_land_cover_zipped.output 119 | output: temp("build/GLOBCOVER_L4_200901_200912_V2.3.tif") 120 | shadow: "minimal" 121 | shell: "unzip {input} -d ./build/" 122 | 123 | 124 | rule raw_protected_areas_zipped: 125 | message: "Download protected areas data as zip." 
126 | output: protected("data/automatic/raw-wdpa.zip") 127 | params: url = config["data-sources"]["protected_areas"] 128 | shell: "curl -sLo {output} -H 'Referer: {params.url}' {params.url}" 129 | 130 | 131 | rule raw_protected_areas: 132 | message: "Extract protected areas data as zip." 133 | input: rules.raw_protected_areas_zipped.output 134 | output: 135 | polygons = "build/raw-wdpa-feb2019/WDPA_Feb2019-shapefile-polygons.shp", 136 | polygon_data = "build/raw-wdpa-feb2019/WDPA_Feb2019-shapefile-polygons.dbf", 137 | points = "build/raw-wdpa-feb2019/WDPA_Feb2019-shapefile-points.shp" 138 | shell: "unzip -o {input} -d build/raw-wdpa-feb2019" 139 | 140 | 141 | rule raw_srtm_elevation_tile_zipped: 142 | message: "Download SRTM elevation data tile (x={wildcards.x}, y={wildcards.y}) from CGIAR." 143 | output: protected("data/automatic/raw-srtm/srtm_{x}_{y}.zip") 144 | params: url = config["data-sources"]["cgiar_tile"] 145 | shell: "curl -sLo {output} '{params.url}/srtm_{wildcards.x}_{wildcards.y}.zip'" 146 | 147 | 148 | rule raw_srtm_elevation_tile: 149 | message: "Unzip SRTM elevation data tile (x={wildcards.x}, y={wildcards.y})." 150 | input: 151 | "data/automatic/raw-srtm/srtm_{x}_{y}.zip" 152 | output: 153 | temp("build/srtm_{x}_{y}.tif") 154 | run: # using Python here, because "unzip" issues a warning sometimes causing snakemake to break 155 | import zipfile 156 | from pathlib import Path 157 | 158 | file_to_extract = Path(input[0]).with_suffix(".tif").name 159 | with zipfile.ZipFile(input[0], "r") as zipref: 160 | zipref.extract(file_to_extract, path="build") 161 | 162 | 163 | rule raw_srtm_elevation_data: 164 | message: "Merge all SRTM elevation data tiles." 
165 | input: 166 | ["build/srtm_{x:02d}_{y:02d}.tif".format(x=x, y=y) 167 | for x in range(SRTM_X_MIN, SRTM_X_MAX + 1) 168 | for y in range(SRTM_Y_MIN, SRTM_Y_MAX + 1) 169 | if not (x is 34 and y in [3, 4, 5, 6])] # these tiles do not exist 170 | output: 171 | temp("build/raw-srtm-elevation-data.tif") 172 | conda: "../envs/default.yaml" 173 | shell: 174 | "rio merge {input} {output} --overwrite" 175 | 176 | 177 | rule raw_gmted_elevation_tile: 178 | message: "Download GMTED elevation data tile." 179 | output: 180 | protected("data/automatic/raw-gmted/raw-gmted-{y}-{x}.tif") 181 | run: 182 | url = "{base_url}/{x_inverse}/{y}{x}_20101117_gmted_mea075.tif".format(**{ 183 | "base_url": config["data-sources"]["gmted_tile"], 184 | "x": wildcards.x, 185 | "y": wildcards.y, 186 | "x_inverse": wildcards.x[-1] + wildcards.x[:-1] 187 | }) 188 | shell("curl -sLo {output} '{url}'".format(**{"url": url, "output": output})) 189 | 190 | 191 | rule raw_gmted_elevation_data: 192 | message: "Merge all GMTED elevation data tiles." 193 | input: 194 | ["data/automatic/raw-gmted/raw-gmted-{y}-{x}.tif".format(x=x, y=y) 195 | for x in GMTED_X 196 | for y in GMTED_Y 197 | ] 198 | output: 199 | temp("build/raw-gmted-elevation-data.tif") 200 | conda: "../envs/default.yaml" 201 | shell: 202 | "rio merge {input} {output} --overwrite" 203 | 204 | 205 | rule raw_bathymetry_zipped: 206 | message: "Download bathymetric data as zip." 207 | output: protected("data/automatic/raw-bathymetric.zip") 208 | params: url = config["data-sources"]["bathymetric"] 209 | shell: "curl -sLo {output} '{params.url}'" 210 | 211 | 212 | rule raw_bathymetry: 213 | message: "Extract bathymetric data from zip." 214 | input: rules.raw_bathymetry_zipped.output[0] 215 | output: temp("build/ETOPO1_Bed_g_geotiff.tif") 216 | shell: "unzip {input} -d ./build/" 217 | 218 | 219 | rule elevation_in_europe: 220 | message: "Merge SRTM and GMTED elevation data and warp/clip to Europe using {threads} threads." 
221 | input: 222 | gmted = rules.raw_gmted_elevation_data.output, 223 | srtm = rules.raw_srtm_elevation_data.output 224 | output: 225 | temp("build/elevation-europe.tif") 226 | params: 227 | srtm_bounds = "{x_min},{y_min},{x_max},60".format(**config["scope"]["bounds"]), 228 | gmted_bounds = "{x_min},59.5,{x_max},{y_max}".format(**config["scope"]["bounds"]) 229 | threads: config["snakemake"]["max-threads"] 230 | conda: "../envs/default.yaml" 231 | shell: 232 | """ 233 | rio clip --bounds {params.srtm_bounds} {input.srtm} -o build/tmp-srtm.tif 234 | rio clip --bounds {params.gmted_bounds} {input.gmted} -o build/tmp-gmted.tif 235 | rio warp build/tmp-gmted.tif -o build/tmp-gmted2.tif -r {RESOLUTION_SLOPE} \ 236 | --resampling nearest --threads {threads} 237 | rio merge build/tmp-srtm.tif build/tmp-gmted2.tif {output} 238 | rm build/tmp-gmted.tif 239 | rm build/tmp-gmted2.tif 240 | rm build/tmp-srtm.tif 241 | """ 242 | 243 | 244 | rule land_cover_in_europe: 245 | message: "Clip land cover data to Europe." 246 | input: rules.raw_land_cover.output 247 | output: "build/land-cover-europe.tif" 248 | params: bounds = "{x_min},{y_min},{x_max},{y_max}".format(**config["scope"]["bounds"]) 249 | conda: "../envs/default.yaml" 250 | shell: "rio clip {input} {output} --bounds {params.bounds}" 251 | 252 | 253 | rule slope_in_europe: 254 | message: "Calculate slope and warp to resolution of study using {threads} threads." 
255 | input: 256 | elevation = rules.elevation_in_europe.output, 257 | land_cover = rules.land_cover_in_europe.output 258 | output: 259 | "build/slope-europe.tif" 260 | threads: config["snakemake"]["max-threads"] 261 | conda: "../envs/default.yaml" 262 | shell: 263 | """ 264 | gdaldem slope -s 111120 -compute_edges {input.elevation} build/slope-temp.tif 265 | rio warp build/slope-temp.tif -o {output} --like {input.land_cover} \ 266 | --resampling max --threads {threads} 267 | rm build/slope-temp.tif 268 | """ 269 | 270 | 271 | rule protected_areas_points_to_circles: 272 | message: "Estimate shape of protected areas available as points only." 273 | input: 274 | script_dir + "estimate_protected_shapes.py", 275 | protected_areas= rules.raw_protected_areas.output.points 276 | params: 277 | scope = config["scope"] 278 | output: 279 | temp("build/protected-areas-points-as-circles.geojson") 280 | conda: "../envs/default.yaml" 281 | script: "../scripts/estimate_protected_shapes.py" 282 | 283 | 284 | rule protected_areas_in_europe: 285 | message: "Rasterise protected areas data and clip to Europe." 
286 | input: 287 | polygons = rules.raw_protected_areas.output.polygons, 288 | points = rules.protected_areas_points_to_circles.output, 289 | land_cover = rules.land_cover_in_europe.output 290 | output: 291 | "build/protected-areas-europe.tif" 292 | benchmark: 293 | "build/rasterisation-benchmark.txt" 294 | params: 295 | bounds = "{x_min},{y_min},{x_max},{y_max}".format(**config["scope"]["bounds"]) 296 | conda: "../envs/default.yaml" 297 | shell: 298 | # The filter is in accordance to the way UNEP-WCMC calculates statistics: 299 | # https://www.protectedplanet.net/c/calculating-protected-area-coverage 300 | """ 301 | fio cat --rs --bbox {params.bounds} {input.polygons} {input.points} | \ 302 | fio filter "f.properties.STATUS in ['Designated', 'Inscribed', 'Established'] and \ 303 | f.properties.DESIG_ENG != 'UNESCO-MAB Biosphere Reserve'" | \ 304 | fio collect --record-buffered | \ 305 | rio rasterize --like {input.land_cover} \ 306 | --default-value 255 --all_touched -f "GTiff" --co dtype=uint8 -o {output} 307 | """ 308 | 309 | 310 | rule settlements: 311 | message: "Warp settlement data to CRS of study using {threads} threads." 
312 | input: 313 | class50 = config["data-sources"]["settlement_data"].format(esm_class="50"), 314 | class40 = config["data-sources"]["settlement_data"].format(esm_class="40"), 315 | class41 = config["data-sources"]["settlement_data"].format(esm_class="41"), 316 | class45 = config["data-sources"]["settlement_data"].format(esm_class="45"), 317 | class30 = config["data-sources"]["settlement_data"].format(esm_class="30"), 318 | class35 = config["data-sources"]["settlement_data"].format(esm_class="35"), 319 | reference = rules.land_cover_in_europe.output[0] 320 | output: 321 | buildings = "build/esm-class50-buildings.tif", 322 | urban_greens = "build/esm-class404145-urban-greens.tif", 323 | built_up = "build/esm-class303550-built-up.tif" 324 | threads: config["snakemake"]["max-threads"] 325 | shadow: "minimal" 326 | conda: "../envs/default.yaml" 327 | shell: 328 | """ 329 | rio calc "(+ (+ (read 1) (read 2)) (read 3))" \ 330 | {input.class40} {input.class41} {input.class45} -o build/esm-class404145-temp-not-warped.tif 331 | rio calc "(+ (+ (read 1) (read 2)) (read 3))" \ 332 | {input.class50} {input.class30} {input.class35} -o build/esm-class303550-temp-not-warped.tif 333 | rio warp {input.class50} -o {output.buildings} \ 334 | --like {input.reference} --threads {threads} --resampling bilinear 335 | rio warp build/esm-class404145-temp-not-warped.tif -o {output.urban_greens} \ 336 | --like {input.reference} --threads {threads} --resampling bilinear 337 | rio warp build/esm-class303550-temp-not-warped.tif -o {output.built_up} \ 338 | --like {input.reference} --threads {threads} --resampling bilinear 339 | """ 340 | 341 | 342 | rule bathymetry_in_europe: 343 | message: "Clip bathymetric data to study area and warp to study resolution." 
344 | input: 345 | bathymetry = rules.raw_bathymetry.output, 346 | reference = rules.land_cover_in_europe.output 347 | output: 348 | "build/bathymetry-in-europe.tif" 349 | conda: "../envs/default.yaml" 350 | shell: "rio warp {input.bathymetry} -o {output} --like {input.reference} --resampling min" 351 | 352 | 353 | rule eez_in_europe: 354 | message: "Clip exclusive economic zones to study area." 355 | input: config["data-sources"]["eez_data"] 356 | output: "build/eez-in-europe.geojson" 357 | params: 358 | bounds="{x_min},{y_min},{x_max},{y_max}".format(**config["scope"]["bounds"]), 359 | countries=",".join(["'{}'".format(country) for country in config["scope"]["countries"]]) 360 | conda: "../envs/default.yaml" 361 | shell: 362 | """ 363 | fio cat --bbox {params.bounds} {input}\ 364 | | fio filter "f.properties.Territory1 in [{params.countries}]"\ 365 | | fio collect > {output} 366 | """ 367 | --------------------------------------------------------------------------------