├── logs └── cluster │ └── .keep ├── data ├── technology_costs.xlsx ├── supply_regions │ ├── centroids.shp │ ├── centroids.shx │ ├── supply_regions.shp │ ├── supply_regions.shx │ ├── centroids.prj │ ├── supply_regions.prj │ ├── centroids.qpj │ ├── supply_regions.qpj │ ├── centroids.dbf │ └── supply_regions.dbf ├── Existing Power Stations SA.xlsx ├── EIA_hydro_generation_2011_2014.csv └── num_lines.csv ├── imgs └── network_csir-moderate_redz_E_LC_p_nom_ext.png ├── .gitignore ├── cluster.yaml ├── snakemake_cluster ├── scripts ├── build_landuse_remove_protected_and_conservation_areas.py ├── build_population.py ├── build_inflow_per_country.py ├── plot_comparisons.py ├── scenario_comparison.py ├── base_network.py ├── build_landuse_map_to_tech_and_supply_region.py ├── build_topology.py ├── _helpers.py ├── extract_summaries.py ├── solve_network.py ├── add_sectors.py ├── plot_network.py ├── add_electricity.py └── make_summary.py ├── environment.yaml ├── resources ├── area_solar_redz.csv ├── area_wind_redz.csv ├── area_wind_corridors.csv └── area_solar_corridors.csv ├── report.css ├── README.md ├── config.yaml └── Snakefile /logs/cluster/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /data/technology_costs.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PyPSA/pypsa-za/HEAD/data/technology_costs.xlsx -------------------------------------------------------------------------------- /data/supply_regions/centroids.shp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PyPSA/pypsa-za/HEAD/data/supply_regions/centroids.shp -------------------------------------------------------------------------------- /data/supply_regions/centroids.shx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PyPSA/pypsa-za/HEAD/data/supply_regions/centroids.shx -------------------------------------------------------------------------------- /data/Existing Power Stations SA.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PyPSA/pypsa-za/HEAD/data/Existing Power Stations SA.xlsx -------------------------------------------------------------------------------- /data/supply_regions/supply_regions.shp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PyPSA/pypsa-za/HEAD/data/supply_regions/supply_regions.shp -------------------------------------------------------------------------------- /data/supply_regions/supply_regions.shx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PyPSA/pypsa-za/HEAD/data/supply_regions/supply_regions.shx -------------------------------------------------------------------------------- /imgs/network_csir-moderate_redz_E_LC_p_nom_ext.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PyPSA/pypsa-za/HEAD/imgs/network_csir-moderate_redz_E_LC_p_nom_ext.png -------------------------------------------------------------------------------- /data/supply_regions/centroids.prj: -------------------------------------------------------------------------------- 1 | 
GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]] -------------------------------------------------------------------------------- /data/supply_regions/supply_regions.prj: -------------------------------------------------------------------------------- 1 | GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]] -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Snakemake internals 2 | .snakemake* 3 | __pycache__ 4 | 5 | # Directory structure 6 | networks/* 7 | resources/* 8 | results/* 9 | 10 | benchmarks/* 11 | logs/* 12 | 13 | data/* 14 | -------------------------------------------------------------------------------- /cluster.yaml: -------------------------------------------------------------------------------- 1 | __default__: 2 | partition: x-men 3 | name: "pypsa-za.{rule}.{wildcards}" 4 | output: "logs/cluster/{rule}.{wildcards}.out" 5 | error: "logs/cluster/{rule}.{wildcards}.err" 6 | -------------------------------------------------------------------------------- /snakemake_cluster: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | snakemake --cluster-config cluster.yaml --cluster "sbatch --parsable -J '{cluster.name}' -p {cluster.partition} -n 1 --cpus-per-task {threads} -o '{cluster.output}' --mem {resources.mem_mb}" "$@" 4 | 5 | -------------------------------------------------------------------------------- /data/supply_regions/centroids.qpj: -------------------------------------------------------------------------------- 1 | GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]] 2 | -------------------------------------------------------------------------------- /data/supply_regions/supply_regions.qpj: -------------------------------------------------------------------------------- 1 | GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]] 2 | -------------------------------------------------------------------------------- /scripts/build_landuse_remove_protected_and_conservation_areas.py: -------------------------------------------------------------------------------- 1 | import rasterio, rasterio.features 2 | import geopandas as gpd 3 | import os 4 | 5 | src = rasterio.open(snakemake.input.landuse) 6 | data = src.read(1) 7 | 8 | for n in ('protected_areas', 'conservation_areas'): 9 | area_dir = snakemake.input[n] 10 | 11 | sh = gpd.read_file(area_dir).to_crs(src.crs) 12 | rasterio.features.rasterize(sh['geometry'], out=data, transform=src.transform, default_value=0) 13 | 14 | with rasterio.open(snakemake.output[0], 'w', **src.meta) as dst: 15 | dst.write_band(1, data) 16 | -------------------------------------------------------------------------------- /scripts/build_population.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | import pandas as pd 4 | import 
rasterstats
5 | import geopandas as gpd
6 | 
7 | def build_population():
8 |     ## Read in regions and calculate population per region
9 |     regions = gpd.read_file(snakemake.input.supply_regions)[['name', 'geometry']]
10 | 
11 |     population = pd.DataFrame(rasterstats.zonal_stats(regions['geometry'], snakemake.input.population, stats='sum'))['sum']
12 |     population.index = regions['name']
13 |     return population
14 | 
15 | if __name__ == "__main__":
16 |     pop = build_population()
17 |     pop.to_csv(snakemake.output[0], header=['population'])
18 | 
--------------------------------------------------------------------------------
/scripts/build_inflow_per_country.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import atlite
3 | 
4 | from vresutils import shapes as vshapes
5 | from vresutils import hydro as vhydro
6 | 
7 | countries = snakemake.config['hydro_inflow']['countries']
8 | 
9 | cutout = atlite.Cutout(snakemake.config['hydro_inflow']['cutout'])
10 | shapes = pd.Series(vshapes.countries(countries))
11 | shapes.index.rename('countries', inplace=True)
12 | 
13 | annual_hydro = vhydro.get_eia_annual_hydro_generation(snakemake.input.EIA_hydro_gen).reindex(columns=countries)
14 | 
15 | inflow = cutout.runoff(shapes=shapes,
16 |                        smooth=True,
17 |                        lower_threshold_quantile=True,
18 |                        normalize_using_yearly=annual_hydro)
19 | 
20 | inflow.transpose('time', 'countries').to_pandas().to_csv(snakemake.output[0])
21 | 
--------------------------------------------------------------------------------
/environment.yaml:
--------------------------------------------------------------------------------
1 | name: pypsa-za
2 | channels:
3 |   - conda-forge
4 |   - bioconda
5 | dependencies:
6 |   - python
7 |   - six
8 |   - snakemake
9 |   - numpy
10 |   - pyomo
11 |   - scipy
12 |   - pandas>=0.22.0
13 |   - xlrd
14 |   - matplotlib
15 |   - seaborn
16 |   - networkx>=1.10
17 |   - netcdf4
18 |   - xarray
19 | 
20 |   # Include ipython so that one does not inadvertently drop out of the conda
21 |   # environment by calling ipython
22 |   - ipython
23 | 
24 |   # GIS dependencies all have to come from conda-forge
25 |   - conda-forge::libgdal
26 |   - conda-forge::fiona
27 |   - conda-forge::pyproj
28 |   - conda-forge::pyshp
29 |   - conda-forge::geopandas
30 |   - conda-forge::rasterstats
31 |   - conda-forge::rasterio
32 |   - conda-forge::shapely
33 | 
34 |   - pip:
35 |     - pypsa>=0.13
36 |     - vresutils>=0.2.4
37 |     - countrycode
38 | 
--------------------------------------------------------------------------------
/scripts/plot_comparisons.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pandas as pd
3 | import matplotlib.pyplot as plt
4 | from itertools import product
5 | from operator import itemgetter
6 | 
7 | opts = snakemake.config['plotting']
8 | scenarios = snakemake.config['scenario'].copy()
9 | nice_names = opts.get('nice_names', {})
10 | 
11 | plot_dir = snakemake.params.plot_dir
12 | param = snakemake.wildcards.param
13 | param_values = scenarios.pop(snakemake.wildcards.param)
14 | 
15 | cost_df = (pd.read_csv(snakemake.input.costs2,
16 |                        index_col=[0,1,2],
17 |                        header=list(range(len(scenarios))))
18 |            .reset_index(level=0, drop=True))
19 | #cost_df.index.rename(['components', 'capmarg', 'tech'], inplace=True)
20 | cost_df = (cost_df.loc['capital']
21 |            .add(cost_df.loc['marginal']
22 |                 .rename({s: s+' marginal' for s in opts['conv_techs']}),
23 |                 fill_value=0.))
24 | 
25 | tmpl = snakemake.params.tmpl.replace('[', '{').replace(']', '}')
26 | for vals in product(*scenarios.values()):
27 |     sc = dict(zip(scenarios.keys(), vals))
28 |     sc[param] = "-"
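    # The filename template is passed in via snakemake.params.tmpl with "["
    # and "]" standing in for str.format braces; the parameter being compared
    # is marked with "-" in the output filename.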
29 |     fn = tmpl.format(**sc)
30 |     c = cost_df.xs(key=vals, level=list(scenarios.keys()), axis=1)
31 |     fig, ax = plt.subplots()
32 |     c.rename(nice_names).plot.bar(stacked=True, color=itemgetter(*c.index)(opts['tech_colors']), ax=ax)
33 |     for ext in snakemake.params.exts:
34 |         fig.savefig(os.path.join(plot_dir, fn + '.' + ext))
35 |     plt.close(fig)
--------------------------------------------------------------------------------
/resources/area_solar_redz.csv:
--------------------------------------------------------------------------------
1 | name,area_ratio,area,available_area
2 | NAMAQUALAND,0.11438895226682712,98968.23226777789,11320.872396811104
3 | WEST COAST,0.0,69720.09274914216,0.0
4 | PENINSULA,0.016727065625813064,11716.319177763226,195.97963977941762
5 | SOUTHERN CAPE,0.06781593981255084,109653.41050147737,7436.249086809119
6 | PORT ELIZABETH,0.044563351925686376,80714.14512137562,3596.8928544247838
7 | KAROO,0.022920606617093657,151464.45581792606,3471.657208274846
8 | KIMBERLEY,0.11726653172913137,143240.81678140012,16797.35378600275
9 | CARLETONVILLE,0.005929553626346865,49480.7323568361,293.39865598077614
10 | WELKOM,0.008266022694181769,28141.594562120055,232.61905930094665
11 | BLOEMFONTEIN,0.03227913255899179,56075.03914380806,1810.0536217736342
12 | EAST LONDON,0.0988167895287066,71892.17393704964,7104.1538206986
13 | PINETOWN,0.0,45833.68945969081,0.0
14 | LADYSMITH,0.0,13421.066504924082,0.0
15 | VAAL TRIANGLE,0.0,2434.3540144273716,0.0
16 | WESTRAND,0.0,2191.170956247343,0.0
17 | RUSTENBURG,0.0,20426.71510538566,0.0
18 | WATERBERG,0.0,23611.98769252036,0.0
19 | POLOKWANE,0.0,82110.500547676,0.0
20 | WARMBAD,0.0,5703.630275002548,0.0
21 | PRETORIA,0.0,7092.809193499261,0.0
22 | JOHANNESBURG,0.0,2143.9330366380004,0.0
23 | NIGEL,0.0,4547.194032774516,0.0
24 | HIGHVELD SOUTH,0.0,17682.86107567371,0.0
25 | HIGHVELD NORTH,0.0,15490.080167379982,0.0
26 | LOWVELD,0.0,50627.237625866066,0.0
27 | NEWCASTLE,0.0,30569.79776041492,0.0
28 | EMPANGENI,0.0,26293.868785056216,0.0
29 | 
--------------------------------------------------------------------------------
/resources/area_wind_redz.csv:
--------------------------------------------------------------------------------
1 | name,area_ratio,area,available_area
2 | NAMAQUALAND,0.11438895226682712,98968.23226777789,11320.872396811104
3 | WEST COAST,0.0,69720.09274914216,0.0
4 | PENINSULA,0.016727065625813064,11716.319177763226,195.97963977941762
5 | SOUTHERN CAPE,0.06781593981255084,109653.41050147737,7436.249086809119
6 | PORT ELIZABETH,0.044563351925686376,80714.14512137562,3596.8928544247838
7 | KAROO,0.022920606617093657,151464.45581792606,3471.657208274846
8 | KIMBERLEY,0.11726653172913137,143240.81678140012,16797.35378600275
9 | CARLETONVILLE,0.005929553626346865,49480.7323568361,293.39865598077614
10 | WELKOM,0.008266022694181769,28141.594562120055,232.61905930094665
11 | BLOEMFONTEIN,0.03227913255899179,56075.03914380806,1810.0536217736342
12 | EAST LONDON,0.0988167895287066,71892.17393704964,7104.1538206986
13 | PINETOWN,0.0,45833.68945969081,0.0
14 | LADYSMITH,0.0,13421.066504924082,0.0
15 | VAAL TRIANGLE,0.0,2434.3540144273716,0.0
16 | WESTRAND,0.0,2191.170956247343,0.0
17 | RUSTENBURG,0.0,20426.71510538566,0.0
18 | WATERBERG,0.0,23611.98769252036,0.0
19 | POLOKWANE,0.0,82110.500547676,0.0
20 | WARMBAD,0.0,5703.630275002548,0.0
21 | PRETORIA,0.0,7092.809193499261,0.0
22 | JOHANNESBURG,0.0,2143.9330366380004,0.0
23 | NIGEL,0.0,4547.194032774516,0.0
24 | HIGHVELD SOUTH,0.0,17682.86107567371,0.0
25 | HIGHVELD NORTH,0.0,15490.080167379982,0.0
26 | 
LOWVELD,0.0,50627.237625866066,0.0 27 | NEWCASTLE,0.0,30569.79776041492,0.0 28 | EMPANGENI,0.0,26293.868785056216,0.0 29 | -------------------------------------------------------------------------------- /data/EIA_hydro_generation_2011_2014.csv: -------------------------------------------------------------------------------- 1 | "International_data" 2 | "https://www.eia.gov/beta/international/data/browser/#/?pa=000000000000000000000000000000g&c=20000040000000000000000000g004500000000000040000003g&ct=0&ug=8&tl_id=2-A&vs=INTL.33-12-ZAF-BKWH.A&cy=2014&vo=0&v=H&start=2000&end=2014" 3 | "Fri Aug 18 2017 19:05:28 GMT+0200 (CEST)" 4 | "Source: U.S. Energy Information Administration" 5 | ,,,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014 6 | "","Electricity","Billion Kwh" 7 | "","Generation","" 8 | "","Renewables","" 9 | "","Hydroelectricity","" 10 | "","Angola","Billion Kwh",0.9,1,1.1,1.2,1.7,2.197,2.638,2.472,3.103,3.063,3.666,3.967,3.734,4.719,4.991 11 | "","Botswana","Billion Kwh",,,,,,,,,,,,,,, 12 | "","Lesotho","Billion Kwh",0.3,0.3,0.3,0.3,0.3,0.459,0.525,0.554,0.584,0.648,0.694,0.63,0.486,0.5,0.5 13 | "","Malawi","Billion Kwh",1,1,1.1,1.1,1.3,1.307,1.339,1.367,1.521,1.488,1.735,1.809,1.9,1.83,2.05 14 | "","Mozambique","Billion Kwh",9.6,12,13,11,12,13.131,14.57,15.902,14.963,16.781,16.481,16.642,14.994,14.401,16.013 15 | "","Namibia","Billion Kwh",1.4,1.4,1.4,1.5,1.6,1.641,1.59,1.56,1.476,1.395,1.235,1.39,1.591,1.259,1.47 16 | "","South Africa","Billion Kwh",1.3,2.1,2.4,0.8,0.9,1.319,2.869,0.859,1.191,1.386,2.093,2.036,1.193,1.147,0.965 17 | "","Swaziland","Billion Kwh",0.2,0.2,0.2,0.1,0.1,0.157,0.154,0.171,0.157,0.202,0.242,0.28,0.228,0.201,0.254 18 | "","Zambia","Billion Kwh",7.7,7.8,8,8.2,8.4,8.794,9.572,9.535,9.427,9.782,10.331,11.368,12.227,13.148,13.902 19 | "","Zimbabwe","Billion Kwh",3.2,3,3.8,5.3,5.5,4.866,5.257,5.329,5.651,5.403,5.741,5.149,5.336,4.946,5.377 -------------------------------------------------------------------------------- /scripts/scenario_comparison.py: -------------------------------------------------------------------------------- 1 | import os 2 | from itertools import product 3 | from snakemake.utils import report 4 | from snakemake.report import data_uri 5 | from textwrap import dedent 6 | 7 | param = snakemake.wildcards.param 8 | plot_dir = snakemake.params.plot_dir 9 | 10 | text = dedent(''' 11 | =========================== 12 | Comparison of param {param} 13 | =========================== 14 | 15 | .. contents:: Table of Contents 16 | 17 | ''').format(param=param) 18 | 19 | scenarios = snakemake.config['scenario'].copy() 20 | param_values = scenarios.pop(snakemake.wildcards.param) 21 | scenarios['attr'] = ['p_nom'] 22 | tmpl = snakemake.params.tmpl.replace('[', '{').replace(']', '}') 23 | 24 | links = {} 25 | for vals in product(*scenarios.values()): 26 | sc = dict(zip(scenarios.keys(), vals)) 27 | headline = ", ".join("{}={}".format(k,v) for k, v in sc.items()) 28 | text += headline + "\n" + '-' * len(headline) + "\n\n" 29 | for p in param_values: 30 | sc[param] = p 31 | fn = tmpl.format(**sc) 32 | #links[fn] = [os.path.join(plot_dir, fn + '.pdf')] 33 | text += dedent(''' 34 | .. figure:: {data} 35 | :scale: 50 % 36 | 37 | {param} = {value} 38 | 39 | ''').format(param=param, value=p, link=fn, 40 | data=data_uri(os.path.join(plot_dir, fn + '.png'))) 41 | 42 | # text += dedent(''' 43 | 44 | # Attachments 45 | # ----------- 46 | # ''') 47 | 48 | # text += '{} = {}: {}_\n\n.. 
image:: {}\n\n'.format(wildcards.param, p, fn, data_uri('results/plots/' + fn + '.png')) 49 | 50 | report(text=text, path=snakemake.output.html, stylesheet='report.css', **links) 51 | -------------------------------------------------------------------------------- /resources/area_wind_corridors.csv: -------------------------------------------------------------------------------- 1 | name,area_ratio,area,available_area 2 | NAMAQUALAND,0.35638356042092106,98968.23226777789,35270.65098415537 3 | WEST COAST,0.34597697860800763,69720.09274914216,24121.547037618264 4 | PENINSULA,0.06003737221164293,11716.319177763226,703.4170154257811 5 | SOUTHERN CAPE,0.20952439713949186,109653.41050147737,22975.064729611273 6 | PORT ELIZABETH,0.2644311706905863,80714.14512137562,21343.335885735232 7 | KAROO,0.23693173531960857,151464.45581792606,35886.736356181405 8 | KIMBERLEY,0.26526916702600106,143240.81678140012,37997.37215172604 9 | CARLETONVILLE,0.281201165790861,49480.7323568361,13914.03962292789 10 | WELKOM,0.14313281113479803,28141.594562120055,4027.985539491989 11 | BLOEMFONTEIN,0.05349670895872385,56075.03914380806,2999.830048925347 12 | EAST LONDON,0.2662668981968016,71892.17393704964,19142.50615884315 13 | PINETOWN,0.12421645849221946,45833.68945969081,5693.29858431496 14 | LADYSMITH,0.0,13421.066504924082,0.0 15 | VAAL TRIANGLE,0.22291622964399255,2434.3540144273716,542.6570185148671 16 | WESTRAND,0.2458276525528472,2191.170956247343,538.6504125162618 17 | RUSTENBURG,5.367213221129581e-05,20426.71510538566,1.0963453537787324 18 | WATERBERG,0.0,23611.98769252036,0.0 19 | POLOKWANE,0.013904949001036226,82110.500547676,1141.7423225649918 20 | WARMBAD,0.00784444780670763,5703.630275002548,44.741830001014975 21 | PRETORIA,0.12705240430481865,7092.809193499261,901.1584613094028 22 | JOHANNESBURG,0.14017598384960997,2143.9330366380004,300.5279227184136 23 | NIGEL,0.13799476778843012,4547.194032774516,627.4889846416544 24 | HIGHVELD SOUTH,0.005685760471378622,17682.86107567371,100.54051252494524 25 | HIGHVELD NORTH,0.10335662025801122,15490.080167379982,1601.0023336260435 26 | LOWVELD,0.05070553043455079,50627.237625866066,2567.0809382555867 27 | NEWCASTLE,0.0,30569.79776041492,0.0 28 | EMPANGENI,0.0,26293.868785056216,0.0 29 | -------------------------------------------------------------------------------- /resources/area_solar_corridors.csv: -------------------------------------------------------------------------------- 1 | name,area_ratio,area,available_area 2 | NAMAQUALAND,0.35638357538198434,98968.23350682989,35270.652906403135 3 | WEST COAST,0.34596125851098614,69720.09313931153,24120.451165979386 4 | PENINSULA,0.0600524912078365,11716.31933929701,703.5941641113384 5 | SOUTHERN CAPE,0.20949584667365034,109653.41050287843,22971.93407395386 6 | PORT ELIZABETH,0.2644303016152819,80714.14511712026,21343.265737939746 7 | KAROO,0.23692961487952574,151464.45576543547,35886.415172441586 8 | KIMBERLEY,0.2652649143098935,143240.81671785202,37996.76297234018 9 | CARLETONVILLE,0.2811738454153223,49480.73235663842,13912.687790682388 10 | WELKOM,0.1431438493458107,28141.594562102335,4028.2961723484623 11 | BLOEMFONTEIN,0.053491462938068564,56075.039143644186,2999.5358781029868 12 | EAST LONDON,0.2662628438937519,71892.17393677279,19142.21468610939 13 | PINETOWN,0.12418603075976607,45833.68945361201,5691.903968319827 14 | LADYSMITH,0.0,13421.066504393591,0.0 15 | VAAL TRIANGLE,0.22295907305966817,2434.354014426676,542.7613145556537 16 | WESTRAND,0.24580210529777097,2191.170956246988,538.5944341128397 17 | 
RUSTENBURG,5.3813030008159994e-05,20426.715105382777,1.0992234329340984 18 | WATERBERG,0.0,23611.98769251933,0.0 19 | POLOKWANE,0.013905923264533913,82110.50054329108,1141.822319767476 20 | WARMBAD,0.007844048940810805,5703.630274989095,44.73955501730465 21 | PRETORIA,0.12709180880737977,7092.809193475951,901.4379499244711 22 | JOHANNESBURG,0.1401668949324164,2143.933036636291,300.50843668833545 23 | NIGEL,0.1380234986200581,4547.194032764296,627.6196293063792 24 | HIGHVELD SOUTH,0.005688361011735983,17682.861074213266,100.5864975104986 25 | HIGHVELD NORTH,0.10334934049384505,15490.080165935886,1600.8895693462637 26 | LOWVELD,0.050696280002631856,50627.237612893965,2566.612613783048 27 | NEWCASTLE,0.0,30569.797753048722,0.0 28 | EMPANGENI,0.0,26293.868762569895,0.0 29 | -------------------------------------------------------------------------------- /scripts/base_network.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | import networkx as nx 4 | import pandas as pd 5 | import numpy as np 6 | from operator import attrgetter 7 | 8 | from vresutils.costdata import annuity 9 | 10 | import pypsa 11 | 12 | def base_network(): 13 | n = pypsa.Network() 14 | n.name = 'PyPSA-ZA' 15 | 16 | buses = pd.read_csv(snakemake.input.buses, index_col=0) 17 | lines = pd.read_csv(snakemake.input.lines, index_col=0) 18 | 19 | buses['population'] = pd.read_csv(snakemake.input.population, index_col=0)['population'] 20 | 21 | line_config = snakemake.config['lines'] 22 | v_nom = line_config['v_nom'] 23 | line_type = line_config['type'] 24 | 25 | lines['capacity'] = np.sqrt(3)*v_nom*n.line_types.loc[line_type, 'i_nom']*lines.num_parallel 26 | 27 | # Buses from regions 28 | n.set_snapshots(pd.date_range(snakemake.config['historical_year'], periods=8760, freq='h')) 29 | n.import_components_from_dataframe(buses, 'Bus') 30 | 31 | if 'T' in snakemake.wildcards.opts.split('-'): 32 | n.import_components_from_dataframe( 33 | (lines 34 | .drop('num_parallel', axis=1) 35 | .rename(columns={'capacity': 'p_nom_min'}) 36 | .assign(p_nom_extendable=True, p_min_pu=-1)), 37 | "Link" 38 | ) 39 | elif 'FL' in snakemake.wildcards.opts.split('-'): 40 | n.import_components_from_dataframe( 41 | (lines 42 | .loc[lines.num_parallel > 0.1] 43 | .drop('capacity', axis=1) 44 | .assign(s_nom_extendable=False, type=line_type)), 45 | "Line" 46 | ) 47 | else: 48 | n.import_components_from_dataframe( 49 | (lines 50 | .rename(columns={'capacity': 's_nom_min'}) 51 | .assign(s_nom_extendable=True, type=line_type, 52 | num_parallel=lambda df: df.num_parallel.clip(lower=0.5))), 53 | "Line" 54 | ) 55 | 56 | return n 57 | 58 | if __name__ == "__main__": 59 | n = base_network() 60 | n.export_to_netcdf(snakemake.output[0]) 61 | 62 | -------------------------------------------------------------------------------- /data/num_lines.csv: -------------------------------------------------------------------------------- 1 | name,bus0,bus1,num_parallel_275,num_parallel_400,num_parallel_765 2 | 0,PINETOWN,EMPANGENI,2,0,0 3 | 1,PINETOWN,LADYSMITH,2,2,0 4 | 2,PINETOWN,NEWCASTLE,0,2,0 5 | 3,PINETOWN,EAST LONDON,0,0,0 6 | 4,CARLETONVILLE,WESTRAND,0,5,0 7 | 5,CARLETONVILLE,WELKOM,0,0,1 8 | 6,CARLETONVILLE,KIMBERLEY,0,0,0 9 | 7,CARLETONVILLE,VAAL TRIANGLE,1,0,1 10 | 8,CARLETONVILLE,RUSTENBURG,2,3,0 11 | 9,HIGHVELD SOUTH,HIGHVELD NORTH,6,4,0 12 | 10,HIGHVELD SOUTH,WELKOM,0,2,0 13 | 11,HIGHVELD SOUTH,NIGEL,4,7,1 14 | 12,HIGHVELD SOUTH,BLOEMFONTEIN,0,0,2 15 | 13,HIGHVELD SOUTH,NEWCASTLE,0,6,0 16 | 14,VAAL 
TRIANGLE,WESTRAND,2,0,0
17 | 15,VAAL TRIANGLE,WELKOM,2,0,0
18 | 16,VAAL TRIANGLE,NIGEL,2,3,0
19 | 17,VAAL TRIANGLE,JOHANNESBURG,0,0,0
20 | 18,RUSTENBURG,WESTRAND,0,0,0
21 | 19,RUSTENBURG,WATERBERG,2,4,0
22 | 20,RUSTENBURG,PRETORIA,0,1,0
23 | 21,RUSTENBURG,WARMBAD,0,0,0
24 | 22,SOUTHERN CAPE,PORT ELIZABETH,0,0,0
25 | 23,SOUTHERN CAPE,KAROO,0,3,0
26 | 24,SOUTHERN CAPE,PENINSULA,0,4,0
27 | 25,SOUTHERN CAPE,WEST COAST,0,0,0
28 | 26,WESTRAND,PRETORIA,0,2,0
29 | 27,WESTRAND,JOHANNESBURG,1,0,0
30 | 28,LOWVELD,POLOKWANE,0,1,0
31 | 29,LOWVELD,HIGHVELD NORTH,5,3,0
32 | 30,LOWVELD,PRETORIA,0,0,0
33 | 31,LADYSMITH,EMPANGENI,0,1,0
34 | 32,LADYSMITH,BLOEMFONTEIN,0,0,0
35 | 33,LADYSMITH,NEWCASTLE,2,2,0
36 | 34,PENINSULA,WEST COAST,0,3,0
37 | 35,PRETORIA,HIGHVELD NORTH,0,8,0
38 | 36,PRETORIA,NIGEL,0,0,0
39 | 37,PRETORIA,JOHANNESBURG,6,1,0
40 | 38,PRETORIA,POLOKWANE,0,0,2
41 | 39,PRETORIA,WARMBAD,1,0,0
42 | 40,BLOEMFONTEIN,WELKOM,1,3,1
43 | 41,BLOEMFONTEIN,NEWCASTLE,0,0,0
44 | 42,BLOEMFONTEIN,KAROO,0,4,2
45 | 43,BLOEMFONTEIN,KIMBERLEY,2,0,0
46 | 44,JOHANNESBURG,NIGEL,4,0,0
47 | 45,HIGHVELD NORTH,NIGEL,0,0,0
48 | 46,KAROO,PORT ELIZABETH,0,2,0
49 | 47,KAROO,NAMAQUALAND,0,1,0
50 | 48,KAROO,EAST LONDON,0,1,0
51 | 49,KAROO,WEST COAST,0,0,0
52 | 50,KAROO,KIMBERLEY,0,0,0
53 | 51,KIMBERLEY,WELKOM,0,0,0
54 | 52,KIMBERLEY,NAMAQUALAND,0,0,0
55 | 53,EMPANGENI,NEWCASTLE,0,2,0
56 | 54,EAST LONDON,PORT ELIZABETH,1,1,0
57 | 55,POLOKWANE,WATERBERG,0,2,0
58 | 56,POLOKWANE,WARMBAD,1,0,0
59 | 57,WELKOM,NIGEL,1,1,1
60 | 58,WATERBERG,WARMBAD,0,0,0
61 | 59,NAMAQUALAND,WEST COAST,0,2,0
62 | 
--------------------------------------------------------------------------------
/scripts/build_landuse_map_to_tech_and_supply_region.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | import geopandas as gpd
4 | import rasterio, rasterio.features, rasterio.mask
5 | import rasterstats
6 | import shapely.geometry
7 | 
8 | # Translate the landuse file into a raster of percentages of available area
9 | landusetype_percent = snakemake.config['respotentials']['landusetype_percent'][snakemake.wildcards.tech]
10 | 
11 | with rasterio.open(snakemake.input.landuse) as src, rasterio.open(snakemake.output.raster, 'w', **src.meta) as dst:
12 | 
13 |     resareas = gpd.read_file(snakemake.input.resarea).to_crs(src.crs)
14 |     regions = gpd.read_file(snakemake.input.supply_regions).to_crs(src.crs)
15 | 
16 |     stats = []
17 | 
18 |     for region in regions.itertuples():
19 |         resareas_b = resareas.intersects(region.geometry)
20 |         if not resareas_b.any():
21 |             # No overlap with the resource areas: record zero availability and skip
22 |             stats.append({'mean': 0.})
23 |             continue
24 | 
25 |         minx, miny, maxx, maxy = region.geometry.bounds
26 |         # Pad the bounding box by 5% on each side (computed before mutating the bounds)
27 |         dx, dy = 0.05*(maxx - minx), 0.05*(maxy - miny)
28 |         minx, maxx = minx - dx, maxx + dx
29 |         miny, maxy = miny - dy, maxy + dy
30 | 
31 |         window = rasterio.windows.from_bounds(minx, miny, maxx, maxy, src.transform)
32 |         box = shapely.geometry.box(minx, miny, maxx, maxy)
33 |         transform = rasterio.windows.transform(window, src.transform)
34 | 
35 |         src_data = src.read(1, window=window)
36 |         dst_data = np.zeros_like(src_data)
37 | 
38 |         for grid_codes, value in landusetype_percent:
39 |             dst_data.ravel()[np.in1d(src_data.ravel(), grid_codes)] = value
40 | 
41 |         mask = rasterio.mask.geometry_mask(resareas.loc[resareas_b, 'geometry'], dst_data.shape, transform)
42 |         dst_data = np.ma.array(dst_data, mask=mask, fill_value=0).filled()
43 | 
44 |         dst.write_band(1, dst_data, window=window)
45 | 
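        # Zonal mean of the availability percentages over the region geometry;
        # this feeds the area_{tech}_{resarea}.csv summary written below.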
46 |         stats.extend(rasterstats.zonal_stats(region.geometry, dst_data, affine=transform,
47 |                                              nodata=-999, stats='mean'))
48 | 
49 | stats = pd.DataFrame(stats)
50 | 
51 | stats['area_ratio'] = stats.pop('mean') / 100
52 | stats['area'] = regions.to_crs(dict(proj='aea')).area/1e6  # Albers equal-area gives area in m^2; divide by 1e6 for km^2
53 | stats['available_area'] = stats['area_ratio'] * stats['area']
54 | 
55 | stats.set_index(regions.name).to_csv(snakemake.output.area)
56 | 
--------------------------------------------------------------------------------
/data/supply_regions/centroids.dbf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PyPSA/pypsa-za/HEAD/data/supply_regions/centroids.dbf
--------------------------------------------------------------------------------
/data/supply_regions/supply_regions.dbf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PyPSA/pypsa-za/HEAD/data/supply_regions/supply_regions.dbf
--------------------------------------------------------------------------------
/scripts/build_topology.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | 
3 | import networkx as nx
4 | import pandas as pd
5 | import geopandas as gpd
6 | import numpy as np
7 | from operator import attrgetter
8 | 
9 | from vresutils.costdata import annuity
10 | from vresutils.shapes import haversine
11 | 
12 | def build_topology():
13 |     ## Read in the supply regions and derive the network topology from them
14 | 
15 |     regions = gpd.read_file(snakemake.input.supply_regions)[['name', 'geometry']]
16 | 
17 |     # The centroids of NAMAQUALAND and PRETORIA were moved slightly by hand so that they lie within their shapes
18 |     centroids = gpd.read_file(snakemake.input.centroids).set_index('name')['geometry']
19 | 
20 |     # Touching regions are connected by lines; we use networkx to take care of the double counting efficiently
21 | 
22 |     def edges_between_touching_regions(regions):
23 |         G = nx.Graph()
24 |         G.add_nodes_from(regions.index)
25 |         for r in regions.itertuples():
26 |             neighs = regions.index[regions.touches(r.geometry)]
27 |             G.add_edges_from((r.Index, r2_name) for r2_name in neighs.values)
28 |         return G.edges()
29 | 
30 |     regions = regions.set_index('name')
31 | 
32 |     line_config = snakemake.config['lines']
33 |     v_nom = line_config['v_nom']
34 | 
35 |     buses = (regions
36 |              .assign(
37 |                  x=centroids.map(attrgetter('x')),
38 |                  y=centroids.map(attrgetter('y')),
39 |                  v_nom=v_nom
40 |              )
41 |              .drop('geometry', axis=1))
42 | 
43 |     # Lines from touching regions
44 |     def asarray(x): return np.asarray(list(map(np.asarray, x)))
45 |     lines = pd.DataFrame(list(edges_between_touching_regions(regions)), columns=['bus0', 'bus1'])
46 |     lines['length'] = haversine(asarray(lines.bus0.map(centroids)),
47 |                                 asarray(lines.bus1.map(centroids))) * line_config['length_factor']
48 | 
49 |     num_lines = pd.read_csv(snakemake.input.num_lines, index_col=0).set_index(['bus0', 'bus1'])
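    # The circuit counts per corridor at 275/400/765 kV are converted below into
    # an equivalent number of v_nom (380 kV) circuits; the (v/v_nom)**2 weighting
    # reflects that the transmittable power of a line of given type grows roughly
    # with the square of its operating voltage.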
50 | num_parallel = sum(num_lines['num_parallel_{}'.format(int(v))] * (v/v_nom)**2 51 | for v in (275, 400, 765)) 52 | 53 | lines = (lines 54 | .join(num_parallel.rename('num_parallel'), on=['bus0', 'bus1']) 55 | .join(num_parallel.rename("num_parallel_i"), on=['bus1', 'bus0'])) 56 | 57 | lines['num_parallel'] = line_config['s_nom_factor'] * lines['num_parallel'].fillna(lines.pop('num_parallel_i')) 58 | 59 | return buses, lines 60 | 61 | if __name__ == "__main__": 62 | buses, lines = build_topology() 63 | 64 | buses.to_csv(snakemake.output.buses) 65 | lines.to_csv(snakemake.output.lines) 66 | -------------------------------------------------------------------------------- /report.css: -------------------------------------------------------------------------------- 1 | /** 2 | Credits for the colors and font selection go to the Twitter Bootstrap framework. 3 | */ 4 | 5 | 6 | body { 7 | color: rgb(51, 51, 51); 8 | font-size: 10pt; 9 | padding-top: 10px; 10 | font-family: "Helvetica Neue",Helvetica,Arial,sans-serif; 11 | } 12 | 13 | h1 { 14 | font-size: 150%; 15 | } 16 | 17 | h2 { 18 | font-size: 140%; 19 | } 20 | 21 | h3 { 22 | font-size: 130%; 23 | } 24 | 25 | h4 { 26 | font-size: 120%; 27 | } 28 | 29 | h5 { 30 | font-size: 110%; 31 | } 32 | 33 | h6 { 34 | font-size: 100%; 35 | } 36 | 37 | div#attachments { 38 | display: inline-block; 39 | color: gray; 40 | border-width: 1px; 41 | border-style: solid; 42 | border-color: white; 43 | border-radius: 4px 4px 4px 4px; 44 | padding: 0px; 45 | } 46 | 47 | div#attachments dt { 48 | margin-top: 2px; 49 | margin-bottom: 2px; 50 | } 51 | 52 | div#attachments dd p { 53 | margin-top: 2px; 54 | margin-bottom: 2px; 55 | } 56 | 57 | div#attachments :target dt { 58 | font-weight: bold; 59 | } 60 | 61 | div#attachments :target a { 62 | color: rgb(70, 136, 71); 63 | } 64 | 65 | 66 | h1.title { 67 | text-align: center; 68 | font-size: 180%; 69 | } 70 | 71 | div.document { 72 | position: relative; 73 | background: white; 74 | /* max-width: 800px; */ 75 | margin: auto; 76 | padding: 20px; 77 | border: 1px solid rgb(221, 221, 221); 78 | border-radius: 4px 4px 4px 4px; 79 | } 80 | 81 | div.document:after { 82 | content: "snakemake report"; 83 | position: absolute; 84 | top: -1px; 85 | right: -1px; 86 | padding: 3px 7px; 87 | background-color: #f5f5f5; 88 | border: 1px solid rgb(221, 221, 221); 89 | color: #9da0a4; 90 | font-weight: bold; 91 | font-size: 12pt; 92 | border-radius: 0 0 0 4px; 93 | } 94 | 95 | div.document p { 96 | text-align: justify; 97 | } 98 | 99 | div#metadata { 100 | text-align: right; 101 | } 102 | 103 | table.docutils { 104 | border: none; 105 | border-collapse: collapse; 106 | border-top: 2px solid gray; 107 | border-bottom: 2px solid gray; 108 | text-align: center; 109 | } 110 | 111 | table.docutils th { 112 | border: none; 113 | border-top: 2px solid gray; 114 | border-bottom: 2px solid gray; 115 | padding: 5px; 116 | } 117 | 118 | table.docutils td { 119 | border: none; 120 | padding: 5px; 121 | } 122 | 123 | table.docutils th:last-child, td:last-child { 124 | text-align: left; 125 | } 126 | 127 | table.docutils th:first-child, td:first-child { 128 | text-align: right; 129 | } 130 | 131 | table.docutils th:only-child, td:only-child { 132 | text-align: center; 133 | } 134 | 135 | table.docutils.footnote { 136 | border: none; 137 | text-align: left; 138 | } 139 | 140 | a { 141 | color: rgb(0, 136, 204); 142 | text-decoration: none; 143 | } 144 | 145 | a:hover { 146 | color: rgb(0, 85, 128); 147 | text-decoration: underline; 148 | } 149 | 150 
| 151 | div.figure { 152 | display: inline-block; 153 | width: 10cm; 154 | margin-left: 2em; 155 | margin-right: 2em; 156 | } 157 | 158 | img { 159 | max-width: 100%; 160 | } 161 | 162 | p.caption { 163 | font-style: italic; 164 | } 165 | -------------------------------------------------------------------------------- /scripts/_helpers.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | from six import iteritems, iterkeys, itervalues 4 | 5 | import pypsa 6 | 7 | def pdbcast(v, h): 8 | return pd.DataFrame(v.values.reshape((-1, 1)) * h.values, 9 | index=v.index, columns=h.index) 10 | 11 | 12 | def load_network(fn, opts, combine_hydro_ps=True): 13 | n = pypsa.Network(fn) 14 | 15 | n.loads["carrier"] = n.loads.bus.map(n.buses.carrier) + " load" 16 | n.stores["carrier"] = n.stores.bus.map(n.buses.carrier) 17 | 18 | n.links["carrier"] = (n.links.bus0.map(n.buses.carrier) + "-" + n.links.bus1.map(n.buses.carrier)) 19 | n.lines["carrier"] = "AC line" 20 | n.transformers["carrier"] = "AC transformer" 21 | 22 | n.lines['s_nom'] = n.lines['s_nom_min'] 23 | 24 | if combine_hydro_ps: 25 | n.storage_units.loc[n.storage_units.carrier.isin({'Pumped storage', 'Hydro'}), 'carrier'] = 'Hydro+PS' 26 | 27 | # #if the carrier was not set on the heat storage units 28 | # bus_carrier = n.storage_units.bus.map(n.buses.carrier) 29 | # n.storage_units.loc[bus_carrier == "heat","carrier"] = "water tanks" 30 | 31 | for name in opts['heat_links'] + opts['heat_generators']: 32 | n.links.loc[n.links.index.to_series().str.endswith(name), "carrier"] = name 33 | 34 | return n 35 | 36 | def aggregate_p_nom(n): 37 | return pd.concat([ 38 | n.generators.groupby("carrier").p_nom_opt.sum(), 39 | n.storage_units.groupby("carrier").p_nom_opt.sum(), 40 | n.links.groupby("carrier").p_nom_opt.sum(), 41 | n.loads_t.p.groupby(n.loads.carrier,axis=1).sum().mean() 42 | ]) 43 | 44 | def aggregate_p(n): 45 | return pd.concat([ 46 | n.generators_t.p.sum().groupby(n.generators.carrier).sum(), 47 | n.storage_units_t.p.sum().groupby(n.storage_units.carrier).sum(), 48 | n.stores_t.p.sum().groupby(n.stores.carrier).sum(), 49 | -n.loads_t.p.sum().groupby(n.loads.carrier).sum() 50 | ]) 51 | 52 | def aggregate_e_nom(n): 53 | return pd.concat([ 54 | (n.storage_units["p_nom_opt"]*n.storage_units["max_hours"]).groupby(n.storage_units["carrier"]).sum(), 55 | n.stores["e_nom_opt"].groupby(n.stores.carrier).sum() 56 | ]) 57 | 58 | def aggregate_p_curtailed(n): 59 | return pd.concat([ 60 | ((n.generators_t.p_max_pu.sum().multiply(n.generators.p_nom_opt) - n.generators_t.p.sum()) 61 | .groupby(n.generators.carrier).sum()), 62 | ((n.storage_units_t.inflow.sum() - n.storage_units_t.p.sum()) 63 | .groupby(n.storage_units.carrier).sum()) 64 | ]) 65 | 66 | def aggregate_costs(n, flatten=False, opts=None, existing_only=False): 67 | components = dict(Link=("p_nom", "p0"), 68 | Generator=("p_nom", "p"), 69 | StorageUnit=("p_nom", "p"), 70 | Store=("e_nom", "p"), 71 | Line=("s_nom", None), 72 | Transformer=("s_nom", None)) 73 | 74 | costs = {} 75 | for c, (p_nom, p_attr) in zip( 76 | n.iterate_components(iterkeys(components), skip_empty=False), 77 | itervalues(components) 78 | ): 79 | if not existing_only: p_nom += "_opt" 80 | costs[(c.list_name, 'capital')] = (c.df[p_nom] * c.df.capital_cost).groupby(c.df.carrier).sum() 81 | if p_attr is not None: 82 | p = c.pnl[p_attr].sum() 83 | if c.name == 'StorageUnit': 84 | p = p.loc[p > 0] 85 | costs[(c.list_name, 'marginal')] = 
(p*c.df.marginal_cost).groupby(c.df.carrier).sum()
86 |     costs = pd.concat(costs, sort=False)
87 | 
88 |     if flatten:
89 |         assert opts is not None
90 |         conv_techs = opts['conv_techs']
91 | 
92 |         costs = costs.reset_index(level=0, drop=True)
93 |         costs = costs['capital'].add(
94 |             costs['marginal'].rename({t: t + ' marginal' for t in conv_techs}),
95 |             fill_value=0.
96 |         )
97 | 
98 |     return costs
99 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # PyPSA-ZA
2 | 
3 | [PyPSA](https://pypsa.org/) model of the South African electricity system at the level of ESKOM's supply regions.
4 | 
5 | ![Visualisation of optimal capacities and costs in the least cost scenario](imgs/network_csir-moderate_redz_E_LC_p_nom_ext.png)
6 | 
7 | The model is described and evaluated in the paper [PyPSA-ZA: Investment and operation co-optimization of integrating wind and solar in South Africa at high spatial and temporal detail](https://arxiv.org/abs/1710.11199), 2017, [arXiv:1710.11199](https://arxiv.org/abs/1710.11199).
8 | 
9 | This repository contains the scripts to automatically reproduce the analysis.
10 | 
11 | ## Instructions
12 | 
13 | To build and solve the model, you need a computer with about 20 GB of memory and a
14 | strong interior-point solver supported by the modelling library
15 | [Pyomo](https://github.com/Pyomo/pyomo), such as Gurobi or CPLEX.
16 | 
17 | We recommend the following preparatory steps (the path before the `%` sign denotes
18 | the directory in which the commands following the `%` should be entered):
19 | 
20 | 1. cloning the repository using `git` (**to a directory without any spaces in the path**)
21 |    ```shell
22 |    /some/other/path % cd /some/path/without/spaces
23 |    /some/path/without/spaces % git clone https://github.com/FRESNA/pypsa-za.git
24 |    ```
25 | 
26 | 2. installing the necessary Python dependencies using conda (from within the `pypsa-za` directory)
27 |    ```shell
28 |    .../pypsa-za % conda env create -f environment.yaml
29 |    .../pypsa-za % source activate pypsa-za # or conda activate pypsa-za on Windows
30 |    ```
31 | 
32 | 3. getting the separate [data bundle](https://vfs.fias.science/d/f204668ef2/files/?p=/pypsa-za-bundle.7z&dl=1) (see also [Data dependencies](#data-dependencies) below) and unpacking it in `data`
33 |    ```shell
34 |    .../data % wget "https://vfs.fias.science/d/f204668ef2/files/?dl=1&p=/pypsa-za-bundle.7z"
35 |    .../data % 7z x pypsa-za-bundle.7z
36 |    ```
37 | 
38 | All results and scenario comparisons are reproduced using the workflow
39 | management system `snakemake`:
40 | ```shell
41 | .../pypsa-za % snakemake
42 | [... will take about a week on a recent computer with all scenarios ...]
43 | ```
44 | 
45 | `snakemake` will first compute several intermediate data files in the directory
46 | `resources`, then prepare unsolved networks in `networks`, solve them and save
47 | the resulting networks in `results/version-0.5/networks` and finally render the
48 | main plots into `results/version-0.5/plots`.
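
The full scenario set is the Cartesian product of the lists under the `scenario`
key in `config.yaml`. A minimal sketch of how the scenario names are derived
(using the `{cost}_{resarea}_{sectors}_{opts}` naming template from the `Snakefile`):

```python
from itertools import product
import yaml

with open("config.yaml") as f:
    scenario = yaml.safe_load(f)["scenario"]

# one solved network per combination, e.g. "csir-aggressive_redz_E_LC"
names = ["{cost}_{resarea}_{sectors}_{opts}".format(**dict(zip(scenario, vals)))
         for vals in product(*scenario.values())]
```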
49 | 
50 | Instead of computing all scenarios (defined by the product of all wildcards in
51 | the `scenario` config section), `snakemake` also allows computing only a
52 | specific scenario like `csir-aggressive_redz_E_LC`:
53 | ```shell
54 | .../pypsa-za % snakemake results/version-0.5/plots/network_csir-aggressive_redz_E_LC_p_nom
55 | ```
56 | 
57 | ## Data dependencies
58 | 
59 | For ease of installation and reproduction we provide a bundle
60 | [`pypsa-za-bundle.7z`](https://vfs.fias.science/d/f204668ef2/files/?p=/pypsa-za-bundle.7z&dl=1)
61 | with the necessary data files:
62 | 
63 | | File | Citation |
64 | |------|----------|
65 | | South_Africa_100m_Population | WorldPop, South Africa 100m Population (2013). [doi:10.5258/soton/wp00246](https://doi.org/10.5258/soton/wp00246) |
66 | | Supply area normalised power feed-in for PV.xlsx | D. S. Bofinger, B. Zimmermann, A.-K. Gerlach, D. T. Bischof-Niemz, C. Mushwana, [Wind and Solar PV Resource Aggregation Study for South Africa](https://www.csir.co.za/csir-energy-centre-documents) (2016). |
67 | | Supply area normalised power feed-in for Wind.xlsx | same as above |
68 | | EIA_hydro_generation_2011_2014.csv | U.S. EIA, [Hydroelectricity Net Generation ZA and MZ 2011-2014](http://tinyurl.com/EIA-hydro-gen-ZA-MZ-2011-2014) (2017). |
69 | | Existing Power Stations SA.xlsx | Compiled by CSIR from [Eskom Holdings](https://www.eskom.co.za/) (Jan 2017) and RSA DOE, [IRP2016](http://www.energy.gov.za/IRP/2016/Draft-IRP-2016-Assumptions-Base-Case-and-Observations-Revision1.pdf) |
70 | | Power_corridors | RSA DEA, [REDZs Strategic Transmission Corridors](https://egis.environment.gov.za/) (Apr 2017) |
71 | | REDZ_DEA_Unpublished_Draft_2015 | RSA DEA, [Wind and Solar PV Energy Strategic Environmental Assessment REDZ Database](https://egis.environment.gov.za/) (Mar 2017) |
72 | | SACAD_OR_2017_Q2 | RSA DEA, [South Africa Conservation Areas Database (SACAD)](https://egis.environment.gov.za/) (Jun 2017) |
73 | | SAPAD_OR_2017_Q2 | RSA DEA, [South Africa Protected Areas Database (SAPAD)](https://egis.environment.gov.za/) (Jun 2017) |
74 | | SystemEnergy2009_13.csv | Eskom, System Energy 2009-13 Hourly, available from Eskom on request |
75 | | SALandCover_OriginalUTM35North_2013_GTI_72Classes | GEOTERRAIMAGE (South Africa), [2013-14 South African National Land-Cover Dataset](https://egis.environment.gov.za/data_egis/node/109) (2017) |
76 | 
77 | 
--------------------------------------------------------------------------------
/config.yaml:
--------------------------------------------------------------------------------
1 | version: 0.5
2 | 
3 | scenario:
4 |   resarea: [redz, corridors]
5 |   sectors: [E] # ,E+EV,E+BEV,E+BEV+V2G] # [ E+EV, E+BEV, E+BEV+V2G ]
6 |   cost: [csir-aggressive] # [csir-today, csir-moderate, csir-aggressive] #, IRP2016-Apr2016]
7 |   opts: [LC, Co2L] # , EP, BAU, LC-SAFE, Co2L-SAFE] #, LC-FL, LC-T, Ep-T, Co2L-T]
8 | 
9 | data:
10 |   resarea:
11 |     redz: REDZ_DEA_Unpublished_Draft_2015
12 |     corridors: Power_corridors
13 | 
14 | year: "2040"
15 | historical_year: "2012"
16 | 
17 | electricity:
18 |   demand: 428.e+6 # 344TWh 2030, 428TWh 2040, 522TWh 2050
19 |   # IRP_Update_Assumptions_1904.pdf
20 |   co2limit: 10.e+6 # 470.9e+5 # 0.1 * 470.9e+6 (2014 Electricity Sector)
21 | 
22 |   csp: false
23 | 
24 | 
extendable_carriers: 25 | Generator: [OCGT, CCGT, Coal, Nuclear] 26 | StorageUnit: [Battery] # [CAES] 27 | 28 | buses: 29 | OCGT: [WEST COAST, EMPANGENI, PORT ELIZABETH] # Saldana Bay, Richards Bay, Coega 30 | CCGT: [WEST COAST, EMPANGENI, PORT ELIZABETH] 31 | Coal: [WATERBERG] 32 | Nuclear: [PORT ELIZABETH, PENINSULA] # Thyspunt, Koeberg 33 | 34 | BAU_mincapacities: 35 | Nuclear: 10000 36 | Coal: 10000 37 | 38 | SAFE_reservemargin: 0.1 39 | 40 | max_hours: 41 | Battery: 3 42 | CAES: 10 43 | 44 | respotentials: 45 | # Gives us ball-park the available areas the IWES study reports 46 | landusetype_percent: 47 | wind: 48 | - [[7, 8, 9, 41], 80] 49 | # - [[5, 6], 50] 50 | # - [[11, 12, 14, 15], 10] 51 | solar: 52 | - [[7, 8, 9, 41], 80] 53 | # - [[11, 12, 14, 15], 50] 54 | # - [[46, 47, 51, 56, 64, 68, 72], 10] 55 | 56 | capacity_per_sqm: 57 | wind: 5 # half of 10 (IWES) 58 | solar: 16.5 # half of 33 (IWES) 59 | 60 | hydro_inflow: 61 | disable: true 62 | countries: [ZA, MZ] 63 | cutout: za-mz-2012 64 | 65 | lines: 66 | v_nom: 380. 67 | type: 'Al/St 240/40 4-bundle 380.0' 68 | 69 | # costs 70 | s_nom_factor: 0.7 71 | length_factor: 1.25 72 | 73 | costs: 74 | # costs come from data/technology_costs.xlsx 75 | discountrate: 0.08 76 | 77 | # Marginal and capital costs can be overwritten 78 | # capital_cost: 79 | # Wind: Bla 80 | marginal_cost: # 81 | PV: 0.01 82 | Wind: 0.015 83 | EUR_to_ZAR: 15.63 84 | 85 | emission_prices: # only used with the option Ep (emission prices) 86 | # Externality costs from Integrated Energy Plan by the ZA DOE 87 | co2: 0.27e+3 88 | sox: 7.6e+3 89 | nox: 4.5e+3 90 | hg: 41484.e-6 # is also part of the excel sheet 91 | particulate: 11.3e+3 92 | 93 | transport: 94 | # 2014 Energy 702EJ 95 | # Table 18 in 20170331CSIR_EC_DOE p. 48 (64) 96 | energy_total: 25.2e+6 # 24 2030, 25.2 2040, 26.7 2050 97 | # efficiency_gain: 3.5 # Explanation 98 | availability_max: 0.95 99 | availability_mean: 0.8 100 | total_cars: 8.4e+6 # 8.0 2030, 8.4 2040, 8.9 2050 101 | car_battery_p_nom: 0.011 # 11kW 3-phase charger 102 | car_battery_e_nom: 0.05 # 50kWh battery 103 | # http://www.zeit.de/mobilitaet/2014-10/auto-fahrzeug-bestand 104 | standing_loss: 8.33e-5 # Buddischak 105 | efficiency: 0.9 # " 106 | 107 | # cars_per_person: 0.6 108 | 109 | solving: 110 | tmpdir: /tmp 111 | options: 112 | clip_p_max_pu: 1.e-2 113 | load_shedding: true 114 | noisy_costs: true 115 | min_iterations: 4 116 | max_iterations: 10 117 | formulation: kirchhoff 118 | # max_iterations: 1 119 | # nhours: 10 120 | solver: 121 | name: gurobi 122 | threads: 4 123 | method: 2 124 | crossover: 0 # -1 (Choose freely) 125 | BarConvTol: 1.e-5 126 | FeasibilityTol: 1.e-6 127 | LogToConsole: 0 128 | OutputFlag: 1 129 | 130 | plotting: 131 | map: 132 | figsize: [7, 7] 133 | boundaries: [16, -35, 33, -22] 134 | p_nom: 135 | bus_size_factor: 5.e+4 136 | linewidth_factor: 1.e+3 # 1.e+3 #3.e+3 137 | 138 | costs_max: 800 139 | 140 | vre_techs: ["Wind", "PV"] 141 | conv_techs: ["OCGT", "CCGT", "Nuclear", "Coal"] 142 | storage_techs: ["Hydro", "CAES", "Battery", "Pumped storage", "Hydro+PS"] 143 | store_techs: ["Li ion", "water tanks"] 144 | load_carriers: ["AC load", "heat load", "Li ion load"] 145 | AC_carriers: ["AC line", "AC transformer"] 146 | link_carriers: ["DC line", "Converter AC-DC"] 147 | heat_links: ["heat pump", "resistive heater", "CHP heat", "CHP electric", 148 | "gas boiler", "central heat pump", "central resistive heater", "central CHP heat", 149 | "central CHP electric", "central gas boiler"] 150 | heat_generators: 
["gas boiler", "central gas boiler", "solar thermal collector", "central solar thermal collector"] 151 | tech_colors: 152 | Wind: "xkcd:azure" 153 | Hydro: "g" 154 | ror: "g" 155 | Hydro+PS: "g" 156 | PV: "y" 157 | OCGT: "brown" 158 | OCGT marginal: "sandybrown" 159 | OCGT-heat: "orange" 160 | central gas boiler: "orange" 161 | gas boiler: "orange" 162 | gas boilers: "orange" 163 | gas boiler marginal: "orange" 164 | gas: "brown" 165 | lines: "k" 166 | AC line: "k" 167 | AC-AC: "k" 168 | transmission lines: "k" 169 | H2: "m" 170 | hydrogen storage: "m" 171 | Battery: "slategray" 172 | battery storage: "slategray" 173 | CAES: "lightgray" 174 | Nuclear: "r" 175 | Nuclear marginal: "r" 176 | Coal: "k" 177 | Coal marginal: "k" 178 | Lignite: "grey" 179 | Lignite marginal: "grey" 180 | CCGT: "orange" 181 | CCGT marginal: "orange" 182 | Diesel: "darkred" 183 | Diesel marginal: "darkred" 184 | heat pumps: "green" 185 | heat pump: "green" 186 | central heat pump: "green" 187 | resistive heater: "pink" 188 | central resistive heater: "pink" 189 | Sabatier: "turquoise" 190 | water tanks: "w" 191 | CHP: "r" 192 | CHP heat: "r" 193 | CHP electric: "r" 194 | central CHP heat: "r" 195 | central CHP electric: "r" 196 | Pumped storage: "g" 197 | Ambient: "k" 198 | AC load: "b" 199 | Heat load: "r" 200 | Li ion load: "grey" 201 | heat: "r" 202 | Li ion: "grey" 203 | district heating: "#CC4E5C" 204 | nice_names: 205 | # OCGT: "Gas" 206 | # OCGT marginal: "Gas (marginal)" 207 | #Battery: "Battery storage" 208 | lines: "Transmission lines" 209 | AC line: "AC lines" 210 | AC-AC: "DC lines" 211 | ror: "Run of river" 212 | nice_names_n: 213 | offwind: "offshore\nwind" 214 | onwind: "onshore\nwind" 215 | # OCGT: "Gas" 216 | H2: "Hydrogen\nstorage" 217 | # OCGT marginal: "Gas (marginal)" 218 | lines: "transmission\nlines" 219 | ror: "run of river" 220 | -------------------------------------------------------------------------------- /scripts/extract_summaries.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pypsa 3 | import pandas as pd 4 | import numpy as np 5 | from itertools import product, chain 6 | from six.moves import map, zip 7 | from six import itervalues, iterkeys 8 | from collections import OrderedDict as odict 9 | 10 | from _helpers import load_network 11 | 12 | if 'snakemake' not in globals(): 13 | from vresutils import Dict 14 | import yaml 15 | snakemake = Dict() 16 | snakemake.input = ['../results/networks/CSIR-Expected-Apr2016_corridors_E', 17 | '../results/networks/IRP2016-Apr2016_corridors_E'] 18 | snakemake.output = ['../results/summaries'] 19 | snakemake.params = Dict(scenario_tmpl="{cost}_{mask}_{sectors}", 20 | scenarios=Dict(cost=['CSIR-Expected-Apr2016', 21 | 'IRP2016-Apr2016'], 22 | mask=['corridors'], sectors=['E'])) 23 | with open('../config.yaml') as f: 24 | snakemake.config = yaml.load(f) 25 | 26 | opts = snakemake.config['plotting'] 27 | 28 | def collect_networks(): 29 | basenames = list(map(os.path.basename, snakemake.input)) 30 | networks = [] 31 | 32 | for p in (odict(zip(iterkeys(snakemake.params.scenarios), o)) 33 | for o in product(*itervalues(snakemake.params.scenarios))): 34 | scenario = snakemake.params.scenario_tmpl.replace('[', '{').replace(']', '}').format(**p) 35 | if scenario in basenames: 36 | networks.append((scenario, p, snakemake.input[basenames.index(scenario)])) 37 | 38 | return networks 39 | 40 | 41 | 42 | group_sum_dir = snakemake.output[0] 43 | if not os.path.isdir(group_sum_dir): 44 | 
os.mkdir(group_sum_dir) 45 | 46 | def clean_and_save(df, fn, dropna=True): 47 | if dropna: 48 | df = df.dropna(axis=0, how='all') 49 | df.columns = pd.MultiIndex.from_tuples(df.columns, names=list(snakemake.params.scenarios.keys())) 50 | df = df.sort_index(axis=1) 51 | df.to_csv(os.path.join(group_sum_dir, fn)) 52 | return df 53 | 54 | 55 | ## Look at aggregated totals 56 | 57 | class p(object): 58 | def __init__(s): 59 | s.storage = pd.DataFrame(index=opts['storage_techs']) 60 | s.gen = pd.DataFrame(index=opts['vre_techs']+opts['conv_techs']) 61 | s.links = pd.DataFrame(index=opts['link_carriers'] + opts['heat_links'] + opts['heat_generators']) 62 | s.loads = pd.DataFrame(index=opts['load_carriers']) 63 | 64 | def add(s, sn, p, n): 65 | s.storage[tuple(p.values())] = n.storage_units.groupby("carrier").p_nom_opt.sum() 66 | s.gen[tuple(p.values())] = n.generators.groupby("carrier").p_nom_opt.sum() 67 | s.links[tuple(p.values())] = n.links.groupby("carrier").p_nom_opt.sum() 68 | s.loads[tuple(p.values())] = n.loads_t.p.groupby(n.loads.carrier,axis=1).sum().mean() 69 | 70 | def write(s): 71 | return clean_and_save(pd.concat((s.storage, s.gen, s.links, s.loads)), snakemake.output.p_nom_opt) 72 | 73 | class e(object): 74 | def __init__(s): 75 | s.nom = pd.DataFrame(index=opts['storage_techs'] + opts['store_techs']) 76 | s.storage = pd.DataFrame(index=opts['storage_techs']) 77 | s.store = pd.DataFrame(index=opts['store_techs']) 78 | s.gen = pd.DataFrame(index=opts['vre_techs'] + opts['conv_techs']) 79 | s.load = pd.DataFrame(index=opts['load_carriers']) 80 | 81 | def add(s, sn, p, n): 82 | s.nom[tuple(p.values())] = pd.concat([ 83 | (n.storage_units["p_nom_opt"]*n.storage_units["max_hours"]).groupby(n.storage_units["carrier"]).sum(), 84 | n.stores["e_nom_opt"].groupby(n.stores["carrier"]).sum() 85 | ]) 86 | 87 | s.storage[tuple(p.values())] = n.storage_units_t.p.sum().groupby(n.storage_units["carrier"]).sum() 88 | s.store[tuple(p.values())] = n.stores_t.p.sum().groupby(n.stores["carrier"]).sum() 89 | s.gen[tuple(p.values())] = n.generators_t.p.sum().groupby(n.generators["carrier"]).sum() 90 | 91 | #s.ambient.loc["Ambient", tuple(p.values())] = -(n.links_t.p0.sum().sum() + n.links_t.p1.sum().sum()) 92 | s.load[tuple(p.values())] = -n.loads_t.p.sum().groupby(n.loads.carrier).sum() 93 | 94 | def write(s): 95 | clean_and_save(s.nom, snakemake.output.e_nom_opt) 96 | return clean_and_save(pd.concat((s.gen, s.storage, s.store, s.load)), snakemake.output.e) 97 | 98 | 99 | 100 | ## Examine curtailment as % of available VRE energy 101 | 102 | class e_curtailed(object): 103 | def __init__(s): 104 | s.curtailed = pd.DataFrame() 105 | 106 | def add(s, sn, p, n): 107 | s.curtailed[tuple(p.values())] = pd.concat([ 108 | ((n.generators_t.p_max_pu.sum().multiply(n.generators.p_nom_opt) - n.generators_t.p.sum()) 109 | .groupby(n.generators.carrier).sum()), 110 | ((n.storage_units_t.inflow.sum() - n.storage_units_t.p.sum()) 111 | .groupby(n.storage_units.carrier).sum()) 112 | ]) 113 | 114 | def write(s): 115 | return clean_and_save(s.curtailed, snakemake.output.e_curtailed) 116 | 117 | 118 | class costs(object): 119 | def __init__(s): 120 | s.components = dict(Link=("p_nom_opt", "p0"), 121 | Generator=("p_nom_opt", "p"), 122 | StorageUnit=("p_nom_opt", "p"), 123 | Store=("e_nom_opt", "p"), 124 | Line=("s_nom_opt", None), 125 | Transformer=("s_nom_opt", None)) 126 | 127 | combinations=chain(*(product([n], c) 128 | for n, c in (("generators", opts['vre_techs'] + opts['conv_techs']), 129 | ("links", 
opts["heat_links"] + opts["heat_generators"] + opts['link_carriers']), 130 | ("storage_units", opts["storage_techs"]), 131 | ("stores", opts["store_techs"]), 132 | ("lines", opts["AC_carriers"])))) 133 | index = pd.MultiIndex.from_tuples([(comp, capmarg, carrier) 134 | for (comp, carrier), capmarg in product(combinations, ['capital', 'marginal'])]) 135 | s.costs2 = pd.DataFrame(index=index) 136 | s.costs = pd.DataFrame(index=(opts['vre_techs'] + opts['conv_techs'] + 137 | [t + ' marginal' for t in opts['conv_techs']] + 138 | opts['heat_links'] + opts['heat_generators'] + 139 | opts['link_carriers'] + opts['storage_techs'] + 140 | opts['store_techs'] + opts['AC_carriers'])) 141 | 142 | def add(s, sn, pa, n): 143 | costs = {} 144 | for c, (p_nom, p_attr) in zip(n.iterate_components(iterkeys(s.components), skip_empty=False), itervalues(s.components)): 145 | costs[(c.list_name, 'capital')] = (c.df[p_nom] * c.df.capital_cost).groupby(c.df.carrier).sum() 146 | if p_attr is not None: 147 | p = c.pnl[p_attr].sum() 148 | if c.name == 'StorageUnit': 149 | p = p.loc[p > 0] 150 | costs[(c.list_name, 'marginal')] = (p*c.df.marginal_cost).groupby(c.df.carrier).sum() 151 | costs = pd.concat(costs) 152 | 153 | s.costs2[tuple(pa.values())] = costs 154 | 155 | costs = costs.reset_index(level=0, drop=True) 156 | s.costs[tuple(pa.values())] = costs['capital'].add((costs['marginal'] 157 | .rename(columns={t: t + ' marginal' for t in opts['conv_techs']})), 158 | fill_value=0.) 159 | 160 | def write(s): 161 | clean_and_save(s.costs, snakemake.output.costs) 162 | return clean_and_save(s.costs2, snakemake.output.costs2) 163 | 164 | 165 | if __name__ == '__main__': 166 | summers = [p(), e(), e_curtailed(), costs()] 167 | networks = collect_networks() 168 | for scenario, params, fn in networks: 169 | n = load_network(fn, opts) 170 | for s in summers: s.add(scenario, params, n) 171 | 172 | for s in summers: s.write() 173 | -------------------------------------------------------------------------------- /Snakefile: -------------------------------------------------------------------------------- 1 | configfile: "config.yaml" 2 | 3 | localrules: all, base_network, add_electricity, add_sectors, plot_network, scenario_comparison # , extract_summaries 4 | 5 | wildcard_constraints: 6 | resarea="[a-zA-Z0-9]+", 7 | cost="[-a-zA-Z0-9]+", 8 | sectors="[+a-zA-Z0-9]+", 9 | opts="[-+a-zA-Z0-9]+" 10 | 11 | rule all: 12 | input: 13 | expand("results/version-" + str(config['version']) + "/plots/scenario_{param}.html", 14 | param=list(config['scenario'])) 15 | 16 | rule build_landuse_remove_protected_and_conservation_areas: 17 | input: 18 | landuse = "data/bundle/SALandCover_OriginalUTM35North_2013_GTI_72Classes/sa_lcov_2013-14_gti_utm35n_vs22b.tif", 19 | protected_areas = "data/bundle/SAPAD_OR_2017_Q2", 20 | conservation_areas = "data/bundle/SACAD_OR_2017_Q2" 21 | output: "resources/landuse_without_protected_conservation.tiff" 22 | benchmark: "benchmarks/landuse_remove_protected_and_conservation_areas" 23 | threads: 1 24 | resources: mem_mb=10000 25 | script: "scripts/build_landuse_remove_protected_and_conservation_areas.py" 26 | 27 | rule build_landuse_map_to_tech_and_supply_region: 28 | input: 29 | landuse = "resources/landuse_without_protected_conservation.tiff", 30 | supply_regions = "data/supply_regions/supply_regions.shp", 31 | resarea = lambda w: "data/bundle/" + config['data']['resarea'][w.resarea] 32 | output: 33 | raster = "resources/raster_{tech}_percent_{resarea}.tiff", 34 | area = "resources/area_{tech}_{resarea}.csv" 35 
| benchmark: "benchmarks/build_landuse_map_to_tech_and_supply_region/{tech}_{resarea}" 36 | threads: 1 37 | resources: mem_mb=10000 38 | script: "scripts/build_landuse_map_to_tech_and_supply_region.py" 39 | 40 | rule build_population: 41 | input: 42 | supply_regions='data/supply_regions/supply_regions.shp', 43 | population='data/bundle/South_Africa_100m_Population/ZAF15adjv4.tif' 44 | output: 'resources/population.csv' 45 | threads: 1 46 | resources: mem_mb=1000 47 | script: "scripts/build_population.py" 48 | 49 | if not config['hydro_inflow']['disable']: 50 | rule build_inflow_per_country: 51 | input: EIA_hydro_gen="data/EIA_hydro_generation_2011_2014.csv" 52 | output: "resources/hydro_inflow.csv" 53 | benchmark: "benchmarks/inflow_per_country" 54 | threads: 1 55 | resources: mem_mb=1000 56 | script: "scripts/build_inflow_per_country.py" 57 | 58 | rule build_topology: 59 | input: 60 | supply_regions='data/supply_regions/supply_regions.shp', 61 | centroids='data/supply_regions/centroids.shp', 62 | num_lines='data/num_lines.csv' 63 | output: 64 | buses='resources/buses.csv', 65 | lines='resources/lines.csv' 66 | threads: 1 67 | script: "scripts/build_topology.py" 68 | 69 | rule base_network: 70 | input: 71 | buses='resources/buses.csv', 72 | lines='resources/lines.csv', 73 | population='resources/population.csv' 74 | output: "networks/base_{opts}.nc" 75 | benchmark: "benchmarks/base_network_{opts}" 76 | threads: 1 77 | resources: mem_mb=1000 78 | script: "scripts/base_network.py" 79 | 80 | rule add_electricity: 81 | input: 82 | base_network='networks/base_{opts}.nc', 83 | supply_regions='data/supply_regions/supply_regions.shp', 84 | load='data/bundle/SystemEnergy2009_13.csv', 85 | wind_profiles='data/bundle/Supply area normalised power feed-in for Wind.xlsx', 86 | pv_profiles='data/bundle/Supply area normalised power feed-in for PV.xlsx', 87 | wind_area='resources/area_wind_{resarea}.csv', 88 | solar_area='resources/area_solar_{resarea}.csv', 89 | existing_generators="data/Existing Power Stations SA.xlsx", 90 | hydro_inflow="resources/hydro_inflow.csv", 91 | tech_costs="data/technology_costs.xlsx" 92 | output: "networks/elec_{cost}_{resarea}_{opts}.nc" 93 | benchmark: "benchmarks/add_electricity/elec_{cost}_{resarea}_{opts}" 94 | threads: 1 95 | resources: mem_mb=1000 96 | script: "scripts/add_electricity.py" 97 | 98 | rule add_sectors: 99 | input: 100 | network="networks/elec_{cost}_{resarea}_{opts}.nc" 101 | # emobility="data/emobility" 102 | output: "networks/sector_{cost}_{resarea}_{sectors}_{opts}.nc" 103 | threads: 1 104 | resources: mem_mb=1000 105 | script: "scripts/add_sectors.py" 106 | 107 | rule solve_network: 108 | input: network="networks/sector_{cost}_{resarea}_{sectors}_{opts}.nc" 109 | output: "results/version-" + str(config['version']) + "/networks/{cost}_{resarea}_{sectors}_{opts}.nc" 110 | shadow: "shallow" 111 | log: 112 | gurobi="logs/{cost}_{resarea}_{sectors}_{opts}_gurobi.log", 113 | python="logs/{cost}_{resarea}_{sectors}_{opts}_python.log" 114 | benchmark: "benchmarks/solve_network/{cost}_{resarea}_{sectors}_{opts}" 115 | threads: 4 116 | resources: mem_mb=19000 # for electricity only 117 | script: "scripts/solve_network.py" 118 | 119 | rule plot_network: 120 | input: 121 | network='results/version-' + str(config['version']) + '/networks/{cost}_{resarea}_{sectors}_{opts}.nc', 122 | supply_regions='data/supply_regions/supply_regions.shp', 123 | resarea=lambda w: 'data/bundle/' + config['data']['resarea'][w.resarea] 124 | output: 125 | 
only_map=touch('results/version-' + str(config['version']) + '/plots/network_{cost}_{resarea}_{sectors}_{opts}_{attr}'), 126 |         ext=touch('results/version-' + str(config['version']) + '/plots/network_{cost}_{resarea}_{sectors}_{opts}_{attr}_ext') 127 |     params: ext=['png', 'pdf'] 128 |     script: "scripts/plot_network.py" 129 | 130 | rule scenario_comparison: 131 |     input: 132 |         expand('results/version-{version}/plots/network_{cost}_{resarea}_{sectors}_{opts}_{attr}_ext', 133 |                version=config['version'], 134 |                attr=['p_nom'], 135 |                **config['scenario']) 136 |     output: 137 |         html='results/version-' + str(config['version']) + '/plots/scenario_{param}.html' 138 |     params: 139 |         tmpl="network_[cost]_[resarea]_[sectors]_[opts]_[attr]_ext", 140 |         plot_dir='results/version-' + str(config['version']) + '/plots' 141 |     script: "scripts/scenario_comparison.py" 142 | 143 | def input_make_summary(w): 144 |     # It's mildly hacky to include the separate costs input as first entry 145 |     return (expand("results/version-" + str(config['version']) + "/networks/{cost}_{resarea}_{sectors}_{opts}.nc", 146 |                    **{k: config["scenario"][k] if getattr(w, k) == "all" else getattr(w, k) 147 |                       for k in ["cost", "resarea", "sectors", "opts"]})) 148 | 149 | rule make_summary: 150 |     input: input_make_summary 151 |     output: directory("results/version-" + str(config['version']) + "/summaries/{cost}_{resarea}_{sectors}_{opts}") 152 |     script: "scripts/make_summary.py" 153 | 154 | # extract_summaries and plot_costs need to be updated before they can be used again 155 | # 156 | # rule extract_summaries: 157 | #     input: 158 | #         expand("results/version-{version}/networks/{cost}_{resarea}_{sectors}_{opts}.nc", 159 | #                version=config['version'], 160 | #                **config['scenario']) 161 | #     output: 162 | #         **{n: "results/version-{version}/summaries/{}-summary.csv".format(n, version=config['version']) 163 | #            for n in ['costs', 'costs2', 'e_curtailed', 'e_nom_opt', 'e', 'p_nom_opt']} 164 | #     params: 165 | #         scenario_tmpl="[cost]_[resarea]_[sectors]_[opts]", 166 | #         scenarios=config['scenario'] 167 | #     script: "scripts/extract_summaries.py" 168 | 169 | # rule plot_costs: 170 | #     input: 'results/summaries/costs2-summary.csv' 171 | #     output: 172 | #         expand('results/plots/costs_{cost}_{resarea}_{sectors}_{opt}', 173 | #                **dict(chain(config['scenario'].items(), (('{param}'))) 174 | #         touch('results/plots/scenario_plots') 175 | #     params: 176 | #         tmpl="results/plots/costs_[cost]_[resarea]_[sectors]_[opt]" 177 | #         exts=["pdf", "png"] 178 | #     script: "scripts/plot_costs.py" 179 | 180 | 181 | # Local Variables: 182 | # mode: python 183 | # End: 184 | -------------------------------------------------------------------------------- /scripts/solve_network.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | 4 | import logging 5 | logging.basicConfig(filename=snakemake.log.python, level=logging.INFO) 6 | logger = logging.getLogger(__name__) 7 | import pypsa 8 | 9 | if 'tmpdir' in snakemake.config['solving']: 10 |     # PYOMO should write its lp files into tmp here 11 |     tmpdir = snakemake.config['solving']['tmpdir'] 12 |     import os 13 |     if not os.path.isdir(tmpdir): 14 |         os.mkdir(tmpdir) 15 |     from pyutilib.services import TempfileManager 16 |     TempfileManager.tempdir = tmpdir 17 | 18 | def prepare_network(n): 19 |     solve_opts = snakemake.config['solving']['options'] 20 |     if 'clip_p_max_pu' in solve_opts: 21 |         for df in (n.generators_t.p_max_pu, n.storage_units_t.inflow): 22 |             df.where(df>solve_opts['clip_p_max_pu'], other=0.,
inplace=True) 23 | 24 | if solve_opts.get('load_shedding'): 25 | n.add("Carrier", "Load") 26 | load_i = n.madd("Generator", n.buses.index, suffix=" Load", 27 | bus=n.buses.index, 28 | carrier='load', 29 | marginal_cost=1.0e5 * snakemake.config['costs']['EUR_to_ZAR'], 30 | # intersect between macroeconomic and surveybased 31 | # willingness to pay 32 | # http://journal.frontiersin.org/article/10.3389/fenrg.2015.00055/full 33 | p_nom=1e6) 34 | 35 | if 'SAFE' in snakemake.wildcards.opts.split('-'): 36 | # there must be no load shedding in the extra hour introduced in the SAFE scenario 37 | load_p_max_pu = pd.DataFrame(1., index=n.snapshots, columns=load_i) 38 | load_p_max_pu.iloc[-1, :] = 0. 39 | 40 | n.generators_t.p_max_pu = pd.concat([n.generators_t.p_max_pu, load_p_max_pu], axis=1) 41 | 42 | if solve_opts.get('noisy_costs'): 43 | for t in n.iterate_components(): 44 | #if 'capital_cost' in t.df: 45 | # t.df['capital_cost'] += 1e1 + 2.*(np.random.random(len(t.df)) - 0.5) 46 | if 'marginal_cost' in t.df: 47 | t.df['marginal_cost'] += 1e-2 + 2e-3*(np.random.random(len(t.df)) - 0.5) 48 | 49 | if solve_opts.get('nhours'): 50 | nhours = solve_opts['nhours'] 51 | n = n[:solve_opts['nhours'], :] 52 | n.snapshot_weightings[:] = 8760./nhours 53 | 54 | return n 55 | 56 | def solve_network(n): 57 | def add_opts_constraints(n): 58 | opts = snakemake.wildcards.opts.split('-') 59 | 60 | if 'BAU' in opts: 61 | mincaps = snakemake.config['electricity']['BAU_mincapacities'] 62 | def bau_mincapacities_rule(model, carrier): 63 | gens = n.generators.index[n.generators.p_nom_extendable & (n.generators.carrier == carrier)] 64 | return sum(model.generator_p_nom[gen] for gen in gens) >= mincaps[carrier] 65 | n.model.bau_mincapacities = pypsa.opt.Constraint(list(mincaps), rule=bau_mincapacities_rule) 66 | 67 | def fix_lines(n, lines_i=None, links_i=None): # , fix=True): 68 | if lines_i is not None and len(lines_i) > 0: 69 | s_nom = n.lines.s_nom.where( 70 | n.lines.type == '', 71 | np.sqrt(3) * n.lines.type.map(n.line_types.i_nom) * n.lines.bus0.map(n.buses.v_nom) * n.lines.num_parallel 72 | ) 73 | for l in lines_i: 74 | n.model.passive_branch_s_nom["Line", l].fix(s_nom.at[l]) 75 | # n.model.passive_branch_s_nom[l].fixed = fix 76 | if isinstance(n.opt, pypsa.opf.PersistentSolver): 77 | n.opt.update_var(n.model.passive_branch_s_nom) 78 | 79 | if links_i is not None and len(links_i) > 0: 80 | for l in links_i: 81 | n.model.link_p_nom[l].fix(n.links.at[l, 'p_nom']) 82 | # n.model.link_p_nom[l].fixed = fix 83 | if isinstance(n.opt, pypsa.opf.PersistentSolver): 84 | n.opt.update_var(n.model.link_p_nom) 85 | 86 | solve_opts = snakemake.config['solving']['options'] 87 | 88 | solver_options = snakemake.config['solving']['solver'].copy() 89 | solver_options['logfile'] = snakemake.log.gurobi 90 | solver_name = solver_options.pop('name') 91 | 92 | def run_lopf(n, allow_warning_status=False, fix_zero_lines=False): 93 | if not hasattr(n, 'opt') or not isinstance(n.opt, pypsa.opf.PersistentSolver): 94 | pypsa.opf.network_lopf_build_model(n, formulation=solve_opts['formulation']) 95 | add_opts_constraints(n) 96 | 97 | pypsa.opf.network_lopf_prepare_solver(n, solver_name=solver_name) 98 | 99 | if fix_zero_lines: 100 | fix_lines_b = (n.lines.s_nom_opt == 0.) & n.lines.s_nom_extendable 101 | n.lines.loc[fix_lines_b & (n.lines.type == ''), 's_nom'] = 0. 102 | n.lines.loc[fix_lines_b & (n.lines.type != ''), 'num_parallel'] = 0. 103 | 104 | fix_links_b = (n.links.p_nom_opt == 0.) 
& n.links.p_nom_extendable 105 | n.links.loc[fix_links_b, 'p_nom'] = 0. 106 | 107 | # WARNING: We are not unfixing these later 108 | fix_lines(n, lines_i=n.lines.index[fix_lines_b], links_i=n.links.index[fix_links_b]) 109 | 110 | status, termination_condition = \ 111 | pypsa.opf.network_lopf_solve(n, 112 | solver_options=solver_options, 113 | formulation=solve_opts['formulation']) 114 | 115 | assert status == "ok" or allow_warning_status and status == 'warning', \ 116 | ("network_lopf did abort with status={} " 117 | "and termination_condition={}" 118 | .format(status, termination_condition)) 119 | 120 | return status, termination_condition 121 | 122 | lines_ext_b = n.lines.s_nom_extendable 123 | if lines_ext_b.any(): 124 | # puh: ok, we need to iterate, since there is a relation 125 | # between s/p_nom and r, x for branches. 126 | msq_threshold = 0.01 127 | lines = pd.DataFrame(n.lines[['r', 'x', 'type', 'num_parallel']]) 128 | 129 | lines['s_nom'] = ( 130 | np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) * n.lines.bus0.map(n.buses.v_nom) * n.lines.num_parallel 131 | ).where(n.lines.type != '', n.lines['s_nom']) 132 | 133 | lines_ext_typed_b = (n.lines.type != '') & lines_ext_b 134 | lines_ext_untyped_b = (n.lines.type == '') & lines_ext_b 135 | 136 | def update_line_parameters(n, zero_lines_below=10, fix_zero_lines=False): 137 | if zero_lines_below > 0: 138 | n.lines.loc[n.lines.s_nom_opt < zero_lines_below, 's_nom_opt'] = 0. 139 | n.links.loc[n.links.p_nom_opt < zero_lines_below, 'p_nom_opt'] = 0. 140 | 141 | if lines_ext_untyped_b.any(): 142 | for attr in ('r', 'x'): 143 | n.lines.loc[lines_ext_untyped_b, attr] = ( 144 | lines[attr].multiply(lines['s_nom']/n.lines['s_nom_opt']) 145 | ) 146 | 147 | if lines_ext_typed_b.any(): 148 | n.lines.loc[lines_ext_typed_b, 'num_parallel'] = ( 149 | lines['num_parallel'].multiply(n.lines['s_nom_opt']/lines['s_nom']) 150 | ) 151 | logger.debug("lines.num_parallel={}".format(n.lines.loc[lines_ext_typed_b, 'num_parallel'])) 152 | 153 | if isinstance(n.opt, pypsa.opf.PersistentSolver): 154 | n.calculate_dependent_values() 155 | 156 | assert solve_opts['formulation'] == 'kirchhoff', \ 157 | "Updating persistent solvers has only been implemented for the kirchhoff formulation for now" 158 | 159 | n.opt.remove_constraint(n.model.cycle_constraints) 160 | del n.model.cycle_constraints_index 161 | del n.model.cycle_constraints_index_0 162 | del n.model.cycle_constraints_index_1 163 | del n.model.cycle_constraints 164 | 165 | pypsa.opf.define_passive_branch_flows_with_kirchhoff(n, n.snapshots, skip_vars=True) 166 | n.opt.add_constraint(n.model.cycle_constraints) 167 | 168 | iteration = 1 169 | 170 | lines['s_nom_opt'] = lines['s_nom'] 171 | status, termination_condition = run_lopf(n, allow_warning_status=True) 172 | 173 | def msq_diff(n): 174 | lines_err = np.sqrt(((n.lines['s_nom_opt'] - lines['s_nom_opt'])**2).mean())/lines['s_nom_opt'].mean() 175 | logger.info("Mean square difference after iteration {} is {}".format(iteration, lines_err)) 176 | return lines_err 177 | 178 | min_iterations = solve_opts.get('min_iterations', 2) 179 | max_iterations = solve_opts.get('max_iterations', 999) 180 | while msq_diff(n) > msq_threshold or iteration < min_iterations: 181 | if iteration >= max_iterations: 182 | logger.info("Iteration {} beyond max_iterations {}. 
Stopping ...".format(iteration, max_iterations)) 183 | break 184 | 185 | update_line_parameters(n) 186 | lines['s_nom_opt'] = n.lines['s_nom_opt'] 187 | iteration += 1 188 | 189 | # Not really needed, could also be taken out 190 | n.export_to_netcdf(snakemake.output[0]) 191 | 192 | status, termination_condition = run_lopf(n, allow_warning_status=True) 193 | 194 | update_line_parameters(n, zero_lines_below=500) 195 | 196 | status, termination_condition = run_lopf(n, fix_zero_lines=True, allow_warning_status=True) 197 | 198 | # Drop zero lines from network 199 | zero_lines_i = n.lines.index[(n.lines.s_nom_opt == 0.) & n.lines.s_nom_extendable] 200 | if len(zero_lines_i): 201 | n.mremove("Line", zero_lines_i) 202 | zero_links_i = n.links.index[(n.links.p_nom_opt == 0.) & n.links.p_nom_extendable] 203 | if len(zero_links_i): 204 | n.mremove("Link", zero_links_i) 205 | 206 | if status != 'ok': 207 | # save a backup 208 | backup_fn = snakemake.output[0][:-3] + "_suboptimal.h5" 209 | n.export_to_netcdf(backup_fn) 210 | logger.error("Last solving step returned with status '{}': Aborting. A backup is at {}." 211 | .format(status, backup_fn)) 212 | raise AssertionError() 213 | 214 | return n 215 | 216 | if __name__ == "__main__": 217 | n = pypsa.Network(snakemake.input[0]) 218 | 219 | n = prepare_network(n) 220 | n = solve_network(n) 221 | 222 | n.export_to_netcdf(snakemake.output[0]) 223 | -------------------------------------------------------------------------------- /scripts/add_sectors.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | import pypsa 4 | import pandas as pd 5 | import numpy as np 6 | import os 7 | from six import iteritems 8 | 9 | from vresutils.costdata import annuity, USD2013_to_EUR2013 10 | 11 | from _helpers import pdbcast 12 | 13 | def normed(s): return s/s.sum() 14 | 15 | ########################################################################################### 16 | 17 | def generate_periodic_profiles(dt_index, freq="H", weekly_profile=range(24*7)): 18 | 19 | #Give a 24*7 long list of weekly hourly profiles 20 | weekly_profile = pd.Series(weekly_profile, range(24*7)) 21 | hour_of_the_week = pd.Series(24*dt_index.weekday+dt_index.hour, dt_index) 22 | 23 | return hour_of_the_week.map(weekly_profile) 24 | 25 | 26 | def add_transport(n, BEV=True, V2G=True): 27 | buses = n.buses.index[n.buses.population > 0.] 
28 | population = n.buses.loc[buses, 'population'] 29 | 30 | opts = snakemake.config['transport'] 31 | 32 | emobility = snakemake.input.emobility 33 | weekly_profile_kfz = pd.read_csv(os.path.join(emobility, "KFZ__count"), skiprows=2)["count"] 34 | weekly_profile_pkw = pd.read_csv(os.path.join(emobility, "Pkw__count"), skiprows=2)["count"] 35 | 36 | transport_demand = ( 37 | normed(generate_periodic_profiles(n.snapshots, weekly_profile=weekly_profile_kfz.values)) 38 | * (opts['energy_total'] / opts.get('efficiency_gain', 1.)) 39 | ) 40 | 41 | def renormalize(s, vmax, vmean): 42 | return vmax - (vmax - vmean) * (s - s.min())/(s.mean() - s.min()) 43 | battery_availability = generate_periodic_profiles( 44 | n.snapshots, 45 | weekly_profile=renormalize( 46 | weekly_profile_pkw, 47 | vmax=opts['availability_max'], 48 | vmean=opts['availability_mean'] 49 | )) 50 | 51 | 52 | n.add("Carrier", "Li ion") 53 | buses_ev_battery = n.madd("Bus", buses, suffix=" EV battery", bus=buses, carrier="Li ion") 54 | n.madd("Load", buses, suffix=" EV battery", 55 | bus=buses_ev_battery, 56 | p_set=pdbcast(transport_demand, normed(population))) 57 | 58 | cars = normed(population) * opts['total_cars'] 59 | charging_discharging_power = cars * opts['car_battery_p_nom'] 60 | 61 | n.madd("Link", buses, suffix=" BEV charger", 62 | bus0=buses, bus1=buses_ev_battery, 63 | p_nom=charging_discharging_power, 64 | efficiency=opts['efficiency'], 65 | p_max_pu=battery_availability, 66 | #These were set non-zero to find LU infeasibility when availability = 0.25 67 | #p_nom_extendable=True, 68 | #p_nom_min=p_nom, 69 | #capital_cost=1e6, #i.e. so high it only gets built where necessary 70 | ) 71 | 72 | if BEV: 73 | n.madd("Store", buses, suffix=" battery storage", 74 | bus=buses_ev_battery, 75 | e_cyclic=True, 76 | e_nom=cars * opts['car_battery_e_nom'], 77 | standing_loss=opts['standing_loss']) 78 | 79 | if V2G: 80 | n.madd("Link", buses, suffix=" V2G", 81 | bus0=buses_ev_battery, bus1=buses, 82 | p_nom=charging_discharging_power, 83 | p_max_pu=battery_availability, 84 | efficiency=opts['efficiency']) 85 | 86 | #TO DO 87 | # network.add("Load",node + " transport fuel cell", 88 | # bus=node + " H2", 89 | # p_set=options['transport_fuel_cell_share']/0.58*transport[node].values[:8760], 90 | # ) 91 | 92 | 93 | def add_gas_infrastructure(n, costs): 94 | buses = n.buses.index[n.buses.population > 0.] 
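# The links and stores added below annualise overnight investment costs with
# vresutils' annuity factor; assuming annuity(n, r) = r / (1 - (1 + r)**-n),
# a rough check of the electrolyser figure (7% discount rate purely for
# illustration):
#
#     annuity(20., 0.07)                                    # ~0.0944 per year
#     (0.0944 + 0.017) * 300. * 1000. * USD2013_to_EUR2013  # EUR/MW/a
#
# i.e. 20 a lifetime, 1.7%/a FOM and 300 USD/kW overnight cost, with the *1000
# converting kW to MW (PyPSA costs are per MW).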
95 | discountrate = snakemake.config['costs']['discountrate'] 96 | 97 | n.add("Carrier", "H2") 98 | buses_h2 = n.madd("Bus", buses, suffix=" H2", carrier="H2") 99 | n.madd("Link", buses, suffix=" H2 Electrolysis", 100 | bus0=buses, bus1=buses_h2, 101 | p_nom_extendable=True, 102 | efficiency=0.75, 103 | #Cost from http://www.nrel.gov/docs/fy09osti/45873.pdf "Future Timeframe" 104 | #(same source as Budishak) 105 | capital_cost=(annuity(20.,discountrate)+0.017)*300.*1000.*USD2013_to_EUR2013) 106 | n.madd( "Link", buses, suffix=" H2 Fuel Cell", 107 | bus0=buses_h2, bus1=buses, 108 | p_nom_extendable=True, 109 | efficiency=0.58, 110 | #Cost from http://www.nrel.gov/docs/fy09osti/45873.pdf "Future Timeframe" 111 | #(same source as Budishak) 112 | #NB: Costs refer to electrical side, so must multiply by efficiency 113 | capital_cost=(annuity(20.,discountrate)+0.017)*437.*0.58*1000.*USD2013_to_EUR2013) 114 | n.madd("Store", buses_h2, suffix=" Store", 115 | bus=buses_h2, 116 | e_nom_extendable=True, 117 | e_cyclic=True, 118 | capital_cost=annuity(20.,discountrate)*11.2*1000.*USD2013_to_EUR2013) 119 | 120 | 121 | # TODO 122 | #OCGT bus w/gas generation link 123 | # network.add("Bus", 124 | # node + " OCGT", 125 | # carrier="OCGT") 126 | # network.add("Link", 127 | # node + " OCGT", 128 | # bus0=node+ " OCGT", 129 | # bus1=node, 130 | # capital_cost=cc['efi']*cc['wki']*Nyears, 131 | # p_nom_extendable=True, 132 | # efficiency=cc['efi']) 133 | 134 | ############### 135 | #### heat 136 | 137 | def compute_heat_demand(n): 138 | # shares ={} 139 | # file_name = snakemake.input.heating_residential 140 | # shares["Residential"] = (pd.read_csv(file_name,index_col=0).T["DE"]).T 141 | # file_name = snakemake.input.heating_tertiary 142 | # shares["Services"] = (pd.read_csv(file_name,index_col=0).T["DE"]).T 143 | 144 | # sectors = ["Residential","Services"] 145 | 146 | # for sector in sectors: 147 | # energy_totals[sector +" Combustion"] = energy_totals[sector] - energy_totals[sector + " Electricity"] 148 | # energy_totals[sector +" Space"] = shares[sector].space*energy_totals[sector + " Combustion"] 149 | # energy_totals[sector +" Water"] = shares[sector].water*energy_totals[sector + " Combustion"] 150 | 151 | # energy_totals["Space Heating"] = energy_totals[[sector + " Space" for sector in sectors]].sum() 152 | # energy_totals["Water Heating"] = energy_totals[[sector + " Water" for sector in sectors]].sum() 153 | 154 | heating_space = 0. 155 | heating_water = 0. 
# TODO 156 | 157 |     heat_demand = heating_water / 8760 158 | 159 |     if heating_space > 0.: 160 |         daily_heat_demand = pd.read_csv(snakemake.input.heat_demand, index_col=0, parse_dates=True) 161 |         intraday_profiles = pd.read_csv(snakemake.input.heat_profile, index_col=0) 162 |         heat_demand_space = normed(daily_heat_demand.reindex(n.snapshots, method='ffill').multiply( 163 |             generate_periodic_profiles( 164 |                 n.snapshots, 165 |                 weekly_profile=np.r_[np.repeat(intraday_profiles['weekday'], 5), np.repeat(intraday_profiles['weekend'], 2)]), 166 |             axis=0)) * heating_space 167 | 168 |         # cop = (pd.read_csv(snakemake.input.cop, index_col=0, parse_dates=True)) 169 | 170 |         heat_demand = heat_demand + heat_demand_space 171 | 172 |     return heat_demand 173 | 174 | def add_water_heating(n): 175 |     ##### CHP Parameters 176 |     ###### electrical efficiency with no heat output 177 |     eta_elec = 0.468 178 |     ###### loss of fuel for each addition of heat 179 |     c_v = 0.15 180 |     ###### backpressure ratio 181 |     c_m = 0.75 182 |     ###### ratio of max heat output to max electrical output 183 |     p_nom_ratio = 1. 184 | 185 | 186 |     heat_demand = compute_heat_demand(n) 187 | 188 |     network.add("Carrier", "heat") 189 | 190 |     network.add("Bus", node + " heat", carrier="heat") 191 |     network.add("Link", node + " heat pump", 192 |                 bus0=node, 193 |                 bus1=node + " heat", 194 |                 efficiency=cop[node], #cop for 2011 time_dep_hp_cop 195 |                 capital_cost=(annuity(20,discountrate)+0.015)*3.*1.050e6, #20a, 1.5% FOM, 1050 EUR/kWth from [HP] NB: PyPSA uses bus0 for p_nom restriction, hence factor 3 to get 3150 EUR/kWel 196 |                 p_nom_extendable=True 197 |                 ) 198 |     network.add("Load", node + " heat", 199 |                 bus=node + " heat", 200 |                 p_set= heat_demand[node] 201 |                 ) 202 |     network.add("Link", node + " resistive heater", 203 |                 bus0=node, 204 |                 bus1=node + " heat", 205 |                 efficiency=0.9, 206 |                 capital_cost=(annuity(20,discountrate)+0.02)*0.9*1.e5, #100 EUR/kW_th, 2% FOM from Schaber thesis 207 |                 p_nom_extendable=True, 208 |                 ) 209 | 210 | 211 |     ##### H2 bus w/methanation 212 |     # methanation 213 |     network.add("Link", 214 |                 node + " Sabatier", 215 |                 bus0=node+" H2", 216 |                 bus1=node+" OCGT", 217 |                 p_nom_extendable=True, 218 |                 #Efficiency from Katrin Schaber PhD thesis 219 |                 efficiency=0.77, 220 |                 #Costs from Katrin Schaber PhD thesis; comparable with HP (1.5e6 including H2 electrolysis) 221 |                 capital_cost=(annuity(20.,discountrate)+0.02)*1.1e6*Nyears) 222 |     # gas boiler 223 |     network.add("Link", node + " gas boiler", 224 |                 p_nom_extendable=True, 225 |                 bus0=node + " OCGT", 226 |                 bus1=node + " heat", 227 |                 capital_cost=(annuity(20,0.07)+0.01)*0.9*3.e5, #300 EUR/kW_th, 1% FOM from Schaber thesis, 20a from HP 228 |                 efficiency=0.9,) 229 |     # CHP - include by default? Now it definitely is!
230 | network.add("Link", node + " CHP electric", 231 | bus0=node+ " OCGT", 232 | bus1=node, 233 | capital_cost=(annuity(25,0.07)+0.03)*1.4e6*eta_elec, #From HP decentral 234 | efficiency=eta_elec, 235 | p_nom_extendable=True) 236 | network.add("Link", node + " CHP heat", 237 | p_nom_extendable=True, 238 | bus0=node + " OCGT", 239 | bus1=node + " heat", 240 | capital_cost=0., 241 | efficiency=eta_elec/c_v) 242 | 243 | 244 | 245 | ##### heat flexibilities: 246 | if "TES" in flexibilities: 247 | network.add("Carrier","water tanks") 248 | network.add("Bus", node + " water tanks", carrier="water tanks") 249 | network.add("Link", node + " water tanks charger", 250 | bus0=node + " heat", 251 | bus1=node + " water tanks", 252 | efficiency=0.9, 253 | capital_cost=0., 254 | p_nom_extendable=True 255 | ) 256 | network.add("Link", node + " water tanks discharger", 257 | bus0=node + " water tanks", 258 | bus1=node + " heat", 259 | efficiency=0.9, 260 | capital_cost=0., 261 | p_nom_extendable=True 262 | ) 263 | network.add("Store", node + " water tank", 264 | bus=node + " water tanks", 265 | e_cyclic=True, 266 | e_nom_extendable=True, 267 | standing_loss=1-np.exp(-1/(24.*180)), #options["tes_tau"])), # [HP] 180 day time constant for centralised, 3 day for decentralised 268 | capital_cost=(annuity(40,discountrate)+0.01)*20/(1.17e-3*40), #[HP] 40 years, 20 EUR/m^3 in EUR/MWh for 40 K diff and 1.17 kWh/m^2, 1% FOM 269 | ) 270 | #################################################################### 271 | 272 | 273 | if __name__ == "__main__": 274 | n = pypsa.Network(snakemake.input.network) 275 | sectors = set(snakemake.wildcards.sectors.split('+')) 276 | 277 | if 'EV' in sectors or 'BEV' in sectors: 278 | add_transport(n, BEV='BEV' in sectors, V2G='V2G' in sectors) 279 | 280 | # if 'WEH' in sectors: 281 | # add_water_heating(n) 282 | 283 | n.export_to_netcdf(snakemake.output[0]) 284 | 285 | -------------------------------------------------------------------------------- /scripts/plot_network.py: -------------------------------------------------------------------------------- 1 | if 'snakemake' not in globals(): 2 | from vresutils import Dict 3 | from snakemake.rules import expand 4 | import yaml 5 | snakemake = Dict() 6 | snakemake.wildcards = Dict(#cost=#'IRP2016-Apr2016', 7 | cost='csir-aggressive', 8 | mask='redz', 9 | sectors='E', 10 | opts='Co2L', 11 | attr='p_nom') 12 | snakemake.input = Dict(network='../results/version-0.5/networks/{cost}_{mask}_{sectors}_{opts}.nc'.format(**snakemake.wildcards), 13 | supply_regions='../data/supply_regions/supply_regions.shp', 14 | resarea = "../data/bundle/REDZ_DEA_Unpublished_Draft_2015") 15 | snakemake.output = (expand('../results/plots/network_{cost}_{mask}_{sectors}_{opts}_{attr}.pdf', 16 | **snakemake.wildcards) + 17 | expand('../results/plots/network_{cost}_{mask}_{sectors}_{opts}_{attr}_ext.pdf', 18 | **snakemake.wildcards)) 19 | snakemake.params = Dict(ext=['png']) 20 | with open('../config.yaml') as f: 21 | snakemake.config = yaml.load(f) 22 | else: 23 | import matplotlib as mpl 24 | mpl.use('Agg') 25 | 26 | from add_electricity import add_emission_prices 27 | from _helpers import load_network, aggregate_p, aggregate_costs 28 | from vresutils import plot as vplot 29 | 30 | 31 | import os 32 | import pypsa 33 | import pandas as pd 34 | import geopandas as gpd 35 | import numpy as np 36 | from itertools import product, chain 37 | from six.moves import map, zip 38 | from six import itervalues, iterkeys 39 | from collections import OrderedDict as odict 40 | 41 
| import matplotlib.pyplot as plt 42 | import matplotlib as mpl 43 | from matplotlib.patches import Circle, Ellipse 44 | from matplotlib.legend_handler import HandlerPatch 45 | import seaborn as sns 46 | to_rgba = mpl.colors.colorConverter.to_rgba 47 | 48 | def make_handler_map_to_scale_circles_as_in(ax, dont_resize_actively=False): 49 |     fig = ax.get_figure() 50 |     def axes2pt(): 51 |         return np.diff(ax.transData.transform([(0,0), (1,1)]), axis=0)[0] * (72./fig.dpi) 52 | 53 |     ellipses = [] 54 |     if not dont_resize_actively: 55 |         def update_width_height(event): 56 |             dist = axes2pt() 57 |             for e, radius in ellipses: e.width, e.height = 2. * radius * dist 58 |         fig.canvas.mpl_connect('resize_event', update_width_height) 59 |         ax.callbacks.connect('xlim_changed', update_width_height) 60 |         ax.callbacks.connect('ylim_changed', update_width_height) 61 | 62 |     def legend_circle_handler(legend, orig_handle, xdescent, ydescent, 63 |                               width, height, fontsize): 64 |         w, h = 2. * orig_handle.get_radius() * axes2pt() 65 |         e = Ellipse(xy=(0.5*width-0.5*xdescent, 0.5*height-0.5*ydescent), width=w, height=w) 66 |         ellipses.append((e, orig_handle.get_radius())) 67 |         return e 68 |     return {Circle: HandlerPatch(patch_func=legend_circle_handler)} 69 | 70 | def make_legend_circles_for(sizes, scale=1.0, **kw): 71 |     return [Circle((0,0), radius=(s/scale)**0.5, **kw) for s in sizes] 72 | 73 | plt.style.use(['classic', 'seaborn-white', 74 |                {'axes.grid': False, 'grid.linestyle': '--', 'grid.color': u'0.6', 75 |                 'hatch.color': 'white', 76 |                 'patch.linewidth': 0.5, 77 |                 'font.size': 12, 78 |                 'legend.fontsize': 'medium', 79 |                 'lines.linewidth': 1.5, 80 |                 'pdf.fonttype': 42, 81 |                 # 'font.family': 'Times New Roman' 82 |                 }]) 83 | 84 | opts = snakemake.config['plotting'] 85 | map_figsize = opts['map']['figsize'] 86 | map_boundaries = opts['map']['boundaries'] 87 | 88 | n = load_network(snakemake.input.network, opts) 89 | 90 | scenario_opts = snakemake.wildcards.opts.split('-') 91 | if 'Ep' in scenario_opts or 'Co2L' in scenario_opts: 92 |     # Subtract emission prices 93 |     add_emission_prices(n, - pd.Series(snakemake.config['costs']['emission_prices']), 94 |                         exclude_co2='Co2L' in scenario_opts) 95 | 96 | supply_regions = gpd.read_file(snakemake.input.supply_regions).buffer(-0.005) #.to_crs(n.crs) 97 | renewable_regions = gpd.read_file(snakemake.input.resarea).to_crs(supply_regions.crs) 98 | 99 | ## DATA 100 | line_colors = {'cur': "purple", 101 |                'exp': to_rgba("red", 0.7)} 102 | tech_colors = opts['tech_colors'] 103 | 104 | if snakemake.wildcards.attr == 'p_nom': 105 |     # bus_sizes = n.generators_t.p.sum().loc[n.generators.carrier == "load"].groupby(n.generators.bus).sum() 106 |     bus_sizes = pd.concat((n.generators.query('carrier != "load"').groupby(['bus', 'carrier']).p_nom_opt.sum(), 107 |                            n.storage_units.groupby(['bus', 'carrier']).p_nom_opt.sum())) 108 |     line_widths_exp = pd.concat(dict(Line=n.lines.s_nom_opt, Link=n.links.p_nom_opt)) 109 |     line_widths_cur = pd.concat(dict(Line=n.lines.s_nom_min, Link=n.links.p_nom_min)) 110 | else: 111 |     raise NotImplementedError("plotting of {} has not been implemented yet".format(snakemake.wildcards.attr)) 112 | 113 | 114 | line_colors_with_alpha = \ 115 |     ((line_widths_cur / pd.concat(dict(Line=n.lines.s_nom, Link=n.links.p_nom)) > 1e-3) 116 |      .map({True: line_colors['cur'], False: to_rgba(line_colors['cur'], 0.)})) 117 | 118 | ## FORMAT 119 | linewidth_factor = opts['map'][snakemake.wildcards.attr]['linewidth_factor'] 120 | bus_size_factor = opts['map'][snakemake.wildcards.attr]['bus_size_factor'] 121 |
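# make_legend_circles_for above sets radius = sqrt(size/scale), so the *area*
# of a legend circle scales linearly with the capacity it stands for -- a quick
# check under that assumption (sizes illustrative):
#
#     c1, c2 = make_legend_circles_for([1e3, 2e3], scale=bus_size_factor)
#     (c2.get_radius() / c1.get_radius())**2   # -> 2.0: twice the MW, twice the area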
122 | ## PLOT 123 | fig, ax = plt.subplots(figsize=map_figsize) 124 | vplot.shapes(supply_regions.geometry, facecolors='k', outline='k', ax=ax, rasterized=True) 125 | vplot.shapes(renewable_regions.geometry, facecolors='gray', alpha=0.2, ax=ax, rasterized=True) 126 | n.plot(line_widths=line_widths_exp/linewidth_factor, 127 |        line_colors=dict(Line=line_colors['exp'], Link=line_colors['exp']), 128 |        bus_sizes=bus_sizes/bus_size_factor, 129 |        bus_colors=tech_colors, 130 |        boundaries=map_boundaries, 131 |        basemap=False, 132 |        ax=ax) 133 | n.plot(line_widths=line_widths_cur/linewidth_factor, 134 |        line_colors=line_colors_with_alpha, 135 |        bus_sizes=0, 136 |        bus_colors=tech_colors, 137 |        boundaries=map_boundaries, 138 |        basemap=False, 139 |        ax=ax) 140 | ax.set_aspect('equal') 141 | ax.axis('off') 142 | 143 | x1, y1, x2, y2 = map_boundaries 144 | ax.set_xlim(x1, x2) 145 | ax.set_ylim(y1, y2) 146 | 147 | 148 | # Rasterize basemap 149 | #for c in ax.collections[:2]: c.set_rasterized(True) 150 | 151 | # LEGEND 152 | handles = [] 153 | labels = [] 154 | 155 | for s in (10, 5): 156 |     handles.append(plt.Line2D([0],[0],color=line_colors['exp'], 157 |                               linewidth=s*1e3/linewidth_factor)) 158 |     labels.append("{} GW".format(s)) 159 | l1 = l1_1 = ax.legend(handles, labels, 160 |                       loc="upper left", bbox_to_anchor=(0.24, 1.01), 161 |                       frameon=False, 162 |                       labelspacing=0.8, handletextpad=1.5, 163 |                       title='Transmission Exist./Exp.             ') 164 | ax.add_artist(l1_1) 165 | 166 | handles = [] 167 | labels = [] 168 | for s in (10, 5): 169 |     handles.append(plt.Line2D([0],[0],color=line_colors['cur'], 170 |                               linewidth=s*1e3/linewidth_factor)) 171 |     labels.append("/") 172 | l1_2 = ax.legend(handles, labels, 173 |                  loc="upper left", bbox_to_anchor=(0.26, 1.01), 174 |                  frameon=False, 175 |                  labelspacing=0.8, handletextpad=0.5, 176 |                  title=' ') 177 | ax.add_artist(l1_2) 178 | 179 | handles = make_legend_circles_for([10e3, 5e3, 1e3], scale=bus_size_factor, facecolor="w") 180 | labels = ["{} GW".format(s) for s in (10, 5, 1)] 181 | l2 = ax.legend(handles, labels, 182 |                loc="upper left", bbox_to_anchor=(0.01, 1.01), 183 |                frameon=False, labelspacing=1.0, 184 |                title='Generation', 185 |                handler_map=make_handler_map_to_scale_circles_as_in(ax)) 186 | ax.add_artist(l2) 187 | 188 | techs = (bus_sizes.index.levels[1]) & pd.Index(opts['vre_techs'] + opts['conv_techs'] + opts['storage_techs']) 189 | handles = [] 190 | labels = [] 191 | for t in techs: 192 |     handles.append(plt.Line2D([0], [0], color=tech_colors[t], marker='o', markersize=8, linewidth=0)) 193 |     labels.append(opts['nice_names'].get(t, t)) 194 | l3 = ax.legend(handles, labels, loc="lower left", bbox_to_anchor=(0.6, -0.15), # bbox_to_anchor=(0.72, -0.05), 195 |                handletextpad=0., columnspacing=0.5, ncol=2, title='Technology') 196 | 197 | 198 | for ext in snakemake.params.ext: 199 |     fig.savefig(snakemake.output.only_map+'.'+ext, dpi=150, 200 |                 bbox_inches='tight', bbox_extra_artists=[l1,l2,l3]) 201 | 202 | 203 | co2_emi = ((n.generators_t.p.multiply(n.snapshot_weightings,axis=0)).sum()/n.generators.efficiency * n.generators.carrier.map(n.carriers.co2_emissions)).sum() 204 | 205 | fig.text(0.2, 0.16, "CO$_2$ emissions: {} MtCO$_2$/a".format(int(np.round(co2_emi/1e6)))) 206 | 207 | #n = load_network(snakemake.input.network, opts, combine_hydro_ps=False) 208 | 209 | ## Add total energy p 210 | 211 | ax1 = ax = fig.add_axes([-0.13, 0.555, 0.2, 0.2]) 212 | ax.set_title('Energy per technology', fontdict=dict(fontsize="medium")) 213 | 214 | e_primary = aggregate_p(n).drop('load', errors='ignore').loc[lambda s: s>0] 215 |
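# The co2_emi expression above, unpacked into equivalent steps on the same
# network objects (no new inputs):
#
#     energy_el = n.generators_t.p.multiply(n.snapshot_weightings, axis=0).sum()   # MWh_el per generator
#     energy_th = energy_el / n.generators.efficiency                              # fuel input, MWh_th
#     co2_emi = (energy_th * n.generators.carrier.map(n.carriers.co2_emissions)).sum()  # t CO2 per year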
216 | patches, texts, autotexts = ax.pie(e_primary, 217 | startangle=90, 218 | labels = e_primary.rename(opts['nice_names_n']).index, 219 | autopct='%.0f%%', 220 | shadow=False, 221 | colors = [tech_colors[tech] for tech in e_primary.index]) 222 | for t1, t2, i in zip(texts, autotexts, e_primary.index): 223 | if e_primary.at[i] < 0.04 * e_primary.sum(): 224 | t1.remove() 225 | t2.remove() 226 | elif i == 'Coal': 227 | t2.set_color('gray') 228 | 229 | ## Add average system cost bar plot 230 | # ax2 = ax = fig.add_axes([-0.1, 0.2, 0.1, 0.33]) 231 | # ax2 = ax = fig.add_axes([-0.1, 0.15, 0.1, 0.37]) 232 | ax2 = ax = fig.add_axes([-0.1, 0.19, 0.15, 0.33]) 233 | total_load = n.loads_t.p.sum().sum() 234 | 235 | def split_costs(n): 236 | costs = aggregate_costs(n).reset_index(level=0, drop=True) 237 | costs_ex = aggregate_costs(n, existing_only=True).reset_index(level=0, drop=True) 238 | return (costs['capital'].add(costs['marginal'], fill_value=0.), 239 | costs_ex['capital'], costs['capital'] - costs_ex['capital'], costs['marginal']) 240 | 241 | costs, costs_cap_ex, costs_cap_new, costs_marg = split_costs(n) 242 | add_emission_prices(n, snakemake.config['costs']['emission_prices']) 243 | costs_ep, costs_cap_ex_ep, costs_cap_new_ep, costs_marg_ep = split_costs(n) 244 | 245 | costs_graph = pd.DataFrame(dict(a=costs.drop('load', errors='ignore'), b=costs_ep.drop('load', errors='ignore')), 246 | index=['AC-AC', 'AC line', 'Wind', 'PV', 'Nuclear', 247 | 'Coal', 'OCGT', 'CCGT', 'CAES', 'Battery']).dropna() 248 | bottom = np.array([0., 0.]) 249 | texts = [] 250 | 251 | for i,ind in enumerate(costs_graph.index): 252 | data = np.asarray(costs_graph.loc[ind])/total_load 253 | ax.bar([0.1, 0.55], data, bottom=bottom, color=tech_colors[ind], width=0.35, zorder=-1) 254 | bottom_sub = bottom 255 | bottom = bottom+data 256 | 257 | if ind in opts['conv_techs'] + ['AC line']: 258 | for c, c_ep, h in [(costs_cap_ex, costs_cap_ex_ep, None), 259 | (costs_cap_new, costs_cap_new_ep, 'xxxx'), 260 | (costs_marg, costs_marg_ep, None)]: 261 | if ind in c and ind in c_ep: 262 | data_sub = np.asarray([c.loc[ind], c_ep.loc[ind]])/total_load 263 | ax.bar([0.1, 0.55], data_sub, linewidth=0, 264 | bottom=bottom_sub, color=tech_colors[ind], 265 | width=0.35, zorder=-1, hatch=h, alpha=0.8) 266 | bottom_sub += data_sub 267 | 268 | if abs(data[-1]) < 30: 269 | continue 270 | 271 | text = ax.text(1.1,(bottom-0.5*data)[-1]-15,opts['nice_names'].get(ind,ind)) 272 | texts.append(text) 273 | 274 | ax.set_ylabel("Average system cost [R/MWh]") 275 | ax.set_ylim([0,opts['costs_max']]) 276 | ax.set_xlim([0,1]) 277 | ax.set_xticks([0.3, 0.7]) 278 | ax.set_xticklabels(["w/o\nEp", "w/\nEp"]) 279 | ax.grid(True, axis="y", color='k', linestyle='dotted') 280 | 281 | #fig.tight_layout() 282 | 283 | 284 | for ext in snakemake.params.ext: 285 | fig.savefig(snakemake.output.ext + '.' 
+ ext, transparent=True, 286 |                 bbox_inches='tight', bbox_extra_artists=[l1, l2, l3, ax1, ax2]) 287 | 288 | 289 | # if False: 290 | #     filename = "total-pie-{}".format(key).replace(".","-")+".pdf" 291 | #     print("Saved to {}".format(filename)) 292 | #     fig.savefig(filename,transparent=True,bbox_inches='tight',bbox_extra_artists=texts) 293 | 294 | # #ax.set_title('Expansion to 1.25 x today\'s line volume at 256 clusters') 295 | 296 | -------------------------------------------------------------------------------- /scripts/add_electricity.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | import networkx as nx 4 | import pandas as pd 5 | import numpy as np 6 | import scipy as sp 7 | from operator import attrgetter 8 | from six import string_types 9 | 10 | import rasterio 11 | import fiona 12 | import rasterstats 13 | import geopandas as gpd 14 | 15 | from shapely.geometry import Point 16 | from vresutils.shapes import haversine 17 | from vresutils.costdata import annuity 18 | 19 | import pypsa 20 | 21 | from _helpers import pdbcast 22 | 23 | def normed(s): return s/s.sum() 24 | 25 | 26 | 27 | def _add_missing_carriers_from_costs(n, costs, carriers): 28 |     missing_carriers = pd.Index(carriers).difference(n.carriers.index) 29 |     emissions_cols = costs.columns.to_series().loc[lambda s: s.str.endswith('_emissions')].values 30 |     n.import_components_from_dataframe(costs.loc[missing_carriers, emissions_cols].fillna(0.), 'Carrier') 31 | 32 | def load_costs(): 33 |     costs = pd.read_excel(snakemake.input.tech_costs, 34 |                           sheet_name=snakemake.wildcards.cost, 35 |                           index_col=0).T 36 | 37 |     discountrate = snakemake.config['costs']['discountrate'] 38 |     costs['capital_cost'] = ((annuity(costs.pop('Lifetime [a]'), discountrate) + 39 |                               costs.pop('FOM [%/a]').fillna(0.) / 100.) 40 |                              * costs.pop('Overnight cost [R/kW_el]')*1e3) 41 | 42 |     costs['efficiency'] = costs.pop('Efficiency').fillna(1.) 43 |     costs['marginal_cost'] = (costs.pop('VOM [R/MWh_el]').fillna(0.) + 44 |                               (costs.pop('Fuel cost [R/MWh_th]') / costs['efficiency']).fillna(0.)) 45 | 46 |     emissions_cols = costs.columns.to_series().loc[lambda s: s.str.endswith(' emissions [kg/MWh_th]')] 47 |     costs.loc[:, emissions_cols.index] = (costs.loc[:, emissions_cols.index]/1e3).fillna(0.)
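    # A worked example of the capital_cost conversion above (numbers made up):
    # 30 a lifetime, 8%/a discount rate, 2%/a FOM and 20000 R/kW overnight cost give
    #
    #     (annuity(30., 0.08) + 0.02) * 20000. * 1e3
    #     # = (0.0888 + 0.02) * 2e7 ~ 2.2e6 R/MW/a
    #
    # where the *1e3 converts R/kW into the R/MW convention PyPSA expects.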
48 | costs = costs.rename(columns=emissions_cols.str[:-len(" [kg/MWh_th]")].str.lower().str.replace(' ', '_')) 49 | 50 | for attr in ('marginal_cost', 'capital_cost'): 51 | overwrites = snakemake.config['costs'].get(attr) 52 | if overwrites is not None: 53 | overwrites = pd.Series(overwrites) 54 | costs.loc[overwrites.index, attr] = overwrites 55 | 56 | return costs 57 | 58 | # ## Attach components 59 | 60 | # ### Load 61 | 62 | def attach_load(n): 63 | load = pd.read_csv(snakemake.input.load) 64 | load = load.set_index( 65 | pd.to_datetime(load['SETTLEMENT_DATE'] + ' ' + 66 | load['PERIOD'].astype(str) + ':00') 67 | .rename('t') 68 | )['SYSTEMENERGY'] 69 | 70 | demand = (snakemake.config['electricity']['demand'] * 71 | normed(load.loc[snakemake.config['historical_year']])) 72 | n.madd("Load", n.buses.index, 73 | bus=n.buses.index, 74 | p_set=pdbcast(demand, normed(n.buses.population))) 75 | 76 | ### Set line costs 77 | 78 | def update_transmission_costs(n, costs): 79 | opts = snakemake.config['lines'] 80 | for df in (n.lines, n.links): 81 | if df.empty: continue 82 | 83 | df['capital_cost'] = (df['length'] / opts['s_nom_factor'] * 84 | costs.at['Transmission lines', 'capital_cost']) 85 | 86 | 87 | # ### Generators 88 | 89 | def attach_wind_and_solar(n, costs): 90 | historical_year = snakemake.config['historical_year'] 91 | capacity_per_sqm = snakemake.config['respotentials']['capacity_per_sqm'] 92 | 93 | ## Wind 94 | 95 | n.add("Carrier", name="Wind") 96 | windarea = pd.read_csv(snakemake.input.wind_area, index_col=0).loc[lambda s: s.available_area > 0.] 97 | windres = (pd.read_excel(snakemake.input.wind_profiles, 98 | skiprows=[1], sheet_name='Wind power profiles') 99 | .rename(columns={'supply area\'s name': 't'}).set_index('t') 100 | .resample('1h').mean().loc[historical_year] 101 | .reindex(columns=windarea.index) 102 | .clip(lower=0., upper=1.)) 103 | n.madd("Generator", windarea.index, suffix=" Wind", 104 | bus=windarea.index, 105 | carrier="Wind", 106 | p_nom_extendable=True, 107 | p_nom_max=windarea.available_area * capacity_per_sqm['wind'], 108 | marginal_cost=costs.at['Wind', 'marginal_cost'], 109 | capital_cost=costs.at['Wind', 'capital_cost'], 110 | efficiency=costs.at['Wind', 'efficiency'], 111 | p_max_pu=windres) 112 | 113 | ## PV 114 | 115 | n.add("Carrier", name="PV") 116 | pvarea = pd.read_csv(snakemake.input.solar_area, index_col=0).loc[lambda s: s.available_area > 0.] 117 | pvres = (pd.read_excel(snakemake.input.pv_profiles, 118 | skiprows=[1], sheet_name='PV profiles') 119 | .rename(columns={'supply area\'s name': 't'}) 120 | .set_index('t') 121 | .resample('1h').mean().loc[historical_year].reindex(n.snapshots, fill_value=0.) 
122 | .reindex(columns=pvarea.index) 123 | .clip(lower=0., upper=1.)) 124 | n.madd("Generator", pvarea.index, suffix=" PV", 125 | bus=pvarea.index, 126 | carrier="PV", 127 | p_nom_extendable=True, 128 | p_nom_max=pvarea.available_area * capacity_per_sqm['solar'], 129 | marginal_cost=costs.at['PV', 'marginal_cost'], 130 | capital_cost=costs.at['PV', 'capital_cost'], 131 | efficiency=costs.at['PV', 'efficiency'], 132 | p_max_pu=pvres) 133 | 134 | 135 | # # Generators 136 | 137 | 138 | def attach_existing_generators(n, costs): 139 | historical_year = snakemake.config['historical_year'] 140 | 141 | ps_f = dict(efficiency="Pump Efficiency (%)", 142 | pump_units="Pump Units", 143 | pump_load="Pump Load per unit (MW)", 144 | max_storage="Pumped Storage - Max Storage (GWh)") 145 | 146 | csp_f = dict(max_hours='CSP Storage (hours)') 147 | 148 | g_f = dict(fom="Fixed Operations and maintenance costs (R/kW/yr)", 149 | p_nom='Installed/ Operational Capacity in 2016 (MW)', 150 | name='Power Station Name', 151 | carrier='Fuel/technology type', 152 | decomdate='Decommissioning Date', 153 | x='GPS Longitude', 154 | y='GPS Latitude', 155 | status='Status', 156 | heat_rate='Heat Rate (GJ/MWh)', 157 | fuel_price='Fuel Price (R/GJ)', 158 | vom='Variable Operations and Maintenance Cost (R/MWh)', 159 | max_ramp_up='Max Ramp Up (MW/min)', 160 | unit_size='Unit size (MW)', 161 | units='Number units', 162 | maint_rate='Typical annual maintenance rate (%)', 163 | out_rate='Typical annual forced outage rate (%)', 164 | owner='Owner') 165 | 166 | gens = pd.read_excel(snakemake.input.existing_generators, na_values=['-']) 167 | 168 | # Make field "Fixed Operations and maintenance costs" numeric 169 | includescapex_i = gens[g_f['fom']].str.endswith(' (includes capex)').dropna().index 170 | gens.loc[includescapex_i, g_f['fom']] = gens.loc[includescapex_i, g_f['fom']].str[:-len(' (includes capex)')] 171 | gens[g_f['fom']] = pd.to_numeric(gens[g_f['fom']]) 172 | 173 | 174 | # Calculate fields where pypsa uses different conventions 175 | gens['efficiency'] = 3.6/gens.pop(g_f['heat_rate']) 176 | gens['marginal_cost'] = 3.6*gens.pop(g_f['fuel_price'])/gens['efficiency'] + gens.pop(g_f['vom']) 177 | gens['capital_cost'] = 1e3*gens.pop(g_f['fom']) 178 | gens['ramp_limit_up'] = 60*gens.pop(g_f['max_ramp_up'])/gens[g_f['p_nom']] 179 | 180 | year = snakemake.config['year'] 181 | gens = (gens 182 | # rename remaining fields 183 | .rename(columns={g_f[f]: f 184 | for f in {'p_nom', 'name', 'carrier', 'x', 'y'}}) 185 | # remove all power plants decommissioned before 2030 186 | .loc[lambda df: ((pd.to_datetime(df[g_f['decomdate']].replace({'beyond 2050': np.nan}).dropna()) >= year) 187 | .reindex(df.index, fill_value=True))] 188 | # drop unused fields 189 | .drop([g_f[f] for f in {'unit_size', 'units', 'maint_rate', 190 | 'out_rate', 'decomdate', 'status'}], axis=1) 191 | ).set_index('name') 192 | 193 | # CahoraBassa will be added later, even though we don't have coordinates 194 | CahoraBassa = gens.loc["CahoraBassa"] 195 | 196 | # Drop power plants where we don't have coordinates or capacity 197 | gens = pd.DataFrame(gens.loc[lambda df: (df.p_nom>0.) 
& df.x.notnull() & df.y.notnull()]) 198 | 199 | # Associate every generator with the bus of the region it is in or closest to 200 | pos = gpd.GeoSeries([Point(o.x, o.y) for o in gens[['x', 'y']].itertuples()], index=gens.index) 201 | 202 | regions = gpd.read_file(snakemake.input.supply_regions).set_index('name') 203 | 204 | for bus, region in regions.geometry.iteritems(): 205 | pos_at_bus_b = pos.within(region) 206 | if pos_at_bus_b.any(): 207 | gens.loc[pos_at_bus_b, "bus"] = bus 208 | 209 | gens.loc[gens.bus.isnull(), "bus"] = pos[gens.bus.isnull()].map(lambda p: regions.distance(p).idxmin()) 210 | 211 | CahoraBassa['bus'] = "POLOKWANE" 212 | gens = gens.append(CahoraBassa) 213 | 214 | # Now we split them by carrier and have some more carrier specific cleaning 215 | gens.carrier.replace({"Pumped Storage": "Pumped storage"}, inplace=True) 216 | 217 | # HYDRO 218 | 219 | n.add("Carrier", "Hydro") 220 | n.add("Carrier", "Pumped storage") 221 | 222 | hydro = pd.DataFrame(gens.loc[gens.carrier.isin({'Pumped storage', 'Hydro'})]) 223 | hydro["efficiency_store"] = hydro["efficiency_dispatch"] = np.sqrt(hydro.pop(ps_f['efficiency'])/100.).fillna(1.) 224 | 225 | hydro["max_hours"] = 1e3*hydro.pop(ps_f["max_storage"])/hydro["p_nom"] 226 | 227 | hydro["p_min_pu"] = - (hydro.pop(ps_f["pump_load"]) * hydro.pop(ps_f["pump_units"]) / hydro["p_nom"]).fillna(0.) 228 | 229 | hydro = (hydro 230 | .assign(p_max_pu=1.0, cyclic_state_of_charge=True) 231 | .drop(list(csp_f.values()) + ['ramp_limit_up', 'efficiency'], axis=1)) 232 | 233 | hydro.max_hours.fillna(hydro.max_hours.mean(), inplace=True) 234 | 235 | hydro_inflow = pd.read_csv(snakemake.input.hydro_inflow, index_col=0, parse_dates=True).loc[historical_year] 236 | hydro_za_b = (hydro.index.to_series() != 'CahoraBassa') 237 | hydro_inflow_za = pd.DataFrame(hydro_inflow[['ZA']].values * normed(hydro.loc[hydro_za_b, 'p_nom'].values), 238 | columns=hydro.index[hydro_za_b], index=hydro_inflow.index) 239 | hydro_inflow_za['CahoraBassa'] = hydro.at['CahoraBassa', 'p_nom']/2187.*hydro_inflow['MZ'] 240 | 241 | hydro.marginal_cost.fillna(0., inplace=True) 242 | n.import_components_from_dataframe(hydro, "StorageUnit") 243 | n.import_series_from_dataframe(hydro_inflow_za, "StorageUnit", "inflow") 244 | 245 | if snakemake.config['electricity'].get('csp'): 246 | n.add("Carrier", "CSP") 247 | 248 | csp = (pd.DataFrame(gens.loc[gens.carrier == "CSP"]) 249 | .drop(list(ps_f.values()) + ["ramp_limit_up", "efficiency"], axis=1) 250 | .rename(columns={csp_f['max_hours']: 'max_hours'})) 251 | 252 | # TODO add to network with time-series and everything 253 | 254 | gens = (gens.loc[gens.carrier.isin({"Coal", "Nuclear"})] 255 | .drop(list(ps_f.values()) + list(csp_f.values()), axis=1)) 256 | _add_missing_carriers_from_costs(n, costs, gens.carrier.unique()) 257 | n.import_components_from_dataframe(gens, "Generator") 258 | 259 | def attach_extendable_generators(n, costs): 260 | elec_opts = snakemake.config['electricity'] 261 | carriers = elec_opts['extendable_carriers']['Generator'] 262 | buses = elec_opts['buses'] 263 | 264 | _add_missing_carriers_from_costs(n, costs, carriers) 265 | 266 | for carrier in carriers: 267 | buses_i = buses.get(carrier, n.buses.index) 268 | n.madd("Generator", buses_i, suffix=" " + carrier, 269 | bus=buses_i, 270 | p_nom_extendable=True, 271 | carrier=carrier, 272 | capital_cost=costs.at[carrier, 'capital_cost'], 273 | marginal_cost=costs.at[carrier, 'marginal_cost'], 274 | efficiency=costs.at[carrier, 'efficiency']) 275 | 276 | 277 | def 
attach_storage(n, costs): 278 | elec_opts = snakemake.config['electricity'] 279 | carriers = elec_opts['extendable_carriers']['StorageUnit'] 280 | max_hours = elec_opts['max_hours'] 281 | buses = elec_opts['buses'] 282 | 283 | _add_missing_carriers_from_costs(n, costs, carriers) 284 | 285 | for carrier in carriers: 286 | buses_i = buses.get(carrier, n.buses.index) 287 | n.madd("StorageUnit", buses_i, " " + carrier, 288 | bus=buses_i, 289 | p_nom_extendable=True, 290 | carrier=carrier, 291 | capital_cost=costs.at[carrier, 'capital_cost'], 292 | marginal_cost=costs.at[carrier, 'marginal_cost'], 293 | efficiency_store=costs.at[carrier, 'efficiency'], 294 | efficiency_dispatch=costs.at[carrier, 'efficiency'], 295 | max_hours=max_hours[carrier], 296 | cyclic_state_of_charge=True) 297 | 298 | def add_co2limit(n): 299 | n.add("GlobalConstraint", "CO2Limit", 300 | carrier_attribute="co2_emissions", sense="<=", 301 | constant=snakemake.config['electricity']['co2limit']) 302 | 303 | def add_emission_prices(n, emission_prices=None, exclude_co2=False): 304 | if emission_prices is None: 305 | emission_prices = snakemake.config['costs']['emission_prices'] 306 | if exclude_co2: emission_prices.pop('co2') 307 | ep = (pd.Series(emission_prices).rename(lambda x: x+'_emissions') * n.carriers).sum(axis=1) 308 | n.generators['marginal_cost'] += n.generators.carrier.map(ep) 309 | n.storage_units['marginal_cost'] += n.storage_units.carrier.map(ep) 310 | 311 | def add_peak_demand_hour_without_variable_feedin(n): 312 | new_hour = n.snapshots[-1] + pd.Timedelta(hours=1) 313 | n.set_snapshots(n.snapshots.append(pd.Index([new_hour]))) 314 | 315 | # Don't value new hour for energy totals 316 | n.snapshot_weightings[new_hour] = 0. 317 | 318 | # Don't allow variable feed-in in this hour 319 | n.generators_t.p_max_pu.loc[new_hour] = 0. 
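    # With zero weighting and no variable feed-in, this artificial hour adds
    # nothing to the energy totals; the load assigned just below (peak demand
    # scaled up by the SAFE reserve margin) must be met by firm capacity alone.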
320 | 321 | n.loads_t.p_set.loc[new_hour] = ( 322 | n.loads_t.p_set.loc[n.loads_t.p_set.sum(axis=1).idxmax()] 323 | * (1.+snakemake.config['electricity']['SAFE_reservemargin']) 324 | ) 325 | 326 | 327 | 328 | if __name__ == "__main__": 329 | opts = snakemake.wildcards.opts.split('-') 330 | 331 | n = pypsa.Network(snakemake.input.base_network) 332 | costs = load_costs() 333 | attach_load(n) 334 | update_transmission_costs(n, costs) 335 | attach_existing_generators(n, costs) 336 | attach_wind_and_solar(n, costs) 337 | attach_extendable_generators(n, costs) 338 | attach_storage(n, costs) 339 | 340 | if 'Co2L' in opts: 341 | add_co2limit(n) 342 | add_emission_prices(n, exclude_co2=True) 343 | 344 | if 'Ep' in opts: 345 | add_emission_prices(n) 346 | 347 | if 'SAFE' in opts: 348 | add_peak_demand_hour_without_variable_feedin(n) 349 | 350 | n.export_to_netcdf(snakemake.output[0]) 351 | -------------------------------------------------------------------------------- /scripts/make_summary.py: -------------------------------------------------------------------------------- 1 | import os 2 | from six import iteritems 3 | from itertools import product 4 | import pandas as pd 5 | 6 | import pypsa 7 | 8 | from add_electricity import load_costs, update_transmission_costs 9 | 10 | idx = pd.IndexSlice 11 | 12 | opt_name = {"Store": "e", "Line" : "s", "Transformer" : "s"} 13 | 14 | def assign_carriers(n): 15 | 16 | if "carrier" not in n.loads: 17 | n.loads["carrier"] = "electricity" 18 | for carrier in ["transport","heat","urban heat"]: 19 | n.loads.loc[n.loads.index.str.contains(carrier),"carrier"] = carrier 20 | 21 | n.storage_units['carrier'].replace({'hydro': 'hydro+PHS', 'PHS': 'hydro+PHS'}, inplace=True) 22 | 23 | if "carrier" not in n.lines: 24 | n.lines["carrier"] = "AC" 25 | 26 | n.lines["carrier"].replace({"AC": "lines"}, inplace=True) 27 | 28 | if n.links.empty: n.links["carrier"] = pd.Series(dtype=str) 29 | n.links["carrier"].replace({"DC": "lines"}, inplace=True) 30 | 31 | if "EU gas store" in n.stores.index and n.stores.loc["EU gas Store","carrier"] == "": 32 | n.stores.loc["EU gas Store","carrier"] = "gas Store" 33 | 34 | 35 | def calculate_costs(n,label,costs): 36 | 37 | for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}): 38 | capital_costs = c.df.capital_cost*c.df[opt_name.get(c.name,"p") + "_nom_opt"] 39 | capital_costs_grouped = capital_costs.groupby(c.df.carrier).sum() 40 | 41 | costs = costs.reindex(costs.index|pd.MultiIndex.from_product([[c.list_name],["capital"],capital_costs_grouped.index])) 42 | 43 | costs.loc[idx[c.list_name,"capital",list(capital_costs_grouped.index)],label] = capital_costs_grouped.values 44 | 45 | if c.name == "Link": 46 | p = c.pnl.p0.multiply(n.snapshot_weightings,axis=0).sum() 47 | elif c.name == "Line": 48 | continue 49 | elif c.name == "StorageUnit": 50 | p_all = c.pnl.p.multiply(n.snapshot_weightings,axis=0) 51 | p_all[p_all < 0.] = 0. 
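            # Negative StorageUnit dispatch is charging; it is clipped to zero here
            # so the marginal-cost term below prices only energy actually delivered.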
52 | p = p_all.sum() 53 | else: 54 | p = c.pnl.p.multiply(n.snapshot_weightings,axis=0).sum() 55 | 56 | marginal_costs = p*c.df.marginal_cost 57 | 58 | marginal_costs_grouped = marginal_costs.groupby(c.df.carrier).sum() 59 | 60 | costs = costs.reindex(costs.index|pd.MultiIndex.from_product([[c.list_name],["marginal"],marginal_costs_grouped.index])) 61 | 62 | costs.loc[idx[c.list_name,"marginal",list(marginal_costs_grouped.index)],label] = marginal_costs_grouped.values 63 | 64 | return costs 65 | 66 | def calculate_curtailment(n,label,curtailment): 67 | 68 | avail = n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt).sum().groupby(n.generators.carrier).sum() 69 | used = n.generators_t.p.sum().groupby(n.generators.carrier).sum() 70 | 71 | curtailment[label] = (((avail - used)/avail)*100).round(3) 72 | 73 | return curtailment 74 | 75 | def calculate_energy(n,label,energy): 76 | 77 | for c in n.iterate_components(n.one_port_components|n.branch_components): 78 | 79 | if c.name in n.one_port_components: 80 | c_energies = c.pnl.p.multiply(n.snapshot_weightings,axis=0).sum().multiply(c.df.sign).groupby(c.df.carrier).sum() 81 | else: 82 | c_energies = (-c.pnl.p1.multiply(n.snapshot_weightings,axis=0).sum() - c.pnl.p0.multiply(n.snapshot_weightings,axis=0).sum()).groupby(c.df.carrier).sum() 83 | 84 | energy = include_in_summary(energy, [c.list_name], label, c_energies) 85 | 86 | return energy 87 | 88 | def include_in_summary(summary, multiindexprefix, label, item): 89 | summary = summary.reindex(summary.index | pd.MultiIndex.from_product([[p] for p in multiindexprefix] + [item.index])) 90 | summary.loc[idx[tuple(multiindexprefix + [list(item.index)])], label] = item.values 91 | return summary 92 | 93 | def calculate_capacity(n,label,capacity): 94 | 95 | for c in n.iterate_components(n.one_port_components): 96 | if 'p_nom_opt' in c.df.columns: 97 | c_capacities = abs(c.df.p_nom_opt.multiply(c.df.sign)).groupby(c.df.carrier).sum() 98 | capacity = include_in_summary(capacity, [c.list_name], label, c_capacities) 99 | 100 | for c in n.iterate_components(n.passive_branch_components): 101 | c_capacities = c.df['s_nom_opt'].groupby(c.df.carrier).sum() 102 | capacity = include_in_summary(capacity, [c.list_name], label, c_capacities) 103 | 104 | for c in n.iterate_components(n.controllable_branch_components): 105 | c_capacities = c.df.p_nom_opt.groupby(c.df.carrier).sum() 106 | capacity = include_in_summary(capacity, [c.list_name], label, c_capacities) 107 | 108 | return capacity 109 | 110 | def calculate_supply(n,label,supply): 111 | """calculate the max dispatch of each component at the buses where the loads are attached""" 112 | 113 | load_types = n.loads.carrier.value_counts().index 114 | 115 | for i in load_types: 116 | 117 | buses = n.loads.bus[n.loads.carrier == i].values 118 | 119 | bus_map = pd.Series(False,index=n.buses.index) 120 | 121 | bus_map.loc[buses] = True 122 | 123 | for c in n.iterate_components(n.one_port_components): 124 | 125 | items = c.df.index[c.df.bus.map(bus_map)] 126 | 127 | if len(items) == 0: 128 | continue 129 | 130 | s = c.pnl.p[items].max().multiply(c.df.loc[items,'sign']).groupby(c.df.loc[items,'carrier']).sum() 131 | 132 | supply = supply.reindex(supply.index|pd.MultiIndex.from_product([[i],[c.list_name],s.index])) 133 | supply.loc[idx[i,c.list_name,list(s.index)],label] = s.values 134 | 135 | 136 | for c in n.iterate_components(n.branch_components): 137 | 138 | for end in ["0","1"]: 139 | 140 | items = c.df.index[c.df["bus" + end].map(bus_map)] 141 | 142 | if 
                # lots of sign compensation for direction and to do maximums
                s = (-1)**(1 - int(end)) * ((-1)**int(end) * c.pnl["p" + end][items]).max().groupby(c.df.loc[items, 'carrier']).sum()

                supply = supply.reindex(supply.index | pd.MultiIndex.from_product([[i], [c.list_name], s.index]))
                supply.loc[idx[i, c.list_name, list(s.index)], label] = s.values

    return supply


def calculate_supply_energy(n, label, supply_energy):
    """Calculate the total dispatch of each component at the buses where the loads are attached."""

    load_types = n.loads.carrier.value_counts().index

    for i in load_types:

        buses = n.loads.bus[n.loads.carrier == i].values

        bus_map = pd.Series(False, index=n.buses.index)
        bus_map.loc[buses] = True

        for c in n.iterate_components(n.one_port_components):

            items = c.df.index[c.df.bus.map(bus_map)]

            if len(items) == 0:
                continue

            s = c.pnl.p[items].sum().multiply(c.df.loc[items, 'sign']).groupby(c.df.loc[items, 'carrier']).sum()

            supply_energy = supply_energy.reindex(supply_energy.index | pd.MultiIndex.from_product([[i], [c.list_name], s.index]))
            supply_energy.loc[idx[i, c.list_name, list(s.index)], label] = s.values

        for c in n.iterate_components(n.branch_components):

            for end in ["0", "1"]:

                items = c.df.index[c.df["bus" + end].map(bus_map)]

                if len(items) == 0:
                    continue

                s = (-1) * c.pnl["p" + end][items].sum().groupby(c.df.loc[items, 'carrier']).sum()

                supply_energy = supply_energy.reindex(supply_energy.index | pd.MultiIndex.from_product([[i], [c.list_name], s.index]))
                supply_energy.loc[idx[i, c.list_name, list(s.index)], label] = s.values

    return supply_energy


def calculate_metrics(n, label, metrics):

    metrics = metrics.reindex(metrics.index | pd.Index(["line_volume", "line_volume_limit", "line_volume_AC", "line_volume_DC", "line_volume_shadow", "co2_shadow"]))

    metrics.at["line_volume_DC", label] = (n.links.length * n.links.p_nom_opt)[n.links.carrier == "DC"].sum()
    metrics.at["line_volume_AC", label] = (n.lines.length * n.lines.s_nom_opt).sum()
    metrics.at["line_volume", label] = metrics.loc[["line_volume_AC", "line_volume_DC"], label].sum()

    if hasattr(n, "line_volume_limit"):
        metrics.at["line_volume_limit", label] = n.line_volume_limit

    if hasattr(n, "line_volume_limit_dual"):
        metrics.at["line_volume_shadow", label] = n.line_volume_limit_dual

    if "CO2Limit" in n.global_constraints.index:
        metrics.at["co2_shadow", label] = n.global_constraints.at["CO2Limit", "mu"]

    return metrics


def calculate_prices(n, label, prices):

    bus_type = pd.Series(n.buses.index.str[3:], n.buses.index).replace("", "electricity")

    prices = prices.reindex(prices.index | bus_type.value_counts().index)

    # WARNING: this is time-averaged, should really be load-weighted average
    prices[label] = n.buses_t.marginal_price.mean().groupby(bus_type).mean()

    return prices
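
# A load-weighted variant of calculate_prices could look roughly like the
# sketch below (hypothetical, not wired into the outputs; it assumes every
# load bus has a p_set time series):
#
#   weights = n.loads_t.p_set.groupby(n.loads.bus, axis=1).sum()
#   load_weighted = (n.buses_t.marginal_price[weights.columns] * weights).sum() / weights.sum()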


def calculate_weighted_prices(n, label, weighted_prices):
    # Warning: doesn't include storage units as loads

    weighted_prices = weighted_prices.reindex(pd.Index(["electricity", "heat", "space heat", "urban heat", "space urban heat", "gas", "H2"]))

    # map each carrier to the link technologies that act as additional loads on its buses
    link_loads = {"electricity": ["heat pump", "resistive heater", "battery charger", "H2 Electrolysis"],
                  "heat": ["water tanks charger"],
                  "urban heat": ["water tanks charger"],
                  "space heat": [],
                  "space urban heat": [],
                  "gas": ["OCGT", "gas boiler", "CHP electric", "CHP heat"],
                  "H2": ["Sabatier", "H2 Fuel Cell"]}

    for carrier in link_loads:

        if carrier == "electricity":
            suffix = ""
        elif carrier[:5] == "space":
            suffix = carrier[5:]
        else:
            suffix = " " + carrier

        buses = n.buses.index[n.buses.index.str[2:] == suffix]

        if buses.empty:
            continue

        if carrier in ["H2", "gas"]:
            load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.)
        elif carrier[:5] == "space":
            # NB: heat_demand_df is not defined in this script; this branch is only
            # reachable for sector-coupled networks with space-heat buses
            load = heat_demand_df[buses.str[:2]].rename(columns=lambda i: str(i) + suffix)
        else:
            load = n.loads_t.p_set[buses]

        for tech in link_loads[carrier]:

            names = n.links.index[n.links.index.to_series().str[-len(tech):] == tech]

            if names.empty:
                continue

            load += n.links_t.p0[names].groupby(n.links.loc[names, "bus0"], axis=1).sum()

        # add H2 Store when charging
        if carrier == "H2":
            stores = n.stores_t.p[buses + " Store"].groupby(n.stores.loc[buses + " Store", "bus"], axis=1).sum()
            stores[stores > 0.] = 0.
            load += -stores

        weighted_prices.loc[carrier, label] = (load * n.buses_t.marginal_price[buses]).sum().sum() / load.sum().sum()

        if carrier[:5] == "space":
            print(load * n.buses_t.marginal_price[buses])

    return weighted_prices


# BROKEN don't use
#
# def calculate_market_values(n, label, market_values):
#     # Warning: doesn't include storage units
#
#     n.buses["suffix"] = n.buses.index.str[2:]
#     suffix = ""
#     buses = n.buses.index[n.buses.suffix == suffix]
#
#     ## First do market value of generators ##
#     generators = n.generators.index[n.buses.loc[n.generators.bus, "suffix"] == suffix]
#     techs = n.generators.loc[generators, "carrier"].value_counts().index
#     market_values = market_values.reindex(market_values.index | techs)
#
#     for tech in techs:
#         gens = generators[n.generators.loc[generators, "carrier"] == tech]
#         dispatch = n.generators_t.p[gens].groupby(n.generators.loc[gens, "bus"], axis=1).sum().reindex(columns=buses, fill_value=0.)
#         revenue = dispatch * n.buses_t.marginal_price[buses]
#         market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()
#
#     ## Now do market value of links ##
#
#     for i in ["0", "1"]:
#         all_links = n.links.index[n.buses.loc[n.links["bus" + i], "suffix"] == suffix]
#         techs = n.links.loc[all_links, "carrier"].value_counts().index
#         market_values = market_values.reindex(market_values.index | techs)
#
#         for tech in techs:
#             links = all_links[n.links.loc[all_links, "carrier"] == tech]
#             dispatch = n.links_t["p" + i][links].groupby(n.links.loc[links, "bus" + i], axis=1).sum().reindex(columns=buses, fill_value=0.)
#             revenue = dispatch * n.buses_t.marginal_price[buses]
#             market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()
#
#     return market_values
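
# A minimal generator-only market value could be computed along the lines of
# the sketch below (hypothetical, not part of the summary outputs; assumes
# n.generators_t.p columns follow the order of n.generators.index):
#
#   def generator_market_values(n):
#       # per-generator price series, taken at each generator's own bus
#       price = n.buses_t.marginal_price[n.generators.bus].values
#       revenue = pd.DataFrame(n.generators_t.p.values * price,
#                              index=n.snapshots, columns=n.generators.index)
#       by_carrier = lambda df: df.sum().groupby(n.generators.carrier).sum()
#       return by_carrier(revenue) / by_carrier(n.generators_t.p)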

# OLD CODE must be adapted
#
# def calculate_price_statistics(n, label, price_statistics):
#
#     price_statistics = price_statistics.reindex(price_statistics.index | pd.Index(["zero_hours", "mean", "standard_deviation"]))
#     n.buses["suffix"] = n.buses.index.str[2:]
#     suffix = ""
#     buses = n.buses.index[n.buses.suffix == suffix]
#
#     threshold = 0.1  # higher than phoney marginal_cost of wind/solar
#     df = pd.DataFrame(data=0., columns=buses, index=n.snapshots)
#     df[n.buses_t.marginal_price[buses] < threshold] = 1.
#     price_statistics.at["zero_hours", label] = df.sum().sum() / (df.shape[0] * df.shape[1])
#     price_statistics.at["mean", label] = n.buses_t.marginal_price[buses].unstack().mean()
#     price_statistics.at["standard_deviation", label] = n.buses_t.marginal_price[buses].unstack().std()
#     return price_statistics


outputs = ["costs",
           "curtailment",
           "energy",
           "capacity",
           "supply",
           "supply_energy",
           "prices",
           "weighted_prices",
           # "price_statistics",
           # "market_values",
           # "metrics",
           ]


def make_summaries(networks_dict):

    columns = pd.MultiIndex.from_tuples(networks_dict.keys(), names=["cost", "resarea", "sectors", "opts"])

    dfs = {}

    for output in outputs:
        dfs[output] = pd.DataFrame(columns=columns, dtype=float)

    for label, filename in iteritems(networks_dict):
        print(label, filename)
        if not os.path.exists(filename):
            print("does not exist!!")
            continue

        try:
            n = pypsa.Network(filename)
        except OSError:
            logger.warning(f"Skipping {filename}")
            continue

        Nyears = n.snapshot_weightings.sum() / 8760.  # currently unused

        assign_carriers(n)

        for output in outputs:
            dfs[output] = globals()["calculate_" + output](n, label, dfs[output])

    return dfs


def to_csv(dfs):
    dir = snakemake.output[0]
    os.makedirs(dir, exist_ok=True)
    for key, df in iteritems(dfs):
        df.to_csv(os.path.join(dir, f"{key}.csv"))


if __name__ == "__main__":
    def expand_from_wildcard(key):
        # a wildcard set to "all" expands to the full list from the scenario config
        w = getattr(snakemake.wildcards, key)
        return snakemake.config["scenario"][key] if w == "all" else [w]

    version = snakemake.config['version']

    networks_dict = {(cost, resarea, sectors, opts): f'results/version-{version}/networks/{cost}_{resarea}_{sectors}_{opts}.nc'
                     for cost in expand_from_wildcard("cost")
                     for resarea in expand_from_wildcard("resarea")
                     for sectors in expand_from_wildcard("sectors")
                     for opts in expand_from_wildcard("opts")}

    # print(networks_dict)

    dfs = make_summaries(networks_dict)

    to_csv(dfs)
--------------------------------------------------------------------------------