├── .vscode
└── settings.json
├── requirements.txt
├── MANIFEST.in
├── entsog
├── __init__.py
├── __pycache__
│ ├── misc.cpython-38.pyc
│ ├── entsog.cpython-38.pyc
│ ├── parsers.cpython-38.pyc
│ ├── __init__.cpython-38.pyc
│ ├── decorators.cpython-38.pyc
│ ├── exceptions.cpython-38.pyc
│ └── mappings.cpython-38.pyc
├── exceptions.py
├── misc.py
├── plot_utils.py
├── decorators.py
├── mappings.py
├── parsers.py
└── entsog.py
├── .gitignore
├── .github
└── workflows
│ └── publish-to-pypi.yml
├── LICENSE
├── setup.py
├── tests.py
├── retrieve_mappings.py
└── readme.md
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "jupyter.jupyterServerType": "local"
3 | }
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | requests
2 | pytz
3 | beautifulsoup4
4 | pandas>=1.4.0
5 | unidecode
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include readme.md
2 | include LICENSE
3 | include requirements.txt
4 | include entsog/*
--------------------------------------------------------------------------------
/entsog/__init__.py:
--------------------------------------------------------------------------------
1 | from .entsog import EntsogRawClient, EntsogPandasClient, __version__
2 | from .mappings import Area
3 |
--------------------------------------------------------------------------------
/entsog/__pycache__/misc.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nhcb/entsog-py/HEAD/entsog/__pycache__/misc.cpython-38.pyc
--------------------------------------------------------------------------------
/entsog/__pycache__/entsog.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nhcb/entsog-py/HEAD/entsog/__pycache__/entsog.cpython-38.pyc
--------------------------------------------------------------------------------
/entsog/__pycache__/parsers.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nhcb/entsog-py/HEAD/entsog/__pycache__/parsers.cpython-38.pyc
--------------------------------------------------------------------------------
/entsog/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nhcb/entsog-py/HEAD/entsog/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/entsog/__pycache__/decorators.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nhcb/entsog-py/HEAD/entsog/__pycache__/decorators.cpython-38.pyc
--------------------------------------------------------------------------------
/entsog/__pycache__/exceptions.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nhcb/entsog-py/HEAD/entsog/__pycache__/exceptions.cpython-38.pyc
--------------------------------------------------------------------------------
/entsog/__pycache__/mappings.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nhcb/entsog-py/HEAD/entsog/__pycache__/mappings.cpython-38.pyc
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # caches, environment and IDE settings
2 | __pycache__
3 | venv
4 | .idea
5 |
6 | # Distribution / packaging
7 | .Python
8 | env/
9 | build/
10 | develop-eggs/
11 | dist/
12 | downloads/
13 | eggs/
14 | .eggs/
15 | lib/
16 | lib64/
17 | parts/
18 | sdist/
19 | var/
20 | wheels/
21 | *.egg-info/
22 | .installed.cfg
23 | *.egg
--------------------------------------------------------------------------------
/entsog/exceptions.py:
--------------------------------------------------------------------------------
class EntsogError(Exception):
    """Base class for all entsog-py errors.

    Lets callers catch any library error with a single ``except EntsogError``
    while the specific subclasses below keep their existing meaning, so
    existing ``except <Subclass>`` handlers are unaffected.
    """
    pass


class NoMatchingDataError(EntsogError):
    """No data matched the requested query."""
    pass


class UnauthorizedError(EntsogError):
    """The request was not authorized by the API."""
    pass


class PaginationError(EntsogError):
    """Paginated retrieval of a result set failed."""
    pass


class BadGatewayError(EntsogError):
    """The API answered with a bad-gateway style failure."""
    pass


class TooManyRequestsError(EntsogError):
    """The API throttled the client for sending too many requests."""
    pass


class GatewayTimeOut(EntsogError):
    """The API gateway timed out before answering."""
    pass


class NotFoundError(EntsogError):
    """The requested resource does not exist on the API."""
    pass
--------------------------------------------------------------------------------
/.github/workflows/publish-to-pypi.yml:
--------------------------------------------------------------------------------
1 | name: Publish Python 🐍 distributions 📦 to PyPI and TestPyPI
2 |
3 | on:
4 | release:
5 | types: [published]
6 |
7 | jobs:
8 | build-n-publish:
9 | name: Build and publish Python 🐍 distributions 📦 to PyPI and TestPyPI
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: actions/checkout@master
13 | - name: Set up Python 3.9
14 | uses: actions/setup-python@v1
15 | with:
16 | python-version: 3.9
17 | - name: Install pypa/build
18 | run: >-
19 | python -m
20 | pip install
21 | build
22 | --user
23 | - name: Build a binary wheel and a source tarball
24 | run: >-
25 | python -m
26 | build
27 | --sdist
28 | --wheel
29 | --outdir dist/
30 | .
31 | - name: Publish distribution 📦 to PyPI
32 | uses: pypa/gh-action-pypi-publish@master
33 | with:
34 | password: ${{ secrets.PYPI_API_TOKEN }}
35 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Nicky Sonnemans
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-

"""
A setuptools based setup module for entsog-py
Adapted from
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""

# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file (the file on disk is the
# lowercase `readme.md`).
with open(path.join(here, 'readme.md'), encoding='utf-8') as f:
    long_description = f.read()

# Get the version from the source code without importing the package
# (importing would require the runtime dependencies to be installed).
__version__ = None
with open(path.join(here, 'entsog', 'entsog.py'), encoding='utf-8') as f:
    for line in f:
        if line.startswith('__version__'):
            __version__ = line.split('"')[1]  # part between the first pair of "
            break

# Fail loudly here instead of raising an opaque NameError in setup() below.
if __version__ is None:
    raise RuntimeError('__version__ not found in entsog/entsog.py')

setup(
    name='entsog-py',
    version=__version__,
    description='A python API wrapper for transparency.entsog.eu',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/nhcb/entsog-py',
    author='Nicky Sonnemans',
    author_email='nicky.sonnemans@gmail.nl',
    license='MIT',

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 3 - Alpha',

        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Scientific/Engineering',

        # Specify the Python versions you support here.
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',
    ],

    keywords='entsog api gas energy',

    packages=find_packages(),

    # Run-time dependencies, kept in sync with requirements.txt:
    # - 'beautifulsoup4' is the real distribution name ('bs4' is only a stub),
    # - 'pytz' was listed in requirements.txt but missing here,
    # - pandas floor matches requirements.txt.
    install_requires=['requests', 'pandas>=1.4.0', 'pytz', 'beautifulsoup4', 'unidecode'],

    # Data files shipped with the package; the license file on disk is named
    # LICENSE (not LICENSE.md). These must also be listed in MANIFEST.in.
    package_data={
        'entsog-py': ['LICENSE', 'readme.md'],
    },
)
--------------------------------------------------------------------------------
/tests.py:
--------------------------------------------------------------------------------
"""Ad-hoc smoke-test script for EntsogPandasClient.

Exercises a two-day window of ENTSOG transparency data. Most calls are left
commented out so individual endpoints can be tried one at a time.
NOTE(review): this performs live network requests against transparency.entsog.eu
and writes test.csv into the working directory.
"""
from entsog import EntsogPandasClient
import pandas as pd

client = EntsogPandasClient()

# Two-day window, Brussels time.
start = pd.Timestamp('20220918', tz='Europe/Brussels')
end = pd.Timestamp('20220920', tz='Europe/Brussels')
country_code = 'NL' # Netherlands

#client.query_connection_points()
#client.query_operators(country_code)
#client.query_balancing_zones()
print(client.query_operator_point_directions())
#client.query_interconnections()
#client.query_aggregate_interconnections()
#client.query_urgent_market_messages()

#
#client.query_tariffs(start = start, end = end, country_code = country_code, melt = True, verbose = True)
#client.query_tariffs_sim(start = start, end = end, country_code = country_code, verbose = True)

#client.query_aggregated_data(start = start, end = end, country_code = country_code)
# TODO: Add interruptions...
#client.query_interruptions(start = start, end = end)
#client.query_CMP_auction_premiums(start = start, end = end)
#client.query_CMP_unavailable_firm_capacity(start = start, end = end)

#client.query_CMP_unsuccesful_requests(start = start, end = end)

# Mapping of short indicator keys (accepted as `indicators=` by the query
# methods below) to the human-readable indicator names used by the platform.
operational_options = {
    'interruption_capacity' : "Actual interruption of interruptible capacity",
    'allocation' : "Allocation",
    'firm_available' : "Firm Available",
    'firm_booked' : "Firm Booked",
    'firm_interruption_planned' : "Firm Interruption Planned - Interrupted",
    'firm_interruption_unplanned' :"Firm Interruption Unplanned - Interrupted",
    'firm_technical' : "Firm Technical",
    'gcv' : "GCV",
    'interruptible_available' : "Interruptible Available",
    'interruptible_booked' : "Interruptible Booked",
    'interruptible_interruption_actual' : "Interruptible Interruption Actual – Interrupted",
    'interruptible_interruption_planned' : "Interruptible Interruption Planned - Interrupted",
    'interruptible_total' : "Interruptible Total",
    'nomination' : "Nomination",
    'physical_flow' : "Physical Flow",
    'firm_interruption_capacity_planned' : "Planned interruption of firm capacity",
    'renomination' : "Renomination",
    'firm_interruption_capacity_unplanned' : "Unplanned interruption of firm capacity",
    'wobbe_index' : "Wobbe Index",
    'oversubscription_available' : "Available through Oversubscription",
    'surrender_available' : "Available through Surrender",
    'uioli_available_lt' : "Available through UIOLI long-term",
    'uioli_available_st' : "Available through UIOLI short-term"
}

#client.query_operational_data(start = start, end = end, country_code = country_code, indicators = ['renomination', 'physical_flow'])
# You should use this when you want to query operational data for the entirety of continental europe.
data = client.query_operational_data_all(start = start, end = end,period_type = 'hour', indicators = ['renomination', 'physical_flow', 'nomination'])
print(data)
print(data['url'])

data.to_csv("test.csv")

# # Example for if you would like to see Fluxys points.
# points = client.query_operator_point_directions()
# mask = points['connected_operators'].str.contains('Fluxys')
# masked_points = points[mask]
# print(masked_points)

# keys = []
# for idx, item in masked_points.iterrows():
#     keys.append(f"{item['operator_key']}{item['point_key']}{item['direction_key']}")
# print(keys)
# data = client.query_operational_point_data(start = start, end = end, indicators = ['physical_flow'], point_directions = keys, verbose = False)

# print(data.head())
--------------------------------------------------------------------------------
/entsog/misc.py:
--------------------------------------------------------------------------------
1 | import re
2 | from itertools import tee
3 | import pandas as pd
4 | from dateutil import rrule
5 | from unidecode import unidecode
6 |
7 |
def _blocks(start, end, rule):
    """Split [start, end] into consecutive (from, to) pairs at the given
    dateutil rrule frequency, to deal with usage restrictions on the API.

    Parameters
    ----------
    start : dt.datetime | pd.Timestamp
    end : dt.datetime | pd.Timestamp
    rule : int
        A dateutil rrule frequency constant (rrule.YEARLY, rrule.MONTHLY, ...).

    Returns
    -------
    ((pd.Timestamp, pd.Timestamp))
    """
    stamps = [pd.Timestamp(day) for day in rrule.rrule(rule, dtstart=start, until=end)]
    stamps.append(end)
    # Dedupe (end may coincide with a generated stamp), order, chain into pairs.
    return pairwise(sorted(set(stamps)))


def year_blocks(start, end):
    """Pairs of (start, end) at most a year apart; see _blocks."""
    return _blocks(start, end, rrule.YEARLY)


def month_blocks(start, end):
    """Pairs of (start, end) at most a month apart; see _blocks."""
    return _blocks(start, end, rrule.MONTHLY)


def week_blocks(start, end):
    """Pairs of (start, end) at most a week apart; see _blocks."""
    return _blocks(start, end, rrule.WEEKLY)


def day_blocks(start, end):
    """Pairs of (start, end) at most a day apart; see _blocks."""
    return _blocks(start, end, rrule.DAILY)


def pairwise(iterable):
    """
    Create pairs to iterate over
    eg. [A, B, C, D] -> ([A, B], [B, C], [C, D])

    Parameters
    ----------
    iterable : iterable

    Returns
    -------
    iterable
    """
    a, b = tee(iterable)
    next(b, None)
    return zip(a, b)
120 |
121 |
def to_snake_case(string: str) -> str:
    """Convert any string to snake_case.

    Parameters
    ----------
    string : str

    Returns
    -------
    str
    """
    # Transliterate accented/non-ASCII characters to plain ASCII first.
    string = unidecode(string)
    # Any run of characters outside [A-Za-z0-9 _] becomes a single underscore.
    string = re.sub('[^A-Za-z0-9 _]+', '_', string)
    # Insert underscores at CamelCase boundaries.
    string = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', string)
    string = re.sub('([a-z0-9])([A-Z])', r'\1_\2', string)
    # Spaces to underscores, then collapse runs of underscores.
    string = re.sub('_+', '_', string.replace(' ', '_'))
    # Lower-case once, re-join the unit names split by the CamelCase rules,
    # and trim edge underscores (the original lower/lstrip/rstrip chain was
    # duplicated across two lines).
    return string.lower().replace('m_wh', 'mwh').replace('k_wh', 'kwh').strip('_')
141 |
--------------------------------------------------------------------------------
/retrieve_mappings.py:
--------------------------------------------------------------------------------
1 | import json
2 | import pandas as pd
3 | from entsog import EntsogRawClient, EntsogPandasClient
4 | from entsog.parsers import parse_general
5 |
6 |
def get_operator_mappings():
    """Print a nested country -> operators -> points mapping built from the
    live operators and operator-point-directions endpoints.

    Performs live network requests; the JSON dump to mapping/countries.json
    is left commented out, matching the original behaviour.
    """
    client = EntsogPandasClient()
    operators = client.query_operators(country_code=None)
    points = client.query_operator_point_directions()
    countries = operators.drop_duplicates(subset=['operator_country_key'])

    country_entries = []
    for _, country_row in countries.iterrows():
        country = country_row['operator_country_key']

        operators_country = operators[operators['operator_country_key'] == country]

        operator_entries = []
        for _, operator_row in operators_country.iterrows():
            operator = operator_row['operator_key']
            # BUG FIX: filter points by the operator key; the original compared
            # points['operator_key'] against the *country* key, which matched
            # nothing (or the wrong rows).
            points_operator = points[points['operator_key'] == operator]

            # BUG FIX: fresh list per operator; the original reused one list
            # across the whole country, so every operator accumulated the
            # points of all previously seen operators as well.
            point_entries = [
                {
                    'point': point_row['point_key'],
                    'point_label': point_row['point_label']
                }
                for _, point_row in points_operator.iterrows()
            ]

            operator_entries.append({**operator_row.to_dict(), 'points': point_entries})

        country_entries.append(
            {
                'country': country,
                'operators': operator_entries
            }
        )

    print(country_entries)
    #with open('mapping/countries.json', 'w') as fp:
    #    json.dump(country_entries, fp)
46 |
def get_point_mappings():
    """Dump every unique point as an enum-style `label = "key"` source line."""
    client = EntsogPandasClient()
    points = client.query_operator_point_directions()

    # Inspect the raw frame before reducing it.
    print(points.columns)
    print(points.head())
    print(points['id'])

    unique_points = points[['point_key', 'point_label']].drop_duplicates()
    for _, row in unique_points.iterrows():
        print(f"""{row['point_label']} = "{row['point_key']}" """)
57 |
58 |
59 |
def get_area():
    """Print one `Area`-enum style source line per TSO country, built from the
    raw operator point directions payload.

    Performs a live network request. Points without a TSO country are grouped
    under the 'misc' key.
    """
    client = EntsogRawClient()
    data = json.loads(client.query_operator_point_directions(limit=-1))

    df = pd.json_normalize(data['operatorpointdirections'])

    # One representative row per country drives the outer loop.
    df_drop = df.drop_duplicates(subset=['tSOCountry'])

    for _, item in df_drop.iterrows():
        country = item['tSOCountry']

        filtered = df[df['tSOCountry'] == country]

        operator_keys = filtered.loc[:, 'operatorKey'].drop_duplicates()
        operator_labels = filtered.loc[:, 'operatorLabel'].drop_duplicates()

        if country is None:
            country = 'misc'

        # NOTE: the unused `c = {}` accumulator from the original was removed.
        print(f"{country} ={country}, {tuple(operator_keys)}, {tuple(operator_labels)} ,")
82 |
83 |
def check_new_operators():
    """Print ready-to-paste mapping source lines for every TSO country found
    on the live operators endpoint: country code/label pairs, operator
    key/label tuples, and each country's ISO-3166 sub-region.

    Maintenance helper to spot operators missing from entsog/mappings.py.
    Performs live network requests (ENTSOG API + a GitHub-hosted ISO table).
    """
    client = EntsogRawClient()

    json_data, url = client.query_operators(country_code=None)

    data = parse_general(json_data)

    # country code -> {'operators': [...], 'operator_labels': [...], 'label': ...}
    result = {}
    for _, item in data.iterrows():
        country = item['operator_country_key']
        # Only TSO's are relevant for the Area mapping.
        if 'TSO' not in item['operator_key']:
            continue

        if country in result:
            result[country]['operators'].append(item['operator_key'])
            result[country]['operator_labels'].append(item['operator_label'])
        else:
            result[country] = {
                'operators': [item['operator_key']],
                'operator_labels': [item['operator_label']],
                'label': item['operator_country_label']
            }

    # Countries: `CC = "CC", "Label"` lines
    for key, value in result.items():
        print(f"""{key} = "{key}", "{value['label']}" """)

    # Enum area: `CC = (keys,), (labels,)` lines
    for key, value in result.items():
        parsed_operators = ','.join([f'"{o}"' for o in value['operators']])
        parsed_operator_labels = ','.join([f'"{o}"' for o in value['operator_labels']])

        print(f"""{key} = ({parsed_operators},), ({parsed_operator_labels},)""")

    regions = pd.read_csv(
        "https://raw.githubusercontent.com/lukes/ISO-3166-Countries-with-Regional-Codes/master/all/all.csv")

    # Regions: best-effort — fall back to a "REGION" placeholder for codes
    # missing from the ISO table instead of aborting the whole dump.
    for key, value in result.items():
        try:
            region = str(regions[regions['alpha-2'] == key]['sub-region'].iloc[0])
            print(f"""{key} : "{region}" """)
        except Exception:
            print(f"""{key} : "REGION" """)
136 |
# Run the point-mapping dump only when executed as a script, so importing this
# module for its helper functions no longer triggers live network requests.
if __name__ == "__main__":
    get_point_mappings()
--------------------------------------------------------------------------------
/entsog/plot_utils.py:
--------------------------------------------------------------------------------
1 |
2 | from plistlib import PlistFormat
3 | from typing import Optional
4 | import pandas as pd
5 | from entsog.entsog import EntsogPandasClient
6 | import plotnine as p9
7 |
8 | from entsog.mappings import lookup_country
9 |
# Shared plotnine theme for all entsog-py charts: minimal panel chrome
# (no border/background, light grey major grid), legend along the bottom,
# and 45-degree rotated x tick labels.
ENTSOG_THEME = p9.theme(
    axis_text = p9.element_text(),
    axis_ticks = p9.element_line(),

    legend_position="bottom",
    legend_direction="horizontal",
    legend_title_align="center",
    legend_box_spacing=0.4,
    legend_text = p9.element_text(size = 8),
    legend_key=p9.element_blank(),
    axis_line=p9.element_line(size=1, colour="black"),
    panel_grid_major=p9.element_line(colour="#d3d3d3"),
    panel_grid_minor=p9.element_blank(),
    panel_border=p9.element_blank(),
    panel_background=p9.element_blank(),
    plot_title=p9.element_text(size=15,
    face="bold"),
    text=p9.element_text(size=11),
    axis_text_x=p9.element_text(colour="black", size=8, angle = 45),
    axis_text_y=p9.element_text(colour="black", size=8),

    strip_background=p9.element_rect(colour="#f0f0f0",fill="#f0f0f0"),
    strip_text = p9.element_text(size = 8, colour = 'black'),
    # Negative right margin pulls the source caption toward the plot edge.
    plot_caption = p9.element_text(size = 8, colour = 'black', margin={'r': -50, 't': 0}),
)
35 |
# Divisors used to re-scale raw values into the unit requested by the caller:
# plot_area renders `value / UNIT_TRANSFORMATION[unit]` (assumes raw values
# are kWh, since 'kWh' maps to 1 — TODO confirm against the API payload).
UNIT_TRANSFORMATION = {
    'kWh': 1,
    'MWh': 1_000,
    'GWh': 1_000_000,
    'TWh': 1_000_000_000,
    # kWh per million cubic metres of gas
    'mcm' : 10.55 * 1_000_000, # https://learnmetrics.com/m3-gas-to-kwh/
}
43 |
def label_func(x):
    """Facet labeller: expand a 2-letter country code to its full country
    label via lookup_country; any other value is passed through unchanged."""
    if len(x) != 2:
        return x
    return lookup_country(x).label
51 |
52 |
53 |
def plot_area(
    flow_data : pd.DataFrame,
    point_data : pd.DataFrame,
    facet_row : Optional[str] = 'country',
    facet_col : Optional[str] = 'adjacent_country',
    unit : Optional[str] = 'kWh',
    aggregation : Optional[str] = '1D'
):
    """Build a faceted stacked-area plot of gas flows.

    Parameters
    ----------
    flow_data : pd.DataFrame
        Operational flow rows (period_from, value, direction_key, flow_status,
        tso/point identifier columns).
    point_data : pd.DataFrame
        Operator point directions; merged onto flow_data for labels/metadata.
    facet_row, facet_col : str
        Columns used for the facet grid rows/columns (validated below).
    unit : str
        Display unit; plotted values are divided by UNIT_TRANSFORMATION[unit].
    aggregation : str
        pandas Grouper frequency for resampling periods, e.g. '1D'.

    Returns
    -------
    p9.ggplot
    """
    assert facet_row in ['country','operator_label','balancing_zone', 'region', 'point_type', 'point_label']

    assert facet_col in ['adjacent_country','connected_operators','adjacent_zone', 'adjacent_region', 'point_type', 'point_label','indicator']

    flow_data = flow_data.replace({'': None})

    flow_data['value'] = flow_data['value'].astype(float)

    # Show exit flows as negative values. BUG FIX: the original chained
    # assignment `flow_data[mask]['value'] = ...` wrote to a temporary copy
    # and never modified flow_data; .loc applies the negation in place.
    mask = (flow_data['direction_key'] == 'exit')
    flow_data.loc[mask, 'value'] = -1 * flow_data.loc[mask, 'value']

    # Turn to datetime
    flow_data['period_from'] = pd.to_datetime(flow_data['period_from'], utc = True)

    # Join flow rows with point metadata
    merged = pd.merge(flow_data, point_data, on = ['tso_item_identifier','tso_eic_code','point_key','direction_key'], suffixes = ('','_y'))

    merged.rename(
        columns = {
            't_so_country' : 'country',
            't_so_balancing_zone' : 'balancing_zone'
        },
        inplace= True
    )

    # Disambiguate identically-named points in different countries.
    merged['point_label'] = merged['point_label'] + " - (" + merged['country'] + ")"

    # Aggregate values per point, facet cell and resampled period.
    merged_grouped = merged.groupby([pd.Grouper(key = 'period_from',freq = aggregation, label = 'right'), 'point_label','flow_status', facet_row, facet_col]).agg(
        {'value': 'sum'}
    ).reset_index()

    # Prefer 'Confirmed' rows: after sorting by flow_status (alphabetical, so
    # 'Confirmed' sorts first), keep only the first row per period/point.
    merged_grouped = merged_grouped.sort_values(by = ['flow_status'])
    merged_grouped = merged_grouped.drop_duplicates(subset=['period_from','point_label'], keep='first')

    plot = (
        p9.ggplot(merged_grouped)
        + p9.aes(x='period_from', y='value', fill = 'point_label')
        + p9.labs(x='', y='', title = '', caption = 'Source: Entsog\nLibrary: entsog-py', color = 'Point Label', fill = 'Point Label')
        + p9.scale_x_date()
        # Axis labels are rescaled into the requested display unit.
        + p9.scale_y_continuous(labels= lambda l: [f"{(round(v / UNIT_TRANSFORMATION[unit]))} {unit}" for v in l],
                                expand = (0, 1))
        + p9.facet_grid(f'{facet_row}~{facet_col}', scales='free_y', labeller = p9.labeller(rows = label_func, cols = label_func))
        + ENTSOG_THEME
        + p9.scale_fill_brewer(type = 'qual', palette = 'Pastel1')
        + p9.geom_area(alpha = 0.8, position = 'stack', color = 'black', size = 0.35)
    )

    return plot
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 | # Entsog-py
2 | Python client for the ENTSO-G API (european network of transmission system operators for gas)
3 |
4 | Documentation of the API found on https://transparency.entsog.eu/api/archiveDirectories/8/api-manual/TP_REG715_Documentation_TP_API%20-%20v2.1.pdf
5 |
6 | Documentation of the data (user manual) found on https://www.entsog.eu/sites/default/files/2021-07/ENTSOG%20-%20TP%20User%20Manual_v_4.5.pdf
7 |
8 | Heavily inspired upon (and forked from) https://github.com/EnergieID/entsoe-py
9 |
10 | ## Installation
11 | ```
12 | python3 -m pip install entsog-py
13 | ```
14 |
15 | ## Usage
16 | The package comes with 2 clients:
17 | - [`EntsogRawClient`](#EntsogRawClient): Returns data in its raw format, usually JSON
18 | - [`EntsogPandasClient`](#EntsogPandasClient): Returns data parsed as a Pandas DataFrame
19 |
20 | It's preferable to use the Pandas Client as this will handle most API limitations itself. However, if you want to obtain the pure raw data; you can use the raw client.
21 |
22 | ## Example Use Case
23 | On www.gasparency.com you can find example use cases of the data. Almost everything there is achieved with the help of this package!
24 |
25 | ### EntsogRawClient
26 | ```python
27 | from entsog import EntsogRawClient
28 | import pandas as pd
29 |
30 | client = EntsogRawClient()
31 |
32 | start = pd.Timestamp('20171201', tz='Europe/Brussels')
33 | end = pd.Timestamp('20180101', tz='Europe/Brussels')
34 | country_code = 'NL' # Netherlands
35 |
36 | client.query_connection_points()
37 | client.query_operators()
38 | client.query_balancing_zones()
39 | client.query_operator_point_directions(country_code)
40 | client.query_interconnections()
41 | client.query_aggregate_interconnections()
42 | client.query_urgent_market_messages()
43 | client.query_tariffs(start = start, end = end, country_code = country_code)
44 | client.query_tariffs_sim(start = start, end = end, country_code = country_code)
45 |
46 | operational_options = {
47 |     'interruption_capacity' : "Actual interruption of interruptible capacity",
48 |     'allocation' : "Allocation",
49 |     'firm_available' : "Firm Available",
50 |     'firm_booked' : "Firm Booked",
51 |     'firm_interruption_planned' : "Firm Interruption Planned - Interrupted",
52 |     'firm_interruption_unplanned' : "Firm Interruption Unplanned - Interrupted",
53 |     'firm_technical' : "Firm Technical",
54 |     'gcv' : "GCV",
55 |     'interruptible_available' : "Interruptible Available",
56 |     'interruptible_booked' : "Interruptible Booked",
57 |     'interruptible_interruption_actual' : "Interruptible Interruption Actual – Interrupted",
58 |     'interruptible_interruption_planned' : "Interruptible Interruption Planned - Interrupted",
59 |     'interruptible_total' : "Interruptible Total",
60 |     'nomination' : "Nomination",
61 |     'physical_flow' : "Physical Flow",
62 |     'firm_interruption_capacity_planned' : "Planned interruption of firm capacity",
63 |     'renomination' : "Renomination",
64 |     'firm_interruption_capacity_unplanned' : "Unplanned interruption of firm capacity",
65 |     'wobbe_index' : "Wobbe Index",
66 |     'oversubscription_available' : "Available through Oversubscription",
67 |     'surrender_available' : "Available through Surrender",
68 |     'uioli_available_lt' : "Available through UIOLI long-term",
69 |     'uioli_available_st' : "Available through UIOLI short-term"}
70 |
71 | ```
72 |
73 | ### EntsogPandasClient
74 | The Pandas Client works similar to the Raw Client, with extras:
75 | - API limitations of big requests are automatically dealt with and put into multiple calls.
76 | - Tariffs (and simulated tariffs) can be melted into a nice storable format. Instead of having one row with EUR, local currency, and shared currency for each separate product, it will create a row for each.
77 | - Operational data can either be requested in the raw format (which requires some loading time) or via the aggregate function `query_operational_data_all`, which aggressively requests all points in Europe and is a lot faster.
78 | - It's easier to navigate points, for instance if you want to check gazprom points. See below.
79 |
80 | ```python
81 | from entsog import EntsogPandasClient
82 | import pandas as pd
83 |
84 | client = EntsogPandasClient()
85 |
86 | start = pd.Timestamp('20171228', tz='Europe/Brussels')
87 | end = pd.Timestamp('20180101', tz='Europe/Brussels')
88 | country_code = 'NL' # Netherlands
89 |
90 | client.query_connection_points()
91 | client.query_operators(country_code)
92 | client.query_balancing_zones()
93 | client.query_operator_point_directions()
94 | client.query_interconnections()
95 | client.query_aggregate_interconnections()
96 | client.query_urgent_market_messages()
97 |
98 |
99 | client.query_tariffs(start = start, end = end, country_code = country_code, melt = True, verbose = True)
100 | client.query_tariffs_sim(start = start, end = end, country_code = country_code, verbose = True)
101 |
102 | client.query_aggregated_data(start = start, end = end, country_code = country_code)
103 | # TODO: Add interruptions...
104 | # client.query_interruptions(start = start, end = end)
105 | client.query_CMP_auction_premiums(start = start, end = end)
106 | client.query_CMP_unavailable_firm_capacity(start = start, end = end)
107 |
108 | client.query_CMP_unsuccesful_requests(start = start, end = end)
109 |
110 | operational_options = {
111 | 'interruption_capacity' : "Actual interruption of interruptible capacity",
112 | 'allocation' : "Allocation",
113 | 'firm_available' : "Firm Available",
114 | 'firm_booked' : "Firm Booked",
115 | 'firm_interruption_planned' : "Firm Interruption Planned - Interrupted",
116 | 'firm_interruption_unplanned' :"Firm Interruption Unplanned - Interrupted",
117 | 'firm_technical' : "Firm Technical",
118 | 'gcv' : "GCV",
119 | 'interruptible_available' : "Interruptible Available",
120 | 'interruptible_booked' : "Interruptible Booked",
121 | 'interruptible_interruption_actual' : "Interruptible Interruption Actual – Interrupted",
122 | 'interruptible_interruption_planned' : "Interruptible Interruption Planned - Interrupted",
123 | 'interruptible_total' : "Interruptible Total",
124 | 'nomination' : "Nomination",
125 | 'physical_flow' : "Physical Flow",
126 | 'firm_interruption_capacity_planned' : "Planned interruption of firm capacity",
127 | 'renomination' : "Renomination",
128 | 'firm_interruption_capacity_unplanned' : "Unplanned interruption of firm capacity",
129 | 'wobbe_index' : "Wobbe Index",
130 | 'oversubscription_available' : "Available through Oversubscription",
131 | 'surrender_available' : "Available through Surrender",
132 | 'uioli_available_lt' : "Available through UIOLI long-term",
133 | 'uioli_available_st' : "Available through UIOLI short-term"
134 | }
135 |
136 | client.query_operational_data(start = start, end = end, country_code = country_code, indicators = ['renomination', 'physical_flow'])
137 | # You should use this when you want to query operational data for the entirety of continental europe.
138 | client.query_operational_data_all(start = start, end = end, indicators = ['renomination', 'physical_flow'])
139 | # Example for if you would like to see Gazprom points.
140 | points = client.query_operator_point_directions()
141 | mask = points['connected_operators'].str.contains('Gazprom')
142 | masked_points = points[mask]
143 | print(masked_points)
144 |
145 | keys = []
146 | for idx, item in masked_points.iterrows():
147 | keys.append(f"{item['operator_key']}{item['point_key']}{item['direction_key']}")
148 |
149 | data = client.query_operational_point_data(start = start, end = end, indicators = ['physical_flow'], point_directions = keys, verbose = False)
150 |
151 | print(data.head())
152 |
153 |
154 | ```
155 |
--------------------------------------------------------------------------------
/entsog/decorators.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from socket import gaierror
3 | from time import sleep
4 | import requests
5 | from functools import wraps
6 | from .exceptions import NoMatchingDataError, PaginationError, BadGatewayError, TooManyRequestsError, NotFoundError
7 | import pandas as pd
8 | import logging
9 |
10 | from .misc import year_blocks, day_blocks, month_blocks, week_blocks
11 |
12 |
def retry(func):
    """Retry wrapper: catches transient connection errors, waits, and retries.

    Expects the wrapped method's owner (``args[0]``) to expose
    ``retry_count`` and ``retry_delay`` attributes.  If every attempt
    fails, the last caught error is re-raised.
    """

    @wraps(func)
    def retry_wrapper(*args, **kwargs):
        client = args[0]
        last_error = None
        attempt = 0
        while attempt < client.retry_count:
            try:
                return func(*args, **kwargs)
            except (requests.ConnectionError, gaierror, BadGatewayError, TooManyRequestsError) as exc:
                last_error = exc
                # Back off linearly with the attempt number.
                delay = client.retry_delay * (attempt + 1)
                print(f"Connection error, retrying in {delay} seconds", file=sys.stderr)
                sleep(delay)
                attempt += 1
        raise last_error

    return retry_wrapper
35 |
def paginated(func):
    """Catches a PaginationError, halves the requested period recursively,
    and concatenates the partial results into one DataFrame."""

    @wraps(func)
    def pagination_wrapper(*args, start, end, **kwargs):
        try:
            return func(*args, start=start, end=end, **kwargs)
        except PaginationError:
            # Too many results for one call: split the period in two.
            midpoint = start + (end - start) / 2
            first_half = pagination_wrapper(*args, start=start, end=midpoint, **kwargs)
            second_half = pagination_wrapper(*args, start=midpoint, end=end, **kwargs)
            return pd.concat([first_half, second_half])

    return pagination_wrapper
52 |
def documents_limited(n):
    def decorator(func):
        """Deals with calls where you cannot query more than n documents at
        a time, by stepping an offset of n documents per call until the API
        reports no more data (up to a hard ceiling of 250 000 documents)."""

        @wraps(func)
        def documents_wrapper(*args, **kwargs):
            collected = []
            offset = 0
            while offset < 250_000 + n:
                try:
                    chunk = func(*args, offset=offset, **kwargs)
                except NoMatchingDataError:
                    logging.debug(f"NoMatchingDataError: for offset {offset}")
                    break
                except NotFoundError:
                    logging.debug(f"NotFoundError: for offset {offset}")
                    break
                # Be polite to the API between successive pages.
                sleep(0.25)
                collected.append(chunk)
                offset += n

            if not collected:
                # All the data returned are void
                raise NoMatchingDataError

            result = pd.concat(collected, sort=True)
            return result.drop_duplicates(keep='first')

        return documents_wrapper
    return decorator
81 |
82 |
def year_limited(func):
    """Deals with calls where you cannot query more than a year, by
    splitting the call into per-year blocks and concatenating the results.

    Blocks without data are logged and skipped; if every block is empty
    a NoMatchingDataError is raised."""

    @wraps(func)
    def year_wrapper(*args, start, end, **kwargs):
        frames = []
        for block_start, block_end in year_blocks(start, end):
            try:
                frames.append(func(*args, start=block_start, end=block_end, **kwargs))
            except NoMatchingDataError:
                logging.debug(f"NoMatchingDataError: between {block_start} and {block_end}")
                frames.append(None)

        if all(frame is None for frame in frames):
            # All the data returned are void
            raise NoMatchingDataError

        # pd.concat silently drops the None placeholders.
        combined = pd.concat(frames, sort=True)
        return combined.drop_duplicates(keep='first')

    return year_wrapper
108 |
109 |
def month_limited(func):
    """Deals with calls where you cannot query more than a month, by
    splitting the call into per-month blocks and concatenating the results.

    Blocks without data are logged and skipped; if every block is empty
    a NoMatchingDataError is raised."""

    @wraps(func)
    def month_wrapper(*args, start, end, **kwargs):
        frames = []
        for block_start, block_end in month_blocks(start, end):
            try:
                frames.append(func(*args, start=block_start, end=block_end, **kwargs))
            except NoMatchingDataError:
                logging.debug(f"NoMatchingDataError: between {block_start} and {block_end}")
                frames.append(None)

        if all(frame is None for frame in frames):
            # All the data returned are void
            raise NoMatchingDataError

        # pd.concat silently drops the None placeholders.
        combined = pd.concat(frames, sort=True)
        return combined.drop_duplicates(keep='first')

    return month_wrapper
135 |
136 |
def day_limited(func):
    """Deals with calls where you cannot query more than a day, by
    splitting the call into per-day blocks and concatenating the results.

    Blocks without data are reported on stderr and skipped; if every
    block is empty a NoMatchingDataError is raised."""

    @wraps(func)
    def day_wrapper(*args, start, end, **kwargs):
        frames = []
        for block_start, block_end in day_blocks(start, end):
            try:
                frames.append(func(*args, start=block_start, end=block_end, **kwargs))
            except NoMatchingDataError:
                print(f"NoMatchingDataError: between {block_start} and {block_end}", file=sys.stderr)
                frames.append(None)

        if all(frame is None for frame in frames):
            # All the data returned are void
            raise NoMatchingDataError

        # pd.concat silently drops the None placeholders.
        combined = pd.concat(frames)
        return combined.drop_duplicates(keep='first')

    return day_wrapper
164 |
165 |
def week_limited(func):
    """Deals with calls where you cannot query more than a week, by
    splitting the call into per-week blocks and concatenating the results.

    Blocks without data are reported on stderr and skipped; if every
    block is empty a NoMatchingDataError is raised."""

    @wraps(func)
    def week_wrapper(*args, start, end, **kwargs):
        frames = []
        for block_start, block_end in week_blocks(start, end):
            try:
                frames.append(func(*args, start=block_start, end=block_end, **kwargs))
            except NoMatchingDataError:
                print(f"NoMatchingDataError: between {block_start} and {block_end}", file=sys.stderr)
                frames.append(None)

        if all(frame is None for frame in frames):
            # All the data returned are void
            raise NoMatchingDataError

        # pd.concat silently drops the None placeholders.
        combined = pd.concat(frames)
        return combined.drop_duplicates(keep='first')

    return week_wrapper
192 |
193 |
def operator_limited(func):
    """Deals with calls where you cannot query more than one operator, by
    issuing one call per operator and concatenating the results.

    Operators without data are reported on stderr and skipped; if every
    operator is empty a NoMatchingDataError is raised."""

    @wraps(func)
    def operator_wrapper(*args, operator, **kwargs):
        frames = []
        for single_operator in operator:
            try:
                frames.append(func(*args, operator=single_operator, **kwargs))
            except NoMatchingDataError:
                print(f"NoMatchingDataError: {single_operator}", file=sys.stderr)
                frames.append(None)

        if all(frame is None for frame in frames):
            # All the data returned are void
            raise NoMatchingDataError

        # pd.concat silently drops the None placeholders.
        return pd.concat(frames)

    return operator_wrapper
218 |
--------------------------------------------------------------------------------
/entsog/mappings.py:
--------------------------------------------------------------------------------
1 | import enum
2 | from typing import Union
3 |
4 |
def lookup_area(s: Union['Area', str]) -> 'Area':
    """Resolve *s* (an Area member, a country code, or a direct code) to an
    Area; returns None when nothing matches (e.g. a None argument)."""
    if isinstance(s, Area):
        # Already an Area member.
        return s
    # String input: first try the enum member name (country code)...
    try:
        return Area[s]
    except KeyError:
        pass
    # ...then fall back to matching on the enum value (direct code).
    for candidate in Area:
        if candidate.value == s:
            return candidate
    # No match (e.g. None argument).
    return None
22 |
23 |
def lookup_indicator(s: Union['Indicator', str]) -> 'Indicator':
    """Resolve *s* (an Indicator member, its name, or its value) to an Indicator."""
    return _lookup(s, Indicator)
26 |
27 |
def lookup_balancing_zone(s: Union['BalancingZone', str]) -> 'BalancingZone':
    """Resolve *s* (a BalancingZone member, its name, or its value) to a BalancingZone."""
    return _lookup(s, BalancingZone)
30 |
31 |
def lookup_country(s: Union['Country', str]) -> 'Country':
    """Resolve *s* (a Country member, its name, or its value) to a Country."""
    return _lookup(s, Country)
34 |
35 |
def _lookup(s, enum_class):
    """Resolve *s* to a member of *enum_class*.

    *s* may already be a member, a member name, or a member value.
    Prints a diagnostic and re-raises IndexError when nothing matches.

    Note: the second parameter used to be named ``object``, shadowing the
    builtin; all in-package callers pass it positionally.
    """
    if isinstance(s, enum_class):
        # If it already is the required object, we're happy
        return s
    try:
        # If it is a code string, we do a lookup by member name
        return enum_class[s]
    except KeyError:
        # It is not, it may be a direct code (member value)
        try:
            return [member for member in enum_class if member.value == s][0]
        except IndexError as e:
            print(
                f"{s} is not contained in {enum_class}. This information is hardcoded, please raise an issue. Message: {e}")
            raise
54 |
55 |
class BalancingZone(enum.Enum):
    '''
    ENUM containing 3 things about a BalancingZone: Key, Label, Manager

    The key string doubles as the enum value; the zone label and the
    balancing-zone manager's label are stored as extra attributes.
    '''

    def __new__(cls, *args, **kwds):
        obj = object.__new__(cls)
        # The key string (first tuple element) is the canonical enum value.
        obj._value_ = args[0]
        return obj

    # ignore the first param since it's already set by __new__
    def __init__(self, _: str, label: str, manager_label: str):
        self._label = label
        self._manager_label = manager_label

    def __str__(self):
        return self.value

    @property
    def code(self):
        return self.value

    @property
    def label(self):
        # BUG FIX: previously returned the non-existent attribute
        # `_operator_labels` (copy-paste from Area), raising AttributeError.
        return self._label

    @property
    def manager_label(self):
        # BUG FIX: previously returned the non-existent attribute
        # `_operator_labels` (copy-paste from Area), raising AttributeError.
        return self._manager_label

    AT = "AT---------", "Austria", "Central European Gas Hub AG",
    BE_H = "BE-H-ZONE--", "H-Zone", "Fluxys Belgium",
    BE_L = "BE-L-ZONE--", "L-Zone", "Fluxys Belgium",
    BE_LUX = "BE-LUX------", "BeLux", "Fluxys Belgium",
    BG_GNTT = "BG-GTNTT---", "GTNTT-BG", "Bulgartransgaz EAD",
    BG_NGTS = "BG-NGTS----", "Bulgaria", "Bulgartransgaz EAD",
    CH = "CH---------", "Switzerland", "Swissgas AS",
    CZ = "CZ---------", "Czech", "NET4GAS, s.r.o.",
    DE_GASPOOL = "DE-GASPOOL-", "GASPOOL", "GASPOOL Balancing Services GmbH",
    DE_NCG = "DE-NCG-----", "NCG", "Net Connect Germany",
    DK = "DK---------", "Denmark", "Energinet",
    EE = "EE---------", "Estonia", "Elering AS",
    ES = "ES---------", "Spain", "Enagas Transporte S.A.U.",
    FI = "FI---------", "Finland", "Gasgrid Finland Oy",
    FR_NORTH = "FR-NORTH---", "PEG North", "GRTgaz",
    FR_SOUTH = "FR-SOUTH---", "PEG South", "GRTgaz",
    FR_TIGF = "FR-TIGF----", "PEG TIGF", "TERÉGA",
    FR_TRS = "FR-TRS------", "TRS", "GRTgaz",
    GR = "GR---------", "Greece", "DESFA S.A.",
    HR = "HR---------", "Croatia", "Plinacro Ltd",
    HU = "HU---------", "Hungary", "FGSZ Ltd.",
    IE = "IE---------", "Ireland", "Gas Networks Ireland",
    IT = "IT---------", "Italy", "Snam Rete Gas S.p.A.",
    LT = "LT---------", "Lithuania", "AB Amber Grid",
    LU = "LU---------", "Luxemburg", "Creos Luxembourg S.A.",
    LV = "LV---------", "Latvia", "Conexus Baltic Grid",
    NL = "NL---------", "Netherlands", "Gasunie Transport Services B.V.",
    PL = "PL---------", "Poland H-gas", "GAZ-SYSTEM S.A.",
    PL_YAMAL = "PL-YAMAL---", "TGPS (YAMAL)", "GAZ-SYSTEM S.A.",
    PT = "PT---------", "Portugal", "REN - Gasodutos, S.A.",
    RO = "RO---------", "RO_NTS", "SNTGN Transgaz S.A.",
    RO_TBP = "RO-TBP-----", "RO_DTS", "SNTGN Transgaz S.A.",
    SE = "SE---------", "Sweden", "Swedegas AB",
    SI = "SI---------", "Slovenia", "Plinovodi d.o.o.",
    SK = "SK---------", "Slovakia", "eustream, a.s.",
    UK = "UK---------", "UK", "National Grid Gas plc",
    UK_IUK = "UK-IUK-----", "IUK", "Interconnector",
    UK_NI = "UK-NI------", "NI", "Premier Transmission Ltd",
    PL_L = "PL-L-gas---", "Poland L-gas", "GAZ-SYSTEM S.A. (ISO)",
    FR = "FR----------", "TRF", "GRTgaz",
    DK_SE = "DK-SE-------", "Joint Bal Zone DK/SE", "Energinet",
    UA = "UA---------", "Ukraine", "LLC Gas TSO of Ukraine",
    MD = "MD---------", "Moldova", "Moldovatransgaz LLC",
    TR = "TR---------", "Turkey", "",
    MK = "MK---------", "North Macedonia", "GA-MA - Skopje",
    RS = "RS---------", "Serbia", "Srbijagas",
    EE_LV = "EE-LV------", "Joint Bal Zone EE/LV", "Elering AS",
    DE_THE = "DE-THE-----", "DE THE BZ", ""
134 |
135 |
class Country(enum.Enum):
    '''
    ENUM containing 2 things about a country: code, label

    The two-letter code doubles as the enum value; the human-readable
    label is stored as an extra attribute.
    '''

    def __new__(cls, *args, **kwds):
        member = object.__new__(cls)
        # The code (first tuple element) becomes the enum value.
        member._value_ = args[0]
        return member

    # The first argument is already consumed by __new__ as the value.
    def __init__(self, _: str, label: str):
        self._label = label

    def __str__(self):
        return self.value

    @property
    def code(self):
        return self.value

    @property
    def label(self):
        return self._label

    AL = "AL", "Albania"
    CH = "CH", "Switzerland"
    AT = "AT", "Austria"
    AZ = "AZ", "Azerbaijan"
    BA = "BA", "Bosnia Herzegovina"
    BE = "BE", "Belgium"
    BG = "BG", "Bulgaria"
    BY = "BY", "Belarus"
    CY = "CY", "Cyprus"
    CZ = "CZ", "Czechia"
    DE = "DE", "Germany"
    DK = "DK", "Denmark"
    EE = "EE", "Estonia"
    ES = "ES", "Spain"
    FI = "FI", "Finland"
    FR = "FR", "France"
    GR = "GR", "Greece"
    HR = "HR", "Croatia"
    HU = "HU", "Hungary"
    IE = "IE", "Ireland"
    UK = "UK", "United Kingdom"
    IT = "IT", "Italy"
    LT = "LT", "Lithuania"
    LU = "LU", "Luxemburg"
    LV = "LV", "Latvia"
    LY = "LY", "Libya"
    MD = "MD", "Moldavia"
    MK = "MK", "North Macedonia"
    MT = "MT", "Malta"
    NL = "NL", "Netherlands"
    NO = "NO", "Norway"
    PL = "PL", "Poland"
    PT = "PT", "Portugal"
    RO = "RO", "Romania"
    RS = "RS", "Serbia"
    RU = "RU", "Russia"
    SE = "SE", "Sweden"
    SI = "SI", "Slovenia"
    SK = "SK", "Slovakia"
    TN = "TN", "Tunisia"
    TR = "TR", "Turkey"
    SM = 'SM', 'San Marino'
    UA = "UA", "Ukraine"
204 |
205 |
class Area(enum.Enum):
    '''
    ENUM containing 2 things about an Area: OperatorKeys, OperatorLabels

    The tuple of operator keys doubles as the enum value; the matching
    tuple of operator labels is stored as an extra attribute.
    '''

    def __new__(cls, *args, **kwds):
        obj = object.__new__(cls)
        # The tuple of operator keys (first element) is the enum value.
        obj._value_ = args[0]
        return obj

    # ignore the first param since it's already set by __new__
    def __init__(self, _: str, operator_labels: tuple):
        self._operator_labels = operator_labels

    def __str__(self):
        # BUG FIX: the value here is a tuple of operator keys, and __str__
        # must return a str -- returning the tuple raised TypeError whenever
        # str() was called on a member.
        return str(self.value)

    @property
    def code(self):
        return self.value

    @property
    def operator_labels(self):
        return self._operator_labels

    # One element tuple consists of (element, )
    CH = ("AL-TSO-0001", "CH-TSO-0001", "CH-TSO-0002", "CH-TSO-0003", "CH-TSO-0004",), (
        "TAP AG", "Swissgas", "FluxSwiss", "South Stream", "NABUCCO",)
    AL = ("AL-TSO-0002", "AL-TSO-0003", "AL-TSO-0004", "AL-TSO-0005",), (
        "TEE", "MIE Albania & Albgaz", "Albgaz", "ALKOGAP",)
    AT = ("AT-TSO-0001", "AT-TSO-0002", "AT-TSO-0003", "AT-TSO-0004", "AT-TSO-0005", "AT-TSO-0006", "AT-TSO-0007",
          "AT-TSO-0008", "AT-TSO-0009",), (
        "GCA", "BOG", "TAG GmbH", "TIGAS", "OÖ. Ferngas", "Salzburg", "KELAG Netz", "EVA", "South Stream AT",)
    AZ = ("AZ-TSO-0001", "AZ-TSO-0002", "AZ-TSO-0003",), ("BP Exp (Shah Deniz)", "W-Stream Caspian", "SMO",)
    BA = ("BA-TSO-0001",), ("BH Gas",)
    BE = ("BE-TSO-0001", "BE-TSO-0002",), ("Fluxys Belgium", "Unknown",)
    BG = ("BG-TSO-0001", "BG-TSO-0002", "BG-TSO-0003", "BG-TSO-0004",), (
        "Bulgartransgaz", "IBS Future Operator", "ICGB", "South Stream BG",)
    BY = ("BY-TSO-0001",), ("Gazprom Belarus",)
    CY = ("CY-TSO-0001",), ("Cy",)
    CZ = ("CZ-TSO-0001",), ("N4G",)
    DE = ("DE-TSO-0001", "DE-TSO-0002", "DE-TSO-0003", "DE-TSO-0004", "DE-TSO-0005", "DE-TSO-0006", "DE-TSO-0007",
          "DE-TSO-0008", "DE-TSO-0009", "DE-TSO-0010", "DE-TSO-0011", "DE-TSO-0012", "DE-TSO-0013", "DE-TSO-0014",
          "DE-TSO-0015", "DE-TSO-0016", "DE-TSO-0017", "DE-TSO-0018", "DE-TSO-0019", "DE-TSO-0020", "DE-TSO-0021",
          "DE-TSO-0022",), (
        "GASCADE", "Thyssengas", "ONTRAS Gastransport GmbH", "GRTD", "GUD", "GTG", "Fluxys TENP GmbH", "Nowega", "OGE",
        "Bayernets", "Tauerngasleitung", "OPAL NEL Transport", "JGT (TSO)", "TNBW", "GOAL", "OPAL", "NEL",
        "Fluxys Deutschland", "Fluxys TENP & OGE", "LBTG", "GRTD und OGE", "Germany DCS",)
    DK = ("DK-TSO-0001", "DK-TSO-0002",), ("Energinet", "DONG",)
    EE = ("EE-TSO-0001", "EE-TSO-0002",), ("Elering", "Balti Gaas",)
    ES = ("ES-TSO-0001", "ES-TSO-0002", "ES-TSO-0003", "ES-TSO-0004", "ES-TSO-0005", "ES-TSO-0006", "ES-TSO-0007",
          "ES-TSO-0008", "ES-TSO-0009", "ES-TSO-0010",), (
        "Medgaz", "Reganosa (LSO)", "Saggas", "ETN", "BBG", "Enagas", "GNA", "EMPL", "Reganosa", "Enagas (LSO)",)
    FI = ("FI-TSO-0001", "FI-TSO-0002", "FI-TSO-0003",), ("Gasum", "Baltic Connector Oy", "Gasgrid Finland",)
    FR = ("FR-TSO-0001", "FR-TSO-0002", "FR-TSO-0003",), ("Gaz de Normandie", "TERÉGA", "GRTgaz",)
    GR = ("GR-TSO-0001", "GR-TSO-0002", "GR-TSO-0003", "GR-TSO-0004", "GR-TSO-0005",), (
        "DESFA S.A.", "IGI Poseidon", "Future Greek Trans-M", "HRADF", "East Med Operator",)
    HR = ("HR-TSO-0001",), ("Plinacro",)
    HU = ("HU-TSO-0001", "HU-TSO-0002",), ("FGSZ", "MGT",)
    UK = ("IE-TSO-0001", "UK-TSO-0001", "UK-TSO-0002", "UK-TSO-0003", "UK-TSO-0005", "UK-TSO-0006", "UK-TSO-0007",), (
        "GNI (UK)", "National Grid", "PTL", "IUK", "Belfast Gas", "BGE (NI)", "White Stream",)
    IE = ("IE-TSO-0002",), ("GNI",)
    IT = ("IT-TSO-0001", "IT-TSO-0002", "IT-TSO-0003", "IT-TSO-0004", "IT-TSO-0005",), (
        "SNAM RETE GAS", "Galsi", "S.G.I. S.p.A.", "Infrastrutture Trasporto Gas", "ENURA",)
    LT = ("LT-TSO-0001",), ("Amber Grid",)
    LU = ("LU-TSO-0001",), ("CREOS",)
    LV = ("LV-TSO-0001",), ("Conexus Baltic Grid JSC",)
    LY = ("LY-TSO-0001",), ("Green Stream",)
    MD = ("MD-TSO-0001", "RO-TSO-0004",), ("Moldovatransgaz LLC", "Vestmoldtransgaz",)
    MK = ("MK-TSO-0001", "MK-TSO-0002", "MK-TSO-0003",), ("Makpetrol", "GA-MA - Skopje", "MER",)
    MT = ("MT-TSO-0001", "MT-TSO-0002", "MT-TSO-0003",), ("MEW Malta", "Melita TransGas", "ICM",)
    NL = ("NL-TSO-0001", "UK-TSO-0004",), ("GTS", "BBL",)
    NO = ("NO-TSO-0001",), ("Gassco",)
    PL = ("PL-TSO-0001", "PL-TSO-0002",), ("GAZ-SYSTEM (ISO)", "GAZ-SYSTEM",)
    PT = ("PT-TSO-0001", "PT-TSO-0002",), ("REN", "REN Atlantico",)
    RO = ("RO-TSO-0001", "RO-TSO-0002", "RO-TSO-0003",), ("Transgaz", "GdF Energy Romania", "AGRI",)
    RS = ("RS-TSO-0001", "RS-TSO-0002", "RS-TSO-0003",), ("Srbijagas", "Kosovo TSO", "GASTRANS",)
    RU = ("RU-TSO-0001", "RU-TSO-0002", "RU-TSO-0003", "RU-TSO-0004",), (
        "Gazprom", "Nord Stream", "Nord Stream 2", "Chornomornaftogaz",)
    SE = ("SE-TSO-0001", "SE-TSO-0002",), ("Swedegas", "Svenska Kraftnat",)
    SI = ("SI-TSO-0001",), ("Plinovodi",)
    SK = ("SK-TSO-0001", "SK-TSO-0002",), ("Eustream", "Eastring",)
    TN = ("TN-TSO-0001",), ("TMPC",)
    TR = ("TR-TSO-0001", "TR-TSO-0002", "TR-TSO-0003", "TR-TSO-0004",), ("Botas", "TANAP", "Leviathan TSO", "TAGTAS",)
    UA = ("UA-TSO-0001",), ("Gas TSO UA",)
291 |
292 |
293 | # TODO: Add label containing description
class Indicator(enum.Enum):
    '''
    ENUM containing full label of indicator

    The full API label doubles as the enum value, so members can be
    looked up either by attribute name or by label.
    '''

    def __new__(cls, *args, **kwds):
        obj = object.__new__(cls)
        # The label (first element) becomes the enum value.
        obj._value_ = args[0]
        return obj

    # Nothing to store beyond the value already set by __new__.
    def __init__(self, _: str):
        pass

    def __str__(self):
        return self.value

    @property
    def code(self):
        return self.value

    interruption_capacity = "Actual interruption of interruptible capacity",
    allocation = "Allocation",
    firm_available = "Firm Available",
    firm_booked = "Firm Booked",
    firm_interruption_planned = "Firm Interruption Planned - Interrupted",
    firm_interruption_unplanned = "Firm Interruption Unplanned - Interrupted",
    firm_technical = "Firm Technical",
    gcv = "GCV",
    interruptible_available = "Interruptible Available",
    interruptible_booked = "Interruptible Booked",
    interruptible_interruption_actual = "Interruptible Interruption Actual – Interrupted",
    interruptible_interruption_planned = "Interruptible Interruption Planned - Interrupted",
    interruptible_total = "Interruptible Total",
    nomination = "Nomination",
    physical_flow = "Physical Flow",
    firm_interruption_capacity_planned = "Planned interruption of firm capacity",
    renomination = "Renomination",
    firm_interruption_capacity_unplanned = "Unplanned interruption of firm capacity",
    wobbe_index = "Wobbe Index",
    oversubscription_available = "Available through Oversubscription",
    surrender_available = "Available through Surrender",
    uioli_available_lt = "Available through UIOLI long-term",
    uioli_available_st = "Available through UIOLI short-term"
338 |
339 |
340 |
# Maps the ENTSOG API dataset number (as a string) to a human-readable
# description of what that dataset contains.
DATASET_MAPPINGS = {
    '1': 'Operators and Operational data',
    '2': 'Points and CMP Unsuccessful Request',
    '3': 'Balancing Zones CMP Auction Premium, balancing zones',
    '4': 'Interconnections and CMP Unavailable Firm',
    '5': 'Operator Point Directions',
    '6': 'Aggregate Interconnections'
}

# TODO: All countries must be represented in operational_aggregates, or this does NOT work
# Maps a two-letter country code to a broad geographic region label.
# NOTE(review): presumably used to group results per region when aggregating
# operational data -- confirm against the client code.
REGIONS = {
    "AL": "Northern Africa",
    'SM': "Southern Europe",
    "CH": "Western Europe",
    "AT": "Western Europe",
    "AZ": "Western Asia",
    "BA": "Southern Europe",
    "BE": "Western Europe",
    "BG": "Eastern Europe",
    "BY": "Eastern Europe",
    "CY": "Western Asia",
    "CZ": "Eastern Europe",
    "DE": "Western Europe",
    "DK": "Northern Europe",
    "EE": "Northern Europe",
    "ES": "Southern Europe",
    "FI": "Northern Europe",
    "FR": "Western Europe",
    "GR": "Southern Europe",
    "HR": "Southern Europe",
    "HU": "Eastern Europe",
    "IE": "Northern Europe",
    "UK": "Western Europe",
    "IT": "Southern Europe",
    "LT": "Northern Europe",
    "LU": "Western Europe",
    "LV": "Northern Europe",
    "LY": "Northern Africa",
    "MD": "Eastern Europe",
    "MK": "Southern Europe",
    "MT": "Southern Europe",
    "NL": "Western Europe",
    "NO": "Northern Europe",
    "PL": "Eastern Europe",
    "PT": "Southern Europe",
    "RO": "Eastern Europe",
    "RS": "Southern Europe",
    "RU": "Eastern Europe",
    "SE": "Northern Europe",
    "SI": "Southern Europe",
    "SK": "Eastern Europe",
    "TN": "Northern Africa",
    "TR": "Western Asia",
    "UA": "Eastern Europe",
    'Undefined': 'Undefined'
}
397 |
--------------------------------------------------------------------------------
/entsog/parsers.py:
--------------------------------------------------------------------------------
1 | import bs4
2 | import pandas as pd
3 | import json
4 |
5 | from entsog.exceptions import NoMatchingDataError
6 | from .mappings import REGIONS
7 | from .misc import to_snake_case
8 |
9 |
def _extract_data(json_text):
    """Parse an ENTSOG JSON response body into a DataFrame.

    Responses carry a meta block under the first key and the payload under
    the second key; a response with fewer than two keys, or whose first key
    is 'message', holds no data and yields an empty DataFrame.
    """
    json_data = json.loads(json_text)
    keys = list(json_data.keys())
    # BUG FIX: `len(keys) < 2` also covers an empty JSON object, which
    # previously crashed with IndexError on `keys[0]`.
    if len(keys) < 2 or keys[0] == 'message':
        return pd.DataFrame()
    return pd.json_normalize(json_data[keys[1]])
19 |
20 |
def parse_general(json_text):
    """Parse a raw ENTSOG JSON response into a DataFrame with snake_cased columns."""
    frame = _extract_data(json_text)
    frame.columns = [to_snake_case(column) for column in frame.columns]
    return frame
25 |
26 |
def parse_operational_data(json_text: str, verbose: bool):
    """Parse an operational-data response.

    :param verbose: when True, return every column; otherwise a curated set.
    :raises NoMatchingDataError: when the response holds no rows.
    """
    data = parse_general(json_text)
    if data.empty:
        raise NoMatchingDataError('No matching data found')
    if verbose:
        return data
    columns = ['point_key', 'point_label', 'period_from', 'period_to', 'period_type', 'unit', 'indicator',
               'direction_key', 'flow_status', 'value',
               'tso_eic_code', 'tso_item_identifier',
               'operator_key',
               'interruption_type',
               'restoration_information',
               'capacity_type',
               'last_update_date_time',
               'item_remarks', 'general_remarks']
    return data[columns]
46 |
47 |
def parse_CMP_unsuccesful_requests(json_text: str, verbose: bool):
    """Parse a CMP unsuccessful-requests response.

    :param verbose: when True, return every column; otherwise a curated set.
    :raises NoMatchingDataError: when the response holds no rows.
    """
    data = parse_general(json_text)
    if data.empty:
        raise NoMatchingDataError('No matching data found')
    if verbose:
        return data
    columns = ['point_key', 'point_label', 'capacity_from', 'capacity_to', 'unit', 'direction_key',
               'requested_volume',
               'allocated_volume',
               'unallocated_volume',
               'last_update_date_time',
               'occurence_count',
               'item_remarks', 'general_remarks']
    return data[columns]
65 |
66 |
def parse_CMP_unavailable_firm_capacity(json_text: str, verbose: bool):
    """Parse a CMP unavailable-firm-capacity response.

    :param verbose: when True, return every column; otherwise a curated set.
    :raises NoMatchingDataError: when the response holds no rows.
    """
    data = parse_general(json_text)
    if data.empty:
        raise NoMatchingDataError('No matching data found')
    if verbose:
        return data
    columns = ['point_key', 'point_label', 'period_from', 'period_to', 'unit', 'allocation_process', 'direction_key',
               'requested_volume',
               'allocated_volume',
               'unallocated_volume',
               'last_update_date_time',
               'item_remarks', 'general_remarks']
    return data[columns]
83 |
84 |
def parse_CMP_auction_premiums(json_text: str, verbose: bool):
    """Parse a CMP auction-premiums response.

    :param verbose: when True, return every column; otherwise a curated set.
    :raises NoMatchingDataError: when the response holds no rows.
    """
    data = parse_general(json_text)
    if data.empty:
        raise NoMatchingDataError('No matching data found')
    if verbose:
        return data
    columns = ['point_key', 'point_label', 'auction_from', 'auction_to', 'capacity_from', 'capacity_to', 'unit',
               'booking_platform_key', 'booking_platform_url', 'direction_key',
               'auction_premium',
               'cleared_price',
               'reserve_price',
               'last_update_date_time',
               'item_remarks', 'general_remarks']
    return data[columns]
102 |
103 |
def parse_interruptions(json_text: str, verbose: bool):
    """Parse an interruptions response.

    :param verbose: when True, return every column; otherwise a curated set.
    :raises NoMatchingDataError: when the response holds no rows.
    """
    data = parse_general(json_text)
    if data.empty:
        raise NoMatchingDataError('No matching data found')
    if verbose:
        return data
    columns = ['point_key', 'point_label', 'period_from', 'period_to', 'direction_key', 'unit', 'interruption_type',
               'capacity_type', 'capacity_commercial_type',
               'value',
               'restoration_information',
               'last_update_date_time',
               'item_remarks', 'general_remarks']
    return data[columns]
120 |
121 |
122 | # TODO: implement melt...
def parse_tariffs_sim(json_text: str, verbose: bool, melt: bool):
    """Parse a tariff-simulation response.

    :param json_text: raw JSON response body
    :param verbose: when True, return every column; otherwise a curated set
    :param melt: accepted for interface parity with parse_tariffs; melting
        is not implemented for simulated tariffs yet
    :raises NoMatchingDataError: when the response holds no rows
    """
    data = parse_general(json_text)
    # A previous no-op rename of 'product_simulation_cost_in_euro' to the
    # same name was removed (dead code).

    columns = ['point_key', 'point_label', 'period_from', 'period_to', 'direction_key', 'connection',
               'tariff_capacity_type', 'tariff_capacity_unit', 'tariff_capacity_remarks',
               'product_type',
               'operator_currency',
               'product_simulation_cost_in_local_currency',
               'product_simulation_cost_in_euro',
               'product_simulation_cost_remarks',
               'exchange_rate_reference_date',
               'last_update_date_time',
               'remarks',
               'item_remarks',
               'general_remarks']

    if data.empty:
        raise NoMatchingDataError('No matching data found')
    return data if verbose else data[columns]
154 |
155 |
def parse_tariffs(json_text: str, verbose: bool, melt: bool):
    """Parse a tariffs response (https://transparency.entsog.eu/api/v1/tariffsfulls).

    :param json_text: raw JSON response body
    :param verbose: when True (and melt is False) return every column
    :param melt: when True, melt the per-currency/per-unit tariff columns
        into one row per (variable, value, code), then split the unit code
        into currency, unit and product_code columns
    :raises NoMatchingDataError: propagated from parse_general callers'
        expectations is unchanged; this function itself returns whatever
        parse_general yields
    """
    data = parse_general(json_text)
    # Normalise the 'eurkwh' column names to the snake_cased 'eur_kwh' form.
    # (Identity mappings from the previous version were removed as dead code.)
    renamed_columns = {
        'applicable_tariff_per_eurkwh_d_unit': 'applicable_tariff_per_eur_kwh_d_unit',
        'applicable_tariff_per_eurkwh_d_value': 'applicable_tariff_per_eur_kwh_d_value',
        'applicable_tariff_per_eurkwh_h_unit': 'applicable_tariff_per_eur_kwh_h_unit',
        'applicable_tariff_per_eurkwh_h_value': 'applicable_tariff_per_eur_kwh_h_value',
    }
    data = data.rename(columns=renamed_columns)

    columns = [
        'point_key', 'point_label',
        'period_from', 'period_to', 'direction_key',
        'product_period_from', 'product_period_to',
        'product_type', 'connection',
        'multiplier', 'multiplier_factor_remarks',
        'discount_for_interruptible_capacity_value', 'discount_for_interruptible_capacity_remarks',
        'seasonal_factor', 'seasonal_factor_remarks',
        'operator_currency',
        'applicable_tariff_per_local_currency_kwh_d_value',
        'applicable_tariff_per_local_currency_kwh_d_unit',
        'applicable_tariff_per_local_currency_kwh_h_value',
        'applicable_tariff_per_local_currency_kwh_h_unit',

        'applicable_tariff_per_eur_kwh_d_unit',
        'applicable_tariff_per_eur_kwh_d_value',
        'applicable_tariff_per_eur_kwh_h_unit',
        'applicable_tariff_per_eur_kwh_h_value',

        'applicable_tariff_in_common_unit_value',
        'applicable_tariff_in_common_unit_unit',

        'applicable_commodity_tariff_local_currency',
        'applicable_commodity_tariff_euro',
        'applicable_commodity_tariff_remarks',

        'exchange_rate_reference_date',

        'last_update_date_time',
        'remarks',
        'item_remarks',
        'general_remarks']

    if verbose and not melt:
        return data

    data = data[columns]

    if not melt:
        return data

    # Melt each tariff basis (local currency / EUR / common unit) into its
    # own row, pairing the value columns with their matching unit columns.
    melt_columns_value = [
        "applicable_tariff_per_local_currency_kwh_d_value",
        "applicable_tariff_per_local_currency_kwh_h_value",
        "applicable_tariff_per_eur_kwh_h_value",
        "applicable_tariff_per_eur_kwh_d_value",
        "applicable_tariff_in_common_unit_value",
    ]

    melt_columns_unit = [
        "applicable_tariff_per_local_currency_kwh_d_unit",
        "applicable_tariff_per_local_currency_kwh_h_unit",
        "applicable_tariff_per_eur_kwh_h_unit",
        "applicable_tariff_per_eur_kwh_d_unit",
        "applicable_tariff_in_common_unit_unit",
    ]

    id_columns = list(set(columns) - set(melt_columns_unit + melt_columns_value))

    data_value = pd.melt(
        data,
        id_vars=id_columns,
        value_vars=melt_columns_value,
        var_name='variable',
        value_name='value'
    )
    # BUG FIX: pass regex=True explicitly -- pandas >= 2.0 treats the
    # pattern as a literal string by default, so the '_value'/'_unit'
    # suffixes were no longer stripped and the merge below mis-paired rows.
    data_value['variable'] = data_value["variable"].str.replace("_value$", "", regex=True)

    data_unit = pd.melt(
        data,
        id_vars=id_columns,
        value_vars=melt_columns_unit,
        var_name='variable',
        value_name='code'
    )
    data_unit['variable'] = data_unit["variable"].str.replace("_unit$", "", regex=True)

    # Re-join values with their units on the id columns plus the melted name.
    id_columns.append('variable')
    data_pivot = pd.merge(data_value, data_unit, on=id_columns)

    # Keep only the price basis in 'variable'; split the unit code
    # (e.g. 'EUR/(kWh/d)/product') into currency, unit and product parts.
    data_pivot['variable'] = data_pivot['variable'].str.extract(r'(local_currency|eur|common_unit)')
    data_pivot['currency'] = data_pivot['code'].str.extract(r'^(.*?)\/')  # ^(.*?)\/ LINE START
    data_pivot['unit'] = data_pivot['code'].str.extract(
        r'\((.*?)\)')  # \((.*?)\) UNIT IN MIDDLE BETWEEN BRACKETS ()
    data_pivot['product_code'] = data_pivot['code'].str.extract(
        r'\)\/(.*?)$')  # \)\/(.*?)$ Product after unit

    return data_pivot
269 |
270 |
def parse_interconnections(json_text):
    """Parse interconnection records into a snake_cased DataFrame.

    Adds ``from_region_key`` / ``to_region_key`` columns by mapping the
    respective country keys onto their European region.
    """
    frame = _extract_data(json_text)
    frame.columns = [to_snake_case(column) for column in frame.columns]

    # Attach the European region of each endpoint country.
    for side in ('from', 'to'):
        frame[f'{side}_region_key'] = frame[f'{side}_country_key'].map(REGIONS)

    return frame
280 |
281 |
def parse_operator_points_directions(json_text):
    """Parse operator point directions into a snake_cased DataFrame.

    Adds ``region`` / ``adjacent_region`` columns by mapping the TSO
    country and the adjacent country onto their European regions.
    """
    frame = _extract_data(json_text)
    frame.columns = [to_snake_case(column) for column in frame.columns]

    # Map both sides of the point onto their European regions.
    frame = frame.assign(
        region=frame['t_so_country'].map(REGIONS),
        adjacent_region=frame['adjacent_country'].map(REGIONS),
    )

    return frame
291 |
292 |
def parse_aggregate_data(
        json_text,
        verbose: bool
):
    """Parse aggregated data into a DataFrame.

    Parameters
    ----------
    json_text : str
        Raw JSON response from the aggregated-data endpoint.
    verbose : bool
        When True, return every parsed column; otherwise only the
        curated subset below.

    Raises
    ------
    NoMatchingDataError
        If the response contains no rows.
    """
    data = parse_general(json_text)

    # Adjacent balancing-zone key: whatever follows the "Transmission"
    # prefix; blank or missing values become an 11-dash placeholder.
    placeholder = '-----------'
    data['adjacent_bz_key'] = (
        data['adjacent_systems_key']
        .str.extract(r"^Transmission(.*)$")
        .fillna(placeholder)
        .replace(r'^\s*$', placeholder, regex=True)
    )

    if data.empty:
        raise NoMatchingDataError('No matching data found')

    if verbose:
        return data

    return data[[
        'country_key', 'country_label',
        'bz_key', 'bz_short', 'bz_long',
        'operator_key', 'operator_label',
        'adjacent_systems_key', 'adjacent_systems_label', 'adjacent_bz_key',
        'period_from', 'period_to', 'period_type', 'direction_key', 'indicator',
        'unit', 'value']]
316 |
317 |
318 | # Legacy stuff
def parse_aggregate_data_complex(
        json_text,
        interconnections: pd.DataFrame,
        group_type: str = None,
        entry_exit: bool = False):
    """Parse aggregate data and optionally group it on point, operator,
    balancing zone, country or region level.

    Parameters
    ----------
    json_text : str
        Raw JSON response from the aggregated-data endpoint.
    interconnections : pd.DataFrame
        Parsed interconnections, used to resolve points whose adjacent
        system is a plain "Transmission" (i.e. outside the EU).
    group_type : str
        One of 'point', 'operator', 'balancing_zone', 'country',
        'region', or None for no grouping.
    entry_exit : bool
        When True, exit flows are negated and entry/exit are collapsed
        into a single 'aggregated' direction.

    Returns
    -------
    pd.DataFrame
    """
    df = _extract_data(json_text)
    df.columns = [to_snake_case(col) for col in df.columns]

    # Map each country onto its European region.
    df['region_key'] = df['country_key'].map(REGIONS)

    # Adjacent balancing zone: whatever follows the "Transmission" prefix.
    # Blank or missing values get an 11-dash placeholder. (Matching the
    # last 11 characters instead would break on e.g. DK-SE, which has 12.)
    df['adjacent_bz_key'] = df['adjacent_systems_key'].str.extract(r"^Transmission(.*)$").fillna('-----------').replace(
        r'^\s*$', '-----------', regex=True)

    # Rows whose adjacent system is literally "Transmission" connect to
    # systems outside the EU; resolve those against the interconnections
    # table. The join is on point label only — TSO keys are often not
    # consistent between the two data sets, so the adjacent system always
    # defaults to the to_point side.
    mask = ((df['adjacent_systems_key'] == 'Transmission') & (df['adjacent_systems_label'] == 'Transmission'))

    # Work on explicit copies: assigning columns on a boolean slice of df
    # would otherwise trigger pandas' chained-assignment warning.
    df_unmasked = df[~mask].copy()
    df_unmasked['note'] = ''  # Make empty column
    df_unmasked['outside_eu'] = False

    df_masked_joined = pd.merge(
        df[mask],
        interconnections,
        # Not possible to join on TSO level, the TSO is often not similar:
        # left_on=['points_names', 'operator_key'],
        # right_on=['to_point_label', 'to_operator_key'],
        left_on=['points_names'],
        right_on=['to_point_label'],
        suffixes=('', '_ic'),
        how='left')

    # Clean up the joined masked rows. adjacent_systems_key stays
    # 'Transmission' so outside-EU transmissions remain recognisable.
    df_masked_joined['outside_eu'] = True
    df_masked_joined['adjacent_systems_label'] = df_masked_joined['from_country_label']
    df_masked_joined['note'] = df_masked_joined['from_operator_label']  # e.g. NordStream 2

    # Coalesce: fall back to the point name where the join found nothing.
    # Keep in mind that a lot cannot be mapped, e.g. Emden (EPT1) (Thyssengas).
    df_masked_joined['note'] = df_masked_joined['note'].combine_first(df_masked_joined['points_names'])

    # Keep only the columns present in the unmasked frame and recombine.
    df = pd.concat([df_masked_joined[df_unmasked.columns], df_unmasked])

    if entry_exit:
        # Negate exits so entry and exit can be summed together.
        # (.loc avoids the unreliable chained assignment df['value'][mask].)
        exit_mask = (df['direction_key'] == 'exit')
        df.loc[exit_mask, 'value'] *= -1
        df['direction_key'] = 'aggregated'

    if group_type is None:
        return df

    group_columns = _aggregate_group_columns(group_type)
    if group_columns is not None:
        df = df.groupby(group_columns).agg(
            value=('value', 'sum')  # KWh/d
        )

    return df.reset_index()


def _aggregate_group_columns(group_type):
    """Return the groupby column list for *group_type*, or None if unknown."""
    # Geographic columns, from coarse ('region') to fine ('point').
    geo = {
        'point': ['country_key', 'bz_key'],
        'operator': ['country_key', 'bz_key'],
        'balancing_zone': ['country_key', 'bz_key'],
        'country': ['country_key'],
        'region': [],
    }
    # Extra identifying columns only present at the finer levels.
    extra = {
        'point': ['operator_key', 'points_names'],
        'operator': ['operator_key'],
    }
    if group_type not in geo:
        return None
    return (['period_from', 'period_to', 'region_key']
            + geo[group_type]
            + ['adjacent_bz_key', 'adjacent_systems_key', 'adjacent_systems_label']
            + extra.get(group_type, [])
            + ['indicator', 'direction_key', 'note'])
464 |
465 |
def parse_grouped_operational_aggregates(
        data: pd.DataFrame,
        group_type: str,
        entry_exit: bool
) -> pd.DataFrame:
    """Group parsed operational data on point, operator, balancing zone,
    country or region level.

    Parameters
    ----------
    data : pd.DataFrame
        Parsed operational data; must carry tso_country,
        adjacent_country, direction_key and value columns. Mutated in
        place (region columns are added, exits may be negated).
    group_type : str
        One of 'point', 'operator', 'balancing_zone', 'country', 'region'.
    entry_exit : bool
        When True, exit flows are negated and entry/exit are collapsed
        into a single 'aggregated' direction.

    Returns
    -------
    pd.DataFrame
        One row per group with the summed 'value' (KW / period_type).

    Raises
    ------
    ValueError
        If group_type is not one of the supported levels.
    """
    # Map the TSO country and adjacent country onto European regions.
    data['region_key'] = data['tso_country'].map(REGIONS)
    data['adjacent_region_key'] = data['adjacent_country'].map(REGIONS)

    if entry_exit:
        # Negate exits so entry and exit can be summed together.
        # (.loc avoids the unreliable chained assignment data['value'][mask].)
        mask = (data['direction_key'] == 'exit')
        data.loc[mask, 'value'] *= -1
        data['direction_key'] = 'aggregated'

    # Grouping columns shared by every level.
    head = ['period_from', 'period_to', 'point_type',
            'cross_border_point_type',  # e.g. in-country, within EU
            'eu_relationship']          # within EU, outside EU
    tail = ['indicator', 'direction_key']

    # Level-specific columns, ordered to match the aggregation level.
    middle = {
        'point': ['operator_key', 'operator_label',
                  'tso_country', 'adjacent_country',
                  'connected_operators',  # e.g. Nordstream
                  'tso_balancing_zone', 'adjacent_zones',
                  'region_key', 'adjacent_region_key',
                  'point_key', 'point_label'],
        'operator': ['operator_key', 'operator_label',
                     'tso_country', 'adjacent_country',
                     'connected_operators',
                     'tso_balancing_zone', 'adjacent_zones',
                     'region_key', 'adjacent_region_key'],
        'balancing_zone': ['connected_operators',
                           'tso_balancing_zone', 'adjacent_zones',
                           'region_key', 'adjacent_region_key'],
        'country': ['tso_country', 'adjacent_country',
                    'connected_operators',
                    'tso_balancing_zone', 'adjacent_zones',
                    'region_key', 'adjacent_region_key'],
        'region': ['region_key', 'adjacent_region_key'],
    }

    if group_type not in middle:
        # Previously an unknown group_type crashed later with an
        # UnboundLocalError on the ungrouped frame; fail explicitly.
        raise ValueError(f"Unknown group_type: {group_type!r}")

    df = data.groupby(head + middle[group_type] + tail).agg(
        value=('value', 'sum')  # KW / period_type
    )

    return df.reset_index()
598 |
--------------------------------------------------------------------------------
/entsog/entsog.py:
--------------------------------------------------------------------------------
1 | import urllib.parse
2 | import urllib.request
3 | from typing import List
4 | from typing import Union, Optional, Dict
5 |
6 | import pandas as pd
7 | import pytz
8 | import requests
9 |
10 | from .decorators import *
11 | from .exceptions import GatewayTimeOut, UnauthorizedError, BadGatewayError, TooManyRequestsError, NotFoundError
12 | from .mappings import Area, lookup_area, Indicator, lookup_balancing_zone, lookup_country, lookup_indicator, Country, BalancingZone
13 | from .parsers import *
14 |
15 | __title__ = "entsog-py"
16 | __version__ = "1.0.3"
17 | __author__ = "nhcb"
18 | __license__ = "MIT"
19 |
20 | URL = 'https://transparency.entsog.eu/api/v1'
21 | OFFSET = 10000
22 |
23 | class EntsogRawClient:
24 | """
25 | Client to perform API calls and return the raw responses API-documentation:
26 | https://transparency.entsog.eu/api/archiveDirectories/8/api-manual/TP_REG715_Documentation_TP_API%20-%20v2.1.pdf
27 | User Manual:
28 | https://www.entsog.eu/sites/default/files/2021-07/ENTSOG%20-%20TP%20User%20Manual_v_4.5.pdf
29 |
30 | Attributions: Entire framework is based upon the existing scraper for Entsoe authored from EnergieID.be
31 | """
32 |
33 | def __init__(
34 | self, session: Optional[requests.Session] = None,
35 | retry_count: int = 5, retry_delay: int = 3,
36 | proxies: Optional[Dict] = None, timeout: Optional[int] = None):
37 | """
38 | Parameters
39 | ----------
40 | session : requests.Session
41 | retry_count : int
42 | number of times to retry the call if the connection fails
43 | retry_delay: int
44 | amount of seconds to wait between retries
45 | proxies : dict
46 | requests proxies
47 | timeout : int
48 | """
49 |
50 | if session is None:
51 | session = requests.Session()
52 | self.session = session
53 | self.proxies = proxies
54 | self.retry_count = retry_count
55 | self.retry_delay = retry_delay
56 | self.timeout = timeout
57 |
58 | @retry
59 | def _base_request(self, endpoint: str, params: Dict) -> requests.Response:
60 |
61 | """
62 | Parameters
63 | ----------
64 | endpoint: str
65 | endpoint to url to gather data, in format /
66 | params : dict
67 |
68 | Returns
69 | -------
70 | requests.Response
71 | """
72 |
73 | url = URL + endpoint
74 | base_params = {
75 | 'limit': -1,
76 | 'timeZone': 'UCT'
77 | }
78 | # Update the default parameters and add the new ones.
79 | params = {**base_params, **params}
80 | logging.debug(f'Performing request to {url} with params {params}')
81 |
82 | params = urllib.parse.urlencode(params, safe=',') # ENTSOG uses comma-seperated values
83 | # UPDATE: ENTSOG now cannot handle verifications of SSL certificates. This is a temporary fix, will contact ENTSOG to fix this.
84 | response = self.session.get(url=url, params=params, proxies=self.proxies, timeout=self.timeout)
85 | try:
86 | response.raise_for_status()
87 | except requests.HTTPError as e:
88 | if response.status_code == 401:
89 | raise UnauthorizedError
90 | elif response.status_code == 500:
91 | # Gets a 500 error when the API is not available or no data is available
92 | raise NoMatchingDataError
93 | elif response.status_code == 502:
94 | raise BadGatewayError
95 | elif response.status_code == 504:
96 | raise GatewayTimeOut
97 | elif response.status_code == 429:
98 | raise TooManyRequestsError
99 | elif response.status_code == 404:
100 | raise NotFoundError
101 | else:
102 | raise e
103 | else:
104 | if response.headers.get('content-type', '') == 'application/xml':
105 | if response.status_code == 401:
106 | raise UnauthorizedError
107 | elif response.status_code == 500:
108 | # Gets a 500 error when the API is not available or no data is available
109 | raise NoMatchingDataError
110 | elif response.status_code == 502:
111 | raise BadGatewayError
112 | elif response.status_code == 429:
113 | raise TooManyRequestsError
114 | elif response.status_code == 404:
115 | raise NotFoundError
116 |
117 | return response
118 |
119 | @staticmethod
120 | def _datetime_to_str(dtm: pd.Timestamp) -> str:
121 | """
122 | Convert a datetime object to a string in UTC
123 | of the form YYYYMMDDhh00
124 |
125 | Parameters
126 | ----------
127 | dtm : pd.Timestamp
128 | Recommended to use a timezone-aware object!
129 | If timezone-naive, UTC is assumed
130 |
131 | Returns
132 | -------
133 | str
134 | """
135 | if dtm.tzinfo is not None and dtm.tzinfo != pytz.UTC:
136 | dtm = dtm.tz_convert("UTC")
137 |
138 | ret_str = dtm.date()
139 | # fmt = '%Y%m%d%H00'
140 | # ret_str = dtm.strftime(fmt).date()
141 |
142 | return ret_str
143 |
144 | def query_connection_points(self) -> str:
145 | """
146 |
147 | Interconnection points as visible on the Map. Please note that
148 | this only included the Main points and not the sub points. To
149 | download all points, the API for Operator Point Directions
150 | should be used.
151 |
152 | Parameters
153 | ----------
154 | None
155 |
156 | Returns
157 | -------
158 | str
159 | """
160 |
161 | """
162 | Expected columns:
163 | -----------------
164 | "pointKey",
165 | "pointLabel",
166 | "isSingleOperator",
167 | "pointTooltip",
168 | "pointEicCode",
169 | "controlPointType",
170 | "tpMapX",
171 | "tpMapY",
172 | "pointType",
173 | "commercialType",
174 | "importFromCountryKey",
175 | "importFromCountryLabel",
176 | "hasVirtualPoint",
177 | "virtualPointKey",
178 | "virtualPointLabel",
179 | "hasData",
180 | "isPlanned",
181 | "isInterconnection",
182 | "isImport",
183 | "infrastructureKey",
184 | "infrastructureLabel",
185 | "isCrossBorder",
186 | "euCrossing",
187 | "isInvalid",
188 | "isMacroPoint",
189 | "isCAMRelevant",
190 | "isPipeInPipe",
191 | "isCMPRelevant",
192 | "id",
193 | "dataSet"
194 | -----------------
195 | """
196 |
197 | response = self._base_request(endpoint='/connectionpoints', params = {})
198 |
199 | return response.text, response.url
200 |
201 | def query_operators(self,
202 | country_code: Union[Country, str] = None,
203 | has_data: int = 1) -> str:
204 |
205 | """
206 |
207 | All operators connected to the transmission system
208 |
209 | Parameters
210 | ----------
211 | country Union[Area, str]
212 | has_data: int
213 |
214 | Returns
215 | -------
216 | str
217 | """
218 |
219 | """
220 | Expected columns:
221 | -----------------
222 | "operatorLogoUrl",
223 | "operatorKey",
224 | "operatorLabel",
225 | "operatorLabelLong",
226 | "operatorTooltip",
227 | "operatorCountryKey",
228 | "operatorCountryLabel",
229 | "operatorCountryFlag",
230 | "operatorTypeLabel",
231 | "operatorTypeLabelLong",
232 | "participates",
233 | "membershipLabel",
234 | "tsoEicCode",
235 | "tsoDisplayName",
236 | "tsoShortName",
237 | "tsoLongName",
238 | "tsoStreet",
239 | "tsoBuildingNumber",
240 | "tsoPostOfficeBox",
241 | "tsoZipCode",
242 | "tsoCity",
243 | "tsoContactName",
244 | "tsoContactPhone",
245 | "tsoContactEmail",
246 | "tsoContactUrl",
247 | "tsoContactRemarks",
248 | "tsoGeneralWebsiteUrl",
249 | "tsoGeneralWebsiteUrlRemarks",
250 | "tsoTariffInformationUrl",
251 | "tsoTariffInformationUrlRemarks",
252 | "tsoTariffCalculatorUrl",
253 | "tsoTariffCalculatorUrlRemarks",
254 | "tsoCapacityInformationUrl",
255 | "tsoCapacityInformationUrlRemarks",
256 | "tsoGasQualityURL",
257 | "tsoGasQualityURLRemarks",
258 | "tsoAccessConditionsUrl",
259 | "tsoAccessConditionsUrlRemarks",
260 | "tsoContractDocumentsUrl",
261 | "tsoContractDocumentsUrlRemarks",
262 | "tsoMaintainanceUrl",
263 | "tsoMaintainanceUrlRemarks",
264 | "gasDayStartHour",
265 | "gasDayStartHourRemarks",
266 | "multiAnnualContractsIsAvailable",
267 | "multiAnnualContractsRemarks",
268 | "annualContractsIsAvailable",
269 | "annualContractsRemarks",
270 | "halfAnnualContractsIsAvailable",
271 | "halfAnnualContractsRemarks",
272 | "quarterlyContractsIsAvailable",
273 | "quarterlyContractsRemarks",
274 | "monthlyContractsIsAvailable",
275 | "monthlyContractsRemarks",
276 | "dailyContractsIsAvailable",
277 | "dailyContractsRemarks",
278 | "withinDayContractsIsAvailable",
279 | "withinDayContractsRemarks",
280 | "availableContractsRemarks",
281 | "firmCapacityTariffIsApplied",
282 | "firmCapacityTariffUnit",
283 | "firmCapacityTariffRemarks",
284 | "interruptibleCapacityTariffIsApplied",
285 | "interruptibleCapacityTariffUnit",
286 | "interruptibleCapacityTariffRemarks",
287 | "auctionIsApplied",
288 | "auctionTariffIsApplied",
289 | "auctionCapacityTariffUnit",
290 | "auctionRemarks",
291 | "commodityTariffIsApplied",
292 | "commodityTariffUnit",
293 | "commodityTariffPrice",
294 | "commodityTariffRemarks",
295 | "othersTariffIsApplied",
296 | "othersTariffRemarks",
297 | "generalTariffInformationRemarks",
298 | "generalCapacityRemark",
299 | "firstComeFirstServedIsApplied",
300 | "firstComeFirstServedRemarks",
301 | "openSubscriptionWindowIsApplied",
302 | "openSubscriptionWindowRemarks",
303 | "firmTechnicalRemark",
304 | "firmBookedRemark",
305 | "firmAvailableRemark",
306 | "interruptibleTotalRemark",
307 | "interruptibleBookedRemark",
308 | "interruptibleAvailableRemark",
309 | "tsoGeneralRemarks",
310 | "balancingModel",
311 | "bMHourlyImbalanceToleranceIsApplied",
312 | "bMHourlyImbalanceToleranceIsInformation",
313 | "bMHourlyImbalanceToleranceIsRemarks",
314 | "bMDailyImbalanceToleranceIsApplied",
315 | "bMDailyImbalanceToleranceIsInformation",
316 | "bMDailyImbalanceToleranceIsRemarks",
317 | "bMAdditionalDailyImbalanceToleranceIsApplied",
318 | "bMAdditionalDailyImbalanceToleranceIsInformation",
319 | "bMAdditionalDailyImbalanceToleranceIsRemarks",
320 | "bMCumulatedImbalanceToleranceIsApplied",
321 | "bMCumulatedImbalanceToleranceIsInformation",
322 | "bMCumulatedImbalanceToleranceIsRemarks",
323 | "bMAdditionalCumulatedImbalanceToleranceIsApplied",
324 | "bMAdditionalCumulatedImbalanceToleranceIsInformation",
325 | "bMAdditionalCumulatedImbalanceToleranceIsRemarks",
326 | "bMStatusInformation",
327 | "bMStatusInformationFrequency",
328 | "bMPenalties",
329 | "bMCashOutRegime",
330 | "bMRemarks",
331 | "gridTransportModelType",
332 | "gridTransportModelTypeRemarks",
333 | "gridConversionFactorCapacityDefault",
334 | "gridConversionFactorCapacityDefaultRemaks",
335 | "gridGrossCalorificValueDefaultValue",
336 | "gridGrossCalorificValueDefaultValueTo",
337 | "gridGrossCalorificValueDefaultUnit",
338 | "gridGrossCalorificValueDefaultRemarks",
339 | "gridGasSourceDefault",
340 | "lastUpdateDateTime",
341 | "transparencyInformationURL",
342 | "transparencyInformationUrlRemarks",
343 | "transparencyGuidelinesInformationURL",
344 | "transparencyGuidelinesInformationUrlRemarks",
345 | "tsoUmmRssFeedUrlGas",
346 | "tsoUmmRssFeedUrlOther",
347 | "includeUmmInAcerRssFeed",
348 | "id",
349 | "dataSet"
350 | -----------------
351 | """
352 |
353 | params = {
354 | 'hasData': has_data
355 | }
356 |
357 | if country_code is not None:
358 | params['operatorCountryKey'] = lookup_country(country_code).code
359 |
360 | response = self._base_request(endpoint='/operators', params=params)
361 |
362 | return response.text, response.url
363 |
364 | def query_balancing_zones(self) -> str:
365 |
366 | """
367 |
368 | European balancing zones
369 |
370 | Parameters
371 | ----------
372 | limit: int
373 |
374 | Returns
375 | -------
376 | str
377 | """
378 |
379 | """
380 | Expected columns:
381 | -----------------
382 |
383 | "tpMapX",
384 | "tpMapY",
385 | "controlPointType",
386 | "bzKey",
387 | "bzLabel",
388 | "bzLabelLong",
389 | "bzTooltip",
390 | "bzEicCode",
391 | "bzManagerKey",
392 | "bzManagerLabel",
393 | "replacedBy",
394 | "isDeactivated",
395 | "id",
396 | "dataSet"
397 | -----------------
398 | """
399 |
400 | params = {
401 |
402 | }
403 |
404 | response = self._base_request(endpoint='/balancingzones', params=params)
405 |
406 | return response.text, response.url
407 |
408 | def query_operator_point_directions(self,
409 | country_code: Union[Country, str] = None) -> str:
410 |
411 | """
412 |
413 | All the possible flow directions, being combination of an
414 | operator, a point, and a flow direction
415 |
416 | Parameters
417 | ----------
418 | country_code : Union[Country, str]
419 |
420 | Returns
421 | -------
422 | str
423 | """
424 |
425 | """
426 | Expected columns:
427 | -----------------
428 |
429 | "pointKey",
430 | "pointLabel",
431 | "operatorKey",
432 | "tsoEicCode",
433 | "operatorLabel",
434 | "directionKey",
435 | "validFrom",
436 | "validTo",
437 | "hasData",
438 | "isVirtualizedCommercially",
439 | "virtualizedCommerciallySince",
440 | "isVirtualizedOperationally",
441 | "virtualizedOperationallySince",
442 | "isPipeInPipe",
443 | "relatedOperators",
444 | "relatedPoints",
445 | "pipeInPipeWithTsoKey",
446 | "pipeInPipeWithTsoLabel",
447 | "isDoubleReporting",
448 | "doubleReportingWithTsoKey",
449 | "doubleReportingWithTsoLabel",
450 | "tsoItemIdentifier",
451 | "tpTsoItemLabel",
452 | "tpTsoValidFrom",
453 | "tpTsoValidTo",
454 | "tpTsoRemarks",
455 | "tpTsoConversionFactor",
456 | "tpRmkGridConversionFactorCapacityDefault",
457 | "tpTsoGCVMin",
458 | "tpTsoGCVMax",
459 | "tpTsoGCVRemarks",
460 | "tpTsoGCVUnit",
461 | "tpTsoEntryExitType",
462 | "multiAnnualContractsIsAvailable",
463 | "multiAnnualContractsRemarks",
464 | "annualContractsIsAvailable",
465 | "annualContractsRemarks",
466 | "halfAnnualContractsIsAvailable",
467 | "halfAnnualContractsRemarks",
468 | "quarterlyContractsIsAvailable",
469 | "quarterlyContractsRemarks",
470 | "monthlyContractsIsAvailable",
471 | "monthlyContractsRemarks",
472 | "dailyContractsIsAvailable",
473 | "dailyContractsRemarks",
474 | "dayAheadContractsIsAvailable",
475 | "dayAheadContractsRemarks",
476 | "availableContractsRemarks",
477 | "sentenceCMPUnsuccessful",
478 | "sentenceCMPUnavailable",
479 | "sentenceCMPAuction",
480 | "sentenceCMPMadeAvailable",
481 | "lastUpdateDateTime",
482 | "isInvalid",
483 | "isCAMRelevant",
484 | "isCMPRelevant",
485 | "bookingPlatformKey",
486 | "bookingPlatformLabel",
487 | "bookingPlatformURL",
488 | "virtualReverseFlow",
489 | "virtualReverseFlowRemark",
490 | "tSOCountry",
491 | "tSOBalancingZone",
492 | "crossBorderPointType",
493 | "eURelationship",
494 | "connectedOperators",
495 | "adjacentTsoEic",
496 | "adjacentOperatorKey",
497 | "adjacentCountry",
498 | "pointType",
499 | "idPointType",
500 | "adjacentZones",
501 | "id",
502 | "dataSet"
503 | -----------------
504 | """
505 | params = {}
506 | if country_code is not None:
507 | params['tSOCountry'] = lookup_country(country_code).code
508 |
509 | response = self._base_request(endpoint='/operatorpointdirections', params=params)
510 |
511 | return response.text, response.url
512 |
513 | def query_interconnections(self,
514 | from_country_code: Union[Country, str],
515 | to_country_code: Union[Country, str] = None,
516 | from_balancing_zone: Union[BalancingZone, str] = None,
517 | to_balancing_zone: Union[BalancingZone, str] = None,
518 | from_operator: str = None,
519 | to_operator: str = None) -> str:
520 |
521 | """
522 |
523 | All the interconnections between an exit system and an entry
524 | system
525 |
526 | Parameters
527 | ----------
528 | from_country_code : Union[Country, str]
529 | to_country_code : Union[Country, str]
530 | from_balancing_zone : Union[BalancingZone, str]
531 | to_balancing_zone : Union[BalancingZone, str]
532 | from_operator: str
533 | to_operator: str
534 |
535 | Returns
536 | -------
537 | str
538 | """
539 |
540 | """
541 | Expected columns:
542 | -----------------
543 |
544 | "pointKey",
545 | "pointLabel",
546 | "isSingleOperator",
547 | "pointTpMapX",
548 | "pointTpMapY",
549 | "fromSystemLabel",
550 | "fromInfrastructureTypeLabel",
551 | "fromCountryKey",
552 | "fromCountryLabel",
553 | "fromBzKey",
554 | "fromBzLabel",
555 | "fromBzLabelLong",
556 | "fromOperatorKey",
557 | "fromOperatorLabel",
558 | "fromOperatorLongLabel",
559 | "fromPointKey",
560 | "fromPointLabel",
561 | "fromIsCAM",
562 | "fromIsCMP",
563 | "fromBookingPlatformKey",
564 | "fromBookingPlatformLabel",
565 | "fromBookingPlatformURL",
566 | "toIsCAM",
567 | "toIsCMP",
568 | "toBookingPlatformKey",
569 | "toBookingPlatformLabel",
570 | "toBookingPlatformURL",
571 | "fromTsoItemIdentifier",
572 | "fromTsoPointLabel",
573 | "fromDirectionKey",
574 | "fromHasData",
575 | "toSystemLabel",
576 | "toInfrastructureTypeLabel",
577 | "toCountryKey",
578 | "toCountryLabel",
579 | "toBzKey",
580 | "toBzLabel",
581 | "toBzLabelLong",
582 | "toOperatorKey",
583 | "toOperatorLabel",
584 | "toOperatorLongLabel",
585 | "toPointKey",
586 | "toPointLabel",
587 | "toDirectionKey",
588 | "toHasData",
589 | "toTsoItemIdentifier",
590 | "toTsoPointLabel",
591 | "validFrom",
592 | "validto",
593 | "lastUpdateDateTime",
594 | "isInvalid",
595 | "entryTpNeMoUsage",
596 | "exitTpNeMoUsage",
597 | "id",
598 | "dataSet"
599 | -----------------
600 | """
601 |
602 | params = {}
603 |
604 | if from_country_code is not None:
605 | params['fromCountryKey'] = lookup_country(from_country_code).code
606 | if to_country_code is not None:
607 | params['toCountryKey'] = lookup_country(to_country_code).code
608 |
609 | if from_balancing_zone is not None:
610 | params['fromBzKey'] = lookup_balancing_zone(from_balancing_zone).code
611 | if to_balancing_zone is not None:
612 | params['toBzKeys'] = lookup_balancing_zone(to_balancing_zone).code
613 |
614 | if from_operator is not None:
615 | params['fromOperatorKey'] = from_operator
616 | if to_operator is not None:
617 | params['toOperatorKey'] = to_operator
618 |
619 | response = self._base_request(endpoint='/interconnections', params=params)
620 |
621 | return response.text, response.url
622 |
623 | def query_aggregate_interconnections(self,
624 | country_code: Union[Country, str] = None,
625 | balancing_zone: Union[BalancingZone, str] = None,
626 | operator_key: str = None) -> str:
627 |
628 | """
629 |
630 | All the connections between transmission system operators
631 | and their respective balancing zones
632 |
633 | Parameters
634 | ----------
635 | country Union[Area, str]
636 | limit: int
637 |
638 | Returns
639 | -------
640 | str
641 | """
642 |
643 | """
644 | Expected columns:
645 | -----------------
646 |
647 | "countryKey",
648 | "countryLabel",
649 | "bzKey",
650 | "bzLabel",
651 | "bzLabelLong",
652 | "operatorKey",
653 | "operatorLabel",
654 | "directionKey",
655 | "adjacentSystemsKey",
656 | "adjacentSystemsCount",
657 | "adjacentSystemsAreBalancingZones",
658 | "adjacentSystemsLabel",
659 | "id",
660 | "dataSet"
661 | -----------------
662 | """
663 | params = {}
664 |
665 | if country_code is not None:
666 | country_code = lookup_country(country_code)
667 | params['countryKey'] = country_code.code
668 | if balancing_zone is not None:
669 | balancing_zone = lookup_balancing_zone(balancing_zone)
670 | params['bzKey'] = balancing_zone.code
671 | if operator_key is not None:
672 | params['operatorKey'] = operator_key
673 |
674 | response = self._base_request(endpoint='/aggregateInterconnections', params=params)
675 |
676 | return response.text, response.url
677 |
678 | def query_urgent_market_messages(self,
679 | balancing_zone: Union[BalancingZone, str] = None) -> str:
680 |
681 | """
682 |
683 | Urgent Market Messages
684 |
685 | Parameters
686 | ----------
687 |
688 | balancing_zone : Union[BalancingZone, str]
689 | limit: int
690 |
691 | Returns
692 | -------
693 | str
694 | """
695 |
696 | """
697 | Expected columns:
698 | -----------------
699 |
700 | "id",
701 | "messageId",
702 | "marketParticipantKey",
703 | "marketParticipantEic",
704 | "marketParticipantName",
705 | "messageType",
706 | "publicationDateTime",
707 | "threadId",
708 | "versionNumber",
709 | "eventStatus",
710 | "eventType",
711 | "eventStart",
712 | "eventStop",
713 | "unavailabilityType",
714 | "unavailabilityReason",
715 | "unitMeasure",
716 | "balancingZoneKey",
717 | "balancingZoneEic",
718 | "balancingZoneName",
719 | "affectedAssetIdentifier",
720 | "affectedAssetName",
721 | "affectedAssetEic",
722 | "direction",
723 | "unavailableCapacity",
724 | "availableCapacity",
725 | "technicalCapacity",
726 | "remarks",
727 | "lastUpdateDateTime",
728 | "sharePointPointId",
729 | "isLatestVersion",
730 | "sharePointPublicationId",
731 | "uMMType",
732 | "isArchived"
733 | -----------------
734 | """
735 |
736 | params = {}
737 |
738 | if balancing_zone is not None:
739 | balancing_zone = lookup_balancing_zone(balancing_zone)
740 | params['balancingZoneKey'] = balancing_zone.code
741 |
742 | response = self._base_request(endpoint='/urgentmarketmessages', params=params)
743 |
744 | return response.text, response.url
745 |
746 | def query_tariffs(self, start: pd.Timestamp, end: pd.Timestamp,
747 | country_code: Union[Country, str]) -> str:
748 |
749 | """
750 |
751 | Information about the various tariff types and components
752 | related to the tariffs
753 |
754 | Parameters
755 | ----------
756 | start: pd.Timestamp
757 | end: pd.Timestamp
758 | country_code: Union[Country, str]
759 |
760 | Returns
761 | -------
762 | str
763 | """
764 |
765 | """
766 | Expected columns:
767 | -----------------
768 |
769 | Tariff Period
770 | Tariff Period Remarks
771 | Point Name
772 | Point Identifier (EIC)
773 | Direction
774 | Operator
775 | Country code
776 | Connection
777 | Remarks fo connection
778 | From BZ
779 | To BZ
780 | Start time of validity
781 | End time of validity
782 | Capacity Type
783 | Unit
784 | Product type according to its duration
785 | Multiplier
786 | Remarks for multiplier
787 | Discount for interruptible capacity
788 | Remarks for discount
789 | Seasonal factor
790 | Remarks for seasonal factor
791 | Operator Currency
792 | Applicable tariff per kWh/d (local)
793 | Local Currency/ kWh/d
794 | Applicable tariff per kWh/h (local)
795 | Local Currency/ kWh/h
796 | Applicable tariff per kWh/d (Euro)
797 | EUR / kWh/d
798 | Applicable tariff per kWh/h (Euro)
799 | EUR / kWh/h
800 | Remarks for applicable tariff
801 | Applicable tariff in common unit
802 | EUR/kWh/h/d for all products EUR/kWh/h/h for within-day
803 | Remarks for applicable tariff in common unit
804 | Applicable commodity tariff per kWh, if any, in the Local Currency
805 | Applicable commodity tariff per kWh, if any, in the EURO
806 | Remarks for applicable commodity
807 | Last Update Date
808 | Exchange Rate Reference Date
809 | Remarks
810 | Operator key
811 | Tso Eic code
812 | Point key
813 | -----------------
814 | """
815 |
816 | params = {
817 | 'from': self._datetime_to_str(start),
818 | 'to': self._datetime_to_str(end)
819 | }
820 | if country_code is not None:
821 | country_code = lookup_country(country_code)
822 | params['countryKey'] = country_code.code
823 |
824 | response = self._base_request(endpoint='/tariffsfulls', params=params)
825 |
826 | return response.text, response.url
827 |
828 | def query_tariffs_sim(self, start: pd.Timestamp, end: pd.Timestamp,
829 | country_code: Union[Country, str]) -> str:
830 |
831 | """
832 |
833 | Simulation of all the costs for flowing 1 GWh/day/year for
834 | each IP per product type and tariff period
835 |
836 | Parameters
837 | ----------
838 | start: pd.Timestamp
839 | end: pd.Timestamp
840 | country_code: Union[Country, str]
841 |
842 | Returns
843 | -------
844 | str
845 | """
846 |
847 | """
848 | Expected columns:
849 | -----------------
850 | Tariff Period
851 | Tariff Period Remarks
852 | Point Name
853 | Point Identifier (EIC)
854 | Direction
855 | Operator
856 | Country code
857 | Connection
858 | Remarks for connection
859 | From BZ
860 | To BZ
861 | Capacity Type
862 | Unit
863 | Product type according to its duration
864 | Operator Currency
865 | Simulation of all the costs foe flowing 1 GWh/day/year in Local currency
866 | Simulation of all the costs for flowing 1 GWh/day/year in EUR
867 | Remars for Simulation costs
868 | Last Update Date
869 | Exchange Rate Reference Date
870 | Remarks
871 | Operator key
872 | Tso Eic code
873 | Point key
874 | -----------------
875 | """
876 | params = {
877 | 'from': self._datetime_to_str(start),
878 | 'to': self._datetime_to_str(end),
879 | }
880 | if country_code is not None:
881 | country_code = lookup_country(country_code)
882 | params['countryKey'] = country_code.code
883 |
884 | response = self._base_request(endpoint='/tariffsSimulations', params=params)
885 |
886 | return response.text, response.url
887 |
888 | def query_aggregated_data(self, start: pd.Timestamp, end: pd.Timestamp,
889 | country_code: Union[Country, str] = None,
890 | balancing_zone: Union[BalancingZone, str] = None,
891 | period_type: str = 'day') -> str:
892 | """
893 |
894 | Latest nominations, allocations, physical flow. Not recommended.
895 |
896 | Parameters
897 | ----------
898 | start: pd.Timestamp
899 | end: pd.Timestamp
900 | country_code: Union[Country, str]
901 | balancing_zone: Union[BalancingZone, str]
902 | period_type: str
903 | limit: int
904 |
905 | Returns
906 | -------
907 | str
908 | """
909 |
910 | """
911 | Expected columns:
912 | -----------------
913 |
914 | "id",
915 | "dataSet",
916 | "dataSetLabel",
917 | "indicator",
918 | "periodType",
919 | "periodFrom",
920 | "periodTo",
921 | "countryKey",
922 | "countryLabel",
923 | "bzKey",
924 | "bzShort",
925 | "bzLong",
926 | "operatorKey",
927 | "operatorLabel",
928 | "tsoEicCode",
929 | "directionKey",
930 | "adjacentSystemsKey",
931 | "adjacentSystemsLabel",
932 | "year",
933 | "month",
934 | "day",
935 | "unit",
936 | "value",
937 | "countPointPresents",
938 | "flowStatus",
939 | "pointsNames",
940 | "lastUpdateDateTime"
941 | -----------------
942 | """
943 |
944 | params = {
945 | 'from': self._datetime_to_str(start),
946 | 'to': self._datetime_to_str(end)
947 | }
948 | if country_code is not None:
949 | country_code = lookup_country(country_code)
950 | params['countryKey'] = country_code.code
951 |
952 | if balancing_zone is not None:
953 | balancing_zone = lookup_balancing_zone(balancing_zone)
954 | params['bzKey'] = balancing_zone.code
955 |
956 | if period_type is not None:
957 | params['periodType'] = period_type
958 |
959 | response = self._base_request(endpoint='/aggregatedData', params=params)
960 |
961 | return response.text, response.url
962 |
963 | def query_interruptions(self, start : pd.Timestamp, end : pd.Timestamp) -> str:
964 |
965 | """
966 |
967 | Interruptions
968 |
969 | Parameters
970 | ----------
971 | start: pd.Timestamp
972 | end: pd.Timestamp
973 | country_code: Union[Country, str]
974 | period_type: str
975 | limit: int
976 |
977 | Returns
978 | -------
979 | str
980 | """
981 |
982 | """"
983 | Expected columns:
984 | -----------------
985 |
986 | periodFrom",
987 | "periodTo",
988 | "operatorKey",
989 | "tsoEicCode",
990 | "operatorLabel",
991 | "pointKey",
992 | "pointLabel",
993 | "tsoItemIdentifier",
994 | "directionKey",
995 | "interruptionType",
996 | "capacityType",
997 | "capacityCommercialType",
998 | "unit",
999 | "value",
1000 | "restorationInformation",
1001 | "lastUpdateDateTime",
1002 | "isOverlapping",
1003 | "id",
1004 | "dataSet",
1005 | "indicator",
1006 | "periodType",
1007 | "itemRemarks",
1008 | "generalRemarks",
1009 | "isUnlimited",
1010 | "flowStatus",
1011 | "capacityBookingStatus",
1012 | "isCamRelevant",
1013 | "isNA",
1014 | "originalPeriodFrom",
1015 | "isCmpRelevant",
1016 | "bookingPlatformKey",
1017 | "bookingPlatformLabel",
1018 | "bookingPlatformURL",
1019 | "interruptionCalculationRemark",
1020 | "pointType",
1021 | "idPointType",
1022 | "isArchived"
1023 | -----------------
1024 | """
1025 | params = {
1026 | 'from': self._datetime_to_str(start),
1027 | 'to': self._datetime_to_str(end),
1028 | }
1029 | response_text, response_url = self._base_request(endpoint='/interruptions', params = params)
1030 |
1031 | return response_text, response_url
1032 |
1033 | def query_CMP_auction_premiums(self, start: pd.Timestamp, end: pd.Timestamp,
1034 | period_type: str = 'day') -> str:
1035 |
1036 | """
1037 |
1038 | CMP Auction Premiums
1039 |
1040 | Parameters
1041 | ----------
1042 | start: pd.Timestamp
1043 | end: pd.Timestamp
1044 | period_type: str
1045 |
1046 | Returns
1047 | -------
1048 | str
1049 | """
1050 |
1051 | """
1052 | Expected columns:
1053 | -----------------
1054 |
1055 | "auctionFrom",
1056 | "auctionTo",
1057 | "capacityFrom",
1058 | "capacityTo",
1059 | "operatorKey",
1060 | "tsoEicCode",
1061 | "operatorLabel",
1062 | "pointKey",
1063 | "pointLabel",
1064 | "tsoItemIdentifier",
1065 | "directionKey",
1066 | "unit",
1067 | "itemRemarks",
1068 | "generalRemarks",
1069 | "auctionPremium",
1070 | "clearedPrice",
1071 | "reservePrice",
1072 | "lastUpdateDateTime",
1073 | "isCAMRelevant",
1074 | "bookingPlatformKey",
1075 | "bookingPlatformLabel",
1076 | "bookingPlatformURL",
1077 | "pointType",
1078 | "idPointType",
1079 | "id",
1080 | "dataSet",
1081 | "indicator",
1082 | "periodType",
1083 | "periodFrom",
1084 | "periodTo",
1085 | "value",
1086 | "isUnlimited",
1087 | "flowStatus",
1088 | "interruptionType",
1089 | "restorationInformation",
1090 | "capacityType",
1091 | "capacityBookingStatus",
1092 | "isNA",
1093 | "originalPeriodFrom",
1094 | "isCmpRelevant",
1095 | "interruptionCalculationRemark",
1096 | "isArchived"
1097 | -----------------
1098 | """
1099 |
1100 | params = {
1101 | 'from': self._datetime_to_str(start),
1102 | 'to': self._datetime_to_str(end),
1103 | 'periodType': period_type,
1104 | }
1105 |
1106 | response = self._base_request(endpoint='/cmpauctions', params=params)
1107 |
1108 | return response.text, response.url
1109 |
1110 | def query_CMP_unavailable_firm_capacity(self, start: pd.Timestamp, end: pd.Timestamp,
1111 | period_type: str = 'day') -> str:
1112 |
1113 | """
1114 |
1115 | CMP Unsuccessful requests
1116 |
1117 | Parameters
1118 | ----------
1119 | start: pd.Timestamp
1120 | end: pd.Timestamp
1121 | period_type: str
1122 | limit: int
1123 |
1124 | Returns
1125 | -------
1126 | str
1127 | """
1128 |
1129 | """
1130 | Expected columns:
1131 | -----------------
1132 |
1133 | "periodFrom",
1134 | "periodTo",
1135 | "operatorKey",
1136 | "tsoEicCode",
1137 | "operatorLabel",
1138 | "pointKey",
1139 | "pointLabel",
1140 | "tsoItemIdentifier",
1141 | "directionKey",
1142 | "allocationProcess",
1143 | "itemRemarks",
1144 | "generalRemarks",
1145 | "lastUpdateDateTime",
1146 | "pointType",
1147 | "idPointType",
1148 | "id",
1149 | "dataSet",
1150 | "indicator",
1151 | "periodType",
1152 | "unit",
1153 | "value",
1154 | "isUnlimited",
1155 | "flowStatus",
1156 | "interruptionType",
1157 | "restorationInformation",
1158 | "capacityType",
1159 | "capacityBookingStatus",
1160 | "isCamRelevant",
1161 | "isNA",
1162 | "originalPeriodFrom",
1163 | "isCmpRelevant",
1164 | "bookingPlatformKey",
1165 | "bookingPlatformLabel",
1166 | "bookingPlatformURL",
1167 | "interruptionCalculationRemark",
1168 | "isArchived"
1169 | -----------------
1170 | """
1171 | # area = lookup_country(country_code)
1172 | params = {
1173 | 'from': self._datetime_to_str(start),
1174 | 'to': self._datetime_to_str(end),
1175 | 'periodType': period_type
1176 | }
1177 |
1178 | response = self._base_request(endpoint='/cmpunavailables', params=params)
1179 |
1180 | return response.text, response.url
1181 |
1182 | def query_CMP_unsuccesful_requests(self, start: pd.Timestamp, end: pd.Timestamp,
1183 | period_type: str = 'day') -> str:
1184 |
1185 | """
1186 |
1187 | CMP Unsuccessful requests
1188 |
1189 | Parameters
1190 | ----------
1191 | start: pd.Timestamp
1192 | end: pd.Timestamp
1193 | period_type: str
1194 |
1195 | Returns
1196 | -------
1197 | str
1198 | """
1199 |
1200 | """
1201 | Expected columns:
1202 | -----------------
1203 |
1204 | "auctionFrom",
1205 | "auctionTo",
1206 | "capacityFrom",
1207 | "capacityTo",
1208 | "operatorKey",
1209 | "tsoEicCode",
1210 | "operatorLabel",
1211 | "pointKey",
1212 | "pointLabel",
1213 | "tsoItemIdentifier",
1214 | "directionKey",
1215 | "unit",
1216 | "itemRemarks",
1217 | "generalRemarks",
1218 | "requestedVolume",
1219 | "allocatedVolume",
1220 | "unallocatedVolume",
1221 | "lastUpdateDateTime",
1222 | "occurenceCount",
1223 | "indicator",
1224 | "periodType",
1225 | "isUnlimited",
1226 | "flowStatus",
1227 | "interruptionType",
1228 | "restorationInformation",
1229 | "capacityType",
1230 | "capacityBookingStatus",
1231 | "value",
1232 | "pointType",
1233 | "idPointType",
1234 | "id",
1235 | "dataSet",
1236 | "periodFrom",
1237 | "periodTo",
1238 | "isCamRelevant",
1239 | "isNA",
1240 | "originalPeriodFrom",
1241 | "isCmpRelevant",
1242 | "bookingPlatformKey",
1243 | "bookingPlatformLabel",
1244 | "bookingPlatformURL",
1245 | "interruptionCalculationRemark",
1246 | "isArchived"
1247 | -----------------
1248 | """
1249 |
1250 | params = {
1251 | 'from': self._datetime_to_str(start),
1252 | 'to': self._datetime_to_str(end),
1253 | 'periodType': period_type
1254 | }
1255 |
1256 | response = self._base_request(endpoint='/cmpUnsuccessfulRequests', params=params)
1257 |
1258 | return response.text, response.url
1259 |
1260 | def query_operational_data(self,
1261 | start: pd.Timestamp,
1262 | end: pd.Timestamp,
1263 | period_type: str = 'day',
1264 | indicators: Union[List[Indicator], List[str]] = None,
1265 | point_directions : Optional[List[str]] = None,
1266 | offset : int = None,
1267 | ) -> str:
1268 |
1269 | """
1270 |
1271 | Nomination, Renominations, Allocations, Physical Flows, GCV,
1272 | Wobbe Index, Capacities, Interruptions, and CMP CMA
1273 |
1274 | Parameters
1275 | ----------
1276 | start: pd.Timestamp
1277 | end: pd.Timestamp
1278 | country_code: Union[Country, str]
1279 | period_type: str
1280 | limit: int
1281 |
1282 | Returns
1283 | -------
1284 | str
1285 | """
1286 |
1287 | """
1288 | Expected columns:
1289 | -----------------
1290 |
1291 | "id",
1292 | "dataSet",
1293 | "indicator",
1294 | "periodType",
1295 | "periodFrom",
1296 | "periodTo",
1297 | "operatorKey",
1298 | "tsoEicCode",
1299 | "operatorLabel",
1300 | "pointKey",
1301 | "pointLabel",
1302 | "c",
1303 | "directionKey",
1304 | "unit",
1305 | "itemRemarks",
1306 | "generalRemarks",
1307 | "value",
1308 | "lastUpdateDateTime",
1309 | "isUnlimited",
1310 | "flowStatus",
1311 | "interruptionType",
1312 | "restorationInformation",
1313 | "capacityType",
1314 | "capacityBookingStatus",
1315 | "isCamRelevant",
1316 | "isNA",
1317 | "originalPeriodFrom",
1318 | "isCmpRelevant",
1319 | "bookingPlatformKey",
1320 | "bookingPlatformLabel",
1321 | "bookingPlatformURL",
1322 | "interruptionCalculationRemark",
1323 | "pointType",
1324 | "idPointType",
1325 | "isArchived"
1326 | -----------------
1327 | """
1328 |
1329 | params = {
1330 | 'from': self._datetime_to_str(start),
1331 | 'to': self._datetime_to_str(end),
1332 | 'periodType': period_type
1333 | }
1334 |
1335 | if offset is not None:
1336 | params['offset'] = offset
1337 | params['limit'] = OFFSET
1338 |
1339 | if indicators is not None:
1340 | decoded_indicators = []
1341 | for indicator in indicators:
1342 | decoded_indicators.append(lookup_indicator(indicator).code)
1343 |
1344 | params['indicator'] = ','.join(decoded_indicators)
1345 |
1346 | if point_directions is not None:
1347 | params['pointDirection'] = ','.join(point_directions)
1348 |
1349 | response = self._base_request(endpoint='/operationaldatas', params=params)
1350 |
1351 | return response.text, response.url
1352 |
1353 |
class EntsogPandasClient(EntsogRawClient):
    """Client that returns ENTSOG Transparency Platform data parsed into
    :class:`pandas.DataFrame` objects, built on :class:`EntsogRawClient`."""

    def __init__(self):
        # Initialise the underlying raw client via the parent constructor.
        super(EntsogPandasClient, self).__init__()
        # Caches start out empty; the code that populates/reads them is not
        # part of this chunk — see the rest of the class.
        self._interconnections = None
        self._operator_point_directions = None
1360 |
1361 | def query_connection_points(self) -> pd.DataFrame:
1362 | """
1363 |
1364 | Interconnection points as visible on the Map. Please note that
1365 | this only included the Main points and not the sub points. To
1366 | download all points, the API for Operator Point Directions
1367 | should be used.
1368 |
1369 | Parameters
1370 | ----------
1371 | limit: int
1372 |
1373 | Returns
1374 | -------
1375 | str
1376 | """
1377 |
1378 | json, url = super(EntsogPandasClient, self).query_connection_points(
1379 |
1380 | )
1381 | data = parse_general(json)
1382 | data['url'] = url
1383 |
1384 | return data
1385 |
1386 | def query_operators(self,
1387 | country_code: Union[Country, str] = None,
1388 | has_data: int = 1) -> pd.DataFrame:
1389 |
1390 | """
1391 |
1392 | All operators connected to the transmission system
1393 |
1394 | Parameters
1395 | ----------
1396 | country Union[Area, str]
1397 | limit: int
1398 |
1399 | Returns
1400 | -------
1401 | str
1402 | """
1403 | if country_code:
1404 | country_code = lookup_country(country_code)
1405 | json, url = super(EntsogPandasClient, self).query_operators(
1406 | country_code=country_code, has_data=has_data
1407 | )
1408 | data = parse_general(json)
1409 | data['url'] = url
1410 |
1411 | return data
1412 |
1413 | def query_balancing_zones(self) -> pd.DataFrame:
1414 |
1415 | """
1416 |
1417 | European balancing zones
1418 |
1419 | Parameters
1420 | ----------
1421 |
1422 | Returns
1423 | -------
1424 | pd.DataFrame
1425 | """
1426 |
1427 | json, url = super(EntsogPandasClient, self).query_balancing_zones(
1428 |
1429 | )
1430 | data = parse_general(json)
1431 | data['url'] = url
1432 |
1433 | return data
1434 |
1435 | def query_operator_point_directions(self,
1436 | country_code: Optional[Union[Country, str]] = None) -> pd.DataFrame:
1437 |
1438 | """
1439 |
1440 | All the possible flow directions, being combination of an
1441 | operator, a point, and a flow direction
1442 |
1443 | Parameters
1444 | ----------
1445 | country Union[Area, str]
1446 |
1447 | Returns
1448 | -------
1449 | pd.DataFrame
1450 | """
1451 | if country_code is not None:
1452 | country_code = lookup_country(country_code)
1453 | json, url = super(EntsogPandasClient, self).query_operator_point_directions(
1454 | country_code=country_code
1455 | )
1456 | data = parse_operator_points_directions(json)
1457 | data['url'] = url
1458 |
1459 | return data
1460 |
1461 | def query_interconnections(self,
1462 | from_country_code: Union[Country, str] = None,
1463 | to_country_code: Union[Country, str] = None,
1464 | from_balancing_zone: Union[BalancingZone, str] = None,
1465 | to_balancing_zone: Union[BalancingZone, str] = None,
1466 | from_operator: str = None,
1467 | to_operator: str = None) -> pd.DataFrame:
1468 |
1469 | """
1470 |
1471 | All the interconnections between an exit system and an entry
1472 | system
1473 |
1474 | Parameters
1475 | ----------
1476 | from_country Union[Area, str]
1477 | to_country Union[Area, str]
1478 | from_balancing_zone Union[BalancingZone, str]
1479 | to_balancing_zone Union[BalancingZone, str]
1480 | from_operator str
1481 | to_operator str
1482 |
1483 | Returns
1484 | -------
1485 | pd.DataFrame
1486 |
1487 | """
1488 |
1489 | if from_country_code is not None:
1490 | from_country_code = lookup_country(from_country_code).code
1491 | if to_country_code is not None:
1492 | to_country_code = lookup_country(to_country_code).code
1493 |
1494 | if from_balancing_zone is not None:
1495 | from_balancing_zone = lookup_balancing_zone(from_balancing_zone).code
1496 | if to_balancing_zone is not None:
1497 | to_balancing_zone = lookup_balancing_zone(to_balancing_zone).code
1498 |
1499 | if from_operator is not None:
1500 | from_operator = from_operator
1501 | if to_operator is not None:
1502 | to_operator = to_operator
1503 |
1504 | json, url = super(EntsogPandasClient, self).query_interconnections(
1505 | from_country_code,
1506 | to_country_code,
1507 | from_balancing_zone,
1508 | to_balancing_zone,
1509 | from_operator,
1510 | to_operator
1511 | )
1512 | data = parse_interconnections(json)
1513 |
1514 | return data
1515 |
1516 | def query_aggregate_interconnections(self,
1517 | country_code: Optional[Union[Country, str]] = None) -> pd.DataFrame:
1518 |
1519 | """
1520 |
1521 | All the connections between transmission system operators
1522 | and their respective balancing zones
1523 |
1524 | Parameters
1525 | ----------
1526 | country_code Union[Area, str]
1527 |
1528 | Returns
1529 | -------
1530 | pd.DataFrame
1531 | """
1532 | if country_code is not None:
1533 | country_code = lookup_country(country_code)
1534 | json, url = super(EntsogPandasClient, self).query_aggregate_interconnections(
1535 | country_code=country_code
1536 | )
1537 | data = parse_general(json)
1538 | data['url'] = url
1539 |
1540 | return data
1541 |
1542 | def query_urgent_market_messages(self,
1543 | balancing_zone: Union[BalancingZone, str] = None) -> pd.DataFrame:
1544 |
1545 | """
1546 |
1547 | Urgent Market Messages
1548 |
1549 | Parameters
1550 | ----------
1551 | balancing_zone Union[BalancingZone, str]
1552 |
1553 |
1554 | Returns
1555 | -------
1556 | pd.DataFrame
1557 | """
1558 | if balancing_zone:
1559 | balancing_zone = lookup_balancing_zone(balancing_zone)
1560 |
1561 | json, url = super(EntsogPandasClient, self).query_urgent_market_messages(
1562 | balancing_zone=balancing_zone
1563 | )
1564 |
1565 | data = parse_general(json)
1566 | data['url'] = url
1567 |
1568 | return data
1569 |
1570 | @week_limited
1571 | def query_tariffs(self, start: pd.Timestamp, end: pd.Timestamp,
1572 | country_code: Union[Country, str],
1573 | verbose: bool = True,
1574 | melt: bool = False) -> pd.DataFrame:
1575 |
1576 | """
1577 |
1578 | Information about the various tariff types and components
1579 | related to the tariffs
1580 |
1581 | Parameters
1582 | ----------
1583 | start: pd.Timestamp
1584 | end: pd.Timestamp
1585 | country Union[Area, str]
1586 | limit: int
1587 |
1588 | Returns
1589 | -------
1590 | pd.DataFrame
1591 | """
1592 | country_code = lookup_country(country_code)
1593 | json, url = super(EntsogPandasClient, self).query_tariffs(
1594 | start=start, end=end, country_code=country_code
1595 | )
1596 | data = parse_tariffs(json, verbose=verbose, melt=melt)
1597 | data['url'] = url
1598 |
1599 | return data
1600 |
1601 | @week_limited
1602 | def query_tariffs_sim(self, start: pd.Timestamp, end: pd.Timestamp,
1603 | country_code: Union[Country, str],
1604 | verbose: bool = True,
1605 | melt: bool = False) -> pd.DataFrame:
1606 |
1607 | """
1608 |
1609 | Simulation of all the costs for flowing 1 GWh/day/year for
1610 | each IP per product type and tariff period
1611 |
1612 | Parameters
1613 | ----------
1614 | start: pd.Timestamp
1615 | end: pd.Timestamp
1616 | country Union[Area, str]
1617 | limit: int
1618 |
1619 | Returns
1620 | -------
1621 | pd.DataFrame
1622 | """
1623 | country_code = lookup_country(country_code)
1624 | json, url = super(EntsogPandasClient, self).query_tariffs_sim(
1625 | start=start, end=end, country_code=country_code
1626 | )
1627 | data = parse_tariffs_sim(json, verbose=verbose, melt=melt)
1628 | data['url'] = url
1629 |
1630 | return data
1631 |
1632 | @week_limited
1633 | def query_aggregated_data(self, start: pd.Timestamp, end: pd.Timestamp,
1634 | country_code: Union[Country, str] = None,
1635 | balancing_zone: Union[BalancingZone, str] = None,
1636 | period_type: str = 'day',
1637 | verbose: bool = True) -> str:
1638 | """
1639 | Latest nominations, allocations, physical flow
1640 |
1641 | Parameters
1642 | ----------
1643 | start: pd.Timestamp
1644 | end: pd.Timestamp
1645 | country Union[Area, str]
1646 | limit: int
1647 |
1648 | Returns
1649 | -------
1650 | pd.DataFrame
1651 | """
1652 |
1653 | if country_code is not None:
1654 | country_code = lookup_country(country_code)
1655 | if balancing_zone is not None:
1656 | balancing_zone = lookup_balancing_zone(balancing_zone)
1657 |
1658 | json, url = super(EntsogPandasClient, self).query_aggregated_data(
1659 | start=start, end=end, country_code=country_code, balancing_zone=balancing_zone, period_type=period_type
1660 | )
1661 |
1662 | data = parse_aggregate_data(json, verbose)
1663 | data['url'] = url
1664 |
1665 | return data
1666 |
1667 | @day_limited
1668 | def query_interruptions(self, start : pd.Timestamp, end : pd.Timestamp, verbose : bool = False) -> pd.DataFrame:
1669 |
1670 | """
1671 | Interruptions
1672 |
1673 | Parameters
1674 | ----------
1675 | start: pd.Timestamp
1676 | end: pd.Timestamp
1677 | country Union[Area, str]
1678 | limit: int
1679 |
1680 | Returns
1681 | -------
1682 | pd.DataFrame
1683 | """
1684 |
1685 | json, url = super(EntsogPandasClient, self).query_interruptions(start = start, end = end)
1686 | data = parse_interruptions(json, verbose)
1687 | data['url'] = url
1688 |
1689 | return data
1690 |
1691 | def query_CMP_auction_premiums(self, start: pd.Timestamp, end: pd.Timestamp,
1692 | verbose: bool = True) -> pd.DataFrame:
1693 |
1694 | """
1695 | CMP Auction Premiums
1696 |
1697 | Parameters
1698 | ----------
1699 | start: pd.Timestamp
1700 | end: pd.Timestamp
1701 | country Union[Area, str]
1702 | limit: int
1703 |
1704 | Returns
1705 | -------
1706 | pd.DataFrame
1707 | """
1708 | json, url = super(EntsogPandasClient, self).query_CMP_auction_premiums(
1709 | start=start, end=end
1710 | )
1711 | data = parse_CMP_auction_premiums(json, verbose)
1712 | data['url'] = url
1713 |
1714 | return data
1715 |
1716 | def query_CMP_unavailable_firm_capacity(self, start: pd.Timestamp, end: pd.Timestamp,
1717 | verbose: bool = True) -> pd.DataFrame:
1718 |
1719 | """
1720 | CMP Unavailable firm capacity
1721 |
1722 | Parameters
1723 | ----------
1724 | start: pd.Timestamp
1725 | end: pd.Timestamp
1726 | country Union[Area, str]
1727 | limit: int
1728 |
1729 | Returns
1730 | -------
1731 | pd.DataFrame
1732 | """
1733 | json, url = super(EntsogPandasClient, self).query_CMP_unavailable_firm_capacity(
1734 | start=start, end=end
1735 | )
1736 | data = parse_CMP_unavailable_firm_capacity(json, verbose)
1737 | data['url'] = url
1738 |
1739 | return data
1740 |
1741 | @week_limited
1742 | def query_CMP_unsuccesful_requests(self, start: pd.Timestamp, end: pd.Timestamp,
1743 | verbose: bool = True) -> pd.DataFrame:
1744 |
1745 | """
1746 | CMP Unsuccessful requests
1747 |
1748 | Parameters
1749 | ----------
1750 | start: pd.Timestamp
1751 | end: pd.Timestamp
1752 | country Union[Area, str]
1753 | limit: int
1754 |
1755 | Returns
1756 | -------
1757 | pd.DataFrame
1758 | """
1759 | json, url = super(EntsogPandasClient, self).query_CMP_unsuccesful_requests(
1760 | start=start, end=end
1761 | )
1762 | data = parse_CMP_unsuccesful_requests(json, verbose)
1763 | data['url'] = url
1764 |
1765 | return data
1766 |
1767 | @day_limited
1768 | @paginated
1769 | @documents_limited(OFFSET)
1770 | def query_operational_data_all(self,
1771 | start: pd.Timestamp,
1772 | end: pd.Timestamp,
1773 | period_type: str = 'day',
1774 | indicators: Union[List[Indicator], List[str]] = ['physical_flow'],
1775 | verbose: bool = True,
1776 | offset: int = 0) -> pd.DataFrame:
1777 |
1778 | """
1779 | Operational data for all countries
1780 |
1781 | Parameters
1782 | ----------
1783 | start: pd.Timestamp
1784 | end: pd.Timestamp
1785 | period_type: str
1786 | indicators: Union[List[Indicator],List[str]]
1787 | verbose: bool
1788 |
1789 | Returns
1790 | -------
1791 | pd.DataFrame
1792 |
1793 | """
1794 |
1795 | json, url = super(EntsogPandasClient, self).query_operational_data(
1796 | start=start,
1797 | end=end,
1798 | period_type=period_type,
1799 | indicators=indicators,
1800 | offset = offset
1801 | )
1802 | data = parse_operational_data(json, verbose)
1803 | data['url'] = url
1804 |
1805 | return data
1806 |
1807 | @year_limited
1808 | def query_operational_point_data(
1809 | self,
1810 | start: pd.Timestamp,
1811 | end: pd.Timestamp,
1812 | point_directions : List[str],
1813 | period_type: str = 'day',
1814 | indicators: Union[List[Indicator], List[str]] = None,
1815 | verbose: bool = False) -> pd.DataFrame:
1816 |
1817 | json_data, url = super(EntsogPandasClient, self).query_operational_data(
1818 | start=start,
1819 | end=end,
1820 | point_directions= point_directions,
1821 | period_type=period_type,
1822 | indicators=indicators
1823 | )
1824 |
1825 | data = parse_operational_data(json_data, verbose)
1826 | data['url'] = url
1827 | return data
1828 |
1829 |
    @week_limited
    def _query_operational_data(self,
                                start: pd.Timestamp,
                                end: pd.Timestamp,
                                operator: str,
                                period_type: str = 'day',
                                indicators: Union[List[Indicator], List[str]] = None,
                                verbose: bool = False) -> pd.DataFrame:
        """Operational data restricted to a single operator.

        NOTE(review): the raw ``query_operational_data`` signature visible in
        this file does not accept an ``operator`` keyword, so this call would
        raise ``TypeError`` as written — confirm that the raw method supports
        (or is extended to support) an operator filter.
        """
        json_data, url = super(EntsogPandasClient, self).query_operational_data(
            start=start,
            end=end,
            operator=operator,
            period_type=period_type,
            indicators=indicators
        )

        data = parse_operational_data(json_data, verbose)
        data['url'] = url
        return data
1851 |
--------------------------------------------------------------------------------