├── .gitignore
├── dev
├── img
│   ├── img_3.png
│   ├── img_4.png
│   ├── img_5.png
│   ├── img_6.png
│   ├── img_7.png
│   ├── img_9.png
│   ├── preview.png
│   ├── calendar.png
│   ├── status_alert.png
│   ├── esp_status_bar.png
│   └── mushroom_chips.png
├── .github
│   ├── FUNDING.yml
│   └── workflows
│       └── validate.yml
├── hacs.json
├── examples
│   ├── automations
│   │   ├── load_shedding_reload.yaml
│   │   ├── load_shedding_warning_2hr.yaml
│   │   ├── load_shedding_stage.yaml
│   │   ├── load_shedding_warning.yaml
│   │   └── load_shedding_start_end.yaml
│   └── dashboards
│       ├── mushroom_chips.yaml
│       ├── calendar.yaml
│       ├── status_alert.yaml
│       └── esp_status_bar.yaml
├── custom_components
│   └── load_shedding
│       ├── manifest.json
│       ├── const.py
│       ├── strings.json
│       ├── translations
│       │   └── en.json
│       ├── calendar.py
│       ├── sensor.py
│       ├── config_flow.py
│       └── __init__.py
├── LICENSE
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
2 | .DS_Store
3 | __pycache__
4 |
5 |
--------------------------------------------------------------------------------
/dev:
--------------------------------------------------------------------------------
1 | python -m script.translations develop --integration load_shedding
2 | python -m script.hassfest
--------------------------------------------------------------------------------
/img/img_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wernerhp/ha.integration.load_shedding/HEAD/img/img_3.png
--------------------------------------------------------------------------------
/img/img_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wernerhp/ha.integration.load_shedding/HEAD/img/img_4.png
--------------------------------------------------------------------------------
/img/img_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wernerhp/ha.integration.load_shedding/HEAD/img/img_5.png
--------------------------------------------------------------------------------
/img/img_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wernerhp/ha.integration.load_shedding/HEAD/img/img_6.png
--------------------------------------------------------------------------------
/img/img_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wernerhp/ha.integration.load_shedding/HEAD/img/img_7.png
--------------------------------------------------------------------------------
/img/img_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wernerhp/ha.integration.load_shedding/HEAD/img/img_9.png
--------------------------------------------------------------------------------
/img/preview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wernerhp/ha.integration.load_shedding/HEAD/img/preview.png
--------------------------------------------------------------------------------
/img/calendar.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wernerhp/ha.integration.load_shedding/HEAD/img/calendar.png
--------------------------------------------------------------------------------
/img/status_alert.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wernerhp/ha.integration.load_shedding/HEAD/img/status_alert.png
--------------------------------------------------------------------------------
/img/esp_status_bar.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wernerhp/ha.integration.load_shedding/HEAD/img/esp_status_bar.png
--------------------------------------------------------------------------------
/img/mushroom_chips.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wernerhp/ha.integration.load_shedding/HEAD/img/mushroom_chips.png
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 |
3 | github: wernerhp
4 | custom: ['https://www.buymeacoffee.com/wernerhp']
5 |
--------------------------------------------------------------------------------
/hacs.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Load Shedding",
3 | "render_readme": true,
4 | "content_in_root": false,
5 | "country": "ZA",
6 | "homeassistant": "2022.7.0"
7 | }
8 |
9 |
--------------------------------------------------------------------------------
/.github/workflows/validate.yml:
--------------------------------------------------------------------------------
1 | name: Validate
2 |
3 | on:
4 | push:
5 | pull_request:
6 | schedule:
7 | - cron: "0 0 * * *"
8 |
9 | jobs:
10 | validate:
11 | runs-on: "ubuntu-latest"
12 | steps:
13 | - uses: "actions/checkout@v2"
14 | - name: HACS validation
15 | uses: "hacs/action@main"
16 | with:
17 | category: "integration"
--------------------------------------------------------------------------------
/examples/automations/load_shedding_reload.yaml:
--------------------------------------------------------------------------------
1 | alias: Load Shedding Reload
2 | description: "Reloads the integration every night to work around Issue #70/#71"
3 | trigger:
4 | - platform: time
5 | at: "00:00:00"
6 | condition: []
7 | action:
8 | - service: homeassistant.reload_config_entry
9 | data:
10 | entry_id: ee9d0703259463a110ef7b96a8c8c773
11 | mode: single
12 |
--------------------------------------------------------------------------------
/examples/dashboards/mushroom_chips.yaml:
--------------------------------------------------------------------------------
1 | type: custom:mushroom-chips-card
2 | chips:
3 | - type: template
4 | content: '{{state_attr(entity, "count")}}/{{state_attr(entity, "limit")}}'
5 | entity: sensor.load_shedding_sepush_api_quota
6 | icon: mdi:api
7 | - type: entity
8 | entity: sensor.load_shedding_stage_eskom
9 | - type: entity
10 | entity: sensor.load_shedding_area_tshwane_3_garsfonteinext8
11 | hold_action:
12 | action: more-info
13 | alignment: justify
14 |
--------------------------------------------------------------------------------
/custom_components/load_shedding/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "domain": "load_shedding",
3 | "name": "Load Shedding",
4 | "config_flow": true,
5 | "documentation": "https://github.com/wernerhp/ha_integration_load_shedding/blob/master/README.md",
6 | "issue_tracker": "https://github.com/wernerhp/ha_integration_load_shedding/issues",
7 | "requirements": ["load_shedding==0.12.2"],
8 | "ssdp": [],
9 | "zeroconf": [],
10 | "homekit": {},
11 | "dependencies": [],
12 | "codeowners": ["@wernerhp"],
13 | "iot_class": "cloud_polling",
14 | "version": "1.5.2"
15 | }
16 |
--------------------------------------------------------------------------------
/examples/dashboards/calendar.yaml:
--------------------------------------------------------------------------------
1 | type: custom:atomic-calendar-revive
2 | enableModeChange: true
3 | firstDayOfWeek: 1
4 | refreshInterval: 1800
5 | entities:
6 | - calendar.load_shedding_forecast
7 | showCurrentEventLine: false
8 | showMonth: true
9 | showWeekDay: true
10 | disableEventLink: true
11 | showNoEventsForToday: true
12 | disableLocationLink: true
13 | showFullDayProgress: false
14 | showEventIcon: false
15 | showHiddenText: false
16 | showCalendarName: false
17 | calShowDescription: false
18 | showLastCalendarWeek: true
19 | disableCalEventLink: true
20 | disableCalLocationLink: true
21 | disableCalLink: true
22 | showDescription: false
23 | dateFormat: LL
24 | showDate: false
25 | sortByStartTime: false
26 | showRelativeTime: true
27 | showProgressBar: true
28 | showLocation: true
29 | showDeclined: true
30 | showMultiDayEventParts: false
31 | showMultiDay: false
32 | showLoader: false
33 | maxDaysToShow: 7
34 | showAllDayEvents: true
35 | showEventDate: false
36 | showTimeRemaining: true
37 | showDatePerEvent: false
38 | showAllDayHours: true
39 |
--------------------------------------------------------------------------------
/examples/automations/load_shedding_warning_2hr.yaml:
--------------------------------------------------------------------------------
1 | alias: Load Shedding (Warning) (2hr)
2 | description: ""
3 | trigger:
4 | - platform: numeric_state
5 | entity_id: sensor.load_shedding_area_tshwane_3_garsfonteinext8
6 | attribute: starts_in
7 | below: "120"
8 | condition:
9 | - condition: and
10 | conditions:
11 |       - condition: numeric_state
12 |         entity_id: sensor.load_shedding_stage_eskom
13 |         attribute: stage
14 |         above: "0"
15 |         enabled: true
16 |       - condition: time
17 |         after: input_datetime.wake
18 |         before: input_datetime.sleep
19 | action:
20 |   - service: media_player.volume_set
21 |     data:
22 |       volume_level: 0.75
23 |     target:
24 |       entity_id: media_player.assistant_speakers
25 |       device_id: 4b0376fc9b29b09797298badeea28d72
26 |   - service: tts.home_assistant_say
27 |     data:
28 |       entity_id: media_player.assistant_speakers
29 |       message: Load Shedding starts in 2 hours.
30 |       cache: true
31 | mode: single
32 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright 2020 Werner Pieterson @wernerhp
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy
4 | of this software and associated documentation files (the "Software"), to deal
5 | in the Software without restriction, including without limitation the rights
6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | copies of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in all
11 | copies or substantial portions of the Software.
12 |
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 | SOFTWARE.
--------------------------------------------------------------------------------
/custom_components/load_shedding/const.py:
--------------------------------------------------------------------------------
1 | """Constants for LoadShedding integration."""
2 | from __future__ import annotations
3 |
4 | from typing import Final
5 |
6 | API: Final = "API"
7 | ATTRIBUTION: Final = "Data provided by {provider}"
8 | DOMAIN: Final = "load_shedding"
9 | MAX_FORECAST_DAYS: Final = 7
10 | NAME: Final = "Load Shedding"
11 | MANUFACTURER: Final = "@wernerhp"
12 | VERSION: Final = "1.5.2"
13 | DEFAULT_SCAN_INTERVAL: Final = 60 # 60sec / every minute
14 | AREA_UPDATE_INTERVAL: Final = 86400 # 60sec * 60min * 24h / every day
15 | QUOTA_UPDATE_INTERVAL: Final = 1800 # 60sec * 60min * 0.5 / every half hour
16 | STAGE_UPDATE_INTERVAL: Final = 3600 # 60sec * 60min / every hour
17 |
18 | CONF_DEFAULT_SCHEDULE_STAGE: Final = "default_schedule_stage"
19 | CONF_MUNICIPALITY: Final = "municipality"
20 | CONF_OPTIONS: Final = "options"
21 | CONF_PROVIDER: Final = "provider"
22 | CONF_PROVIDER_ID: Final = "provider_id"
23 | CONF_PROVINCE: Final = "province"
24 | CONF_PROVINCE_ID: Final = "province_id"
25 | CONF_SCHEDULE: Final = "schedule"
26 | CONF_SCHEDULES: Final = "schedules"
27 | CONF_ACTION = "action"
28 | CONF_ADD_AREA = "add_area"
29 | CONF_DELETE_AREA = "delete_area"
30 | CONF_SETUP_API = "setup_api"
31 | CONF_MULTI_STAGE_EVENTS = "multi_stage_events"
32 | CONF_MIN_EVENT_DURATION = "min_event_duration"
33 | CONF_API_KEY: Final = "api_key"
34 | CONF_AREA: Final = "area"
35 | CONF_AREAS: Final = "areas"
36 | CONF_AREA_ID: Final = "area_id"
37 | CONF_SEARCH: Final = "search"
38 | CONF_STAGE: Final = "stage"
39 | CONF_STAGE_COCT: Final = "coct_stage"
40 |
41 | CONF_COCT: Final = "coct"
42 | CONF_ESKOM: Final = "eskom"
43 |
44 | ATTR_AREA: Final = "area"
45 | ATTR_AREAS: Final = "areas"
46 | ATTR_AREA_ID: Final = "area_id"
47 | ATTR_CURRENT: Final = "current"
48 | ATTR_END_IN: Final = "ends_in"
49 | ATTR_END_TIME: Final = "end_time"
50 | ATTR_EVENTS: Final = "events"
51 | ATTR_FORECAST: Final = "forecast"
52 | ATTR_LAST_UPDATE: Final = "last_update"
53 | ATTR_NEXT: Final = "next"
54 | ATTR_NEXT_END_TIME: Final = "next_end_time"
55 | ATTR_NEXT_STAGE: Final = "next_stage"
56 | ATTR_NEXT_START_TIME: Final = "next_start_time"
57 | ATTR_PLANNED: Final = "planned"
58 | ATTR_QUOTA: Final = "quota"
59 | ATTR_SCHEDULE: Final = "schedule"
60 | ATTR_SCHEDULES: Final = "schedules"
61 | ATTR_SCHEDULE_STAGE: Final = "schedule_stage"
62 | ATTR_STAGE: Final = "stage"
63 | ATTR_STAGE_DATA: Final = "stage_data"
64 | ATTR_STAGE_FORECAST: Final = "stage_forecast"
65 | ATTR_START_IN: Final = "starts_in"
66 | ATTR_START_TIME: Final = "start_time"
67 | ATTR_TIME_UNTIL: Final = "time_until"
68 |
--------------------------------------------------------------------------------
/examples/automations/load_shedding_stage.yaml:
--------------------------------------------------------------------------------
1 | alias: Load Shedding (Stage)
2 | description: ""
3 | trigger:
4 | - platform: state
5 | entity_id:
6 | - sensor.load_shedding_stage_eskom
7 | attribute: stage
8 | condition:
9 | - condition: not
10 | conditions:
11 | - condition: state
12 | entity_id: sensor.load_shedding_stage_eskom
13 | state: unavailable
14 | alias: Not Unavailable
15 | action:
16 | - service: notify.mobile_app_all
17 | data:
18 | title: Load Shedding
19 | message: |-
20 | {% if is_state_attr(trigger.entity_id, "stage", 0) %}
21 | Suspended
22 | {% else %}
23 | {{ states(trigger.entity_id) }}
24 | {% endif %}
25 | enabled: true
26 | alias: Notify
27 | - choose:
28 | - conditions:
29 | - condition: or
30 | conditions:
31 | - condition: time
32 | after: input_datetime.sleep
33 | weekday:
34 | - mon
35 | - tue
36 | - wed
37 | - thu
38 | - fri
39 | - sat
40 | - sun
41 | - condition: time
42 | before: input_datetime.wake
43 | weekday:
44 | - sun
45 | - sat
46 | - fri
47 | - thu
48 | - wed
49 | - tue
50 | - mon
51 | sequence:
52 | - wait_for_trigger:
53 | - platform: time
54 | at: input_datetime.wake
55 | continue_on_timeout: false
56 | default: []
57 | alias: Defer
58 | - service: tts.home_assistant_say
59 | data:
60 | entity_id: media_player.assistant_speakers
61 | cache: true
62 | message: |-
63 | Load Shedding {% if is_state_attr(trigger.entity_id, "stage", 0) %}
64 | Suspended
65 | {% else %}
66 | {{ states(trigger.entity_id) }}
67 | {% endif %}
68 | alias: Announce
69 | - delay:
70 | hours: 0
71 | minutes: 0
72 | seconds: 5
73 | milliseconds: 0
74 | - if:
75 | - condition: state
76 | entity_id: sensor.load_shedding_area_tshwane_3_garsfonteinext8
77 | state: "on"
78 | then:
79 | - service: tts.home_assistant_say
80 | data:
81 | message: Load shedding imminent!
82 | entity_id: media_player.assistant_speakers
83 | cache: true
84 | alias: Announce
85 | alias: Announce Imminent
86 | mode: restart
87 |
--------------------------------------------------------------------------------
/examples/automations/load_shedding_warning.yaml:
--------------------------------------------------------------------------------
1 | alias: Load Shedding (Warning)
2 | description: ""
3 | trigger:
4 | - platform: numeric_state
5 | entity_id: sensor.load_shedding_area_tshwane_3_garsfonteinext8
6 | attribute: starts_in
7 | below: "15"
8 | condition:
9 | - condition: and
10 | conditions:
11 | - condition: numeric_state
12 | entity_id: sensor.load_shedding_stage_eskom
13 | attribute: stage
14 | above: "0"
15 | - condition: time
16 | after: input_datetime.wake
17 | before: input_datetime.sleep
18 | action:
19 | - service: telegram_bot.send_message
20 | data:
21 | message: Load Shedding starts in 15 minutes.
22 | title: Load Shedding
23 | enabled: false
24 | - service: media_player.volume_set
25 | data:
26 | volume_level: 0.7
27 | target:
28 | entity_id: media_player.assistant_speakers
29 | - service: tts.home_assistant_say
30 | data:
31 | entity_id: media_player.assistant_speakers
32 | message: Load Shedding starts in 15 minutes.
33 | cache: true
34 | - service: rest_command.dell_alert
35 | data: {}
36 | - delay:
37 | hours: 0
38 | minutes: 0
39 | seconds: 5
40 | milliseconds: 0
41 | - service: rest_command.dell_off
42 | data: {}
43 | - service: rest_command.slack_status
44 | data:
45 | emoji: ":gopher-coffee:"
46 | status: "\"Grabbing coffee before it's too late...\""
47 | enabled: true
48 | - service: notify.mobile_app_macbook_pro
49 | data:
50 | message: Load Shedding starts in 15 minutes.
51 | - service: notify.notify
52 | data:
53 | message: TTS
54 | title: Load Shedding
55 | data:
56 | ttl: 0
57 | priority: high
58 | media_stream: alarm_stream
59 | tts_text: Load Shedding starts in 15 minutes.
60 | enabled: false
61 | - service: notify.notify
62 | data:
63 | message: Load Shedding starts in 15 minutes.
64 | title: Load Shedding
65 | data:
66 | notification_icon: mdi:lightning-bolt-outline
67 | timeout: 900
68 | chronometer: true
69 | when: >-
70 | {{ (now() + timedelta( minutes = 15 )) | as_timestamp(default=0.0) |
71 | int(default=0.0) }}
72 | ttl: 0
73 | priority: high
74 | color: red
75 | actions:
76 | - action: URI
77 | title: Forecast
78 | uri: homeassistant://navigate/wall-panel/load-shedding
79 | - delay:
80 | hours: 0
81 | minutes: 10
82 | seconds: 0
83 | milliseconds: 0
84 | enabled: false
85 | - service: shell_command.turn_off_um690_ubuntu
86 | data: {}
87 | enabled: false
88 | mode: single
89 |
--------------------------------------------------------------------------------
/examples/dashboards/status_alert.yaml:
--------------------------------------------------------------------------------
1 | type: markdown
2 | content: >-
3 | {%- set stage_sensor = "sensor.load_shedding_stage_eskom" -%}
4 | {%- set area_sensor = "sensor.load_shedding_area_tshwane_3_garsfonteinext8" -%}
5 |
6 | {%- set stage = state_attr(stage_sensor, "stage") or 0 -%}
7 | {%- set next_stage = state_attr(stage_sensor, "next_stage") or 0 -%}
8 | {%- set next_start = state_attr(stage_sensor, "next_start_time") or 0 -%}
9 | {%- set next_end = state_attr(stage_sensor, "next_end_time") or 0 -%}
10 | {%- set next_in = 0 -%}
11 | {%- set next_time = 0 -%}
12 | {%- set alert = states(stage_sensor) or 0 -%}
13 | {%- set alert_type = "success" -%}
14 |
15 | {% set area_forecast = state_attr(area_sensor, "forecast") or [] -%}
16 | {%- set starts_in = state_attr(area_sensor, "starts_in") or 0 -%}
17 | {%- set ends_in = state_attr(area_sensor, "ends_in") or 0 -%}
18 |
19 | {%- if area_forecast -%}
20 | {%- set next_start = area_forecast[0].start_time or 0 -%}
21 | {%- set next_end = area_forecast[0].end_time or 0 -%}
22 | {%- endif -%}
23 | {%- if is_state(area_sensor, "off") and starts_in and next_start and next_end -%}
24 | {%- set next_in = starts_in if starts_in else 0 -%}
25 | {%- if next_start == 0 or next_end == 0 -%}
26 | {%- set next_time = as_timestamp(next_start, default=0.0) -%}
27 | {%- set alert = "Stage {stage}".format(stage=next_stage) + " starts in {countdown} ({next})" -%}
28 | {%- elif not stage and starts_in > 1440 -%}
29 | {%- set next_time = as_timestamp(next_start, default=0.0) -%}
30 | {%- set alert = "No Load Shedding" -%}
31 | {%- else -%}
32 | {%- set next_time = as_timestamp(next_start, default=0.0) -%}
33 | {%- set alert = "Load Shedding starts in {countdown} ({next})" -%}
34 | {%- endif -%}
35 | {% if next_in > 1440 %}
36 | {%- set alert_type = "success" -%}
37 | {% elif 1440 >= next_in >= 60 %}
38 | {%- set alert_type = "warning" -%}
39 | {% elif 60 > next_in %}
40 | {%- set alert_type = "error" -%}
41 | {% endif %}
42 | {%- elif is_state(area_sensor, "on") and ends_in -%}
43 | {%- set next_time = as_timestamp(next_end, default=0.0) -%}
44 | {# {%- set next_in = ends_in if ends_in else 0 -%} #}
45 | {%- set alert = "Load Shedding ends in {countdown} ({next})" -%}
46 | {%- set alert_type = "error" -%}
47 | {%- endif -%}
48 |
49 | {% set mins = starts_in %}
50 | {% if is_state(area_sensor, "on") %}
51 | {% set mins = ends_in %}
52 | {% endif %}
53 | {% set countdown = "%02dh%02d"|format(mins // 60, mins % 60) %}
54 |
55 | {%- set alert = alert.format(countdown=countdown, next=next_time | timestamp_custom("%H:%M", True)) -%}
56 |
57 | {{ alert }}
58 |
--------------------------------------------------------------------------------
/custom_components/load_shedding/strings.json:
--------------------------------------------------------------------------------
1 | {
2 | "config": {
3 | "step": {
4 | "lookup_areas": {
5 | "description": "Search your area name, e.g. Milnerton, then select your area ID from the results.",
6 | "data": {
7 | "provider": "Provider",
8 | "province": "Province",
9 | "results": "Results",
10 | "search": "Search",
11 | "default_schedule": "Default Schedule",
12 | "area": "Area",
13 | "area_id": "Area"
14 | }
15 | },
16 | "provider": {
17 | "description": "",
18 | "data": {
19 | "provider": "Provider"
20 | }
21 | },
22 | "sepush": {
23 | "description": "Get a Free (50 requests per day) API Key from [Eskom Se Push](https://eskomsepush.gumroad.com/l/api).",
24 | "data": {
25 | "api_key": "API Key / Token"
26 | }
27 | }
28 | },
29 | "error": {
30 | "cannot_connect": "[%key:common::config_flow::error::cannot_connect%]",
31 | "provider_error": "Unable to reach Provider. See Logs for details.",
32 | "no_results_found": "No results found. Try searching for a different area with the same schedule as yours or choose a different provider.",
33 | "no_provider": "No provider selected.",
34 | "sepush_400": "Bad request. Create a issue for the integration and include logs.",
35 | "sepush_403": "Invalid API Key / Token",
36 | "sepush_429": "Token quota exceeded",
37 | "sepush_500": "SePush server unavailable"
38 | },
39 | "abort": {
40 | "already_configured": "[%key:common::config_flow::abort::already_configured_device%]",
41 | "cannot_connect": "[%key:common::config_flow::error::cannot_connect%]"
42 | }
43 | },
44 | "options": {
45 | "step": {
46 | "init": {
47 | "title": "Load Shedding Configuration",
48 | "description": "Please select the desired action.",
49 | "data": {
50 | "add_area": "Add area",
51 | "delete_area": "Remove area",
52 | "setup_api": "Configure API",
53 | "multi_stage_events": "Multi-stage events",
54 | "min_event_duration": "Min. event duration (mins)"
55 | }
56 | },
57 | "sepush": {
58 | "description": "Get a Free (50 requests per day) API Key from [Eskom Se Push](https://eskomsepush.gumroad.com/l/api).",
59 | "data": {
60 | "api_key": "API Key / Token"
61 | }
62 | },
63 | "lookup_areas": {
64 | "description": "Search your area name, e.g. Milnerton, then select your area ID from the results.",
65 | "data": {
66 | "provider": "Provider",
67 | "province": "Province",
68 | "results": "Results",
69 | "search": "Search",
70 | "default_schedule": "Default Schedule",
71 | "area": "Area",
72 | "area_id": "Area"
73 | }
74 | }
75 | },
76 | "error": {
77 | "cannot_connect": "[%key:common::config_flow::error::cannot_connect%]",
78 | "provider_error": "Unable to reach Provider. See Logs for details.",
79 | "no_results_found": "No results found. Try searching for a different area with the same schedule as yours or choose a different provider.",
80 | "no_provider": "No provider selected.",
81 | "sepush_400": "Bad request. Create a issue for the integration and include logs.",
82 | "sepush_403": "Invalid API Key / Token",
83 | "sepush_429": "Token quota exceeded",
84 | "sepush_500": "SePush server unavailable"
85 | }
86 | }
87 | }
88 |
--------------------------------------------------------------------------------
/examples/automations/load_shedding_start_end.yaml:
--------------------------------------------------------------------------------
1 | alias: Load Shedding (Start/End)
2 | description: ""
3 | trigger:
4 | - platform: state
5 | entity_id:
6 | - sensor.load_shedding_area_tshwane_3_garsfonteinext8
7 | to: "on"
8 | from: "off"
9 | condition:
10 | - condition: numeric_state
11 | entity_id: sensor.load_shedding_stage_eskom
12 | attribute: stage
13 | above: "0"
14 | action:
15 | - service: scene.create
16 | data:
17 | scene_id: slack_status_backup
18 | snapshot_entities:
19 | - input_text.slack_status_emoji
20 | - input_text.slack_status_text
21 | - service: input_text.set_value
22 | data:
23 | value: ":loadsheddingtransparent:"
24 | target:
25 | entity_id: input_text.slack_status_emoji
26 | - service: input_text.set_value
27 | data:
28 | value: >-
29 | Load Shedding until {{
30 | (state_attr('sensor.load_shedding_area_tshwane_3_garsfonteinext8','end_time')
31 | | as_datetime | as_local).strftime('%H:%M (%Z)') }}
32 | target:
33 | entity_id: input_text.slack_status_text
34 | - if:
35 | - condition: time
36 | after: "17:00:00"
37 | weekday:
38 | - sat
39 | - fri
40 | - thu
41 | - wed
42 | - tue
43 | - mon
44 | - sun
45 | then:
46 | - service: light.turn_on
47 | data:
48 | color_temp: 500
49 | brightness_pct: 1
50 | target:
51 | entity_id: light.all_lights
52 | - service: light.turn_off
53 | data: {}
54 | target:
55 | entity_id: light.all_lights
56 | enabled: false
57 | - delay: >-
58 | {{ state_attr('sensor.load_shedding_area_tshwane_3_garsfonteinext8',
59 | 'ends_in') | multiply(60) | int(default=0.0) }}
60 | - service: scene.turn_on
61 | target:
62 | entity_id: scene.slack_status_backup
63 | data: {}
64 | - service: rest_command.slack_status
65 | data:
66 | emoji: "{{ states.input_text.slack_status_emoji.state }}"
67 | status: "{{ states.input_text.slack_status_text.state }}"
68 | enabled: true
69 | - service: rest_command.slack_status_zatech
70 | data:
71 | emoji: ":loadsheddingtransparent:"
72 | status: >-
73 | Load Shedding until {{
74 | (state_attr('sensor.load_shedding_area_tshwane_3_garsfonteinext8','end_time')
75 | | as_datetime | as_local).strftime('%H:%M (%Z)') }}
76 | status_expiration: >-
77 | {{
78 | state_attr('sensor.load_shedding_area_tshwane_3_garsfonteinext8','end_time')
79 | | as_timestamp(default=0.0)| int(default=0.0) }}
80 | enabled: true
81 | - if:
82 | - condition: state
83 | entity_id: group.family
84 | state: home
85 | - condition: time
86 | after: "08:00:00"
87 | before: "17:00:00"
88 | then:
89 | - service: light.turn_on
90 | data:
91 | transition: 5
92 | kelvin: 5000
93 | brightness_pct: 75
94 | target:
95 | entity_id: light.study_lights
96 | enabled: false
97 | - if:
98 | - condition: state
99 | entity_id: sensor.sun_stage
100 | state: Night
101 | then:
102 | - service: light.turn_on
103 | data:
104 | brightness_pct: 2
105 | target:
106 | area_id: 42ace1a6ae2940f481359957243acb92
107 | - service: homeassistant.turn_off
108 | data: {}
109 | target:
110 | entity_id:
111 | - input_boolean.bedroom_motion_lights
112 | - input_boolean.bedroom_routine
113 | - light.bed_lamps
114 | enabled: false
115 | mode: single
116 |
--------------------------------------------------------------------------------
/custom_components/load_shedding/translations/en.json:
--------------------------------------------------------------------------------
1 | {
2 | "config": {
3 | "abort": {
4 | "already_configured": "Device is already configured",
5 | "cannot_connect": "Failed to connect"
6 | },
7 | "error": {
8 | "cannot_connect": "Failed to connect",
9 | "no_provider": "No provider selected.",
10 | "no_results_found": "No results found. Try searching for a different area with the same schedule as yours or choose a different provider.",
11 | "provider_error": "Unable to reach Provider. See Logs for details.",
12 | "sepush_400": "Bad request. Create a issue for the integration and include logs.",
13 | "sepush_403": "Invalid API Key / Token",
14 | "sepush_429": "Token quota exceeded",
15 | "sepush_500": "SePush server unavailable"
16 | },
17 | "step": {
18 | "lookup_areas": {
19 | "data": {
20 | "area": "Area",
21 | "area_id": "Area",
22 | "default_schedule": "Default Schedule",
23 | "provider": "Provider",
24 | "province": "Province",
25 | "results": "Results",
26 | "search": "Search"
27 | },
28 | "description": "Search your area name, e.g. Milnerton, then select your area ID from the results."
29 | },
30 | "provider": {
31 | "data": {
32 | "provider": "Provider"
33 | },
34 | "description": ""
35 | },
36 | "sepush": {
37 | "data": {
38 | "api_key": "API Key / Token"
39 | },
40 | "description": "Get a Free (50 requests per day) API Key from [Eskom Se Push](https://eskomsepush.gumroad.com/l/api)."
41 | }
42 | }
43 | },
44 | "options": {
45 | "error": {
46 | "cannot_connect": "Failed to connect",
47 | "no_provider": "No provider selected.",
48 | "no_results_found": "No results found. Try searching for a different area with the same schedule as yours or choose a different provider.",
49 | "provider_error": "Unable to reach Provider. See Logs for details.",
50 | "sepush_400": "Bad request. Create a issue for the integration and include logs.",
51 | "sepush_403": "Invalid API Key / Token",
52 | "sepush_429": "Token quota exceeded",
53 | "sepush_500": "SePush server unavailable"
54 | },
55 | "step": {
56 | "init": {
57 | "data": {
58 | "add_area": "Add area",
59 | "delete_area": "Remove area",
60 | "min_event_duration": "Min. event duration (mins)",
61 | "multi_stage_events": "Multi-stage events",
62 | "setup_api": "Configure API"
63 | },
64 | "description": "Please select the desired action.",
65 | "title": "Load Shedding Configuration"
66 | },
67 | "lookup_areas": {
68 | "data": {
69 | "area": "Area",
70 | "area_id": "Area",
71 | "default_schedule": "Default Schedule",
72 | "provider": "Provider",
73 | "province": "Province",
74 | "results": "Results",
75 | "search": "Search"
76 | },
77 | "description": "Search your area name, e.g. Milnerton, then select your area ID from the results."
78 | },
79 | "sepush": {
80 | "data": {
81 | "api_key": "API Key / Token"
82 | },
83 | "description": "Get a Free (50 requests per day) API Key from [Eskom Se Push](https://eskomsepush.gumroad.com/l/api)."
84 | }
85 | }
86 | }
87 | }
--------------------------------------------------------------------------------
/custom_components/load_shedding/calendar.py:
--------------------------------------------------------------------------------
1 | """Support for the LoadShedding service."""
2 | from __future__ import annotations
3 |
4 | from datetime import datetime
5 |
6 | from homeassistant.components.calendar import CalendarEntity, CalendarEvent
7 | from homeassistant.config_entries import ConfigEntry
8 | from homeassistant.core import HomeAssistant, callback
9 | from homeassistant.helpers.entity import Entity
10 | from homeassistant.helpers.entity_platform import AddEntitiesCallback
11 | from homeassistant.helpers.update_coordinator import CoordinatorEntity
12 |
13 | from . import LoadSheddingDevice
14 | from .const import (
15 | ATTR_AREA,
16 | ATTR_END_TIME,
17 | ATTR_FORECAST,
18 | ATTR_STAGE,
19 | ATTR_START_TIME,
20 | CONF_MULTI_STAGE_EVENTS,
21 | DOMAIN,
22 | NAME,
23 | )
24 |
25 |
26 | async def async_setup_entry(
27 | hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
28 | ) -> None:
29 | """Add LoadShedding entities from a config_entry."""
30 | coordinators = hass.data.get(DOMAIN, {}).get(entry.entry_id)
31 | area_coordinator = coordinators.get(ATTR_AREA)
32 |
33 | multi_stage_events = False
34 | if entry.options.get(CONF_MULTI_STAGE_EVENTS):
35 | multi_stage_events = True
36 |
37 | entities: list[Entity] = [
38 | LoadSheddingForecastCalendar(area_coordinator, multi_stage_events)
39 | ]
40 | async_add_entities(entities)
41 |
42 |
43 | class LoadSheddingForecastCalendar(
44 | LoadSheddingDevice, CoordinatorEntity, CalendarEntity
45 | ):
46 | """Define a LoadShedding Calendar entity."""
47 |
48 | def __init__(
49 | self, coordinator: CoordinatorEntity, multi_stage_events: bool
50 | ) -> None:
51 | """Initialize the forecast calendar."""
52 | super().__init__(coordinator)
53 | self.data = self.coordinator.data
54 |
55 | self._attr_unique_id = (
56 | f"{self.coordinator.config_entry.entry_id}_calendar_forecast"
57 | )
58 | self._event: CalendarEvent | None = None
59 | self.entity_id = f"{DOMAIN}.{DOMAIN}_forecast"
60 | self.multi_stage_events = multi_stage_events
61 |
62 | @property
63 | def name(self) -> str | None:
64 | """Return the forecast calendar name."""
65 | return f"{NAME} Forecast"
66 |
67 | @property
68 | def event(self) -> CalendarEvent | None:
69 | """Return the next upcoming event."""
70 | return self._event
71 |
72 | async def async_get_events(
73 | self,
74 | hass: HomeAssistant,
75 | start_date: datetime,
76 | end_date: datetime,
77 | ) -> list[CalendarEvent]:
78 | """Return calendar events within a datetime range."""
79 | events = []
80 |
81 | for area in self.coordinator.areas:
82 | area_forecast = self.data.get(area.id, {}).get(ATTR_FORECAST)
83 | if area_forecast:
84 | for forecast in area_forecast:
85 | forecast_stage = str(forecast.get(ATTR_STAGE))
86 | forecast_start_time = forecast.get(ATTR_START_TIME)
87 | forecast_end_time = forecast.get(ATTR_END_TIME)
88 |
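                    # Skip forecast events that fall entirely outside the
                    # requested start_date..end_date window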
89 |                     if forecast_end_time <= start_date:
90 |                         continue
91 |                     if forecast_start_time >= end_date:
92 |                         continue
93 |
94 | event: CalendarEvent = CalendarEvent(
95 | start=forecast_start_time,
96 | end=forecast_end_time,
97 | summary=forecast_stage,
98 | location=area.name,
99 | description=f"{NAME}",
100 | )
101 | events.append(event)
102 |
103 | if not self.multi_stage_events:
104 | continue
105 |
106 |             # Merge contiguous events ("2" + "4" -> "2/4"); while-loop, since the list shrinks as we merge
107 |             i = 0
108 |             while i + 1 < len(events):
109 |                 cur, nxt = events[i], events[i + 1]
110 |                 if cur.end == nxt.start:
111 |                     cur.summary, cur.end = f"{cur.summary}/{nxt.summary}", nxt.end
112 |                     del events[i + 1]
113 |                 else:
114 |                     i += 1
115 |
116 | if events:
117 | self._event = events[0]
118 |
119 | return events
120 |
121 | @callback
122 | def _handle_coordinator_update(self) -> None:
123 | """Handle updated data from the coordinator."""
124 | if data := self.coordinator.data:
125 | self.data = data
126 | self.async_write_ha_state()
127 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Load Shedding [](https://github.com/hacs/integration)
2 |
3 | A Home Assistant integration to track your load shedding schedule.
4 |
5 |
6 |
7 |
8 | >💡This integration uses the Eskom SePush API.
9 | >🚨Read the [EskomSePush API Licence Agreement](https://esp.info/license-agreement).
10 | >🔑Get a Free (50 requests per day) API Key from [Eskom Se Push](https://eskomsepush.gumroad.com/l/api). You can pay for a higher limit.
11 | >⚠️Do not use more than one API key from the same IP. Your key will be blocked.
12 |
13 | # HACS Install
14 | 1. Go to **HACS Integrations** on your Home Assistant instance
15 | 2. Select "+ **Explore & Download Repositories**" and search for "**Load Shedding**"
16 | 3. Select **Load Shedding: A Home Assistant integration to track your load shedding schedule.** and **Download this repository with HACS**
17 | 
18 | 4. Once downloaded, click the **My Integrations** button to configure the integration.
19 | [](https://my.home-assistant.io/redirect/config_flow_start/?domain=load_shedding)
20 | 5. Setup cards and automations
21 | 6. Please [support my work](https://github.com/wernerhp/ha.integration.load_shedding/blob/master/README.md#support)
22 |
23 | # Support
24 | If you find this integration useful, please consider supporting my work by [buying me a coffee](https://www.buymeacoffee.com/wernerhp) or making a donation in the form of Bitcoin.
25 |
26 |
27 | ### Buy me a coffee
28 |
29 |
30 | Sign up for a free Luno wallet using [this invite link](http://www.luno.com/invite/X48WY) or enter the code **X48WY** in the **Rewards** section and we can both earn **R 25.00 free BTC** after investing our first R 500.
31 |
32 | ### Bitcoin
33 | `3EGnQKKbF6AijqW9unyBuW8YeEscY5wMSE`
34 |
35 |
36 |
37 | # Manual Install
38 |
39 | Instructions
40 |
41 | 1. Download and unzip to your Home Assistant `config/custom_components` folder.
42 |
43 | Screenshot
44 |
45 | 
46 |
47 |
48 | 2. Restart Home Assistant.
49 | 3. Go to Settings > Devices & Services > + Add Integration
50 |
51 | [](https://my.home-assistant.io/redirect/config_flow_start/?domain=load_shedding)
52 |
53 | 4. Search for 'Load Shedding' and follow the config flow.
54 |
55 | Screenshot
56 |
57 |
58 |
59 |
60 | 5. If you're coming from a previous version of this integration, you may need to delete the `.json` files in `/config/.cache`.
61 |
62 | Screenshot
63 |
64 | 
65 |
66 |
67 |
68 | # Sensor
69 | The load shedding sensor's state always reflects the current load shedding stage,
70 | i.e. when load shedding is suspended it shows **No Load Shedding**, and when Stage 2 is active it shows **Stage 2**.
71 |
72 |
73 | Screenshot
74 |
75 |
76 |
77 |
78 |
79 |
80 |
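You can also use the sensor's attributes in your own templates. A minimal sketch, assuming the Eskom stage sensor entity ID used in the examples below (substitute your own):

```yaml
# configuration.yaml: binary sensor that is on whenever any stage is active
template:
  - binary_sensor:
      - name: "Load Shedding Active"
        state: "{{ (state_attr('sensor.load_shedding_stage_eskom', 'stage') or 0) > 0 }}"
```
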
81 | # Cards
82 |
83 | ## Mushroom Chips
84 | - [Mushroom](https://github.com/piitaya/lovelace-mushroom)
85 |
86 | - [Code](examples/dashboards/mushroom_chips.yaml)
87 |
88 |
89 |
90 |
91 | ## Status Alert
92 | - [Markdown Card](https://www.home-assistant.io/dashboards/markdown/)
93 |
94 | - [Code](examples/dashboards/status_alert.yaml)
95 |
96 |
97 |
98 | ## Calendar
99 |
100 | - [Atomic Calendar Revive](https://github.com/totaldebug/atomic-calendar-revive)
101 |
102 | - [Code](examples/dashboards/calendar.yaml)
103 |
104 |
105 |
106 | ## ESP Status Bar
107 | Thanks to @ribeirompl for this [contribution](https://github.com/wernerhp/ha.integration.load_shedding/issues/72#issuecomment-1712458448)
108 |
109 | - [Lovelace HTML Jinja2 Template card](https://github.com/PiotrMachowski/Home-Assistant-Lovelace-HTML-Jinja2-Template-card)
110 |
111 | - [Code](examples/dashboards/esp_status_bar.yaml)
112 |
113 |
114 |
115 | # Automation Ideas
116 | These are just some automations I've got set up. They are not perfect and will require some tweaking on your end.
117 | Replace `sensor.load_shedding_stage_eskom`, `sensor.load_shedding_area_tshwane_3_garsfonteinext8` and any other `entity_id` values with your own.
118 |
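Most of them key off two things: the stage sensor's `stage` attribute and the area sensor's `starts_in`/`ends_in` attributes (in minutes). A minimal sketch of the shared trigger/condition pattern, using the example entity IDs from this repo:

```yaml
# Fire when load shedding for your area starts within 15 minutes,
# but only while a stage is actually active.
trigger:
  - platform: numeric_state
    entity_id: sensor.load_shedding_area_tshwane_3_garsfonteinext8
    attribute: starts_in
    below: "15"
condition:
  - condition: numeric_state
    entity_id: sensor.load_shedding_stage_eskom
    attribute: stage
    above: "0"
```
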
119 | ## Load Shedding Stage
120 | Announce Load Shedding stage changes on speakers and push notifications.
121 | - [Load Shedding (Stage)](examples/automations/load_shedding_stage.yaml)
122 |
123 | ## Load Shedding Start/End
124 | Do stuff when load shedding starts, e.g. update your Slack status, turn off lights, etc.
125 | - [Load Shedding (Start/End)](examples/automations/load_shedding_start_end.yaml)
126 |
127 | ### Slack Status
128 |
129 | Set up a [REST Command](https://www.home-assistant.io/integrations/rest_command/) and two automations to set your Slack status when Load Shedding starts and ends.
130 |
131 | Example
132 |
133 | `secrets.yaml`
134 | ```yaml
135 | slack_token: Bearer xoxp-XXXXXXXXXX-XXXXXXXXXXXX-XXXXXXXXXXXXX-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
136 | ```
137 | `configuration.yaml`
138 | ```yaml
139 | rest_command:
140 | slack_status:
141 | url: https://slack.com/api/users.profile.set
142 | method: POST
143 | headers:
144 | authorization: !secret slack_token
145 | accept: "application/json, text/html"
146 | payload: '{"profile":{"status_text": "{{ status }}","status_emoji": "{{ emoji }}"}}'
147 | content_type: "application/json; charset=utf-8"
148 | verify_ssl: true
149 | ```
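
With the REST command in place, automations call it like this (emoji and status text are illustrative):

```yaml
- service: rest_command.slack_status
  data:
    emoji: ":zap:"
    status: "Load Shedding"
```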
150 |
151 |
152 |
153 |
154 | ## Load Shedding Warning (15 min)
155 | A 15-minute warning on your speaker and via Telegram before load shedding starts.
156 | - [Load Shedding (Warning)](examples/automations/load_shedding_warning.yaml)
157 |
158 | ## Load Shedding Warning (2 hr)
159 | A 2-hour warning on your smart speaker.
160 | - [Load Shedding (Warning) (2hr)](examples/automations/load_shedding_warning_2hr.yaml)
161 |
162 | ## Load Shedding Reload
163 | Reload the integration every night. This is a workaround for Issues #70/#71.
164 | - [Load Shedding Reload](examples/automations/load_shedding_reload.yaml)
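
The `entry_id` in the example automation is unique to each install. A variant that avoids hard-coding it, assuming the example stage sensor entity (`homeassistant.reload_config_entry` also accepts an entity target and reloads the config entry that entity belongs to):

```yaml
action:
  - service: homeassistant.reload_config_entry
    target:
      entity_id: sensor.load_shedding_stage_eskom
```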
165 |
--------------------------------------------------------------------------------
/examples/dashboards/esp_status_bar.yaml:
--------------------------------------------------------------------------------
1 | type: custom:html-template-card
2 | ignore_line_breaks: true
3 | content: >
4 | {% set area_sensor = "sensor.load_shedding_area_tshwane_3_garsfonteinext8" %}
5 | {% set number_of_days = 2 %}
6 | {% set show_day_borders = false %}
7 | {% set show_end_times = false %}
8 | {% set timeslots = 48 %}
9 |
173 | {% set area_schedule = state_attr(area_sensor, "forecast") %}
174 | {% if area_schedule is none %}{% set area_schedule = [] %}{% endif %}
175 | {% for day_offset_idx in range(number_of_days) %}
176 | {% set today_datetime_midnight = now().replace(hour=0,minute=0,second=0,microsecond=0) + timedelta(days=day_offset_idx) %}
177 |
178 |
{{ today_datetime_midnight.strftime("%A, %B %-d") }}
182 |
183 | {% set ns = namespace(active_class_name="", last_slot_was_active=false, current_slot_was_activated=false) %}
184 | {% for half_hour_time_slot_idx in range(timeslots) %}
185 | {% set half_hour_time_slot = today_datetime_midnight + timedelta(minutes=30*half_hour_time_slot_idx) %}
186 | {% set ns.active_class_name = "" %}
187 | {% set ns.current_slot_was_activated = false %}
188 | {% for loadshedding in area_schedule %}
189 | {% if not ns.current_slot_was_activated %}
190 | {% if loadshedding["start_time"] <= half_hour_time_slot < loadshedding["end_time"] %}
191 | {% if not ns.last_slot_was_active %}
192 | {% set percentage_of_region = (half_hour_time_slot_idx/timeslots)*100 %}
193 |
194 |
{{ half_hour_time_slot.strftime("%H:%M") }}
196 | {% endif %}
197 | {% set ns.current_slot_was_activated = true %}
198 | {% set ns.last_slot_was_active = true %}
199 | {% set ns.active_class_name = "active_slot active_slot_" + loadshedding['stage']|lower|replace(' ','_') %}
200 | {% endif %}
201 | {% endif %}
202 | {% endfor %}
203 | {% if not ns.current_slot_was_activated %}
204 | {% if show_end_times and ns.last_slot_was_active %}
205 | {% set percentage_of_region = (half_hour_time_slot_idx/timeslots)*100 %}
206 |
208 |
{{ half_hour_time_slot.strftime("%H:%M") }}
211 | {% endif %}
212 | {% set ns.last_slot_was_active = false %}
213 | {% endif %}
214 |
215 | {% endfor %}
216 | {% if day_offset_idx == 0 %}
217 | {% set current_time_indicator_progress = now().hour*2 + now().minute/30 %}
218 | {% set percentage_of_region = (current_time_indicator_progress/timeslots)*100 %}
219 |
221 | {% if not show_end_times %}
222 |
Now
224 | {% endif %}
225 | {% endif %}
226 |
227 |
228 | {% endfor %}
229 |
--------------------------------------------------------------------------------
/custom_components/load_shedding/sensor.py:
--------------------------------------------------------------------------------
1 | """Support for the LoadShedding service."""
2 |
3 | from __future__ import annotations
4 |
5 | from dataclasses import dataclass
6 | from datetime import UTC, datetime, timedelta
7 | from typing import Any, cast
8 |
9 | from load_shedding.providers import Area, Stage
10 |
11 | from homeassistant.components.sensor import RestoreSensor, SensorEntityDescription
12 | from homeassistant.config_entries import ConfigEntry
13 | from homeassistant.const import ATTR_ATTRIBUTION, STATE_OFF, STATE_ON
14 | from homeassistant.core import HomeAssistant, callback
15 | from homeassistant.helpers.entity import Entity
16 | from homeassistant.helpers.entity_platform import AddEntitiesCallback
17 | from homeassistant.helpers.typing import StateType
18 | from homeassistant.helpers.update_coordinator import CoordinatorEntity
19 |
20 | from . import LoadSheddingDevice
21 | from .const import (
22 | ATTR_AREA,
23 | ATTR_AREA_ID,
24 | ATTR_END_IN,
25 | ATTR_END_TIME,
26 | ATTR_FORECAST,
27 | ATTR_LAST_UPDATE,
28 | ATTR_NEXT_END_TIME,
29 | ATTR_NEXT_STAGE,
30 | ATTR_NEXT_START_TIME,
31 | ATTR_PLANNED,
32 | ATTR_QUOTA,
33 | ATTR_SCHEDULE,
34 | ATTR_STAGE,
35 | ATTR_START_IN,
36 | ATTR_START_TIME,
37 | ATTRIBUTION,
38 | DOMAIN,
39 | NAME,
40 | )
41 |
42 | DEFAULT_DATA = {
43 | ATTR_STAGE: Stage.NO_LOAD_SHEDDING.value,
44 | ATTR_START_TIME: 0,
45 | ATTR_END_TIME: 0,
46 | ATTR_END_IN: 0,
47 | ATTR_START_IN: 0,
48 | ATTR_NEXT_STAGE: Stage.NO_LOAD_SHEDDING.value,
49 | ATTR_NEXT_START_TIME: 0,
50 | ATTR_NEXT_END_TIME: 0,
51 | ATTR_PLANNED: [],
52 | ATTR_FORECAST: [],
53 | ATTR_SCHEDULE: [],
54 | ATTR_LAST_UPDATE: None,
55 | ATTR_ATTRIBUTION: ATTRIBUTION.format(provider="sepush.co.za"),
56 | }
57 |
58 | CLEAN_DATA = {
59 | ATTR_PLANNED: [],
60 | ATTR_FORECAST: [],
61 | ATTR_SCHEDULE: [],
62 | }
63 |
64 |
65 | async def async_setup_entry(
66 | hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
67 | ) -> None:
68 | """Add LoadShedding entities from a config_entry."""
69 | coordinators = hass.data.get(DOMAIN, {}).get(entry.entry_id)
70 | stage_coordinator = coordinators.get(ATTR_STAGE)
71 | area_coordinator = coordinators.get(ATTR_AREA)
72 | quota_coordinator = coordinators.get(ATTR_QUOTA)
73 |
74 | entities: list[Entity] = []
75 | for idx in stage_coordinator.data:
76 | stage_entity = LoadSheddingStageSensorEntity(stage_coordinator, idx)
77 | entities.append(stage_entity)
78 |
79 | for area in area_coordinator.areas:
80 | area_entity = LoadSheddingAreaSensorEntity(area_coordinator, area)
81 | entities.append(area_entity)
82 |
83 | quota_entity = LoadSheddingQuotaSensorEntity(quota_coordinator)
84 | entities.append(quota_entity)
85 |
86 | async_add_entities(entities)
87 |
88 |
89 | @dataclass
90 | class LoadSheddingSensorDescription(SensorEntityDescription):
91 | """Class describing LoadShedding sensor entities."""
92 |
93 |
94 | class LoadSheddingStageSensorEntity(
95 | LoadSheddingDevice, CoordinatorEntity, RestoreSensor
96 | ):
97 | """Define a LoadShedding Stage entity."""
98 |
99 | def __init__(self, coordinator: CoordinatorEntity, idx: str) -> None:
100 | """Initialize."""
101 | super().__init__(coordinator)
102 | self.idx = idx
103 | self.data = self.coordinator.data.get(self.idx)
104 |
105 | self.entity_description = LoadSheddingSensorDescription(
106 | key=f"{DOMAIN} stage",
107 | icon="mdi:lightning-bolt-outline",
108 | name=f"{DOMAIN} stage",
109 | entity_registry_enabled_default=True,
110 | )
111 | self._attr_unique_id = f"{self.coordinator.config_entry.entry_id}_{self.idx}"
112 | self.entity_id = f"{DOMAIN}.{DOMAIN}_stage_{idx}"
113 |
114 | async def async_added_to_hass(self) -> None:
115 | """Handle entity which will be added."""
116 | if restored_data := await self.async_get_last_sensor_data():
117 | self._attr_native_value = restored_data.native_value
118 | await super().async_added_to_hass()
119 |
120 | @property
121 | def name(self) -> str | None:
122 | """Return the stage sensor name."""
123 | name = self.data.get("name", "Unknown")
124 | return f"{name} Stage"
125 |
126 | @property
127 | def native_value(self) -> StateType:
128 | """Return the stage state."""
129 | if not self.data:
130 | return self._attr_native_value
131 |
132 | planned = self.data.get(ATTR_PLANNED, [])
133 | if not planned:
134 | return Stage.NO_LOAD_SHEDDING
135 |
136 | stage = planned[0].get(ATTR_STAGE, Stage.NO_LOAD_SHEDDING)
137 |
138 | self._attr_native_value = cast(StateType, stage)
139 | return self._attr_native_value
140 |
141 | @property
142 |     def extra_state_attributes(self) -> dict[str, Any]:
143 | """Return the state attributes."""
144 | if not hasattr(self, "_attr_extra_state_attributes"):
145 | self._attr_extra_state_attributes = {}
146 |
147 | self.data = self.coordinator.data.get(self.idx)
148 | if not self.data:
149 | return self._attr_extra_state_attributes
150 |
154 | now = datetime.now(UTC)
155 | data = dict(self._attr_extra_state_attributes)
156 | if events := self.data.get(ATTR_PLANNED, []):
157 | data[ATTR_PLANNED] = []
158 | for event in events:
159 | if ATTR_END_TIME in event and event.get(ATTR_END_TIME) < now:
160 | continue
161 |
162 | planned = {
163 | ATTR_STAGE: event.get(ATTR_STAGE),
164 | ATTR_START_TIME: event.get(ATTR_START_TIME),
165 | }
166 | if ATTR_END_TIME in event:
167 | planned[ATTR_END_TIME] = event.get(ATTR_END_TIME)
168 |
169 | data[ATTR_PLANNED].append(planned)
170 |
171 | cur_stage = Stage.NO_LOAD_SHEDDING
172 |
173 | planned = []
174 |         if data.get(ATTR_PLANNED):
175 | planned = data[ATTR_PLANNED]
176 | cur_stage = planned[0].get(ATTR_STAGE, Stage.NO_LOAD_SHEDDING)
177 |
178 | attrs = get_sensor_attrs(planned, cur_stage)
179 | attrs[ATTR_PLANNED] = planned
180 | attrs[ATTR_LAST_UPDATE] = self.coordinator.last_update
181 | attrs = clean(attrs)
182 |
183 | self._attr_extra_state_attributes.update(attrs)
184 | return self._attr_extra_state_attributes
185 |
186 | @callback
187 | def _handle_coordinator_update(self) -> None:
188 | """Handle updated data from the coordinator."""
189 | if data := self.coordinator.data:
190 | self.data = data.get(self.idx)
191 | # Explicitly get the native value to force state update
192 | self._attr_native_value = self.native_value
193 | self.async_write_ha_state()
194 |
195 |
196 | class LoadSheddingAreaSensorEntity(
197 | LoadSheddingDevice, CoordinatorEntity, RestoreSensor
198 | ):
199 | """Define a LoadShedding Area sensor entity."""
200 |
201 | def __init__(self, coordinator: CoordinatorEntity, area: Area) -> None:
202 | """Initialize."""
203 | super().__init__(coordinator)
204 | self.area = area
205 | self.data = self.coordinator.data.get(self.area.id)
206 |
207 | self.entity_description = LoadSheddingSensorDescription(
208 | key=f"{DOMAIN} schedule {area.id}",
209 | icon="mdi:calendar",
210 | name=f"{DOMAIN} schedule {area.name}",
211 | entity_registry_enabled_default=True,
212 | )
213 | self._attr_unique_id = (
214 | f"{self.coordinator.config_entry.entry_id}_sensor_{area.id}"
215 | )
216 | self.entity_id = f"{DOMAIN}.{DOMAIN}_area_{area.id}"
217 |
218 | async def async_added_to_hass(self) -> None:
219 | """Handle entity which will be added."""
220 | if restored_data := await self.async_get_last_sensor_data():
221 | self._attr_native_value = restored_data.native_value
222 | await super().async_added_to_hass()
223 |
224 | @property
225 | def name(self) -> str | None:
226 | """Return the area sensor name."""
227 | return self.area.name
228 |
229 | @property
230 | def native_value(self) -> StateType:
231 | """Return the area state."""
232 | if not self.data:
233 | return self._attr_native_value
234 |
235 | events = self.data.get(ATTR_FORECAST, [])
236 |
237 | if not events:
238 | return STATE_OFF
239 |
240 | now = datetime.now(UTC)
241 |
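        # Walk the forecast: skip past events; ON while inside an event's
        # window, OFF when the next event is still in the future or no
        # load shedding is forecast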
242 | for event in events:
243 | if ATTR_END_TIME in event and event.get(ATTR_END_TIME) < now:
244 | continue
245 |
246 | if event.get(ATTR_START_TIME) <= now <= event.get(ATTR_END_TIME):
247 | self._attr_native_value = cast(StateType, STATE_ON)
248 | break
249 |
250 | if event.get(ATTR_START_TIME) > now:
251 | self._attr_native_value = cast(StateType, STATE_OFF)
252 | break
253 |
254 | if event.get(ATTR_STAGE) == Stage.NO_LOAD_SHEDDING:
255 | self._attr_native_value = cast(StateType, STATE_OFF)
256 | break
257 |
258 | return self._attr_native_value
259 |
260 | @property
261 |     def extra_state_attributes(self) -> dict[str, Any]:
262 | """Return the state attributes."""
263 | if not hasattr(self, "_attr_extra_state_attributes"):
264 | self._attr_extra_state_attributes = {}
265 |
266 | if not self.data:
267 | return self._attr_extra_state_attributes
268 |
269 | now = datetime.now(UTC)
270 | data = dict(self._attr_extra_state_attributes)
271 | if events := self.data.get(ATTR_FORECAST, []):
272 | data[ATTR_FORECAST] = []
273 | for event in events:
274 | if ATTR_END_TIME in event and event.get(ATTR_END_TIME) < now:
275 | continue
276 |
277 | forecast = {
278 | ATTR_STAGE: event.get(ATTR_STAGE),
279 | ATTR_START_TIME: event.get(ATTR_START_TIME),
280 | ATTR_END_TIME: event.get(ATTR_END_TIME),
281 | }
282 |
283 | data[ATTR_FORECAST].append(forecast)
284 |
285 | forecast = []
286 | if ATTR_FORECAST in data:
287 | forecast = data[ATTR_FORECAST]
288 |
289 | attrs = get_sensor_attrs(forecast)
290 | attrs[ATTR_AREA_ID] = self.area.id
291 | attrs[ATTR_FORECAST] = forecast
292 | attrs[ATTR_LAST_UPDATE] = self.coordinator.last_update
293 | attrs = clean(attrs)
294 |
295 | self._attr_extra_state_attributes.update(attrs)
296 | return self._attr_extra_state_attributes
297 |
298 | @callback
299 | def _handle_coordinator_update(self) -> None:
300 | """Handle updated data from the coordinator."""
301 | if data := self.coordinator.data:
302 | self.data = data.get(self.area.id)
303 | # Explicitly get the native value to force state update
304 | self._attr_native_value = self.native_value
305 | self.async_write_ha_state()
306 |
307 |
308 | class LoadSheddingQuotaSensorEntity(
309 | LoadSheddingDevice, CoordinatorEntity, RestoreSensor
310 | ):
311 | """Define a LoadShedding Quota entity."""
312 |
313 | def __init__(self, coordinator: CoordinatorEntity) -> None:
314 | """Initialize the quota sensor."""
315 | super().__init__(coordinator)
316 | self.data = self.coordinator.data
317 |
318 | self.entity_description = LoadSheddingSensorDescription(
319 | key=f"{DOMAIN} SePush Quota",
320 | icon="mdi:api",
321 | name=f"{DOMAIN} SePush Quota",
322 | entity_registry_enabled_default=True,
323 | )
324 | self._attr_name = f"{NAME} SePush Quota"
325 | self._attr_unique_id = f"{self.coordinator.config_entry.entry_id}_se_push_quota"
326 | self.entity_id = f"{DOMAIN}.{DOMAIN}_sepush_api_quota"
327 |
328 | @property
329 | def name(self) -> str | None:
330 | """Return the quota sensor name."""
331 | return "SePush API Quota"
332 |
333 | @property
334 | def native_value(self) -> StateType:
335 | """Return the stage state."""
336 | if not self.data:
337 | return self._attr_native_value
338 |
339 | count = int(self.data.get("count", 0))
340 | self._attr_native_value = cast(StateType, count)
341 | return self._attr_native_value
342 |
343 | @property
344 |     def extra_state_attributes(self) -> dict[str, Any]:
345 | """Return the state attributes."""
346 | if not hasattr(self, "_attr_extra_state_attributes"):
347 | self._attr_extra_state_attributes = {}
348 |
349 | if not self.data:
350 | return self._attr_extra_state_attributes
351 |
352 |         attrs = dict(self.data)  # copy; avoid mutating coordinator data
353 | attrs[ATTR_LAST_UPDATE] = self.coordinator.last_update
354 | attrs = clean(attrs)
355 |
356 | self._attr_extra_state_attributes.update(attrs)
357 | return self._attr_extra_state_attributes
358 |
359 | @callback
360 | def _handle_coordinator_update(self) -> None:
361 | """Handle updated data from the coordinator."""
362 | if data := self.coordinator.data:
363 | self.data = data
364 | # Explicitly get the native value to force state update
365 | self._attr_native_value = self.native_value
366 | self.async_write_ha_state()
367 |
368 |
369 | def stage_forecast_to_data(stage_forecast: list) -> list:
370 | """Convert stage forecast to serializable data."""
371 | data = []
372 | for forecast in stage_forecast:
373 | transformed_list = [
374 | {
375 | ATTR_STAGE: forecast.get(ATTR_STAGE).value,
376 | ATTR_START_TIME: schedule[0].isoformat(),
377 | ATTR_END_TIME: schedule[1].isoformat(),
378 | }
379 | for schedule in forecast.get(ATTR_SCHEDULE, [])
380 | ]
381 | data.extend(transformed_list)
382 | return data
383 |
384 |
385 | def get_sensor_attrs(forecast: list, stage: Stage = Stage.NO_LOAD_SHEDDING) -> dict:
386 | """Get sensor attributes for the given forecast and stage."""
387 | if not forecast:
388 | return {
389 | ATTR_STAGE: stage.value,
390 | }
391 |
392 | now = datetime.now(UTC)
393 | data = dict(DEFAULT_DATA)
394 | data[ATTR_STAGE] = stage.value
395 |
396 | cur, nxt = {}, {}
397 | if now < forecast[0].get(ATTR_START_TIME):
398 | # before
399 | nxt = forecast[0]
400 | elif forecast[0].get(ATTR_START_TIME) <= now <= forecast[0].get(ATTR_END_TIME, now):
401 | # during
402 | cur = forecast[0]
403 | if len(forecast) > 1:
404 | nxt = forecast[1]
405 | elif forecast[0].get(ATTR_END_TIME) < now:
406 | # after
407 | if len(forecast) > 1:
408 | nxt = forecast[1]
409 |
410 | if cur:
411 | try:
412 | data[ATTR_STAGE] = cur.get(ATTR_STAGE).value
413 | except AttributeError:
414 | data[ATTR_STAGE] = Stage.NO_LOAD_SHEDDING.value
415 | data[ATTR_START_TIME] = cur.get(ATTR_START_TIME).isoformat()
416 | if ATTR_END_TIME in cur:
417 | data[ATTR_END_TIME] = cur.get(ATTR_END_TIME).isoformat()
418 |
419 | end_time = cur.get(ATTR_END_TIME)
420 | ends_in = end_time - now
421 | ends_in = ends_in - timedelta(microseconds=ends_in.microseconds)
422 | ends_in = int(ends_in.total_seconds() / 60) # minutes
423 | data[ATTR_END_IN] = ends_in
424 |
425 | if nxt:
426 | try:
427 |             data[ATTR_NEXT_STAGE] = nxt.get(ATTR_STAGE).value
428 | except AttributeError:
429 | data[ATTR_NEXT_STAGE] = Stage.NO_LOAD_SHEDDING.value
430 |
431 | data[ATTR_NEXT_START_TIME] = nxt.get(ATTR_START_TIME).isoformat()
432 | if ATTR_END_TIME in nxt:
433 | data[ATTR_NEXT_END_TIME] = nxt.get(ATTR_END_TIME).isoformat()
434 |
435 | start_time = nxt.get(ATTR_START_TIME)
436 | starts_in = start_time - now
437 | starts_in = starts_in - timedelta(microseconds=starts_in.microseconds)
438 | starts_in = int(starts_in.total_seconds() / 60) # minutes
439 | data[ATTR_START_IN] = starts_in
440 |
441 | return data
442 |
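# Worked example of the before/during/after branching (illustrative times, all
# UTC): with a single forecast entry running 14:00-16:00 and now = 14:07:42,
# the entry is "during", so ATTR_STAGE comes from it and ATTR_END_IN is
#   16:00:00 - 14:07:42 = 1:52:18 -> int(6738 / 60) = 112 minutes.
# Were now = 13:30:00 instead, the entry would be "next" and ATTR_START_IN
# would be int(1800 / 60) = 30 minutes.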
443 |
444 | def clean(data: dict) -> dict:
445 | """Remove default values from dict."""
446 | for key, value in CLEAN_DATA.items():
447 | if key not in data:
448 | continue
449 | if data[key] == value:
450 | del data[key]
451 |
452 | return data
453 |
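# Sketch of the intended effect, assuming CLEAN_DATA (defined earlier in this
# module) maps attribute keys to their default values:
#   >>> clean({"stage": 0, "start_time": "2023-06-01T10:00:00+00:00"})
#   {'start_time': '2023-06-01T10:00:00+00:00'}   # if CLEAN_DATA == {"stage": 0}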
--------------------------------------------------------------------------------
/custom_components/load_shedding/config_flow.py:
--------------------------------------------------------------------------------
1 | """Adds config flow for LoadShedding."""
2 |
3 | from __future__ import annotations
4 |
5 | import logging
6 | from typing import Any
7 |
8 | from load_shedding import Provider, Province, get_areas
9 | from load_shedding.libs.sepush import SePush, SePushError
10 | from load_shedding.providers import ProviderError, Stage
11 | import voluptuous as vol
12 |
13 | from homeassistant import config_entries
14 | from homeassistant.config_entries import (
15 | ConfigEntry,
16 | ConfigFlow,
17 | OptionsFlow,
18 | OptionsFlowWithConfigEntry,
19 | )
20 | from homeassistant.const import CONF_API_KEY, CONF_DESCRIPTION, CONF_ID, CONF_NAME
21 | from homeassistant.core import callback
22 | from homeassistant.data_entry_flow import FlowResult
23 |
24 | from .const import (
25 | CONF_ACTION,
26 | CONF_ADD_AREA,
27 | CONF_AREA_ID,
28 | CONF_AREAS,
29 | CONF_DELETE_AREA,
30 | CONF_MIN_EVENT_DURATION,
31 | CONF_MULTI_STAGE_EVENTS,
32 | CONF_SEARCH,
33 | CONF_SETUP_API,
34 | DOMAIN,
35 | NAME,
36 | )
37 |
38 | _LOGGER = logging.getLogger(__name__)
39 |
40 |
41 | @config_entries.HANDLERS.register(DOMAIN)
42 | class LoadSheddingFlowHandler(ConfigFlow, domain=DOMAIN):
43 | """Config flow for LoadShedding."""
44 |
45 | VERSION = 5
46 |
47 | def __init__(self) -> None:
48 | """Initialize the flow handler."""
49 |         self.provider: Provider | None = None
50 | self.api_key: str = ""
51 | self.areas: dict = {}
52 |
53 | @staticmethod
54 | @callback
55 | def async_get_options_flow(config_entry: ConfigEntry) -> OptionsFlow:
56 | """Get the options flow."""
57 | return LoadSheddingOptionsFlowHandler(config_entry)
58 |
59 | @classmethod
60 | @callback
61 | def async_supports_options_flow(cls, config_entry: ConfigEntry) -> bool:
62 | """Return options flow support for this handler."""
63 | return True
64 |
65 | async def async_step_user(
66 | self, user_input: dict[str, Any] | None = None
67 | ) -> FlowResult:
68 | """Handle a flow initialized by the user."""
69 |
70 | await self._async_handle_discovery_without_unique_id()
71 | return await self.async_step_sepush()
72 |
73 | async def async_step_sepush(
74 | self, user_input: dict[str, Any] | None = None
75 | ) -> FlowResult:
76 | """Handle the flow step to configure SePush."""
77 | self.provider = Provider.SE_PUSH
78 | errors = {}
79 | data_schema = vol.Schema(
80 | {
81 | vol.Required(CONF_API_KEY): str,
82 | }
83 | )
84 |
85 | if not user_input:
86 | user_input = {}
87 |
88 | # API Key
89 | self.api_key = user_input.get(CONF_API_KEY, "")
90 |
91 | if self.api_key:
92 | try:
93 | # Validate the token by checking the allowance.
94 | sepush = SePush(token=self.api_key)
95 | await self.hass.async_add_executor_job(sepush.check_allowance)
96 | except SePushError as err:
97 | status_code = err.__cause__.args[0]
98 | if status_code == 400:
99 | errors["base"] = "sepush_400"
100 | elif status_code == 403:
101 | errors["base"] = "sepush_403"
102 | elif status_code == 429:
103 | errors["base"] = "sepush_429"
104 | elif status_code == 500:
105 | errors["base"] = "sepush_500"
106 | else:
107 | _LOGGER.error("Unable to initialise SePush API: %s", err)
108 | errors["base"] = "provider_error"
109 | else:
110 | return await self.async_step_lookup_areas(user_input)
111 |
112 | return self.async_show_form(
113 | step_id="sepush",
114 | data_schema=data_schema,
115 | errors=errors,
116 | )
117 |
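    # The status-code branching above could equally be table-driven; a sketch
    # of the same mapping (not the integration's current implementation):
    #   SEPUSH_ERRORS = {400: "sepush_400", 403: "sepush_403",
    #                    429: "sepush_429", 500: "sepush_500"}
    #   errors["base"] = SEPUSH_ERRORS.get(status_code, "provider_error")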
118 | async def async_step_lookup_areas(
119 | self, user_input: dict[str, Any] | None = None
120 | ) -> FlowResult:
121 | """Handle the flow step to search for and select an area."""
122 | errors = {}
123 |
124 | stages = {}
125 | for stage in [
126 | Stage.STAGE_1,
127 | Stage.STAGE_2,
128 | Stage.STAGE_3,
129 | Stage.STAGE_4,
130 | Stage.STAGE_5,
131 | Stage.STAGE_6,
132 | Stage.STAGE_7,
133 | Stage.STAGE_8,
134 | ]:
135 | stages[stage.value] = f"{stage}"
136 | data_schema = vol.Schema(
137 | {
138 | vol.Required(CONF_SEARCH): str,
139 | }
140 | )
141 |
142 | if not user_input:
143 | return self.async_show_form(
144 | step_id="lookup_areas",
145 | data_schema=data_schema,
146 | errors=errors,
147 | )
148 |
149 | data_schema = vol.Schema(
150 | {
151 | vol.Required(CONF_SEARCH, default=user_input.get(CONF_SEARCH)): str,
152 | }
153 | )
154 |
155 | search_text = user_input.get(CONF_SEARCH)
156 | if not search_text:
157 | return self.async_show_form(
158 | step_id="lookup_areas",
159 | data_schema=data_schema,
160 | errors=errors,
161 | )
162 |
163 | if not user_input.get(CONF_AREA_ID):
164 | area_ids = {}
165 | try:
166 | provider = self.provider(token=self.api_key)
167 | results = await self.hass.async_add_executor_job(
168 | get_areas, provider, search_text
169 | )
170 | except ProviderError as err:
171 | _LOGGER.debug("Provider error", exc_info=True)
172 | _LOGGER.error("Unable to initialise SePush API: %s", err)
173 | errors["base"] = "provider_error"
174 | else:
175 | self.areas = {}
176 | for area in results:
177 | self.areas[area.id] = area
178 |
179 | area_ids[area.id] = f"{area.name}"
180 |
181 | if area.municipality:
182 | area_ids[area.id] += f", {area.municipality}"
183 | if area.province is not Province.UNKNOWN:
184 | area_ids[area.id] += f", {area.province}"
185 |
186 | if not self.areas:
187 | errors[CONF_SEARCH] = "no_results_found"
188 |
189 | if not errors:
190 | data_schema = vol.Schema(
191 | {
192 | vol.Required(
193 | CONF_SEARCH, default=user_input.get(CONF_SEARCH)
194 | ): str,
195 | vol.Optional(CONF_AREA_ID): vol.In(area_ids),
196 | }
197 | )
198 |
199 | return self.async_show_form(
200 | step_id="lookup_areas",
201 | data_schema=data_schema,
202 | errors=errors,
203 | )
204 |
205 | return await self.async_step_select_area(user_input)
206 |
207 | async def async_step_select_area(
208 | self, user_input: dict[str, Any] | None = None
209 | ) -> FlowResult:
210 |         """Handle the flow step to create an area."""
211 | area_id = user_input.get(CONF_AREA_ID)
212 | area = self.areas.get(area_id)
213 |
214 | description = f"{area.name}"
215 |
216 | if area.municipality:
217 | description += f", {area.municipality}"
218 | if area.province is not Province.UNKNOWN:
219 | description += f", {area.province}"
220 |
221 | data = {}
222 | options = {
223 | CONF_API_KEY: self.api_key,
224 | CONF_AREAS: [
225 | {
226 | CONF_DESCRIPTION: description,
227 | CONF_NAME: area.name,
228 | CONF_ID: area.id,
229 | },
230 | ],
231 | }
232 |
233 | return self.async_create_entry(
234 | title=NAME,
235 | data=data,
236 | description="Load Shedding configuration",
237 | options=options,
238 | )
239 |
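# The created entry stores the API key and selected areas in options; an
# illustrative payload (area values are made up, and CONF_AREAS is assumed to
# resolve to "areas"):
#   {
#       "api_key": "<token>",
#       "areas": [
#           {"description": "Milnerton (7), City of Cape Town, Western Cape",
#            "name": "Milnerton (7)", "id": "capetown-7-milnerton"},
#       ],
#   }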
240 |
241 | class LoadSheddingOptionsFlowHandler(OptionsFlowWithConfigEntry):
242 | """Load Shedding config flow options handler."""
243 |
244 | def __init__(self, config_entry: ConfigEntry) -> None:
245 | """Initialize options flow."""
246 | super().__init__(config_entry)
247 | self.provider = Provider.SE_PUSH
248 | self.api_key = config_entry.options.get(CONF_API_KEY)
249 | self.areas = {}
250 |
251 | async def async_step_init(
252 | self, user_input: dict[str, Any] | None = None
253 | ) -> FlowResult: # pylint: disable=unused-argument
254 | """Manage the options."""
255 |
256 | CONF_ACTIONS = {
257 | CONF_SETUP_API: "Configure API",
258 | CONF_ADD_AREA: "Add area",
259 | CONF_DELETE_AREA: "Remove area",
260 | }
261 |
262 | if user_input is not None:
263 | if user_input.get(CONF_ACTION) == CONF_SETUP_API:
264 | return await self.async_step_sepush()
265 | if user_input.get(CONF_ACTION) == CONF_ADD_AREA:
266 | return await self.async_step_add_area()
267 | if user_input.get(CONF_ACTION) == CONF_DELETE_AREA:
268 | return await self.async_step_delete_area()
269 | self.options[CONF_MULTI_STAGE_EVENTS] = user_input.get(
270 | CONF_MULTI_STAGE_EVENTS
271 | )
272 | self.options[CONF_MIN_EVENT_DURATION] = user_input.get(
273 | CONF_MIN_EVENT_DURATION
274 | )
275 | return self.async_create_entry(title=NAME, data=self.options)
276 |
277 | OPTIONS_SCHEMA = vol.Schema(
278 | {
279 | vol.Optional(CONF_ACTION): vol.In(CONF_ACTIONS),
280 | vol.Optional(
281 | CONF_MULTI_STAGE_EVENTS,
282 | default=self.options.get(CONF_MULTI_STAGE_EVENTS, True),
283 | ): bool,
284 | vol.Optional(
285 | CONF_MIN_EVENT_DURATION,
286 | default=self.options.get(CONF_MIN_EVENT_DURATION, 31),
287 | ): int,
288 | }
289 | )
290 | return self.async_show_form(
291 | step_id="init",
292 | data_schema=OPTIONS_SCHEMA,
293 | )
294 |
295 | async def async_step_sepush(
296 | self, user_input: dict[str, Any] | None = None
297 | ) -> FlowResult:
298 | """Handle the flow step to configure SePush."""
299 | self.provider = Provider.SE_PUSH
300 |
301 | if not user_input:
302 | user_input = {}
303 |
304 | api_key = user_input.get(CONF_API_KEY)
305 | errors = {}
306 | if api_key:
307 | try:
308 | # Validate the token by checking the allowance.
309 | sepush = SePush(token=api_key)
310 | esp = await self.hass.async_add_executor_job(sepush.check_allowance)
311 | _LOGGER.debug("Validate API Key Response: %s", esp)
312 | except SePushError as err:
313 | status_code = err.__cause__.args[0]
314 | if status_code == 400:
315 | errors["base"] = "sepush_400"
316 | elif status_code == 403:
317 | errors["base"] = "sepush_403"
318 | elif status_code == 429:
319 | errors["base"] = "sepush_429"
320 | elif status_code == 500:
321 | errors["base"] = "sepush_500"
322 | else:
323 | _LOGGER.error("Unable to initialise SePush API: %s", err)
324 | errors["base"] = "provider_error"
325 | else:
326 | self.api_key = api_key
327 | self.options[CONF_API_KEY] = api_key
328 | return self.async_create_entry(title=NAME, data=self.options)
329 |
330 | data_schema = vol.Schema(
331 | {
332 | vol.Required(CONF_API_KEY, default=self.api_key): str,
333 | }
334 | )
335 | return self.async_show_form(
336 | step_id="sepush",
337 | data_schema=data_schema,
338 | errors=errors,
339 | )
340 |
341 | async def async_step_add_area(
342 | self, user_input: dict[str, Any] | None = None
343 | ) -> FlowResult:
344 | """Handle the flow step to search for and select an area."""
345 | return await self.async_step_lookup_areas(user_input=user_input)
346 |
347 | async def async_step_lookup_areas(
348 | self, user_input: dict[str, Any] | None = None
349 | ) -> FlowResult:
350 | """Handle the flow step to search for and select an area."""
351 | errors = {}
352 |
353 | stages = {}
354 | for stage in [
355 | Stage.STAGE_1,
356 | Stage.STAGE_2,
357 | Stage.STAGE_3,
358 | Stage.STAGE_4,
359 | Stage.STAGE_5,
360 | Stage.STAGE_6,
361 | Stage.STAGE_7,
362 | Stage.STAGE_8,
363 | ]:
364 | stages[stage.value] = f"{stage}"
365 |
366 | data_schema = vol.Schema(
367 | {
368 | vol.Required(CONF_SEARCH): str,
369 | }
370 | )
371 |
372 | if not user_input:
373 | return self.async_show_form(
374 | step_id="lookup_areas",
375 | data_schema=data_schema,
376 | errors=errors,
377 | )
378 |
379 | data_schema = vol.Schema(
380 | {
381 | vol.Required(CONF_SEARCH, default=user_input.get(CONF_SEARCH)): str,
382 | }
383 | )
384 |
385 | search_text = user_input.get(CONF_SEARCH)
386 | if not search_text:
387 | return self.async_show_form(
388 | step_id="lookup_areas",
389 | data_schema=data_schema,
390 | errors=errors,
391 | )
392 |
393 | if not user_input.get(CONF_AREA_ID):
394 | area_ids = {}
395 | try:
396 | provider = self.provider(token=self.api_key)
397 | results = await self.hass.async_add_executor_job(
398 | get_areas, provider, search_text
399 | )
400 | except ProviderError:
401 | _LOGGER.debug("Provider error", exc_info=True)
402 | errors["base"] = "provider_error"
403 | else:
404 | self.areas = {}
405 | for area in results:
406 | self.areas[area.id] = area
407 |
408 | area_ids[area.id] = f"{area.name}"
409 |
410 | if area.municipality:
411 | area_ids[area.id] += f", {area.municipality}"
412 | if area.province is not Province.UNKNOWN:
413 | area_ids[area.id] += f", {area.province}"
414 |
415 | if not self.areas:
416 | errors[CONF_SEARCH] = "no_results_found"
417 |
418 | if not errors:
419 | data_schema = vol.Schema(
420 | {
421 | vol.Required(
422 | CONF_SEARCH, default=user_input.get(CONF_SEARCH)
423 | ): str,
424 | vol.Optional(CONF_AREA_ID): vol.In(area_ids),
425 | }
426 | )
427 |
428 | return self.async_show_form(
429 | step_id="lookup_areas",
430 | data_schema=data_schema,
431 | errors=errors,
432 | )
433 |
434 | return await self.async_step_select_area(user_input)
435 |
436 | async def async_step_select_area(
437 | self, user_input: dict[str, Any] | None = None
438 | ) -> FlowResult:
439 |         """Handle the flow step to create an area."""
440 | area = self.areas.get(user_input.get(CONF_AREA_ID))
441 |
442 | description = f"{area.name}"
443 | if area.municipality:
444 | description += f", {area.municipality}"
445 | if area.province is not Province.UNKNOWN:
446 | description += f", {area.province}"
447 |
448 | self.options[CONF_AREAS].append(
449 | {
450 | CONF_DESCRIPTION: description,
451 | CONF_NAME: area.name,
452 | CONF_ID: area.id,
453 | }
454 | )
455 |
456 | result = self.async_create_entry(title=NAME, data=self.options)
457 | return result
458 |
459 | async def async_step_delete_area(
460 | self, user_input: dict[str, Any] | None = None
461 | ) -> FlowResult:
462 | """Handle the flow step to delete an area."""
463 |
464 | if user_input is None:
465 | area_idx = {}
466 | for idx, area in enumerate(self.options.get(CONF_AREAS, [])):
467 | area_idx[idx] = area.get(CONF_NAME)
468 |
469 | data_schema = vol.Schema(
470 | {
471 | vol.Optional(CONF_AREA_ID): vol.In(area_idx),
472 | }
473 | )
474 |
475 | return self.async_show_form(
476 | step_id="delete_area",
477 | data_schema=data_schema,
478 | )
479 | else:
480 | new_areas = []
481 | for idx, area in enumerate(self.options.get(CONF_AREAS, [])):
482 | if idx == user_input.get(CONF_AREA_ID):
483 | continue
484 | new_areas.append(area)
485 |
486 | self.options[CONF_AREAS] = new_areas
487 | return self.async_create_entry(title=NAME, data=self.options)
488 |
--------------------------------------------------------------------------------
/custom_components/load_shedding/__init__.py:
--------------------------------------------------------------------------------
1 | """The LoadShedding component."""
2 |
3 | from __future__ import annotations
4 |
5 | from datetime import UTC, datetime, timedelta, timezone
6 | import logging
7 | from typing import Any
8 |
9 | from load_shedding.libs.sepush import SePush, SePushError
10 | from load_shedding.providers import Area, Stage
11 | import urllib3
12 |
13 | from homeassistant.config_entries import ConfigEntry
14 | from homeassistant.const import (
15 | ATTR_IDENTIFIERS,
16 | ATTR_MANUFACTURER,
17 | ATTR_MODEL,
18 | ATTR_NAME,
19 | ATTR_SW_VERSION,
20 | ATTR_VIA_DEVICE,
21 | CONF_API_KEY,
22 | CONF_ID,
23 | CONF_NAME,
24 | CONF_SCAN_INTERVAL,
25 | Platform,
26 | )
27 | from homeassistant.core import HomeAssistant
28 | from homeassistant.helpers.entity import DeviceInfo, Entity
29 | from homeassistant.helpers.typing import ConfigType
30 | from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
31 |
32 | from .const import (
33 | API,
34 | AREA_UPDATE_INTERVAL,
35 | ATTR_AREA,
36 | ATTR_END_TIME,
37 | ATTR_EVENTS,
38 | ATTR_FORECAST,
39 | ATTR_PLANNED,
40 | ATTR_QUOTA,
41 | ATTR_SCHEDULE,
42 | ATTR_STAGE,
43 | ATTR_START_TIME,
44 | CONF_AREAS,
45 | CONF_MIN_EVENT_DURATION,
46 | DEFAULT_SCAN_INTERVAL,
47 | DOMAIN,
48 | MANUFACTURER,
49 | NAME,
50 | QUOTA_UPDATE_INTERVAL,
51 | STAGE_UPDATE_INTERVAL,
52 | VERSION,
53 | )
54 |
55 | _LOGGER = logging.getLogger(__name__)
56 |
57 | PLATFORMS = [Platform.CALENDAR, Platform.SENSOR]
58 |
59 |
60 | async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
61 |     """Set up the integration; YAML configuration is not supported."""
62 | return True
63 |
64 |
65 | async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
66 | """Set up LoadShedding as config entry."""
67 | if not hass.data.get(DOMAIN):
68 | hass.data.setdefault(DOMAIN, {})
69 |
70 |     sepush: SePush | None = None
71 |     if api_key := config_entry.options.get(CONF_API_KEY):
72 |         sepush = SePush(token=api_key)
73 | if not sepush:
74 | return False
75 |
76 | stage_coordinator = LoadSheddingStageCoordinator(hass, sepush)
77 | stage_coordinator.update_interval = timedelta(
78 | seconds=config_entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
79 | )
80 |
81 | area_coordinator = LoadSheddingAreaCoordinator(
82 | hass, sepush, stage_coordinator=stage_coordinator
83 | )
84 | area_coordinator.update_interval = timedelta(
85 | seconds=config_entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
86 | )
87 | for conf in config_entry.options.get(CONF_AREAS, []):
88 | area = Area(
89 | id=conf.get(CONF_ID),
90 | name=conf.get(CONF_NAME),
91 | )
92 | area_coordinator.add_area(area)
93 | if not area_coordinator.areas:
94 | return False
95 |
96 | quota_coordinator = LoadSheddingQuotaCoordinator(hass, sepush)
97 | quota_coordinator.update_interval = timedelta(seconds=QUOTA_UPDATE_INTERVAL)
98 |
99 | hass.data[DOMAIN][config_entry.entry_id] = {
100 | ATTR_STAGE: stage_coordinator,
101 | ATTR_AREA: area_coordinator,
102 | ATTR_QUOTA: quota_coordinator,
103 | }
104 |
105 | config_entry.async_on_unload(config_entry.add_update_listener(update_listener))
106 |
107 | await stage_coordinator.async_config_entry_first_refresh()
108 | await area_coordinator.async_config_entry_first_refresh()
109 | await quota_coordinator.async_config_entry_first_refresh()
110 | await hass.config_entries.async_forward_entry_setups(config_entry, PLATFORMS)
111 |
112 | return True
113 |
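# Platforms look the coordinators back up from hass.data; a minimal sketch of
# what a platform's async_setup_entry does with the dict stored above:
#   coordinators = hass.data[DOMAIN][config_entry.entry_id]
#   stage_coordinator = coordinators[ATTR_STAGE]
#   area_coordinator = coordinators[ATTR_AREA]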
114 |
115 | async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
116 | """Unload Load Shedding Entry from config_entry."""
117 | unload_ok = await hass.config_entries.async_unload_platforms(
118 | config_entry, PLATFORMS
119 | )
120 | return unload_ok
121 |
122 |
123 | async def async_reload_entry(hass: HomeAssistant, config_entry: ConfigEntry):
124 | """Reload config entry."""
125 | await hass.config_entries.async_reload(config_entry.entry_id)
126 |
127 |
128 | async def update_listener(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
129 | """Update listener."""
130 | return await hass.config_entries.async_reload(config_entry.entry_id)
131 |
132 |
133 | async def async_migrate_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
134 | """Migrate old entry."""
135 | LATEST_VERSION = 1
136 | LATEST_MINOR_VERSION = 4
137 | if (
138 | config_entry.version == LATEST_VERSION
139 | and config_entry.minor_version == LATEST_MINOR_VERSION
140 | ):
141 | return False
142 |
143 | _LOGGER.debug(
144 | "Migrating from version %s to %s", config_entry.version, LATEST_VERSION
145 | )
146 |
147 | if config_entry.version == 3:
148 | old_data = {**config_entry.data}
149 | old_options = {**config_entry.options}
150 | new_data = {}
151 | new_options = {
152 | CONF_API_KEY: old_data.get(CONF_API_KEY),
153 | CONF_AREAS: old_options.get(CONF_AREAS, {}),
154 | }
155 |
156 | hass.config_entries.async_update_entry(
157 | config_entry,
158 | data=new_data,
159 | options=new_options,
160 | version=1,
161 | minor_version=4,
162 | )
163 |
164 | if config_entry.version == 4:
165 | old_data = {**config_entry.data}
166 | old_options = {**config_entry.options}
167 | new_data = {}
168 | new_options = {
169 | CONF_API_KEY: old_options.get(CONF_API_KEY),
170 | CONF_AREAS: [],
171 | }
172 | for field in old_options:
173 | if field == CONF_AREAS:
174 | areas = old_options.get(CONF_AREAS, {})
175 | for area_id in areas:
176 | new_options[CONF_AREAS].append(areas[area_id])
177 | continue
178 |
179 | value = old_options.get(field)
180 | if value is not None:
181 | new_options[field] = value
182 |
183 | hass.config_entries.async_update_entry(
184 | config_entry,
185 | data=new_data,
186 | options=new_options,
187 | version=1,
188 | minor_version=5,
189 | )
190 |
191 | _LOGGER.info("Migration to version %s successful", config_entry.version)
192 | return True
193 |
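# Illustrative shape of the v4 -> (1, 5) migration above: areas stored as a
# dict keyed by area id are flattened into a list (values are made up):
#   old options: {"api_key": "<token>",
#                 "areas": {"capetown-7-milnerton": {"id": "...", "name": "..."}}}
#   new options: {"api_key": "<token>",
#                 "areas": [{"id": "...", "name": "..."}]}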
194 |
195 | class LoadSheddingStageCoordinator(DataUpdateCoordinator[dict[str, Any]]):
196 | """Class to manage fetching LoadShedding Stage."""
197 |
198 | def __init__(self, hass: HomeAssistant, sepush: SePush) -> None:
199 | """Initialize the stage coordinator."""
200 | super().__init__(hass, _LOGGER, name=f"{DOMAIN}")
201 | self.data = {}
202 | self.sepush = sepush
203 | self.last_update: datetime | None = None
204 |
205 | async def _async_update_data(self) -> dict:
206 | """Retrieve latest load shedding data."""
207 |
208 | now = datetime.now(UTC).replace(microsecond=0)
209 | diff = 0
210 | if self.last_update is not None:
211 | diff = (now - self.last_update).seconds
212 |
213 | if 0 < diff < STAGE_UPDATE_INTERVAL:
214 | return self.data
215 |
216 | try:
217 | stage = await self.async_update_stage()
218 | except SePushError as err:
219 | _LOGGER.error("Unable to get stage: %s", err)
220 | self.data = {}
221 | except UpdateFailed as err:
222 | _LOGGER.exception("Unable to get stage: %s", err)
223 | self.data = {}
224 | else:
225 | self.data = stage
226 | self.last_update = now
227 |
228 | return self.data
229 |
230 | async def async_update_stage(self) -> dict:
231 | """Retrieve latest stage."""
232 | now = datetime.now(UTC).replace(microsecond=0)
233 | esp = await self.hass.async_add_executor_job(self.sepush.status)
234 |
235 | data = {}
236 | statuses = esp.get("status", {})
237 | for idx, area in statuses.items():
238 | stage = Stage(int(area.get("stage", "0")))
239 | start_time = datetime.fromisoformat(area.get("stage_updated"))
240 | start_time = start_time.replace(second=0, microsecond=0)
241 | planned = [
242 | {
243 | ATTR_STAGE: stage,
244 | ATTR_START_TIME: start_time.astimezone(UTC),
245 | }
246 | ]
247 |
248 | next_stages = area.get("next_stages", [])
249 | for i, next_stage in enumerate(next_stages):
250 | # Prev
251 | prev_end = datetime.fromisoformat(
252 | next_stage.get("stage_start_timestamp")
253 | )
254 | prev_end = prev_end.replace(second=0, microsecond=0)
255 | planned[i][ATTR_END_TIME] = prev_end.astimezone(UTC)
256 |
257 | # Next
258 | stage = Stage(int(next_stage.get("stage", "0")))
259 | start_time = datetime.fromisoformat(
260 | next_stage.get("stage_start_timestamp")
261 | )
262 | start_time = start_time.replace(second=0, microsecond=0)
263 | planned.append(
264 | {
265 | ATTR_STAGE: stage,
266 | ATTR_START_TIME: start_time.astimezone(UTC),
267 | }
268 | )
269 |
270 | filtered = []
271 | for stage in planned:
272 | if ATTR_END_TIME not in stage:
273 | stage[ATTR_END_TIME] = stage[ATTR_START_TIME] + timedelta(days=7)
274 | if ATTR_END_TIME in stage and stage.get(ATTR_END_TIME) >= now:
275 | filtered.append(stage)
276 |
277 | data[idx] = {
278 | ATTR_NAME: area.get("name", ""),
279 | ATTR_PLANNED: filtered,
280 | }
281 |
282 | return data
283 |
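    # The keys read above imply a SePush /status payload of roughly this shape
    # (values are illustrative):
    #   {"status": {"eskom": {"name": "National", "stage": "2",
    #                         "stage_updated": "2023-06-01T08:00:00+02:00",
    #                         "next_stages": [{"stage": "3",
    #                                          "stage_start_timestamp": "..."}]}}}
    # Each planned window ends where the next one starts; the final window gets
    # a synthetic end of start + 7 days, and windows that already ended are
    # dropped.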
284 |
285 | class LoadSheddingAreaCoordinator(DataUpdateCoordinator[dict[str, Any]]):
286 | """Class to manage fetching LoadShedding Area."""
287 |
288 | def __init__(
289 | self,
290 | hass: HomeAssistant,
291 | sepush: SePush,
292 | stage_coordinator: DataUpdateCoordinator,
293 | ) -> None:
294 | """Initialize the area coordinator."""
295 | super().__init__(hass, _LOGGER, name=f"{DOMAIN}")
296 | self.data = {}
297 | self.sepush = sepush
298 | self.last_update: datetime | None = None
299 | self.areas: list[Area] = []
300 | self.stage_coordinator = stage_coordinator
301 |
302 |     def add_area(self, area: Area | None = None) -> None:
303 |         """Add an area to update."""
304 | self.areas.append(area)
305 |
306 | async def _async_update_data(self) -> dict:
307 | """Retrieve latest load shedding data."""
308 |
309 | now = datetime.now(UTC).replace(microsecond=0)
310 | diff = 0
311 | if self.last_update is not None:
312 | diff = (now - self.last_update).seconds
313 |
314 | if 0 < diff < AREA_UPDATE_INTERVAL:
315 | await self.async_area_forecast()
316 | return self.data
317 |
318 | try:
319 | area = await self.async_update_area()
320 | except SePushError as err:
321 | _LOGGER.error("Unable to get area schedule: %s", err)
322 | self.data = {}
323 | except UpdateFailed as err:
324 | _LOGGER.exception("Unable to get area schedule: %s", err)
325 | self.data = {}
326 | else:
327 | self.data = area
328 | self.last_update = now
329 |
330 | await self.async_area_forecast()
331 | return self.data
332 |
333 | async def async_update_area(self) -> dict:
334 | """Retrieve area data."""
335 | area_id_data: dict = {}
336 |
337 | for area in self.areas:
338 | esp = await self.hass.async_add_executor_job(self.sepush.area, area.id)
339 |
340 | # Get events for area
341 | events = []
342 | for event in esp.get("events", {}):
343 | note = event.get("note")
344 | parts = str(note).split(" ")
345 | try:
346 | stage = Stage(int(parts[1]))
347 |             except (IndexError, ValueError):
348 | stage = Stage.NO_LOAD_SHEDDING
349 | if note == str(Stage.LOAD_REDUCTION):
350 | stage = Stage.LOAD_REDUCTION
351 |
352 | start = datetime.fromisoformat(event.get("start")).astimezone(UTC)
353 | end = datetime.fromisoformat(event.get("end")).astimezone(UTC)
354 |
355 | events.append(
356 | {
357 | ATTR_STAGE: stage,
358 | ATTR_START_TIME: start,
359 | ATTR_END_TIME: end,
360 | }
361 | )
362 |
363 | # Get schedule for area
364 | stage_schedule = {}
365 | for day in esp.get("schedule", {}).get("days", []):
366 | date = datetime.strptime(day.get("date"), "%Y-%m-%d")
367 | stage_timeslots = day.get("stages", [])
368 | for i, timeslots in enumerate(stage_timeslots):
369 | stage = Stage(i + 1)
370 | if stage not in stage_schedule:
371 | stage_schedule[stage] = []
372 | for timeslot in timeslots:
373 | start_str, end_str = timeslot.strip().split("-")
374 | start = utc_dt(date, datetime.strptime(start_str, "%H:%M"))
375 | end = utc_dt(date, datetime.strptime(end_str, "%H:%M"))
376 | if end < start:
377 | end = end + timedelta(days=1)
378 | stage_schedule[stage].append(
379 | {
380 | ATTR_STAGE: stage,
381 | ATTR_START_TIME: start,
382 | ATTR_END_TIME: end,
383 | }
384 | )
385 |
386 | area_id_data[area.id] = {
387 | ATTR_EVENTS: events,
388 | ATTR_SCHEDULE: stage_schedule,
389 | }
390 |
391 | return area_id_data
392 |
393 | async def async_area_forecast(self) -> None:
394 | """Derive area forecast from planned stages and area schedule."""
395 |
396 | cape_town = "capetown"
397 | eskom = "eskom"
398 |
399 | stages = self.stage_coordinator.data
400 | eskom_stages = stages.get(eskom, {}).get(ATTR_PLANNED, [])
401 | cape_town_stages = stages.get(cape_town, {}).get(ATTR_PLANNED, [])
402 |
403 | for area_id, data in self.data.items():
404 | stage_schedules = data.get(ATTR_SCHEDULE)
405 |
406 | planned_stages = (
407 | cape_town_stages if area_id.startswith(cape_town) else eskom_stages
408 | )
409 | forecast = []
410 | for planned in planned_stages:
411 | planned_stage = planned.get(ATTR_STAGE)
412 | planned_start_time = planned.get(ATTR_START_TIME)
413 | planned_end_time = planned.get(ATTR_END_TIME)
414 |
415 | if planned_stage in [Stage.NO_LOAD_SHEDDING]:
416 | continue
417 |
418 | schedule = stage_schedules.get(planned_stage, [])
419 |
420 | for timeslot in schedule:
421 | start_time = timeslot.get(ATTR_START_TIME)
422 | end_time = timeslot.get(ATTR_END_TIME)
423 |
424 | if start_time >= planned_end_time:
425 | continue
426 | if end_time <= planned_start_time:
427 | continue
428 |
429 | # Clip schedules that overlap planned start time and end time
430 | if (
431 | start_time <= planned_start_time
432 | and end_time <= planned_end_time
433 | ):
434 | start_time = planned_start_time
435 | if (
436 | start_time >= planned_start_time
437 | and end_time >= planned_end_time
438 | ):
439 | end_time = planned_end_time
440 |
441 | if start_time == end_time:
442 | continue
443 |
444 | # Minimum event duration
445 | min_event_dur = self.stage_coordinator.config_entry.options.get(
446 | CONF_MIN_EVENT_DURATION, 30
447 | ) # minutes
448 | if end_time - start_time < timedelta(minutes=min_event_dur):
449 | continue
450 |
451 | forecast.append(
452 | {
453 | ATTR_STAGE: planned_stage,
454 | ATTR_START_TIME: start_time,
455 | ATTR_END_TIME: end_time,
456 | }
457 | )
458 |
459 | if not forecast:
460 | events = data.get(ATTR_EVENTS)
461 |
462 | for timeslot in events:
463 | stage = timeslot.get(ATTR_STAGE)
464 | start_time = timeslot.get(ATTR_START_TIME)
465 | end_time = timeslot.get(ATTR_END_TIME)
466 |
467 | # Minimum event duration
468 | min_event_dur = self.stage_coordinator.config_entry.options.get(
469 | CONF_MIN_EVENT_DURATION, 30
470 | ) # minutes
471 | if end_time - start_time < timedelta(minutes=min_event_dur):
472 | continue
473 |
474 | forecast.append(
475 | {
476 | ATTR_STAGE: stage,
477 | ATTR_START_TIME: start_time,
478 | ATTR_END_TIME: end_time,
479 | }
480 | )
481 |
482 | data[ATTR_FORECAST] = forecast
483 |
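    # Worked example of the clipping above (illustrative times, all UTC): with
    # a planned Stage 4 window 10:00-16:00 and schedule timeslots 08:00-12:00
    # and 14:00-18:00, the forecast becomes 10:00-12:00 and 14:00-16:00; a
    # timeslot 16:00-18:30 is skipped entirely (start_time >= planned_end_time),
    # and any clipped slot shorter than CONF_MIN_EVENT_DURATION minutes is
    # dropped.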
484 |
485 | def utc_dt(date: datetime, time: datetime) -> datetime:
486 | """Given a date and time in SAST, this function returns a datetime object in UTC."""
487 | sast = timezone(timedelta(hours=+2), "SAST")
488 |
489 | return time.replace(
490 | year=date.year,
491 | month=date.month,
492 | day=date.day,
493 | second=0,
494 | microsecond=0,
495 | tzinfo=sast,
496 | ).astimezone(UTC)
497 |
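# Worked example: a "22:00-00:30" SAST timeslot on 2023-06-01 converts as
#   utc_dt(date, 22:00) -> 2023-06-01T20:00:00+00:00
#   utc_dt(date, 00:30) -> 2023-05-31T22:30:00+00:00
# so the caller's end < start check adds a day, yielding an end of
# 2023-06-01T22:30:00+00:00.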
498 |
499 | class LoadSheddingQuotaCoordinator(DataUpdateCoordinator[dict[str, Any]]):
500 | """Class to manage fetching LoadShedding Quota."""
501 |
502 | def __init__(self, hass: HomeAssistant, sepush: SePush) -> None:
503 | """Initialize the quota coordinator."""
504 | super().__init__(hass, _LOGGER, name=f"{DOMAIN}")
505 | self.data = {}
506 | self.sepush = sepush
507 | self.last_update: datetime | None = None
508 |
509 | async def _async_update_data(self) -> dict:
510 | """Retrieve latest load shedding data."""
511 |
512 | now = datetime.now(UTC).replace(microsecond=0)
513 | try:
514 | quota = await self.async_update_quota()
515 | except SePushError as err:
516 | _LOGGER.error("Unable to get quota: %s", err)
517 | self.data = {}
518 | except UpdateFailed as err:
519 | _LOGGER.exception("Unable to get quota: %s", err)
520 | else:
521 | self.data = quota
522 | self.last_update = now
523 |
524 | return self.data
525 |
526 | async def async_update_quota(self) -> dict:
527 | """Retrieve latest quota."""
528 | esp = await self.hass.async_add_executor_job(self.sepush.check_allowance)
529 |
530 | return esp.get("allowance", {})
531 |
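    # The quota sensor reads "count" from this dict; an illustrative allowance
    # payload ("limit" and "type" are assumptions, not confirmed here):
    #   {"allowance": {"count": 27, "limit": 50, "type": "daily"}}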
532 |
533 | class LoadSheddingDevice(Entity):
534 | """Define a LoadShedding device."""
535 |
536 | def __init__(self, coordinator) -> None:
537 | """Initialize the device."""
538 | super().__init__(coordinator)
539 |         self.device_id = f"{NAME}"
540 |
541 | @property
542 | def device_info(self) -> DeviceInfo:
543 | """Return device information about this LoadShedding receiver."""
544 | return {
545 | ATTR_IDENTIFIERS: {(DOMAIN, self.device_id)},
546 | ATTR_NAME: f"{NAME}",
547 | ATTR_MANUFACTURER: MANUFACTURER,
548 | ATTR_MODEL: API,
549 | ATTR_SW_VERSION: VERSION,
550 | ATTR_VIA_DEVICE: (DOMAIN, self.device_id),
551 | }
552 |
--------------------------------------------------------------------------------