├── .github ├── CODEOWNERS ├── dependabot.yml ├── workflows │ ├── validate-hacs.yml │ ├── release-drafter.yml │ ├── release.yml │ ├── stats.yaml │ ├── lint_python.yml │ └── codeql-analysis.yml ├── scripts │ ├── gen_stats.sh │ └── update_hacs_manifest.py └── release-drafter.yml ├── .prettierrc.yml ├── custom_components └── zha_toolkit │ ├── .gitignore │ ├── manifest.json │ ├── tuya.py │ ├── default.py │ ├── ha.py │ ├── _user.py │ ├── ezsp_backup.py │ ├── zha.py │ ├── params.py │ ├── zdo.py │ ├── zcl_cmd.py │ ├── neighbours.py │ ├── znp.py │ ├── groups.py │ ├── ota.py │ ├── misc.py │ ├── ezsp.py │ ├── scan_device.py │ └── binds.py ├── .gitignore ├── icon ├── icon.png ├── icon@2x.png └── icon.svg ├── requirements_test.txt ├── .prettierignore ├── images ├── ServiceResponse.png └── service-config-ui.png ├── examples ├── images │ ├── state_basic_cluster.png │ └── service_basic_cluster.png ├── service_call_read_basic_cluster.yaml ├── script_TRV_setTemperatureReporting.yaml ├── script_request_all_light_states.yaml ├── script_use_zha_devices_response.yaml ├── fetchOTAfw.sh ├── script_Thermometer_setReporting.yaml ├── script_read_basic_cluster.yaml ├── script_use_zha_devices.yaml ├── README.md ├── script_danfoss_ally_adaptation_run_init.yaml └── script_danfoss_ally_configure.yaml ├── pyproject.toml ├── hacs.json ├── scripts ├── installNoHacsWithGit.sh └── installNoHacsFromZip.sh ├── blueprints ├── backup.yaml ├── backup_znp.yaml ├── script_Thermometer_setReporting.yaml ├── README.md ├── blueprint_danfoss_ally_configure_script.yaml ├── danfoss_ally_remote_temperature.yaml └── danfoss_ally_remote_temperature_min_delay.yaml ├── setup.cfg ├── Contributing.md ├── .pre-commit-config.yaml └── STATS.md /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @mdeweerd 2 | -------------------------------------------------------------------------------- /.prettierrc.yml: 
-------------------------------------------------------------------------------- 1 | quoteProps: preserve 2 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/.gitignore: -------------------------------------------------------------------------------- 1 | local 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | nwk_backup.json 3 | .mypy_cache 4 | **/*.swp 5 | -------------------------------------------------------------------------------- /icon/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tomasbedrich/zha-toolkit/main/icon/icon.png -------------------------------------------------------------------------------- /requirements_test.txt: -------------------------------------------------------------------------------- 1 | # For tests 2 | pytest-homeassistant-custom-component>=0.4.8 3 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | # Keep workflow happy 2 | .* 3 | #example** 4 | #hacs.json 5 | README.md 6 | -------------------------------------------------------------------------------- /icon/icon@2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tomasbedrich/zha-toolkit/main/icon/icon@2x.png -------------------------------------------------------------------------------- /images/ServiceResponse.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tomasbedrich/zha-toolkit/main/images/ServiceResponse.png -------------------------------------------------------------------------------- 
/images/service-config-ui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tomasbedrich/zha-toolkit/main/images/service-config-ui.png -------------------------------------------------------------------------------- /examples/images/state_basic_cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tomasbedrich/zha-toolkit/main/examples/images/state_basic_cluster.png -------------------------------------------------------------------------------- /examples/images/service_basic_cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tomasbedrich/zha-toolkit/main/examples/images/service_basic_cluster.png -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.yamlfix] 2 | # allow_duplicate_keys = true 3 | #line_length = 280 4 | line_length = 80 5 | # none_representation = "null" 6 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: pip 4 | directory: "/" 5 | schedule: 6 | interval: weekly 7 | time: "01:30" 8 | open-pull-requests-limit: 10 9 | -------------------------------------------------------------------------------- /examples/service_call_read_basic_cluster.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Example of service call to read basic cluster 3 | # Needs: script_read_basic_cluster.yaml 4 | # 5 | service: script.1645121662206 6 | data: 7 | entity_name: button.bureau_identify 8 | csv: ../www/lidl_basic_cluster.csv 9 | 
-------------------------------------------------------------------------------- /hacs.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "🧰 ZHA Toolkit - Service for advanced Zigbee Usage", 3 | "content_in_root": false, 4 | "zip_release": true, 5 | "filename": "zha-toolkit.zip", 6 | "render_readme": true, 7 | "persistent_directory": "local", 8 | "homeassistant": "2021.1" 9 | } 10 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "domain": "zha_toolkit", 3 | "name": "\ud83e\uddf0 ZHA Toolkit", 4 | "codeowners": ["@mdeweerd"], 5 | "dependencies": ["zha"], 6 | "documentation": "https://github.com/mdeweerd/zha-toolkit", 7 | "iot_class": "local_polling", 8 | "issue_tracker": "https://github.com/mdeweerd/zha-toolkit/issues", 9 | "requirements": ["pytz"], 10 | "version": "1.0.0" 11 | } 12 | -------------------------------------------------------------------------------- /scripts/installNoHacsWithGit.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd config/ || exit 3 | ( 4 | git clone -n --depth=1 --filter=tree:0 https://github.com/mdeweerd/zha-toolkit.git 5 | cd zha-toolkit || exit 6 | git sparse-checkout set --no-cone custom_components 7 | git checkout 8 | ) 9 | ( 10 | [[ -r custom_components ]] && cd custom_components && ln -s ../zha-toolkit/custom_components/zha_toolkit . 
11 | ) 12 | # To update: 13 | ( 14 | cd zha-toolkit || exit 15 | git pull 16 | ) 17 | -------------------------------------------------------------------------------- /scripts/installNoHacsFromZip.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # To update / install : 3 | cd config || exit 4 | ( 5 | mkdir -p custom_components/zha_toolkit 6 | cd custom_components/zha_toolkit || exit 7 | rm zha-toolkit.zip >& /dev/null 8 | curl -s https://api.github.com/repos/mdeweerd/zha-toolkit/releases/latest \ 9 | | grep "browser_download_url.*/zha-toolkit.zip" \ 10 | | cut -d : -f 2,3 \ 11 | | tr -d \" \ 12 | | wget -qi - 13 | unzip -o zha-toolkit.zip 14 | rm zha-toolkit.zip 15 | ) 16 | -------------------------------------------------------------------------------- /.github/workflows/validate-hacs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Validate with hassfest 3 | 4 | on: 5 | push: 6 | pull_request: 7 | schedule: 8 | - cron: 0 0 * * * 9 | 10 | jobs: 11 | validate: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v4 15 | - uses: home-assistant/actions/hassfest@master 16 | hacs: 17 | name: HACS Action 18 | runs-on: ubuntu-latest 19 | steps: 20 | - uses: actions/checkout@v4 21 | - name: HACS Action 22 | uses: hacs/action@main 23 | with: 24 | category: integration 25 | -------------------------------------------------------------------------------- /.github/workflows/release-drafter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Release Drafter 3 | 4 | on: 5 | push: 6 | branches: [main] 7 | pull_request: 8 | types: [opened, reopened, synchronize] 9 | 10 | jobs: 11 | update_release_draft: 12 | name: Update release draft 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout 16 | uses: actions/checkout@v4 17 | with: 18 | fetch-depth: 0 19 | - name: Create Release 20 | uses: 
release-drafter/release-drafter@v5 21 | with: 22 | disable-releaser: github.ref != 'refs/heads/main' 23 | env: 24 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 25 | -------------------------------------------------------------------------------- /.github/scripts/gen_stats.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DEST=$(dirname "$0")/../../STATS.md 4 | 5 | TEMPLATE='- ![badge VERSION](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/VERSION/total.svg)' 6 | 7 | # Exclude stuff that results in invalid badges 8 | EXCLUDES="v0.7.9 v0.7.7 v0.7.6 v0.7.5 v0.7.3 v0.7.2 v0.7.1 v0.7.23 v0.7.24 v0.8.30 v0.9.6 v0.9.8" 9 | 10 | ( 11 | echo '# Badges showing number of downloads per version' 12 | echo 13 | for tag in latest $(git tag -l --sort=-creatordate v*[0-9]) ; do 14 | if [[ "$EXCLUDES" != *"$tag"* ]] ; then 15 | echo "${TEMPLATE//VERSION/$tag}" 16 | fi 17 | done 18 | ) > "${DEST}" 19 | -------------------------------------------------------------------------------- /blueprints/backup.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | blueprint: 3 | name: Daily Coordinator Backup - Monthly rotation 4 | description: >- 5 | Backup Zigbee Coordinator Configuration (ZNP/ezsp(bellows)), 6 | monthly rotation 7 | domain: automation 8 | input: 9 | backup_time: 10 | name: Backup time 11 | description: >- 12 | Time at which the daily backup should be made. 
13 | selector: 14 | time: 15 | trigger: 16 | - platform: time 17 | at: !input backup_time 18 | condition: [] 19 | action: 20 | - service: zha_toolkit.execute 21 | data: 22 | command: backup 23 | command_data: '{{ now().strftime("_%d") }}' 24 | event_success: zha_coordinator_backup_success 25 | event_fail: zha_coordinator_backup_failed 26 | event_done: zha_coordinator_backup_done 27 | mode: restart 28 | -------------------------------------------------------------------------------- /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name-template: v$RESOLVED_VERSION 🧰 3 | tag-template: v$RESOLVED_VERSION 4 | change-template: '- #$NUMBER $TITLE @$AUTHOR' 5 | sort-direction: ascending 6 | categories: 7 | - title: 🚀 Features 8 | labels: 9 | - feature 10 | - enhancement 11 | 12 | - title: 🐛 Bug Fixes 13 | labels: 14 | - fix 15 | - bugfix 16 | - bug 17 | 18 | - title: 🧰 Maintenance 19 | label: chore 20 | 21 | version-resolver: 22 | major: 23 | labels: 24 | - major 25 | minor: 26 | labels: 27 | - minor 28 | patch: 29 | labels: 30 | - patch 31 | default: patch 32 | template: | 33 | ## Changes 34 | 35 | $CHANGES 36 | 37 | ## ⭐️ Thank you so much for helping out to keep this integration awesome 38 | $CONTRIBUTORS 39 | autolabeler: 40 | - label: bug 41 | branch: 42 | - /fix\/.+/ 43 | - label: enhancement 44 | branch: 45 | - /feature\/.+/ 46 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/tuya.py: -------------------------------------------------------------------------------- 1 | from custom_components.zha_toolkit import utils as u 2 | from custom_components.zha_toolkit.params import INTERNAL_PARAMS as p 3 | 4 | 5 | async def tuya_magic( 6 | app, listener, ieee, cmd, data, service, params, event_data 7 | ): 8 | """ 9 | Send 'magic spell' sequence to device to try to get 'normal' behavior. 
10 | """ 11 | 12 | dev = app.get_device(ieee) 13 | basic_cluster = dev.endpoints[1].in_clusters[0] 14 | 15 | # The magic spell is needed only once. 16 | # TODO: Improve by doing this only once (successfully). 17 | 18 | # Magic spell - part 1 19 | attr_to_read = [4, 0, 1, 5, 7, 0xFFFE] 20 | res = await u.cluster_read_attributes( 21 | basic_cluster, attr_to_read, tries=params[p.TRIES] 22 | ) 23 | 24 | event_data["result"] = res 25 | 26 | # Magic spell - part 2 (skipped - does not seem to be needed) 27 | # attr_to_write={0xffde:13} 28 | # basic_cluster.write_attributes(attr_to_write, tries=3) 29 | -------------------------------------------------------------------------------- /blueprints/backup_znp.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | blueprint: 3 | name: Daily ZNP Backup - Monthly rotation 4 | description: Backup ZNP Zigbee configuration, monthly rotation 5 | domain: automation 6 | input: 7 | backup_time: 8 | name: Backup time 9 | description: >- 10 | Time at which the daily backup should be madeaction should start 11 | selector: 12 | time: 13 | trigger: 14 | - platform: time 15 | at: !input backup_time 16 | condition: [] 17 | action: 18 | - service: zha_toolkit.execute 19 | data: 20 | command: znp_backup 21 | command_data: '{{ now().strftime("_%d") }}' 22 | event_success: znp_backup_success 23 | event_fail: znp_backup_failed 24 | event_done: znp_backup_done 25 | - service: zha_toolkit.execute 26 | data: 27 | command: znp_nvram_backup 28 | command_data: '{{ now().strftime("_%d") }}' 29 | event_success: znp_nvram_backup_success 30 | event_fail: znp_nvram_backup_failed 31 | event_done: znp_nvram_backup_done 32 | mode: restart 33 | -------------------------------------------------------------------------------- /examples/script_TRV_setTemperatureReporting.yaml: -------------------------------------------------------------------------------- 1 | alias: Zigbee TRV Configure Temperature Reports 2 | sequence: 3 
| - service: zha_toolkit.conf_report 4 | data: 5 | ieee: "{{ entity_name }}" 6 | cluster: 513 7 | attribute: 0 8 | tries: 100 9 | event_done: zha_done 10 | reportable_change: 20 11 | max_interval: 300 12 | min_interval: 19 13 | - service: zha_toolkit.conf_report_read 14 | data: 15 | ieee: "{{ entity_name }}" 16 | cluster: 513 17 | attribute: 0 18 | tries: 100 19 | event_done: zha_done 20 | fields: 21 | entity_name: 22 | name: entity_name 23 | description: A Zigbee Entity (all entities of the device resolve to the same address) 24 | required: true 25 | selector: 26 | entity: 27 | integration: zha 28 | mode: restart 29 | icon: mdi:home-thermometer 30 | description: >- 31 | This script configures the selected TRV (Thermostatatic Radiator Valve) to 32 | report its temperature at least every 5 minutes or every 0.2°C whichever 33 | occurs first. 34 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Release 3 | 4 | on: 5 | release: 6 | types: [published] 7 | workflow_dispatch: 8 | 9 | jobs: 10 | release_zip_file: 11 | name: Prepare release asset 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Check out repository 15 | uses: actions/checkout@v4 16 | 17 | # - name: Get version 18 | # id: version 19 | # uses: home-assistant/actions/helpers/version@master 20 | 21 | - name: Set version number 22 | run: | 23 | python3 ${{ github.workspace }}/.github/scripts/update_hacs_manifest.py --version ${{ github.ref_name }} 24 | 25 | - name: Create zip 26 | run: | 27 | cd custom_components/zha_toolkit 28 | zip zha-toolkit.zip -r ./ 29 | - name: Upload zip to release 30 | uses: svenstaro/upload-release-action@v1-release 31 | with: 32 | repo_token: ${{ secrets.GITHUB_TOKEN }} 33 | file: ./custom_components/zha_toolkit/zha-toolkit.zip 34 | asset_name: zha-toolkit.zip 35 | tag: ${{ github.ref }} 36 | overwrite: true 37 | 
-------------------------------------------------------------------------------- /examples/script_request_all_light_states.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | alias: "[Lighting] Update on/off states" 3 | description: >- 4 | Request on/off state of all ZHA lights. 5 | This allows to cope with lights that do not notify their state as expected. 6 | In particular, when batch/group on/off commands are executes, the light states 7 | are not updated in Home Assistant. 8 | The result of the read requests will "force" Home Assistant to update. 9 | You can call this script in an automation triggered by the group action, or on a 10 | timely basis. 11 | (Original script by @HarvsG in https://github.com/mdeweerd/zha-toolkit/issues/113#issuecomment-1335616201) 12 | trigger: [] 13 | condition: [] 14 | action: 15 | - repeat: 16 | for_each: >- 17 | {{states.light | map(attribute='entity_id') | select('in', 18 | integration_entities('zha')) | list }} 19 | sequence: 20 | - continue_on_error: true 21 | service: zha_toolkit.attr_read 22 | data: 23 | ieee: "{{ repeat.item }}" 24 | cluster: 6 25 | attribute: 0 26 | fail_exception: false 27 | tries: 3 28 | mode: single 29 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [tool:pytest] 2 | asyncio_mode=strict 3 | 4 | [flake8] 5 | exclude = .venv,.git,.tox 6 | # To work with Black 7 | max-line-length = 79 8 | # B028 is manually surrounded by quotes, consider using the `!r` 9 | # W503 line break before binary operator 10 | # E501 line too long 11 | ignore = 12 | B028, 13 | W503 14 | 15 | per-file-ignores = 16 | custom_components/zha_toolkit/__init__.py:E501 17 | 18 | # per-file-ignores = 19 | # example/*:F811,F401,F403 20 | 21 | [isort] 22 | profile = black 23 | line_length = 79 24 | 25 | [pylint.MESSAGES CONTROL] 26 | disable = invalid-name, 
unused-argument, broad-except, missing-docstring, fixme, 27 | consider-using-f-string, 28 | too-many-branches, too-many-statements, too-many-arguments, protected-access, 29 | import-error, too-many-locals, import-outside-toplevel, 30 | logging-fstring-interpolation, line-too-long, duplicate-code 31 | 32 | [pylint.FORMAT] 33 | max-line-length = 79 34 | 35 | [codespell] 36 | builtin=clear,rare,informal,usage,code,names 37 | ignore-words-list=hass,master,weerd,uint 38 | skip=./.* 39 | quiet-level=2 40 | 41 | [mypy] 42 | mypy_path = $MYPY_CONFIG_FILE_DIR 43 | explicit_package_bases = yes 44 | -------------------------------------------------------------------------------- /examples/script_use_zha_devices_response.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | alias: Loop over zha_devices, extract some device data. For HA>=2023.7 3 | sequence: 4 | - service: zha_toolkit.zha_devices 5 | response_variable: dev_data 6 | - service: system_log.write 7 | data: 8 | logger: zha_devices 9 | level: error 10 | message: '{{ "Got device_data %s" % ( dev_data.devices ) }}' 11 | - service: system_log.write 12 | alias: List unavailable only 13 | data: 14 | logger: zha_devices 15 | level: error 16 | message: > 17 | {% set ns = namespace(names=[]) %} 18 | {% for item in dev_data.devices if not item.available %} 19 | {% set ns.names = ns.names + [ "'%s'" % (item.name) ] %} 20 | {% endfor %} 21 | Items: {{ ns.names | join(', ') }} 22 | - repeat: 23 | for_each: "{{ dev_data.devices }}" 24 | sequence: 25 | - service: system_log.write 26 | data: 27 | logger: zha_devices 28 | level: error 29 | message: >- 30 | {{ "Item '%s' Power: %s dBm Available: %s" % ( 31 | repeat.item.name, repeat.item.rssi, repeat.item.available 32 | ) }} 33 | mode: single 34 | -------------------------------------------------------------------------------- /blueprints/script_Thermometer_setReporting.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | blueprint: 3 | domain: script 4 | name: Zigbee Thermometer Configure Reporting 5 | description: A script that configures the reporting of a zigbee thermometer. 6 | source_url: https://github.com/mdeweerd/zha-toolkit/blob/master/blueprints/script_Thermometer_setReporting.yaml 7 | input: 8 | entity_name: 9 | name: entity_name 10 | description: 11 | A Zigbee Entity (all entities of the device resolve to the same 12 | address) 13 | selector: 14 | entity: 15 | integration: zha 16 | sequence: 17 | - service: zha_toolkit.conf_report 18 | data: 19 | ieee: "{{ entity_name }}" 20 | cluster: 1026 21 | attribute: 0 22 | tries: 100 23 | event_done: zha_done 24 | reportable_change: 20 25 | max_interval: 300 26 | min_interval: 19 27 | - service: zha_toolkit.conf_report_read 28 | data: 29 | ieee: "{{ entity_name }}" 30 | cluster: 1026 31 | attribute: 0 32 | tries: 100 33 | event_done: zha_done 34 | mode: restart 35 | icon: mdi:thermometer-check 36 | description: >- 37 | This script configures the selected Zigbee Thermometer to report its 38 | temperature at least every 5 minutes or every 0.2°C whichever occurs first. 
39 | -------------------------------------------------------------------------------- /.github/workflows/stats.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: stats 3 | on: [create] 4 | jobs: 5 | gen_stats: 6 | if: ${{ startsWith(github.ref, 'refs/tags/v') }} 7 | runs-on: ubuntu-latest 8 | steps: 9 | # Credit: https://stackoverflow.com/questions/58033366/how-to-get-the-current-branch-within-github-actions 10 | - name: Extract branch name 11 | shell: bash 12 | #run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})" 13 | run: echo "branch=$(echo ${GITHUB_REF#refs/heads/}) >> $GITHUB_OUTPUT" 14 | id: extract_branch 15 | - uses: actions/checkout@v4 16 | #with: 17 | # ref: ${{ steps.extract_branch.outputs.branch }} 18 | # fetch-depth: 0 19 | - run: ${{ github.workspace }}/.github/scripts/gen_stats.sh 20 | - name: Commit updated resources 21 | uses: test-room-7/action-update-file@v1 22 | with: 23 | file-path: STATS.md 24 | commit-msg: '[Bot] stats - Update STATS.md' 25 | github-token: ${{ secrets.GITHUB_TOKEN }} 26 | # - uses: stefanzweifel/git-auto-commit-action@v4 27 | # with: 28 | # commit_message: '[Bot] stats - Update STATS.md' 29 | # commit_user_name: stats 30 | # commit_user_email: stats@nill 31 | # commit_author: STATS BOT 32 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/default.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import logging 3 | import sys 4 | 5 | LOGGER = logging.getLogger(__name__) 6 | 7 | 8 | async def default(app, listener, ieee, cmd, data, service, params, event_data): 9 | """Default handler that delegates CORE_ACTION to CORE.py/ACTION""" 10 | 11 | # This defaults handler enables adding new handler methods 12 | # by adding a file such as "CORE.py" containing the 13 | # ACTION. The corresponding service name is "CORE_ACTION". 
14 | # 15 | # This avoids having to add the mapping in __init__.py 16 | # and also allows the user to freely add new services. 17 | 18 | # get our package name to know where to load from 19 | package_name = vars(sys.modules[__name__])["__package__"] 20 | 21 | # The module name is before the '_' and the command 22 | # is the entire string 23 | if isinstance(cmd, str): 24 | module_name = cmd[: cmd.index("_")] 25 | else: 26 | # When cmd is not a string, it must be a list [ MODULE, CMD ] 27 | module_name = cmd[0] 28 | cmd = cmd[1] 29 | 30 | LOGGER.debug( 31 | f"Trying to import {package_name}.{module_name} to call {cmd}" 32 | ) 33 | m = importlib.import_module(f".{module_name}", package=package_name) 34 | 35 | importlib.reload(m) 36 | 37 | # Get handler (cmd) in loaded module. 38 | handler = getattr(m, cmd) 39 | # Call the handler 40 | await handler(app, listener, ieee, cmd, data, service, params, event_data) 41 | -------------------------------------------------------------------------------- /blueprints/README.md: -------------------------------------------------------------------------------- 1 | - `backup.yaml`:\ 2 | Script for daily backup of supported zigbee coordinators. 3 | - `backup_znp.yaml`:\ 4 | Script for daily backup of ZNP coordinator. 5 | - `blueprint_danfoss_ally_configure_script.yaml`:\ 6 | Sample blueprint script 7 | to configure Danfoss Ally (see other script example for a more complete 8 | configuration) 9 | - `danfoss_ally_remote_temperature.yaml`:\ 10 | Send temperature to Danfoss Ally 11 | TRV at most every X minutes and at least every Y minutes. Uses restart to 12 | interrupt long wait ("y minutes") 13 | - `danfoss_ally_remote_temperature_min_delay.yaml`:\ 14 | Send temperature to 15 | Danfoss Ally at most every X minutes. Uses single to block too fast 16 | updates. In case the temperature is stable over a very long time, you 17 | should ensure that HA considers it is updated on every change. 
18 | - `danfoss_ally_remote_temperature_min_delay_fake_change.yaml`:\ 19 | Same as 20 | `..._min_delay.yaml`. Work in progress - needs update of 21 | `home-assistant-variables`. Uses 22 | [snarky-snark/home-assistant-variables](https://github.com/snarky-snark/home-assistant-variables) 23 | to fake temperature update even when stable by applying slight change in 24 | temperature at the end of the minimum delay. So if the temperature is 25 | stable, it will still be seen as a change. 26 | - `script_Thermometer_setReporting.yaml`:\ 27 | Blueprint Script to configure 28 | reporting of a zigbee device with Temperature Measurement Cluster 0x0402. 29 | -------------------------------------------------------------------------------- /examples/fetchOTAfw.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # NOTE: you can now download by using the HA Service `zha-toolkit/ota_notify`. 3 | # 4 | # Important: 5 | # Requires `jq` (https://stedolan.github.io/jq/) 6 | # If not available on your system, on alpine you just 7 | # need to add the [jq package](https://pkgs.alpinelinux.org/package/edge/main/x86/jq). 8 | # 9 | # In configuration.yaml, set the fw directory: 10 | # (Note: only the otau_directory option is shown) 11 | # 12 | # ```yaml 13 | # zha: 14 | # zigpy_config: 15 | # ota: 16 | # otau_directory: /config/zb_ota 17 | # ``` 18 | # 19 | # Create the directory you have chosen (`/config/zb_ota` 20 | # in the example). Then add this script in that directory. 21 | # Make the script executable (`chmod +x fetchOTAfw.sh`) and 22 | # run it. 23 | # 24 | # 25 | # If you find FW that is not in that list, check out the 26 | # [instructions](https://github.com/Koenkk/zigbee-OTA#adding-new-and-updating-existing-ota-files) 27 | # to add them. 28 | # 29 | 30 | # List all FW files that were already downloaded. 31 | # The files usually have the FW version in their name, making them unique. 
32 | ls -- *.ZIGBEE *.OTA *.sbl-ota *.bin *.ota *.zigbee > existing.list 33 | 34 | # Get and filter the list from Koenk's list, download the files 35 | # shellcheck disable=SC2016 36 | curl https://raw.githubusercontent.com/Koenkk/zigbee-OTA/master/index.json |\ 37 | jq -r '.[] |.url' |\ 38 | grep -v -f existing.list |\ 39 | xargs bash -c 'for f do wget --no-clobber $f || rm ${f##*/} ; done' 40 | 41 | # Delete the helper file used to filter already downloaded files 42 | rm existing.list 43 | -------------------------------------------------------------------------------- /examples/script_Thermometer_setReporting.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | alias: Zigbee Thermometer Configure Reporting 3 | fields: 4 | entity_name: 5 | name: entity_name 6 | description: A Zigbee Entity (all entities of the device resolve to the same address) 7 | required: true 8 | selector: 9 | entity: 10 | integration: zha 11 | sequence: 12 | - alias: 13 | Configure the temperature cluster of the device so that it reports every 14 | 0.2°C every 19 seconds at most, or sends a report at least every 5 minutes 15 | service: zha_toolkit.conf_report 16 | data: 17 | ieee: "{{ entity_name }}" 18 | cluster: 1026 19 | attribute: 0 20 | tries: 100 21 | event_done: zha_done 22 | reportable_change: 20 23 | max_interval: 300 24 | min_interval: 19 25 | - alias: 26 | Read back the report configuration so that it can be verified in the zha_done 27 | event data 28 | service: zha_toolkit.conf_report_read 29 | data: 30 | ieee: "{{ entity_name }}" 31 | cluster: 1026 32 | attribute: 0 33 | tries: 100 34 | event_done: zha_done 35 | - alias: Ensure that the cluster is bound to the coordinator 36 | service: zha_toolkit.bind_ieee 37 | data: 38 | ieee: 0 # 0 or false selects the coordinator in zha-toolkit 39 | cluster: 1026 40 | tries: 100 41 | event_done: zha_done 42 | mode: restart 43 | icon: mdi:thermometer-check 44 | description: >- 45 | This script 
configures the selected Zigbee Thermometer to report its 46 | temperature at least every 5 minutes or every 0.2°C whichever occurs first. 47 | -------------------------------------------------------------------------------- /examples/script_read_basic_cluster.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | alias: Read Basic Cluster 3 | description: 4 | Read main attributes of cluster to CSV file and 'sensor.basic_cluster' 5 | state. 6 | fields: 7 | entity_name: 8 | name: entity_name 9 | description: A Zigbee Entity (all entities of the device resolve to the same address) 10 | required: true 11 | selector: 12 | entity: 13 | integration: zha 14 | csv: 15 | name: csv 16 | description: >- 17 | Csv filename '../www/basic.csv' can be downloaded from 18 | YOURINSTANCEURL/local/basic.csv . 19 | example: ../www/basic.csv 20 | required: true 21 | selector: 22 | text: 23 | sequence: 24 | - repeat: 25 | count: "7" 26 | sequence: 27 | - variables: 28 | current: "{{ ( repeat.index - 1 ) }}" 29 | - service: system_log.write 30 | data: 31 | logger: entity_name.read_basic_cluster_script 32 | level: warning 33 | message: "{{ 'Read Attribute %u' % (repeat.index, ) }}" 34 | - service: zha_toolkit.attr_read 35 | data: 36 | ieee: "{{ entity_name }}" 37 | cluster: 0 38 | attribute: "{{ current }}" 39 | tries: 3 40 | state_id: sensor.basic_cluster 41 | state_attr: '{{ "%s%04X" % (entity_name, current|int) }}' 42 | allow_create: true 43 | csvout: "{{ csv }}" 44 | - service: system_log.write 45 | data: 46 | logger: entity_name.basic_cluster_read 47 | level: warning 48 | message: Basic cluster read done 49 | mode: restart 50 | -------------------------------------------------------------------------------- /examples/script_use_zha_devices.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | alias: Loop over zha_devices, extract some device data 3 | sequence: 4 | - parallel: 5 | - sequence: 6 | - 
wait_for_trigger: 7 | - platform: event 8 | event_type: zha_devices_ready 9 | - service: system_log.write 10 | data: 11 | logger: zha_devices 12 | level: error 13 | message: '{{ "Got event %s" % ( wait.trigger.event.data.devices ) }}' 14 | - service: system_log.write 15 | alias: List unavailable only 16 | data: 17 | logger: zha_devices 18 | level: error 19 | message: > 20 | {% set ns = namespace(names=[]) %} 21 | {% for item in wait.trigger.event.data.devices if not item.available %} 22 | {% set ns.names = ns.names + [ "'%s'" % (item.name) ] %} 23 | {% endfor %} 24 | Items: {{ ns.names | join(', ') }} 25 | - repeat: 26 | for_each: "{{ wait.trigger.event.data.devices }}" 27 | sequence: 28 | - service: system_log.write 29 | data: 30 | logger: zha_devices 31 | level: error 32 | message: >- 33 | {{ "Item '%s' Power: %s dBm Available: %s" % ( 34 | repeat.item.name, repeat.item.rssi, repeat.item.available 35 | ) }} 36 | - service: zha_toolkit.zha_devices 37 | data: 38 | event_done: zha_devices_ready 39 | mode: single 40 | -------------------------------------------------------------------------------- /.github/workflows/lint_python.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: lint_python 3 | on: [pull_request, push] 4 | jobs: 5 | lint_python: 6 | runs-on: ubuntu-latest 7 | steps: 8 | - name: Extract branch name 9 | shell: bash 10 | # run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})" 11 | run: echo "branch=$(echo ${GITHUB_REF#refs/heads/}) >> $GITHUB_OUTPUT" 12 | id: extract_branch 13 | - uses: actions/checkout@v4 14 | with: 15 | ref: ${{ steps.extract_branch.outputs.branch }} 16 | - uses: actions/setup-python@v4 17 | - run: pip install --upgrade pip wheel 18 | - run: >- 19 | pip install bandit black codespell flake8 flake8-2020 flake8-bugbear 20 | flake8-comprehensions mccabe pycodestyle pyflakes mypy pytest pyupgrade safety 21 | - run: bandit --recursive --skip B101,B311 . 
22 | - run: black --check . || true 23 | - run: codespell --ignore-words-list="hass" 24 | - run: >- 25 | flake8 . --count --show-source --statistics 26 | - run: isort --check-only --profile black . || true 27 | - run: pip install -r requirements.txt || pip install --editable . || true 28 | - run: mkdir --parents --verbose .mypy_cache 29 | - run: >- 30 | mypy --ignore-missing-imports --install-types --non-interactive . || 31 | true 32 | - run: pytest . || true 33 | # - run: pytest --doctest-modules . || true 34 | - run: shopt -s globstar && pyupgrade --py37-plus **/*.py || true 35 | # Safety checks identifies issues in python packages - too much hassle. 36 | # - run: safety check 37 | # Not maintained: 38 | # - uses: pre-commit.ci/action@v3.0.0 39 | - uses: stefanzweifel/git-auto-commit-action@v4 40 | with: 41 | commit_message: '[Bot] lint_python - formatting updates!' 42 | # commit_user_name: lint_python 43 | # commit_user_email: lint_python@nill 44 | # commit_author: lint_python bot 45 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | ## Read Attributes from Basic Cluster to CSV and state 2 | 3 | (Note: scripts can be called as a service) 4 | 5 | - `script_read_basic_cluster.yaml`:\ 6 | Script to add to HA (Configuration > 7 | Scripts): 8 | - `service_call_read_basic_cluster.yaml`:\ 9 | Example of service 10 | call.\ 11 | ![image](images/service_basic_cluster.png) 12 | - Values in state:\ 13 | ![image](images/state_basic_cluster.png) 14 | - Values in CSV: 15 | 16 | ```csv 17 | 2022-02-17T18:27:35.646226+00:00,Basic,zcl_version,3,0x0000,0x0000,1,60:a4:23:ff:fe:91:fc:9a, 18 | 2022-02-17T18:27:35.797180+00:00,Basic,app_version,80,0x0001,0x0000,1,60:a4:23:ff:fe:91:fc:9a, 19 | 2022-02-17T18:27:35.934612+00:00,Basic,stack_version,0,0x0002,0x0000,1,60:a4:23:ff:fe:91:fc:9a, 20 | 
2022-02-17T18:27:36.071951+00:00,Basic,hw_version,1,0x0003,0x0000,1,60:a4:23:ff:fe:91:fc:9a, 21 | 2022-02-17T18:27:36.212760+00:00,Basic,manufacturer,_TZ3000_dbou1ap4,0x0004,0x0000,1,60:a4:23:ff:fe:91:fc:9a, 22 | 2022-02-17T18:27:36.352902+00:00,Basic,model,TS0505A,0x0005,0x0000,1,60:a4:23:ff:fe:91:fc:9a, 23 | 2022-02-17T18:27:36.488601+00:00,Basic,date_code,,0x0006,0x0000,1,60:a4:23:ff:fe:91:fc:9a, 24 | ``` 25 | 26 | ## Configure temperature reports by TRV or Thermometer 27 | 28 | (Note: scripts can be called as a service) 29 | 30 | - `script_TRV_setTemperatureReporting.yaml`:\ 31 | Script to configure a TRV to 32 | report every 5 minutes or when temperature changed by 0.2°C. 33 | - `script_Thermometer_setReporting.yaml`:\ 34 | Script to configure a 35 | Thermometer to report every 5 minutes or when temperature changed by 36 | 0.2°C. 37 | 38 | ## Download firmware from different sources. 39 | 40 | See `fetchOTAfw.sh` for instructions. The download functionality is now 41 | integrated in 42 | [ota_notify](https://github.com/mdeweerd/zha-toolkit#ota_notify) which is 43 | more selective. If you choose to use the script, you still need to trigger 44 | the OTA update (which can be done using ota_notify). 45 | 46 | ## FW resources 47 | 48 | - LEDVANCE/OSRAM: https://update.ledvance.com/firmware-overview 49 | -------------------------------------------------------------------------------- /icon/icon.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/ha.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import logging 4 | 5 | from homeassistant.helpers.template import Template 6 | from homeassistant.util import dt as dt_util 7 | 8 | from . 
async def ha_set_state(  # noqa: C901
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Set a Home Assistant state (or state attribute) to 'attr_val'.

    Optionally transforms the value through a user supplied Jinja
    template ('state_value_template') before writing, and appends the
    written value to a CSV file when 'csvout' is configured.

    Raises:
        ValueError: when the mandatory 'state_id' parameter is missing.

    Returns:
        bool: True (for internal use by the command dispatcher).
    """
    success = True

    val = params[p.ATTR_VAL]

    # Optional user template; both 'value' and 'attr_val' are exposed
    # as template variables for convenience.
    state_template_str = params[p.STATE_VALUE_TEMPLATE]
    if state_template_str is not None:
        template = Template(
            "{{ " + state_template_str + " }}", u.get_hass(listener)
        )
        val = template.async_render(value=val, attr_val=val)

    # Write value to provided state or state attribute
    if params[p.STATE_ID] is None:
        raise ValueError("'state_id' is required")

    if params[p.STATE_ATTR] is not None:
        state_field = f"{params[p.STATE_ID]}[{params[p.STATE_ATTR]}]"
    else:
        state_field = f"{params[p.STATE_ID]}"

    LOGGER.debug("Set state '%s' -> %s", state_field, val)
    u.set_state(
        u.get_hass(listener),
        params[p.STATE_ID],
        val,
        key=params[p.STATE_ATTR],
        allow_create=params[p.ALLOW_CREATE],
    )

    event_data["success"] = success

    if success and (params[p.CSV_FILE] is not None):
        # CSV row: timestamp, state field, value, user label.
        fields = [
            dt_util.utcnow().isoformat(),
            state_field,
            val,
            params[p.CSV_LABEL],
        ]

        u.append_to_csvfile(
            fields,
            "csv",
            params[p.CSV_FILE],
            f"{state_field}={val}",
            listener=listener,
        )
        # Lazy %-formatting instead of an f-string in the log call.
        LOGGER.debug(
            "ha_set_state info Written to CSV %s", params[p.CSV_FILE]
        )

    # Fix: only values that are NOT JSON serializable need conversion
    # to repr() (the original condition was inverted, which mangled
    # perfectly serializable values and left unserializable ones as-is;
    # matches the convention used by other commands in this component).
    if not u.isJsonable(val):
        val = repr(val)

    # For internal use
    return success
blueprint: 3 | domain: script 4 | name: Danfoss Ally TRV configuration 5 | description: 6 | "IMPORTANT NOTE: This blueprint is provided as an example. In practice\ 7 | \ having a blueprint for a script does not seem of much use, if you think it is\ 8 | \ useful discuss about it in a github discussion or issue!\nUse the script in\ 9 | \ the example directory instead\nA script that configures the reporting of a Danfoss\ 10 | \ Ally TRV. zigbee thermometer. You can listen on the 'zha_done' event to see\ 11 | \ some of the configuration results. Sets report configuration and enables window\ 12 | \ open function." 13 | source_url: https://github.com/mdeweerd/zha-toolkit/blob/master/blueprints/blueprint_danfoss_ally_configure_script.yaml 14 | input: 15 | device_ref: 16 | name: Ally TRV Device 17 | description: A Danfoss Ally Thermostatic Regulation Valve (TRV) to configure 18 | selector: 19 | device: 20 | manufacturer: Danfoss 21 | entity: 22 | domain: climate 23 | integration: zha 24 | variables: 25 | device: !input device_ref 26 | ieee: "{{(device_attr(device, 'identifiers')|list)[0][1]}}" 27 | sequence: 28 | - alias: Configure reporting of local_temperature in Thermostat cluster 29 | service: zha_toolkit.conf_report 30 | data: 31 | ieee: "{{ ieee }}" 32 | cluster: 0x0201 33 | attribute: 0 34 | tries: 100 35 | event_done: zha_done 36 | reportable_change: 20 37 | max_interval: 300 38 | min_interval: 19 39 | - alias: Read back reporting configuration, for debugging 40 | service: zha_toolkit.conf_report_read 41 | data: 42 | ieee: "{{ ieee }}" 43 | cluster: 0x0201 44 | attribute: 0 45 | tries: 100 46 | event_done: zha_done 47 | - alias: Enable close window functionality 48 | service: zha_toolkit.attr_write 49 | data: 50 | ieee: "{{ ieee }}" 51 | cluster: 513 52 | attribute: 16387 53 | attr_val: 0 54 | manf: 4678 55 | mode: restart 56 | icon: mdi:thermometer-check 57 | description: >- 58 | This script configures the selected Danfoss Ally TRV. 
59 | Report temperature at least every 5 minutes or every 0.2°C whichever occurs first. 60 | Enable the window open detection setting. 61 | -------------------------------------------------------------------------------- /.github/scripts/update_hacs_manifest.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python3 2 | # 3 | # Takes --version X.Y.Z or -V X.Y.Z option and sets version in manifest.json. 4 | # Must be launched from the root of the repository. 5 | # 6 | # Modified from : https://raw.githubusercontent.com/bramstroker/homeassistant-zha-toolkit/master/.github/scripts/update_hacs_manifest.py # noqa: E501 7 | # 8 | # MIT License 9 | # 10 | # Copyright (c) 2021 Bram Gerritsen 11 | # Copyright (c) 2022 Mario DE WEERD 12 | # 13 | # Permission is hereby granted, free of charge, to any person obtaining a copy 14 | # of this software and associated documentation files (the "Software"), to deal 15 | # in the Software without restriction, including without limitation the rights 16 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 17 | # copies of the Software, and to permit persons to whom the Software is 18 | # furnished to do so, subject to the following conditions: 19 | # 20 | # The above copyright notice and this permission notice shall be included in 21 | # all copies or substantial portions of the Software. 22 | # 23 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 26 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 27 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 28 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 29 | # SOFTWARE. 
"""Update the manifest file."""
import json
import os
import sys


def update_manifest():
    """Set the version in custom_components/zha_toolkit/manifest.json.

    The version is taken from the ``--version`` / ``-V`` command line
    option (default ``"0.0.0"`` when absent).  Must be launched from
    the root of the repository.
    """
    version = "0.0.0"
    args = sys.argv
    for index, value in enumerate(args):
        if value in ("--version", "-V"):
            # Guard against the flag being the last argument, which
            # previously raised an uncaught IndexError.
            if index + 1 >= len(args):
                sys.exit(f"Missing version value after {value}")
            version = args[index + 1]

    # Single source of truth for the manifest location.
    manifest_path = os.path.join(
        os.getcwd(), "custom_components", "zha_toolkit", "manifest.json"
    )

    with open(manifest_path, encoding="utf_8") as manifestfile:
        manifest = json.load(manifestfile)

    manifest["version"] = version

    with open(manifest_path, "w", encoding="utf_8") as manifestfile:
        manifestfile.write(json.dumps(manifest, indent=4, sort_keys=True))


if __name__ == "__main__":
    # Entry-point guard: allows importing the module (e.g. for tests)
    # without side effects; behavior when run as a CI script unchanged.
    update_manifest()
12 | # 13 | name: CodeQL 14 | 15 | on: 16 | push: 17 | branches: [dev] 18 | pull_request: 19 | # The branches below must be a subset of the branches above 20 | branches: [dev] 21 | schedule: 22 | - cron: 15 8 * * 6 23 | 24 | jobs: 25 | analyze: 26 | name: Analyze 27 | runs-on: ubuntu-latest 28 | permissions: 29 | actions: read 30 | contents: read 31 | security-events: write 32 | 33 | strategy: 34 | fail-fast: false 35 | matrix: 36 | language: [python] 37 | # CodeQL supports 38 | # ['cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby'] 39 | # Learn more about CodeQL language support at 40 | # https://git.io/codeql-language-support 41 | 42 | steps: 43 | - name: Checkout repository 44 | uses: actions/checkout@v4 45 | 46 | # Initializes the CodeQL tools for scanning. 47 | - name: Initialize CodeQL 48 | uses: github/codeql-action/init@v1 49 | with: 50 | languages: ${{ matrix.language }} 51 | # If you wish to specify custom queries, you can do so here 52 | # or in a config file. 53 | # By default, queries listed here will override any specified 54 | # in a config file. 55 | # Prefix the list here with "+" to use these queries and those 56 | # in the config file. 57 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 58 | 59 | # Autobuild attempts to build any compiled languages 60 | # (C/C++, C#, or Java). 61 | # If this step fails, then you should remove it and run the build 62 | # manually (see below) 63 | - name: Autobuild 64 | uses: github/codeql-action/autobuild@v1 65 | 66 | # ℹ️ Command-line programs to run using the OS shell. 
67 | # 📚 https://git.io/JvXDl 68 | 69 | # ✏️ If the Autobuild fails above, remove it and uncomment the 70 | # following three lines and modify them (or add more) to build your 71 | # code if your project uses a compiled language 72 | 73 | # - run: | 74 | # make bootstrap 75 | # make release 76 | 77 | - name: Perform CodeQL Analysis 78 | uses: github/codeql-action/analyze@v1 79 | -------------------------------------------------------------------------------- /Contributing.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Feel free to contribute to 4 | [zha-toolkit](https://github.com/mdeweerd/zha-toolkit). 5 | 6 | You can contribute with regards to the documentations, examples, 7 | blueprints, and code. 8 | 9 | ## Documentation 10 | 11 | Not all commands are documented yet, and the existing documentation can be 12 | improved. 13 | 14 | The undocumented commands are mostly commands that were in 15 | [zha_custom](https://github.com/Adminiuga/zha_custom). 16 | 17 | Ideally you install `pre-commit` (See below) 18 | 19 | ## Coding 20 | 21 | Because most of the code is reloaded on each call, you do not have to 22 | restart Home Assistant on each change. That's fairly practical to adjust 23 | existing functionality and add new ones. 24 | 25 | ## Adding commands 26 | 27 | A new command results in several updates to define it: 28 | 29 | - The main handler function.\ 30 | The ideal is to name it `_`. 31 | 32 | The next steps are not required to get started, you can do it once you're 33 | happy with the functionality of your new command. They are required to 34 | properly define the new command as a HA service command: 35 | 36 | - In `params.py`: Add the handler name as a constant. 37 | - In `__init__.py`: 38 | - `SERVICE_SCHEMAS`: Add definitions of mandatory and optional 39 | parameters. 40 | - `CMD_TO_INTERNAL_MAP`: Add a mapping if the method name is not like 41 | `_`. 
42 | - In `services.yaml`: 43 | - Add a new entry (alphabetically located) to define the UI fields for 44 | service calls. 45 | 46 | You can check that these updates are correct by calling the service 47 | `zha_toolkit.register_services` which will reload `services.yaml` and 48 | `SERVICE_SCHEMAS` to add/redefine zha-toolkit services. 49 | 50 | ### Handler method definition: 51 | 52 | The example below shows all the parameters you need to define for a new 53 | handler method. 54 | 55 | This example is located in `hello.py`. Therefore, the start of the function 56 | name (`hello`) matches the module name. 57 | 58 | ```python 59 | async def hello_world(app, listener, ieee, cmd, data, service, params, event_data): 60 | pass 61 | ``` 62 | 63 | Because of the naming, it is immediately available using the 64 | `zha_toolkit.execute` service: 65 | 66 | ```yaml 67 | service: zha_toolkit.execute 68 | data: 69 | command: hello_world 70 | param1: content1 71 | param2: content2 72 | ``` 73 | 74 | Once you made the required steps to add the command as a service itself, 75 | you can call it as: 76 | 77 | ```yaml 78 | service: zha_toolkit.hello_world 79 | data: 80 | param1: content1 81 | param2: content2 82 | ``` 83 | 84 | ### `pre-commit` 85 | 86 | `pre-commit` is a tool that helps execute a set of other tools prior to git 87 | activity. 88 | 89 | The repository is set up to format the files you're about to submit, warn 90 | about potential errors, preventing from checking in to the main branch. 91 | 92 | To do so, you need to set up `pre-commit` which is easy in itself. 93 | `pre-commit` will setup the other tools. 94 | 95 | Setting up is as simple as: 96 | 97 | - `pip install pre-commit` 98 | - `pre-commit install` from the base of your repository clone. 99 | 100 | That will run automatic corrections and verifications on the code that you 101 | are committing. 
async def user_test(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Minimal example command for a 'local/user.py' script.

    Invoke it as a service:

    ```yaml
    service: zha_toolkit.execute
    data:
      command: user_test
    ```

    Only emits a debug log line to show that the hook is wired up.
    """
    LOGGER.debug("User test called")
async def user_tuya_magic(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Send the Tuya 'magic spell' sequence to a device.

    Reading this specific set of Basic-cluster attributes nudges many
    Tuya devices into their 'normal' reporting behavior.
    """
    device = app.get_device(ieee)
    # The spell targets the Basic cluster (index 0) on endpoint 1.
    spell_cluster = device.endpoints[1].in_clusters[0]

    # The magic spell is needed only once.
    # TODO: Improve by doing this only once (successfully).

    # Magic spell - part 1: read these attributes in one request.
    spell_attr_ids = [4, 0, 1, 5, 7, 0xFFFE]
    event_data["result"] = await u.cluster_read_attributes(
        spell_cluster, spell_attr_ids, tries=params[p.TRIES]
    )

    # Magic spell - part 2 (skipped - does not seem to be needed)
    # attr_to_write={0xffde:13}
    # basic_cluster.write_attributes(attr_to_write, tries=3)
31 | default: 5 32 | selector: 33 | number: 34 | max: 360 35 | min: 1 36 | unit_of_measurement: minutes 37 | mode: box 38 | max_update_minutes: 39 | name: Maximum update interval 40 | description: > 41 | Updates must be sent at least every 30 minutes for covered radiators, 42 | and 3 hours for uncovered radiators. 43 | Set to 30 min or 150 min. 44 | default: 150 45 | selector: 46 | number: 47 | max: 180 48 | min: 1 49 | unit_of_measurement: minutes 50 | mode: box 51 | temperature_offset: 52 | name: Temperature offset to apply to temperature measured by sensor 53 | description: > 54 | When the offset is -1.5 and the value measured by the sensor is 20 °C, then 55 | the temperature provide to the TRV will be 18.5 °C. 56 | default: 0 57 | selector: 58 | number: 59 | max: 4.0 60 | min: -4.0 61 | step: 0.1 62 | unit_of_measurement: °C 63 | mode: box 64 | variables: 65 | device: !input ally_device 66 | ieee: "{{(device_attr(device, 'identifiers')|list)[0][1]}}" 67 | min_update_minutes: !input min_update_minutes 68 | temp_sensor_id: !input temp_sensor_id 69 | temp_offset: !input temperature_offset 70 | trigger: 71 | - platform: state 72 | entity_id: 73 | - !input temp_sensor_id 74 | - platform: homeassistant 75 | event: start 76 | condition: 77 | - condition: template 78 | value_template: > 79 | {{ as_timestamp(now()) - as_timestamp(state_attr(this.entity_id,'last_triggered'),0)|int 80 | >= (60 * min_update_minutes) }} 81 | action: 82 | - alias: Repeat until restarted to report temperature, or expired max_update delay 83 | repeat: 84 | while: "{{ 1 == 1 }}" 85 | sequence: 86 | - alias: Write remote temperature to Danfoss Ally 87 | service: zha_toolkit.attr_write 88 | data: 89 | ieee: "{{ ieee }}" 90 | cluster: 0x0201 91 | attribute: 0x4015 92 | attr_val: "{{ (((states(temp_sensor_id)|float) + temp_offset) * 100) | round(0) }}" 93 | manf: 4678 94 | - alias: 95 | Wait until the maximum update delay expires (automation restarts 96 | when temperature changes before) 97 | 
async def _backup(ezsp):
    """Collect a "zigpy/open-coordinator-backup" (version 1) dict.

    Reads the network parameters, network key and the address/key
    tables from an EZSP (EmberZNet) coordinator and returns them as a
    JSON-serializable dict.

    NOTE(review): status checking relies on ``assert`` statements,
    which are stripped under ``python -O`` — do not run optimized.
    """
    # Network init is assumed to have been done by the running stack.
    # (status,) = await ezsp.networkInit()
    # assert status == t.EmberStatus.SUCCESS

    (status, node_type, network) = await ezsp.getNetworkParameters()
    assert status == t.EmberStatus.SUCCESS
    # Only a coordinator can be backed up with this procedure.
    assert node_type == ezsp.types.EmberNodeType.COORDINATOR

    (ieee,) = await ezsp.getEui64()

    (status, nwk_key) = await ezsp.getKey(
        ezsp.types.EmberKeyType.CURRENT_NETWORK_KEY
    )
    assert status == t.EmberStatus.SUCCESS

    (status, security_level) = await ezsp.getConfigurationValue(
        ezsp.types.EzspConfigId.CONFIG_SECURITY_LEVEL
    )
    assert status == t.EmberStatus.SUCCESS

    # Trust center link key is read but not included in the result.
    (status, _tclk) = await ezsp.getKey(
        ezsp.types.EmberKeyType.TRUST_CENTER_LINK_KEY
    )
    assert status == t.EmberStatus.SUCCESS

    # EUI64 -> NWK address mapping from the adapter's address table.
    addresses = {}

    # NOTE(review): assumes the address table holds at most 256
    # entries — confirm against the adapter's configured table size.
    for idx in range(0, 255 + 1):
        (nwk,) = await ezsp.getAddressTableRemoteNodeId(idx)
        (eui64,) = await ezsp.getAddressTableRemoteEui64(idx)

        # Sentinel NWK values mark unusable table slots.
        if nwk == EMBER_TABLE_ENTRY_UNUSED_NODE_ID:
            continue
        if nwk == EMBER_UNKNOWN_NODE_ID:
            LOGGER.warning("NWK address for %s is unknown!", eui64)
            continue
        if nwk == EMBER_DISCOVERY_ACTIVE_NODE_ID:
            LOGGER.warning(
                "NWK address discovery for %s is currently ongoing", eui64
            )
            continue

        LOGGER.debug("NWK for %s is %s", eui64, nwk)
        addresses[eui64] = nwk

    # Partner EUI64 -> link key struct from the key table.
    keys = {}

    for idx in range(0, 192):
        (status, key_struct) = await ezsp.getKeyTableEntry(idx)
        LOGGER.debug(
            "Got key at index %s status: %s key_struct: %s",
            idx,
            status,
            key_struct,
        )

        if status == t.EmberStatus.SUCCESS:
            keys[key_struct.partnerEUI64] = key_struct
        elif status == t.EmberStatus.INDEX_OUT_OF_RANGE:
            # Past the end of the table: stop scanning.
            break
        # Other statuses (e.g. erased entries) are skipped silently.

    now = datetime.datetime.now().astimezone()
    result = {
        "metadata": {
            "version": 1,
            "format": "zigpy/open-coordinator-backup",
            "source": f"bellows@{bellows.__version__}",
            "internal": {
                "creation_time": now.isoformat(timespec="seconds"),
            },
        },
        # serialize() byte order is reversed ([::-1]) to produce the
        # textual hex representation expected by the backup format.
        "coordinator_ieee": ieee.serialize()[::-1].hex(),
        "pan_id": network.panId.serialize()[::-1].hex(),
        "extended_pan_id": network.extendedPanId.serialize()[::-1].hex(),
        "nwk_update_id": network.nwkUpdateId,
        "security_level": security_level,
        "channel": network.radioChannel,
        "channel_mask": list(network.channels),
        "network_key": {
            "key": nwk_key.key.serialize().hex(),
            "sequence_number": nwk_key.sequenceNumber,
            "frame_counter": nwk_key.outgoingFrameCounter,
        },
        # Only devices present in BOTH the key table and the address
        # table are exported.  The comprehension's 'ieee' shadows the
        # coordinator 'ieee' above, but only within this scope.
        "devices": [
            {
                "ieee_address": ieee.serialize()[::-1].hex(),
                "link_key": {
                    "key": key.key.serialize().hex(),
                    "rx_counter": key.incomingFrameCounter,
                    "tx_counter": key.outgoingFrameCounter,
                },
                "nwk_address": addresses[ieee].serialize()[::-1].hex(),
            }
            for ieee, key in keys.items()
            if ieee in addresses
        ],
    }
    return result
async def zha_devices(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Dump information about (all) ZHA devices.

    Device info dicts are always returned in ``event_data["devices"]``.
    When ``data`` is a list of field names, only those fields are kept;
    when ``csvout`` is set, the selected fields are also appended to a
    CSV file (header row first).  When ``ieee`` is given, only the
    matching device is reported.
    """
    doGenerateCSV = params[p.CSV_FILE] is not None

    # Determine fields to render.
    # If the user provides a list, it is also used to
    # limit the contents of "devices" in the event_data.
    if data is not None and isinstance(data, list):
        selectDeviceFields = True
        columns = data
    else:
        selectDeviceFields = False
        columns = [
            "ieee",
            "nwk",
            "manufacturer",
            "model",
            "name",
            "quirk_applied",
            "quirk_class",
            "manufacturer_code",
            "power_source",
            "lqi",
            "rssi",
            "last_seen",
            "available",
            "device_type",
            "user_given_name",
            "device_reg_id",
            "area_id",
        ]
        # TODO: Skipped in columns, needs special handling
        #       'signature'
        #       'endpoints'

    devices = [device.zha_device_info for device in listener.devices.values()]

    if ieee is not None:
        ieee = str(ieee)
        # Select only the device with the given address
        devices = [d for d in devices if str(d["ieee"]) == ieee]

    # Set default value for 'devices' in event_data,
    # may be slimmed down below.  Ensures that devices is set in case
    # an exception occurs.
    event_data["devices"] = devices
    event_data["selectDeviceFields"] = selectDeviceFields

    if params[p.CSV_LABEL] is not None and isinstance(
        params[p.CSV_LABEL], str
    ):

        def _sort_key(item):
            # Sort None values last; compare strings case-insensitively
            # (keeps int columns comparable among themselves).
            value = item[params[p.CSV_LABEL]]
            return (
                value is None,
                str.lower(value) if isinstance(value, str) else value,
            )

        try:
            devices = sorted(devices, key=_sort_key)
        except Exception:  # nosec - sorting is best-effort only
            pass

    if doGenerateCSV or selectDeviceFields:
        if doGenerateCSV:
            # Write CSV header
            u.append_to_csvfile(
                columns,
                "csv",
                params[p.CSV_FILE],
                "device_dump['HEADER']",
                listener=listener,
                overwrite=True,
            )

        slimmedDevices: list[Any] = []
        for d in devices:
            # Fields for CSV
            csvFields: list[int | str | None] = []
            # Fields for slimmed devices dict (raw, unformatted values)
            rawFields: dict[str, Any] = {}

            for c in columns:
                if c not in d:
                    csvFields.append(None)
                else:
                    val = d[c]
                    rawFields[c] = val
                    if c in ("manufacturer", "nwk") and isinstance(val, int):
                        # Render numeric ids as 0x-prefixed hex in CSV.
                        val = f"0x{val:04X}"

                    # Bug fix: append the (possibly hex-formatted) 'val';
                    # the original appended raw 'd[c]', discarding the
                    # formatting computed just above.
                    csvFields.append(val)

            slimmedDevices.append(rawFields)

            if doGenerateCSV:
                LOGGER.debug("Device %r", csvFields)
                u.append_to_csvfile(
                    csvFields,
                    "csv",
                    params[p.CSV_FILE],
                    f"device_dump[{d['ieee']}]",
                    listener=listener,
                )

        if selectDeviceFields:
            event_data["devices"] = slimmedDevices
and 4 | remounting the valve. It then tries to initiate an adaptation run 5 | immediately. 6 | sequence: 7 | - variables: 8 | ieee: "{{(device_attr(device, 'identifiers')|list)[0][1]}}" 9 | csv: danfoss_adaptation_run.csv 10 | default_tries: 3 11 | - alias: Set the valve in mounting mode 12 | service: zha_toolkit.attr_write 13 | data: 14 | ieee: "{{ ieee }}" 15 | cluster: 513 16 | attribute: 16403 17 | attr_val: 1 18 | manf: 4678 19 | read_before_write: false 20 | csvout: "{{ csv }}" 21 | tries: "{{ default_tries}}" 22 | event_done: zha_done 23 | - alias: Wait until the device is in mounting mode (short press) 24 | repeat: 25 | until: 26 | - condition: template 27 | value_template: "{{ is_state_attr('var.allyscript', device + 'mounting', 0) }}" 28 | sequence: 29 | - delay: 30 | hours: 0 31 | minutes: 0 32 | seconds: 2 33 | milliseconds: 0 34 | alias: Wait between successive reads 35 | - alias: Read mount status (should be false) 36 | service: zha_toolkit.attr_read 37 | data: 38 | ieee: "{{ ieee }}" 39 | cluster: 513 40 | attribute: 16402 41 | manf: 4678 42 | csvout: "{{ csv }}" 43 | tries: "{{ default_tries}}" 44 | event_done: zha_done 45 | state_id: var.allyscript 46 | state_attr: "{{ device + 'mounting' }}" 47 | allow_create: true 48 | - alias: Wait until the user mounts the device 49 | repeat: 50 | until: 51 | - condition: template 52 | value_template: "{{ is_state_attr('var.allyscript', device + 'mounting', 0) }}" 53 | sequence: 54 | - delay: 55 | hours: 0 56 | minutes: 0 57 | seconds: 2 58 | milliseconds: 0 59 | alias: Wait between successive reads 60 | - alias: Read mount status (should be false) 61 | service: zha_toolkit.attr_read 62 | data: 63 | ieee: "{{ ieee }}" 64 | cluster: 513 65 | attribute: 16402 66 | manf: 4678 67 | csvout: "{{ csv }}" 68 | tries: "{{ default_tries}}" 69 | event_done: zha_done 70 | state_id: var.allyscript 71 | state_attr: "{{ device + 'mounting' }}" 72 | allow_create: true 73 | - alias: Read the adaptation status (should not be 2) 
74 | service: zha_toolkit.attr_read 75 | data: 76 | ieee: "{{ ieee }}" 77 | cluster: 513 78 | attribute: 16461 79 | manf: 4678 80 | tries: "{{ default_tries}}" 81 | csvout: "{{ csv }}" 82 | event_done: zha_done 83 | - alias: Set Adaptation Run control to automatic 84 | service: zha_toolkit.attr_write 85 | data: 86 | ieee: "{{ ieee }}" 87 | cluster: 513 88 | attribute: 16460 89 | attr_val: 1 90 | manf: 4678 91 | tries: "{{ default_tries}}" 92 | csvout: "{{ csv }}" 93 | event_done: zha_done 94 | - alias: Initiate Adaptation Run 95 | service: zha_toolkit.attr_write 96 | data: 97 | ieee: "{{ ieee }}" 98 | cluster: 513 99 | attribute: 16460 100 | attr_val: 1 101 | manf: 4678 102 | read_before_write: false 103 | tries: "{{ default_tries}}" 104 | csvout: "{{ csv }}" 105 | event_done: zha_done 106 | - alias: Wait a bit 107 | delay: 108 | hours: 0 109 | minutes: 0 110 | seconds: 10 111 | milliseconds: 0 112 | - alias: Read the adaptation status (Expected to be 1, but not observed as such). 113 | service: zha_toolkit.attr_read 114 | data: 115 | ieee: "{{ ieee }}" 116 | cluster: 513 117 | attribute: 16461 118 | manf: 4678 119 | tries: "{{ default_tries}}" 120 | csvout: "{{ csv }}" 121 | event_done: zha_done 122 | mode: restart 123 | fields: 124 | device: 125 | name: Ally TRV Device 126 | description: A Danfoss Ally Thermostatic Regulation Valve (TRV) to configure 127 | required: true 128 | default: 7d16a871a8caa808d80e23f5d92ca65d 129 | selector: 130 | device: 131 | manufacturer: Danfoss 132 | entity: 133 | domain: climate 134 | integration: zha 135 | -------------------------------------------------------------------------------- /blueprints/danfoss_ally_remote_temperature_min_delay.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | blueprint: 3 | domain: automation 4 | name: Ally Temp Update Min Delay 5 | description: Update Danfoss Ally TRV external temperature with min refresh rate 6 | source_url: 
https://github.com/mdeweerd/zha-toolkit/blob/master/blueprints/danfoss_ally_remote_temperature_min_delay.yaml 7 | input: 8 | ally_device: 9 | name: Ally TRV Device 10 | description: Temperature reading will be sent to this device 11 | selector: 12 | device: 13 | manufacturer: Danfoss 14 | entity: 15 | domain: climate 16 | temp_sensor_id: 17 | name: Temperature Sensor 18 | description: 19 | External sensor from which the temperature will be read. Expects 20 | data format 12.3 (corresponding to °C) 21 | selector: 22 | entity: 23 | device_class: temperature 24 | min_update_minutes: 25 | name: Minimum update interval 26 | description: > 27 | Updates will not be sent if time from last update is less than minimum interval. 28 | Normally 30 min for uncovered, 5 min for covered. 29 | default: 30 30 | selector: 31 | number: 32 | max: 299 33 | min: 1 34 | unit_of_measurement: minutes 35 | mode: box 36 | temperature_offset: 37 | name: Temperature offset to apply to temperature measured by sensor 38 | description: > 39 | When the offset is -1.5 and the value measured by the sensor is 20 °C, then 40 | the temperature provide to the TRV will be 18.5 °C. 
41 | default: 0 42 | selector: 43 | number: 44 | max: 4.0 45 | min: -4.0 46 | step: 0.1 47 | unit_of_measurement: °C 48 | mode: box 49 | variables: 50 | device: !input ally_device 51 | ieee: "{{(device_attr(device, 'identifiers')|list)[0][1]}}" 52 | min_update_minutes: !input min_update_minutes 53 | temp_sensor_id: !input temp_sensor_id 54 | temp_offset: !input temperature_offset 55 | temperature: "{{ states(temp_sensor_id) }}" 56 | trigger: 57 | - platform: state 58 | entity_id: 59 | - !input temp_sensor_id 60 | - platform: event 61 | event_type: homeassistant_start 62 | id: ha_restart 63 | condition: 64 | - condition: template 65 | value_template: "{{ temperature != -32768 }}" 66 | action: 67 | - alias: Store ZHA reported temperature in state attribute 68 | service: zha_toolkit.ha_set_state 69 | data: 70 | state_id: "{{ temp_sensor_id }}" 71 | state_attr: best_val 72 | attr_val: "{{ (temperature|round(2)) }}" 73 | - alias: 74 | Try to get more precise temperature (should work if zigbee temperature 75 | sensor) 76 | service: zha_toolkit.attr_read 77 | data: 78 | ieee: "{{ temp_sensor_id }}" 79 | use_cache: true 80 | cluster: 1026 81 | attribute: 0 82 | state_id: "{{ temp_sensor_id }}" 83 | state_attr: best_val 84 | state_value_template: value/100 85 | - alias: 86 | Fake small change in temperature so that the next sensor update triggers 87 | an update/change event in case the write fails 88 | service: zha_toolkit.ha_set_state 89 | data: 90 | state_id: "{{ temp_sensor_id }}" 91 | attr_val: "{{ (temperature|round(2)) - 0.001 }}" 92 | - alias: Write remote temperature to Danfoss Ally 93 | service: zha_toolkit.attr_write 94 | data: 95 | ieee: "{{ ieee }}" 96 | cluster: 0x0201 97 | attribute: 0x4015 98 | manf: 0x1246 99 | attr_val: 100 | '{{ (((state_attr(temp_sensor_id, "best_val")|float) + temp_offset) 101 | * 100) | round(0) }}' 102 | read_before_write: false 103 | write_if_equal: true 104 | fail_exception: true 105 | tries: 3 106 | - alias: 107 | Wait until the 
minimum update delay expires (the automation blocks itself 108 | because it is in single mode) 109 | delay: 110 | minutes: !input min_update_minutes 111 | - alias: 112 | Set slightly changed temperature if it is valid to force update. Otherwise, 113 | a valid temperature will trigger anyway. 114 | if: 115 | - condition: not 116 | conditions: 117 | - condition: state 118 | entity_id: !input temp_sensor_id 119 | state: "-32768" 120 | then: 121 | - alias: 122 | Fake small change in temperature so that the next sensor update triggers 123 | an update/change event 124 | service: zha_toolkit.ha_set_state 125 | data: 126 | state_id: "{{ temp_sensor_id }}" 127 | attr_val: "{{ (states(temp_sensor_id)|round(2)) + 0.001 }}" 128 | mode: single 129 | max_exceeded: silent 130 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/params.py: -------------------------------------------------------------------------------- 1 | # Constants related to parameters 2 | 3 | 4 | # Constants representing input parameter keys 5 | class USER_PARAMS_consts: # pylint: disable=too-few-public-methods 6 | __slots__ = () 7 | CMD = "cmd" 8 | ENDPOINT = "endpoint" 9 | DST_ENDPOINT = "dst_endpoint" 10 | CLUSTER = "cluster" 11 | ATTRIBUTE = "attribute" 12 | ATTR_TYPE = "attr_type" 13 | ATTR_VAL = "attr_val" 14 | CODE = "code" 15 | MIN_INTRVL = "min_interval" 16 | MAX_INTRVL = "max_interval" 17 | REPTBLE_CHG = "reportable_change" 18 | DIR = "dir" 19 | MANF = "manf" 20 | TRIES = "tries" 21 | EXPECT_REPLY = "expect_reply" 22 | ARGS = "args" 23 | STATE_ID = "state_id" 24 | STATE_ATTR = "state_attr" 25 | ALLOW_CREATE = "allow_create" 26 | EVENT_SUCCESS = "event_success" 27 | EVENT_FAIL = "event_fail" 28 | EVENT_DONE = "event_done" 29 | FORCE_UPDATE = "force_update" 30 | FAIL_EXCEPTION = "fail_exception" 31 | READ_BEFORE_WRITE = "read_before_write" 32 | READ_AFTER_WRITE = "read_after_write" 33 | STATE_VALUE_TEMPLATE = "state_value_template" 34 | 
WRITE_IF_EQUAL = "write_if_equal" 35 | OUTCSV = "csvout" 36 | CSVLABEL = "csvlabel" 37 | DOWNLOAD = "download" 38 | PATH = "path" 39 | USE_CACHE = "use_cache" 40 | 41 | 42 | class SERVICE_consts: # pylint: disable=too-few-public-methods 43 | __slots__ = () 44 | # General 45 | EXECUTE = "execute" 46 | # Specific 47 | ADD_GROUP = "add_group" 48 | ADD_TO_GROUP = "add_to_group" 49 | ALL_ROUTES_AND_NEIGHBOURS = "all_routes_and_neighbours" 50 | ATTR_READ = "attr_read" 51 | ATTR_WRITE = "attr_write" 52 | BACKUP = "backup" 53 | BIND_GROUP = "bind_group" 54 | BIND_IEEE = "bind_ieee" 55 | BINDS_GET = "binds_get" 56 | BINDS_REMOVE_ALL = "binds_remove_all" 57 | CONF_REPORT = "conf_report" 58 | CONF_REPORT_READ = "conf_report_read" 59 | EZSP_ADD_KEY = "ezsp_add_key" 60 | EZSP_BACKUP = "ezsp_backup" 61 | EZSP_CLEAR_KEYS = "ezsp_clear_keys" 62 | EZSP_GET_CONFIG_VALUE = "ezsp_get_config_value" 63 | EZSP_GET_IEEE_BY_NWK = "ezsp_get_ieee_by_nwk" 64 | EZSP_GET_KEYS = "ezsp_get_keys" 65 | EZSP_GET_POLICY = "ezsp_get_policy" 66 | EZSP_GET_TOKEN = "ezsp_get_token" # nosec 67 | EZSP_GET_VALUE = "ezsp_get_value" 68 | EZSP_SET_CHANNEL = "ezsp_set_channel" 69 | EZSP_START_MFG = "ezsp_start_mfg" 70 | GET_GROUPS = "get_groups" 71 | GET_ROUTES_AND_NEIGHBOURS = "get_routes_and_neighbours" 72 | GET_ZLL_GROUPS = "get_zll_groups" 73 | ZHA_DEVICES = "zha_devices" 74 | HANDLE_JOIN = "handle_join" 75 | HA_SET_STATE = "ha_set_state" 76 | IEEE_PING = "ieee_ping" 77 | LEAVE = "leave" 78 | MISC_REINITIALIZE = "misc_reinitialize" 79 | MISC_SETTIME = "misc_settime" 80 | OTA_NOTIFY = "ota_notify" 81 | REJOIN = "rejoin" 82 | REGISTER_SERVICES = "register_services" 83 | REMOVE_ALL_GROUPS = "remove_all_groups" 84 | REMOVE_FROM_GROUP = "remove_from_group" 85 | REMOVE_GROUP = "remove_group" 86 | SCAN_DEVICE = "scan_device" 87 | STATE_VALUE_TEMPLATE = "state_value_template" 88 | TUYA_MAGIC = "tuya_magic" 89 | UNBIND_COORDINATOR = "unbind_coordinator" 90 | UNBIND_GROUP = "unbind_group" 91 | ZCL_CMD = "zcl_cmd" 92 
| ZDO_FLOOD_PARENT_ANNCE = "zdo_flood_parent_annce" 93 | ZDO_JOIN_WITH_CODE = "zdo_join_with_code" 94 | ZDO_SCAN_NOW = "zdo_scan_now" 95 | ZDO_UPDATE_NWK_ID = "zdo_update_nwk_id" 96 | ZNP_BACKUP = "znp_backup" 97 | ZNP_NVRAM_BACKUP = "znp_nvram_backup" 98 | ZNP_NVRAM_RESET = "znp_nvram_reset" 99 | ZNP_NVRAM_RESTORE = "znp_nvram_restore" 100 | ZNP_RESTORE = "znp_restore" 101 | 102 | 103 | # Constants representing internal parameters keys 104 | class INTERNAL_PARAMS_consts: # pylint: disable=too-few-public-methods 105 | __slots__ = () 106 | ALLOW_CREATE = "allow_create" 107 | ARGS = "args" 108 | ATTR_ID = "attr_id" 109 | ATTR_TYPE = "attr_type" 110 | ATTR_VAL = "attr_val" 111 | CLUSTER_ID = "cluster_id" 112 | CMD_ID = "cmd_id" 113 | CODE = "code" 114 | DIR = "dir" 115 | EP_ID = "endpoint_id" 116 | DST_EP_ID = "dst_endpoint_id" 117 | EVT_DONE = "event_done" 118 | EVT_FAIL = "event_fail" 119 | EVT_SUCCESS = "event_success" 120 | EXPECT_REPLY = "expect_reply" 121 | FAIL_EXCEPTION = "fail_exception" 122 | FORCE_UPDATE = "force_update" 123 | MANF = "manf" 124 | MAX_INTERVAL = "max_interval" 125 | MIN_INTERVAL = "min_interval" 126 | READ_AFTER_WRITE = "read_after_write" 127 | READ_BEFORE_WRITE = "read_before_write" 128 | REPORTABLE_CHANGE = "reportable_change" 129 | STATE_ATTR = "state_attr" 130 | STATE_ID = "state_id" 131 | STATE_VALUE_TEMPLATE = "state_value_template" 132 | TRIES = "tries" 133 | WRITE_IF_EQUAL = "write_if_equal" 134 | CSV_FILE = "csvfile" 135 | CSV_LABEL = "csvlabel" 136 | DOWNLOAD = "download" 137 | PATH = "path" 138 | USE_CACHE = "use_cache" 139 | 140 | 141 | INTERNAL_PARAMS = INTERNAL_PARAMS_consts() 142 | USER_PARAMS = USER_PARAMS_consts() 143 | SERVICES = SERVICE_consts() 144 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/zdo.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | 4 | import zigpy.device 5 | 
import zigpy.types as t 6 | import zigpy.zdo 7 | import zigpy.zdo.types as zdo_t 8 | 9 | from . import utils as u 10 | from .params import INTERNAL_PARAMS as p 11 | 12 | LOGGER = logging.getLogger(__name__) 13 | 14 | 15 | def add_task_info(event_data, task): 16 | event_data["task"] = {"name": task.get_name(), "done": task.done()} 17 | 18 | 19 | async def leave(app, listener, ieee, cmd, data, service, params, event_data): 20 | if ieee is None or not data: 21 | raise ValueError("Need 'ieee' and command_data'") 22 | 23 | LOGGER.debug( 24 | "running 'leave' command. Telling 0x%s to remove %s: %s", 25 | data, 26 | ieee, 27 | service, 28 | ) 29 | 30 | parent = await u.get_device(app, listener, data) 31 | 32 | # Get tries 33 | tries = params[p.TRIES] 34 | 35 | res = await u.retry_wrapper( 36 | parent.zdo.request, 37 | zdo_t.ZDOCmd.Mgmt_Leave_req, 38 | ieee, 39 | 0x02, 40 | tries=tries, 41 | ) 42 | event_data["result_leave"] = res 43 | LOGGER.debug("0x%04x: Mgmt_Leave_req: %s", parent.nwk, res) 44 | 45 | 46 | async def ieee_ping( 47 | app, listener, ieee, cmd, data, service, params, event_data 48 | ): 49 | if ieee is None: 50 | LOGGER.warning( 51 | "Incorrect parameters for 'ieee_ping' command: %s", service 52 | ) 53 | return 54 | 55 | # The device is the parent device 56 | dev = app.get_device(ieee) 57 | 58 | # Get tries 59 | tries = params[p.TRIES] 60 | 61 | LOGGER.debug("running 'ieee_ping' command to 0x%s", dev.nwk) 62 | 63 | res = await u.retry_wrapper( 64 | dev.zdo.request, 65 | zdo_t.ZDOCmd.IEEE_addr_req, 66 | dev.nwk, # nwk_addr_of_interest 67 | 0x00, # request_type (0=single device response) 68 | 0x00, # Start index 69 | tries=tries, 70 | ) 71 | event_data["result_ping"] = res 72 | LOGGER.debug("0x%04x: IEEE_addr_req: %s", dev.nwk, res) 73 | 74 | 75 | async def zdo_join_with_code( 76 | app, listener, ieee, cmd, data, service, params, event_data 77 | ): 78 | import bellows.types as bt 79 | 80 | node = ieee # Was: t.EUI64.convert("04:cf:8c:df:3c:75:e1:e7") 81 | 82 | 
# Original code: 83 | # 84 | # code = ( 85 | # b"\xA8\x16\x92\x7F\xB1\x9B\x78\x55\xC1" 86 | # + b"\xD7\x76\x0D\x5C\xAD\x63\x7F\x69\xCC" 87 | # ) 88 | code = params[p.CODE] 89 | # Note: Router is awake, there is no need for "tries" 90 | res = await app.permit_with_key(node, code, 60) 91 | link_key = bt.EmberKeyData(b"ZigBeeAlliance09") 92 | res = await app._ezsp.addTransientLinkKey(node, link_key) 93 | LOGGER.debug("permit with key: %s", res) 94 | res = await app.permit(60) 95 | 96 | 97 | async def zdo_update_nwk_id( 98 | app, listener, ieee, cmd, data, service, params, event_data 99 | ): 100 | """Update NWK id. data contains new NWK id.""" 101 | if data is None: 102 | LOGGER.error("Need NWK update id in the data") 103 | return 104 | 105 | nwk_upd_id = t.uint8_t(data) 106 | 107 | await zigpy.device.broadcast( 108 | app, 109 | 0, 110 | zdo_t.ZDOCmd.Mgmt_NWK_Update_req, 111 | 0, 112 | 0, 113 | 0x0000, 114 | 0x00, 115 | 0xEE, 116 | b"\xee" 117 | + t.Channels.ALL_CHANNELS.serialize() 118 | + b"\xFF" 119 | + nwk_upd_id.serialize() 120 | + b"\x00\x00", 121 | ) 122 | 123 | res = await app._ezsp.getNetworkParameters() 124 | event_data["result_update"] = res 125 | LOGGER.debug("Network params: %s", res) 126 | 127 | 128 | async def zdo_scan_now( 129 | app, listener, ieee, cmd, data, service, params, event_data 130 | ): 131 | """Scan topology""" 132 | 133 | LOGGER.debug("Scanning topology") 134 | task = asyncio.create_task(app.topology.scan()) 135 | add_task_info(event_data, task) 136 | 137 | 138 | async def zdo_flood_parent_annce( 139 | app, listener, ieee, cmd, data, service, params, event_data 140 | ): 141 | LOGGER.debug("flooding network with parent annce") 142 | 143 | flooder_task = getattr(app, "flooder_task", None) 144 | if flooder_task and not flooder_task.done(): 145 | flooder_task.cancel() 146 | LOGGER.debug("Stop flooding network with parent annce messages") 147 | app.flooder_task = None 148 | event_data["task"] = None 149 | return 150 | 151 | flooder_task = 
asyncio.create_task(_flood_with_parent_annce(app)) 152 | add_task_info(event_data, flooder_task) 153 | app.flooder_task = flooder_task 154 | 155 | 156 | async def _flood_with_parent_annce(app): 157 | coord = app.get_device(app.ieee) 158 | 159 | while True: 160 | children = [ 161 | nei.device.ieee 162 | for nei in coord.neighbors 163 | if nei.device.node_desc.is_end_device 164 | ] 165 | coord.debug("Have the following children: %s", children) 166 | await zigpy.zdo.broadcast( 167 | app, 168 | zigpy.zdo.types.ZDOCmd.Parent_annce, 169 | 0x0000, 170 | 0x00, 171 | children, 172 | broadcast_address=t.BroadcastAddress.ALL_ROUTERS_AND_COORDINATOR, 173 | ) 174 | await asyncio.sleep(0.1) 175 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | files: ^(.*\.(py|json|md|sh|yaml|cfg|txt))$ 3 | exclude: ^(\.[^/]*cache/.*|.*/_user.py)$ 4 | repos: 5 | - repo: https://github.com/verhovsky/pyupgrade-docs 6 | rev: v0.3.0 7 | hooks: 8 | - id: pyupgrade-docs 9 | - repo: https://github.com/executablebooks/mdformat 10 | # Do this before other tools "fixing" the line endings 11 | rev: 0.7.17 12 | hooks: 13 | - id: mdformat 14 | name: Format Markdown 15 | entry: mdformat # Executable to run, with fixed options 16 | language: python 17 | types: [markdown] 18 | args: [--wrap, '75', --number] 19 | additional_dependencies: 20 | - mdformat-toc 21 | - mdformat-beautysh 22 | # -mdformat-shfmt 23 | # -mdformat-tables 24 | - mdformat-config 25 | - mdformat-black 26 | - mdformat-web 27 | - mdformat-gfm 28 | - repo: https://github.com/asottile/blacken-docs 29 | rev: 1.16.0 30 | hooks: 31 | - id: blacken-docs 32 | additional_dependencies: [black==22.6.0] 33 | stages: [manual] # Manual because already done by mdformat-black 34 | - repo: https://github.com/pre-commit/pre-commit-hooks 35 | rev: v4.5.0 36 | hooks: 37 | - id: fix-byte-order-marker 38 | - id: 
mixed-line-ending 39 | - id: end-of-file-fixer 40 | - id: trailing-whitespace 41 | - id: no-commit-to-branch 42 | args: [--branch, main] 43 | - id: check-yaml 44 | args: [--unsafe] 45 | - id: debug-statements 46 | - id: check-json 47 | - id: check-builtin-literals 48 | - id: check-ast 49 | - id: check-merge-conflict 50 | - id: check-executables-have-shebangs 51 | - id: check-shebang-scripts-are-executable 52 | - id: check-docstring-first 53 | - id: check-case-conflict 54 | # - id: check-toml 55 | - repo: https://github.com/pre-commit/mirrors-prettier 56 | rev: v3.0.3 57 | hooks: 58 | - id: prettier 59 | - repo: https://github.com/adrienverge/yamllint.git 60 | rev: v1.32.0 61 | hooks: 62 | - id: yamllint 63 | args: 64 | - --no-warnings 65 | - -d 66 | - '{extends: relaxed, rules: {line-length: {max: 90}}}' 67 | - repo: https://github.com/lovesegfault/beautysh.git 68 | rev: v6.2.1 69 | hooks: 70 | - id: beautysh 71 | - repo: https://github.com/shellcheck-py/shellcheck-py 72 | rev: v0.9.0.6 73 | hooks: 74 | - id: shellcheck 75 | files: ^[^\.].*\.sh$ 76 | # add "shellcheck disable=SC2086" codes to files, rather than global excludes: 77 | # args: [-x,-e2086,-e2004,-e2207,-e2002,-e2116] 78 | # args: ["--severity=warning"] # Optionally only show errors and warnings 79 | - repo: https://github.com/asottile/pyupgrade 80 | rev: v3.15.0 81 | hooks: 82 | - id: pyupgrade 83 | args: 84 | - --py39-plus 85 | - repo: https://github.com/psf/black 86 | rev: 23.9.1 87 | hooks: 88 | - id: black 89 | args: 90 | - --safe 91 | - --quiet 92 | - -l 79 93 | - repo: https://github.com/Lucas-C/pre-commit-hooks-bandit 94 | rev: v1.0.6 95 | hooks: 96 | - id: python-bandit-vulnerability-check 97 | args: [--skip, 'B101,B311', --recursive, .] 
98 | 99 | - repo: https://github.com/fsouza/autoflake8 100 | rev: v0.4.1 101 | hooks: 102 | - id: autoflake8 103 | args: 104 | - -i 105 | - -r 106 | - --expand-star-imports 107 | - custom_components 108 | - repo: https://github.com/PyCQA/flake8 109 | rev: 6.1.0 110 | hooks: 111 | - id: flake8 112 | additional_dependencies: 113 | # - pyproject-flake8>=0.0.1a5 114 | - flake8-bugbear>=22.7.1 115 | - flake8-comprehensions>=3.10.1 116 | - flake8-2020>=1.7.0 117 | - mccabe>=0.7.0 118 | - pycodestyle>=2.9.1 119 | - pyflakes>=2.5.0 120 | - repo: https://github.com/PyCQA/isort 121 | rev: 5.12.0 122 | hooks: 123 | - id: isort 124 | - repo: https://github.com/codespell-project/codespell 125 | rev: v2.2.6 126 | hooks: 127 | - id: codespell 128 | args: 129 | # - --builtin=clear,rare,informal,usage,code,names,en-GB_to_en-US 130 | - --builtin=clear,rare,informal,usage,code,names 131 | - --ignore-words-list=hass,master 132 | - --skip="./.*" 133 | - --quiet-level=2 134 | - repo: https://github.com/pre-commit/mirrors-pylint 135 | rev: v3.0.0a5 136 | hooks: 137 | - id: pylint 138 | args: 139 | - --reports=no 140 | - --py-version=3.10 141 | #additional_dependencies: 142 | #- homeassistant-stubs>=2023.1.7 143 | # exclude: ^$ 144 | - repo: https://github.com/pre-commit/mirrors-mypy 145 | rev: v1.5.1 146 | hooks: 147 | - id: mypy 148 | args: 149 | # - --verbose 150 | # - --config-file=setup.cfg 151 | - --ignore-missing-imports 152 | - --install-types 153 | - --non-interactive 154 | - --check-untyped-defs 155 | - --show-error-codes 156 | - --show-error-context 157 | additional_dependencies: 158 | - zigpy>=0.43.0 159 | - cryptography==3.3.2 # Compatible/Available on cygwin 160 | #- homeassistant-stubs>=2023.1.7 161 | #- pydantic 162 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/zcl_cmd.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Any 3 | 4 | 
from . import utils as u 5 | from .params import INTERNAL_PARAMS as p 6 | from .params import USER_PARAMS as P 7 | 8 | LOGGER = logging.getLogger(__name__) 9 | 10 | 11 | ERR003_PARAMETER_MISSING = "Expecting parameter '{}'" 12 | ERR004_NOT_IN_CLUSTER = "In cluster 0x%04X not found for '%s', endpoint %s" 13 | ERR005_NOT_OUT_CLUSTER = "Out cluster 0x%04X not found for '%s', endpoint %s" 14 | 15 | 16 | async def zcl_cmd(app, listener, ieee, cmd, data, service, params, event_data): 17 | from zigpy import types as t 18 | from zigpy.zcl import foundation 19 | 20 | # Verify parameter presence 21 | 22 | if ieee is None: 23 | msg = ERR003_PARAMETER_MISSING.format("ieee") 24 | LOGGER.error(msg) 25 | raise Exception(msg) 26 | 27 | dev = app.get_device(ieee=ieee) 28 | # The next line will also update the endpoint if it is not set 29 | cluster = u.get_cluster_from_params(dev, params, event_data) 30 | 31 | # Extract parameters 32 | 33 | # Endpoint to send command to 34 | ep_id = params[p.EP_ID] 35 | # Cluster to send command to 36 | cluster_id = params[p.CLUSTER_ID] 37 | # The command to send 38 | cmd_id = params[p.CMD_ID] 39 | if cmd_id is None: 40 | raise Exception(ERR003_PARAMETER_MISSING, P.CMD) 41 | 42 | # The direction (to in or out cluster) 43 | dir_int = params[p.DIR] 44 | 45 | # Get manufacturer 46 | manf = params[p.MANF] 47 | 48 | # Get tries 49 | tries = params[p.TRIES] 50 | 51 | # Get expect_reply 52 | expect_reply = params[p.EXPECT_REPLY] 53 | 54 | cmd_args = params[p.ARGS] 55 | 56 | # Direction 0 = Client to Server, as in protocol bit 57 | is_in_cluster = dir_int == 0 58 | 59 | if ep_id not in dev.endpoints: 60 | msg = f"Endpoint {ep_id} not found for '{repr(ieee)}'" 61 | LOGGER.error(msg) 62 | raise Exception(msg) 63 | 64 | endpoint = dev.endpoints[ep_id] 65 | 66 | org_cluster_cmd_defs = {} 67 | 68 | # Exception caught in the try/catch below to throw after 69 | # restoring cluster definitions 70 | caught_e = None 71 | 72 | try: 73 | if is_in_cluster: 74 | if 
cluster_id not in endpoint.in_clusters: 75 | msg = ERR004_NOT_IN_CLUSTER.format( 76 | cluster_id, repr(ieee), ep_id 77 | ) 78 | LOGGER.error(msg) 79 | raise Exception(msg) 80 | 81 | # Cluster is found 82 | cluster = endpoint.in_clusters[cluster_id] 83 | 84 | # Change command specification ourselves ... 85 | 86 | if (cluster_id == 5) and (cmd_id == 0): 87 | org_cluster_cmd_defs[0] = cluster.server_commands[0] 88 | cluster.server_commands[0] = ( 89 | "add", 90 | ( 91 | t.uint16_t, 92 | t.uint8_t, 93 | t.uint16_t, 94 | t.CharacterString, 95 | t.Optional(t.List[t.uint8_t]), 96 | ), 97 | False, 98 | ) 99 | elif cmd_id not in cluster.server_commands: 100 | cmd_schema: list[Any] = [] 101 | 102 | if cmd_args is not None: 103 | cmd_schema = [t.uint8_t] * len(cmd_args) 104 | 105 | cmd_def = foundation.ZCLCommandDef( 106 | name=f"zha_toolkit_dummy_cmd{cmd_id}", 107 | id=cmd_id, 108 | schema=cmd_schema, 109 | direction=foundation.Direction.Client_to_Server, 110 | is_manufacturer_specific=(manf is not None), 111 | ) 112 | 113 | org_cluster_cmd_defs[cmd_id] = None 114 | cluster.server_commands[cmd_id] = cmd_def 115 | 116 | event_data["cmd_reply"] = await u.retry_wrapper( 117 | cluster.command, 118 | cmd_id, 119 | *cmd_args, 120 | manufacturer=manf, 121 | expect_reply=expect_reply, 122 | tries=tries, 123 | ) 124 | else: 125 | if cluster_id not in endpoint.out_clusters: 126 | msg = ERR005_NOT_OUT_CLUSTER.format( 127 | cluster_id, repr(ieee), ep_id 128 | ) 129 | LOGGER.error(msg) 130 | raise Exception(msg) 131 | 132 | # Found cluster 133 | cluster = endpoint.out_clusters[cluster_id] 134 | 135 | # Note: client_command not tested 136 | event_data["cmd_reply"] = await cluster.client_command( 137 | cmd_id, *cmd_args, manufacturer=manf 138 | ) 139 | except Exception as e: 140 | caught_e = e 141 | finally: 142 | # Restore replaced cluster command definitions 143 | # LOGGER.debug("replaced %s", org_cluster_cmd_defs) 144 | for key, cmd_def in org_cluster_cmd_defs.items(): 145 | if 
is_in_cluster: 146 | if cmd_def is not None: 147 | cluster.server_commands[key] = cmd_def 148 | else: 149 | del cluster.server_commands[key] 150 | 151 | else: 152 | if cmd_def is not None: 153 | cluster.client_commands[key] = cmd_def 154 | else: 155 | del cluster.client_commands[key] 156 | if caught_e is not None: 157 | raise caught_e 158 | 159 | # Could check cluster.client_command, cluster_server commands 160 | -------------------------------------------------------------------------------- /examples/script_danfoss_ally_configure.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | alias: Danfoss Ally TRV configuration 3 | sequence: 4 | - variables: 5 | ieee: "{{ (device_attr(device, 'identifiers')|list)[0][1] }}" 6 | default_tries: 3 7 | - alias: Configure reporting of local_temperature in Thermostat cluster 8 | service: zha_toolkit.conf_report 9 | data: 10 | ieee: "{{ ieee }}" 11 | cluster: 513 12 | attribute: 0 13 | tries: "{{ default_tries }}" 14 | event_done: zha_done 15 | reportable_change: 20 16 | max_interval: 300 17 | min_interval: 19 18 | - alias: Read back reporting configuration, for debugging 19 | service: zha_toolkit.conf_report_read 20 | data: 21 | ieee: "{{ ieee }}" 22 | cluster: 513 23 | attribute: 0 24 | tries: "{{ default_tries }}" 25 | event_done: zha_done 26 | - alias: Set lower limit for setpoint 27 | service: zha_toolkit.attr_write 28 | data: 29 | ieee: "{{ ieee }}" 30 | cluster: 513 31 | attribute: 21 32 | attr_val: "{{ ( set_min_temperature | float * 100) | int }}" 33 | tries: "{{ default_tries }}" 34 | csvout: danfoss_config.csv 35 | - alias: Set upper limit for setpoint 36 | service: zha_toolkit.attr_write 37 | data: 38 | ieee: "{{ ieee }}" 39 | cluster: 513 40 | attribute: 22 41 | attr_val: "{{ ( set_max_temperature | float * 100) | int }}" 42 | tries: "{{ default_tries }}" 43 | csvout: danfoss_config.csv 44 | - alias: Set Display rotation 45 | service: zha_toolkit.attr_write 46 | data: 47 
| ieee: "{{ ieee }}" 48 | cluster: 516 49 | attribute: 16384 50 | attr_val: "{{ 0 if view_direction else 1 }}" 51 | manf: 4678 52 | event_done: zha_done 53 | tries: "{{ default_tries }}" 54 | csvout: danfoss_config.csv 55 | - alias: Set open window detection 56 | service: zha_toolkit.attr_write 57 | data: 58 | ieee: "{{ ieee }}" 59 | cluster: 513 60 | attribute: 16465 61 | attr_val: "{{ 1 if enable_open_window else 0 }}" 62 | manf: 4678 63 | event_done: zha_done 64 | tries: "{{ default_tries }}" 65 | csvout: danfoss_config.csv 66 | - alias: Check if window open reporting is configured, for debugging 67 | service: zha_toolkit.conf_report_read 68 | data: 69 | ieee: "{{ ieee }}" 70 | cluster: 513 71 | attribute: 16384 72 | manf: 4678 73 | tries: "{{ default_tries }}" 74 | event_done: zha_done 75 | - alias: Set TRV orientation (horizontal/vertical) 76 | service: zha_toolkit.attr_write 77 | data: 78 | ieee: "{{ ieee }}" 79 | cluster: 513 80 | attribute: 16404 81 | attr_val: "{{ 1 if orientation else 0 }}" 82 | manf: 4678 83 | event_done: zha_done 84 | tries: "{{ default_tries }}" 85 | csvout: danfoss_config.csv 86 | - alias: Set time 87 | service: zha_toolkit.misc_settime 88 | data: 89 | ieee: "{{ ieee }}" 90 | event_done: zha_done 91 | tries: "{{ default_tries }}" 92 | csvout: danfoss_config.csv 93 | - alias: Set time status to synchronised 94 | service: zha_toolkit.attr_write 95 | data: 96 | ieee: "{{ ieee }}" 97 | cluster: 10 98 | attribute: 1 99 | attr_val: 2 100 | tries: "{{ default_tries }}" 101 | csvout: danfoss_config.csv 102 | - alias: Set covered mode 103 | service: zha_toolkit.attr_write 104 | data: 105 | ieee: "{{ ieee }}" 106 | cluster: 513 107 | attribute: 16406 108 | attr_val: "{{ 1 if covered else 0 }}" 109 | manf: 4678 110 | tries: "{{ default_tries}}" 111 | csvout: danfoss_config.csv 112 | - alias: Check heat request reporting configuration 113 | service: zha_toolkit.conf_report_read 114 | data: 115 | ieee: "{{ ieee }}" 116 | cluster: 513 117 | 
attribute: 16433 118 | manf: 4678 119 | tries: "{{ default_tries }}" 120 | event_done: zha_done 121 | - alias: Read Heat Supply Request 122 | service: zha_toolkit.attr_read 123 | data: 124 | ieee: "{{ ieee }}" 125 | cluster: 513 126 | attribute: 16433 127 | manf: 4678 128 | tries: "{{ default_tries }}" 129 | csvout: danfoss_config.csv 130 | description: >- 131 | A script that configures a Danfoss Ally TRV zigbee thermostat. You can listen 132 | on the 'zha_done' event to see some of the configuration results. Sets report 133 | configuration and enables window open function. 134 | fields: 135 | device: 136 | name: Ally TRV Device 137 | description: A Danfoss Ally Thermostatic Regulation Valve (TRV) to configure 138 | required: true 139 | selector: 140 | device: 141 | manufacturer: Danfoss 142 | entity: 143 | domain: climate 144 | integration: zha 145 | set_min_temperature: 146 | name: Min user temperature 147 | description: The minimum temperature a user can set 148 | default: 8 149 | example: 8 150 | required: true 151 | selector: 152 | number: 153 | min: 8 154 | max: 22 155 | step: 0.5 156 | unit_of_measurement: °C 157 | mode: box 158 | set_max_temperature: 159 | name: Max user temperature 160 | description: The maximum temperature a user can set 161 | default: 22 162 | example: 22 163 | required: true 164 | selector: 165 | number: 166 | min: 8 167 | max: 22 168 | step: 0.5 169 | unit_of_measurement: °C 170 | mode: box 171 | enable_open_window: 172 | name: Enable open window detection 173 | description: When true, the valve detects open window and stops heating 174 | default: true 175 | example: true 176 | required: true 177 | selector: 178 | boolean: 179 | view_direction: 180 | name: Viewing direction/display rotation 181 | description: >- 182 | * When true, the text can be read when looking towards the valve (factory 183 | default), * When false, the text can be read when looking from the valve. 
184 | default: true 185 | example: true 186 | required: true 187 | selector: 188 | boolean: 189 | orientation: 190 | name: TRV orientation 191 | description: >- 192 | - When false, mounted horizontaly, - When true, mounted vertically. This 193 | selects the temperature gradient measured in the valve on radiator. 194 | default: false 195 | example: false 196 | required: true 197 | selector: 198 | boolean: 199 | covered: 200 | name: TRV covered setting 201 | description: >- 202 | - When true, the radiator is covered (you should use the automation to 203 | send temperature for external thermometer). 204 | default: false 205 | example: false 206 | required: true 207 | selector: 208 | boolean: 209 | mode: single 210 | icon: mdi:thermostat 211 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/neighbours.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import asyncio 4 | import enum 5 | import logging 6 | import os 7 | from random import uniform 8 | 9 | import zigpy.zdo.types as zdo_t 10 | from zigpy.exceptions import DeliveryError 11 | 12 | from . 
import utils as u 13 | 14 | LOGGER = logging.getLogger(__name__) 15 | 16 | 17 | async def get_routes_and_neighbours( 18 | app, listener, ieee, cmd, data, service, params, event_data 19 | ): 20 | if ieee is None: 21 | LOGGER.error("missing ieee") 22 | return 23 | 24 | LOGGER.debug("Getting routes and neighbours: %s", service) 25 | device = app.get_device(ieee=ieee) 26 | event_data["result"] = await _routes_and_neighbours(device, listener) 27 | 28 | ieee_tail = "".join([f"{o:02X}" for o in device.ieee]) 29 | 30 | fname = os.path.join( 31 | u.get_hass(listener).config.config_dir, 32 | "scans", 33 | f"routes_and_neighbours_{ieee_tail}.json", 34 | ) 35 | u.helper_save_json(fname, event_data["result"]) 36 | 37 | LOGGER.debug("Wrote scan results to '%s'", fname) 38 | 39 | 40 | async def _routes_and_neighbours(device, listener): 41 | try: 42 | routes = await asyncio.wait_for(async_get_routes(device), 180) 43 | except asyncio.TimeoutError: 44 | routes = [] 45 | await asyncio.sleep(uniform(1.0, 1.5)) 46 | try: 47 | nbns = await asyncio.wait_for(async_get_neighbours(device), 180) 48 | except asyncio.TimeoutError: 49 | nbns = [] 50 | 51 | return {"routes": routes, "neighbours": nbns} 52 | 53 | 54 | async def all_routes_and_neighbours( 55 | app, listener, ieee, cmd, data, service, params, event_data 56 | ): 57 | LOGGER.debug("Getting routes and neighbours for all devices: %s", service) 58 | 59 | counter = 1 60 | devs = [d for d in app.devices.values() if not d.node_desc.is_end_device] 61 | all_routes = {} 62 | for device in devs: 63 | LOGGER.debug( 64 | "%s: Querying routes and neighbours: %s out of %s", 65 | device.ieee, 66 | counter, 67 | len(devs), 68 | ) 69 | all_routes[str(device.ieee)] = await _routes_and_neighbours( 70 | device, listener 71 | ) 72 | LOGGER.debug("%s: Got %s out of %s", device.ieee, counter, len(devs)) 73 | counter += 1 74 | 75 | event_data["result"] = all_routes 76 | 77 | all_routes_name = os.path.join( 78 | u.get_hass(listener).config.config_dir, 79 | 
"scans", 80 | "all_routes_and_neighbours.json", 81 | ) 82 | u.helper_save_json(all_routes_name, all_routes) 83 | 84 | 85 | async def async_get_neighbours(device): 86 | """Pull neighbour table from a device.""" 87 | 88 | def _process_neighbour(nbg): 89 | """Return dict of a neighbour entry.""" 90 | 91 | # LOGGER.debug(f"NEIGHBOR: {nbg!r}") 92 | res = {} 93 | res["pan_id"] = str(nbg.extended_pan_id) 94 | res["ieee"] = str(nbg.ieee) 95 | res["nwk"] = str(nbg.nwk) 96 | res["device_type"] = nbg.device_type.name 97 | res["rx_on_when_idle"] = nbg.rx_on_when_idle.name 98 | res["relationship"] = nbg.relationship.name 99 | res["permit_joining"] = nbg.permit_joining.name 100 | res["depth"] = nbg.depth 101 | res["lqi"] = nbg.lqi 102 | return res 103 | 104 | result = [] 105 | idx = 0 106 | while True: 107 | try: 108 | status, val = await device.zdo.request( 109 | zdo_t.ZDOCmd.Mgmt_Lqi_req, idx 110 | ) 111 | LOGGER.debug( 112 | "%s: neighbour request Status: %s. Response: %r", 113 | device.ieee, 114 | status, 115 | val, 116 | ) 117 | if zdo_t.Status.SUCCESS != status: 118 | LOGGER.debug( 119 | "%s: device does not support 'Mgmt_Lqi_req'", device.ieee 120 | ) 121 | break 122 | except DeliveryError: 123 | LOGGER.debug("%s: Could not deliver 'Mgmt_Lqi_req'", device.ieee) 124 | break 125 | 126 | LOGGER.debug(f"NEIGHBORS: {val!r}") 127 | 128 | if hasattr(val, "neighbor_table_list"): 129 | neighbours = val.neighbor_table_list 130 | entries = val.entries 131 | else: 132 | neighbours = val.NeighborTableList 133 | entries = val.Entries 134 | 135 | for neighbour in neighbours: 136 | result.append(_process_neighbour(neighbour)) 137 | idx += 1 138 | 139 | if idx >= entries: 140 | break 141 | 142 | await asyncio.sleep(uniform(1.0, 1.5)) 143 | 144 | return sorted(result, key=lambda x: x["ieee"]) 145 | 146 | 147 | async def async_get_routes(device): 148 | """Pull routing table from a device.""" 149 | 150 | def _process_route(route): 151 | """Return a dict representing routing entry.""" 152 | 
153 | class RouteStatus(enum.IntEnum): 154 | Active = 0x0 155 | Discovery_Underway = 0x1 156 | Discovery_Failed = 0x2 157 | Inactive = 0x3 158 | Validation_Underway = 0x4 159 | 160 | res: dict[str, str | bool | None | int] = {} 161 | res["destination"] = f"0x{route.DstNWK:04x}" 162 | res["next_hop"] = f"0x{route.NextHop:04x}" 163 | raw = route.RouteStatus & 0x07 164 | try: 165 | cooked = RouteStatus(raw).name 166 | except ValueError: 167 | cooked = f"reserved_{raw:02x}" 168 | res["status"] = cooked 169 | res["memory_constrained"] = bool((route.RouteStatus >> 3) & 0x01) 170 | res["many_to_one"] = bool((route.RouteStatus >> 4) & 0x01) 171 | res["route_record_required"] = bool((route.RouteStatus >> 5) & 0x01) 172 | return res 173 | 174 | routes = [] 175 | idx = 0 176 | while True: 177 | try: 178 | status, val = await device.zdo.request( 179 | zdo_t.ZDOCmd.Mgmt_Rtg_req, idx 180 | ) 181 | LOGGER.debug( 182 | "%s: route request Status:%s. Routes: %r", 183 | device.ieee, 184 | status, 185 | val, 186 | ) 187 | if zdo_t.Status.SUCCESS != status: 188 | LOGGER.debug( 189 | "%s: Does not support 'Mgmt_rtg_req': %s", 190 | device.ieee, 191 | status, 192 | ) 193 | break 194 | except DeliveryError: 195 | LOGGER.debug("%s: Could not deliver 'Mgmt_rtg_req'", device.ieee) 196 | break 197 | 198 | LOGGER.debug(f"Mgmt_Rtg_rsp: {val!r}") 199 | for route in val.RoutingTableList: 200 | routes.append(_process_route(route)) 201 | idx += 1 202 | if idx >= val.Entries: 203 | break 204 | await asyncio.sleep(uniform(1.0, 1.5)) 205 | 206 | return routes 207 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/znp.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from zigpy import types as t 4 | 5 | from . 
import utils as u 6 | 7 | # from zigpy.zcl import foundation 8 | # import zigpy.zcl as zcl 9 | 10 | LOGGER = logging.getLogger(__name__) 11 | 12 | 13 | async def znp_backup( 14 | app, listener, ieee, cmd, data, service, params, event_data 15 | ): 16 | """Backup ZNP network information.""" 17 | 18 | LOGGER.debug("ZNP_BACKUP") 19 | 20 | if u.get_radiotype(app) != u.RadioType.ZNP: 21 | msg = f"{cmd} is only for ZNP" 22 | LOGGER.debug(msg) 23 | raise Exception(msg) 24 | 25 | # Import stuff we need 26 | import json 27 | 28 | from zigpy_znp.tools.network_backup import backup_network 29 | 30 | # Get backup information 31 | backup_obj = await backup_network(app._znp) 32 | 33 | # Store backup information to file 34 | 35 | # Set name with regards to local path 36 | out_dir = u.get_local_dir() 37 | 38 | # Ensure that data is an empty string when not set 39 | if data is None: 40 | data = "" 41 | 42 | fname = out_dir + "nwk_backup" + str(data) + ".json" 43 | 44 | event_data["backup_file"] = fname 45 | 46 | LOGGER.debug("Writing to %s", fname) 47 | with open(fname, "w", encoding="utf_8") as f: 48 | f.write(json.dumps(backup_obj, indent=4)) 49 | 50 | 51 | async def znp_restore( 52 | app, listener, ieee, cmd, data, service, params, event_data 53 | ): 54 | """Restore ZNP network information.""" 55 | 56 | if u.get_radiotype(app) != u.RadioType.ZNP: 57 | msg = f"'{cmd}' is only available for ZNP" 58 | LOGGER.debug(msg) 59 | raise Exception(msg) 60 | 61 | # Get/set parameters 62 | 63 | # command_data (data): 64 | # counter_increment (defaults to 2500) 65 | 66 | counter_increment = u.str2int(data) 67 | 68 | if not isinstance(counter_increment, int): 69 | counter_increment = 2500 70 | 71 | counter_increment = t.uint32_t(counter_increment) 72 | 73 | from datetime import datetime 74 | 75 | current_datetime = datetime.now().strftime("_%Y%m%d_%H%M%S") 76 | 77 | # Safety: backup current configuration 78 | await znp_backup( 79 | app, listener, ieee, cmd, current_datetime, service, params, 
event_data 80 | ) 81 | 82 | # Import stuff we need for restoring 83 | import json 84 | 85 | from zigpy_znp.tools.common import validate_backup_json 86 | from zigpy_znp.tools.network_restore import json_backup_to_zigpy_state 87 | 88 | # Set name with regards to local path 89 | fname = u.get_local_dir() + "nwk_backup.json" 90 | LOGGER.info("Restore from '%s'", fname) 91 | 92 | event_data["restore_file"] = fname 93 | 94 | # Read backup file 95 | with open(fname, encoding="utf_8") as f: 96 | backup = json.load(f) 97 | 98 | # validate the backup file 99 | LOGGER.info("Validating backup contents") 100 | validate_backup_json(backup) 101 | LOGGER.info("Backup contents validated") 102 | 103 | network_info, node_info = json_backup_to_zigpy_state(backup) 104 | 105 | network_info.network_key.tx_counter += counter_increment 106 | 107 | # Network already formed in HA 108 | # app._znp.startup(force_form=True) 109 | 110 | # Write back information from backup 111 | LOGGER.info("Writing to device") 112 | await app._znp.write_network_info( 113 | network_info=network_info, node_info=node_info 114 | ) 115 | 116 | # LOGGER.debug("List of attributes/methods in app %s", dir(app)) 117 | LOGGER.debug("List of attributes/methods in znp %s", dir(app._znp)) 118 | 119 | # Shutdown znp? 120 | LOGGER.info( 121 | "Write done, call pre_shutdown(). Restart the device/HA after this." 122 | ) 123 | await app._znp.pre_shutdown() 124 | LOGGER.info("pre_shutdown() Done.") 125 | 126 | # TODO: restart znp, HA? 
async def znp_nvram_backup(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Save the ZNP radio's NVRAM contents to a JSON file.

    The output file is ``<local_dir>/nvram_backup<data>.json`` where
    ``data`` (the service's command_data) is used as an optional
    filename suffix.  Raises when the radio is not a ZNP coordinator.
    """

    if u.get_radiotype(app) != u.RadioType.ZNP:
        msg = f"'{cmd}' is only available for ZNP"
        LOGGER.debug(msg)
        raise Exception(msg)

    # Deferred imports: zigpy_znp is only needed/installed for ZNP radios.
    import json

    from zigpy_znp.tools.nvram_read import nvram_read

    # Set name with regards to local path
    out_dir = u.get_local_dir()

    LOGGER.info("Reading NVRAM from device")
    backup_obj = await nvram_read(app._znp)

    # Ensure that data is an empty string when not set
    if data is None:
        data = ""

    fname = out_dir + "nvram_backup" + str(data) + ".json"

    LOGGER.info("Saving NVRAM to '%s'", fname)
    with open(fname, "w", encoding="utf_8") as f:
        f.write(json.dumps(backup_obj, indent=4))
    LOGGER.info("NVRAM backup saved to '%s'", fname)


async def znp_nvram_restore(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Restore ZNP NVRAM from a previously saved JSON backup file.

    A timestamp-suffixed safety backup of the current NVRAM is written
    before anything is restored.  ``data`` selects the suffix of the
    backup file to restore from (``nvram_backup<data>.json``).
    """

    if u.get_radiotype(app) != u.RadioType.ZNP:
        msg = f"'{cmd}' is only available for ZNP"
        LOGGER.debug(msg)
        raise Exception(msg)

    # Safety: backup current configuration before overwriting anything
    from datetime import datetime

    current_datetime = datetime.now().strftime("_%Y%m%d_%H%M%S")
    await znp_nvram_backup(
        app, listener, ieee, cmd, current_datetime, service, params, event_data
    )

    # Restore NVRAM backup from file
    import json

    from zigpy_znp.tools.nvram_write import nvram_write

    # Set name with regards to local path
    out_dir = u.get_local_dir()

    # Ensure that data is an empty string when not set
    if data is None:
        data = ""

    fname = out_dir + "nvram_backup" + str(data) + ".json"

    LOGGER.info("Restoring NVRAM from '%s'", fname)
    # BUGFIX: the backup must be opened for *reading*.  The previous
    # mode "w" truncated the file to zero bytes before json.load(),
    # which both destroyed the backup being restored and then failed.
    with open(fname, encoding="utf_8") as f:
        nvram_obj = json.load(f)

    await nvram_write(app._znp, nvram_obj)
    LOGGER.info("Restored NVRAM from '%s'", fname)

    # TODO: restart znp, HA?


async def znp_nvram_reset(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Reset (clear) the ZNP radio's NVRAM.

    A timestamp-suffixed NVRAM backup is taken first as a safety net.
    """

    if u.get_radiotype(app) != u.RadioType.ZNP:
        msg = f"'{cmd}' is only available for ZNP"
        LOGGER.debug(msg)
        raise Exception(msg)

    from datetime import datetime

    current_datetime = datetime.now().strftime("_%Y%m%d_%H%M%S")

    # Safety: backup current configuration
    await znp_nvram_backup(
        app, listener, ieee, cmd, current_datetime, service, params, event_data
    )

    # Import stuff we need for resetting
    from zigpy_znp.tools.nvram_reset import nvram_reset

    # Write back information from backup
    LOGGER.info("Reset NVRAM")
    await nvram_reset(app._znp)

    # Shutdown znp?
    # LOGGER.info("Call pre_shutdown(). Restart the device/HA after this.")
    # await app._znp.pre_shutdown()
    # LOGGER.info("pre_shutdown() Done.")

    # TODO: restart znp, HA?
236 | -------------------------------------------------------------------------------- /STATS.md: -------------------------------------------------------------------------------- 1 | # Badges showing number of downloads per version 2 | 3 | - ![badge latest](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/latest/total.svg) 4 | - ![badge v1.1.2](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.2/total.svg) 5 | - ![badge v1.0.0](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.0.0/total.svg) 6 | - ![badge v0.9.9](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.9.9/total.svg) 7 | - ![badge v0.9.7](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.9.7/total.svg) 8 | - ![badge v0.9.5](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.9.5/total.svg) 9 | - ![badge v0.9.4](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.9.4/total.svg) 10 | - ![badge v0.9.3](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.9.3/total.svg) 11 | - ![badge v0.9.2](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.9.2/total.svg) 12 | - ![badge v0.9.1](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.9.1/total.svg) 13 | - ![badge v0.8.40](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.40/total.svg) 14 | - ![badge v0.8.39](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.39/total.svg) 15 | - ![badge v0.8.38](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.38/total.svg) 16 | - ![badge v0.8.37](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.37/total.svg) 17 | - ![badge v0.8.36](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.36/total.svg) 18 | - ![badge v0.8.35](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.35/total.svg) 19 | - ![badge v0.8.34](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.34/total.svg) 20 | - ![badge 
v0.8.33](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.33/total.svg) 21 | - ![badge v0.8.32](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.32/total.svg) 22 | - ![badge v0.8.31](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.31/total.svg) 23 | - ![badge v0.8.29](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.29/total.svg) 24 | - ![badge v0.8.28](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.28/total.svg) 25 | - ![badge v0.8.27](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.27/total.svg) 26 | - ![badge v0.8.26](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.26/total.svg) 27 | - ![badge v0.8.25](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.25/total.svg) 28 | - ![badge v0.8.24](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.24/total.svg) 29 | - ![badge v0.8.23](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.23/total.svg) 30 | - ![badge v0.8.22](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.22/total.svg) 31 | - ![badge v0.8.21](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.21/total.svg) 32 | - ![badge v0.8.20](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.20/total.svg) 33 | - ![badge v0.8.19](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.19/total.svg) 34 | - ![badge v0.8.18](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.18/total.svg) 35 | - ![badge v0.8.17](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.17/total.svg) 36 | - ![badge v0.8.16](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.16/total.svg) 37 | - ![badge v0.8.15](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.15/total.svg) 38 | - ![badge v0.8.14](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.14/total.svg) 39 | - ![badge 
v0.8.13](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.13/total.svg) 40 | - ![badge v0.8.12](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.12/total.svg) 41 | - ![badge v0.8.11](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.11/total.svg) 42 | - ![badge v0.8.10](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.10/total.svg) 43 | - ![badge v0.8.9](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.9/total.svg) 44 | - ![badge v0.8.8](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.8/total.svg) 45 | - ![badge v0.8.7](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.7/total.svg) 46 | - ![badge v0.8.6](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.6/total.svg) 47 | - ![badge v0.8.5](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.5/total.svg) 48 | - ![badge v0.8.4](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.4/total.svg) 49 | - ![badge v0.8.2](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.2/total.svg) 50 | - ![badge v0.8.1](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.1/total.svg) 51 | - ![badge v0.8.0](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.0/total.svg) 52 | - ![badge v0.7.27](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.27/total.svg) 53 | - ![badge v0.7.26](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.26/total.svg) 54 | - ![badge v0.7.25](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.25/total.svg) 55 | - ![badge v0.7.22](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.22/total.svg) 56 | - ![badge v0.7.21](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.21/total.svg) 57 | - ![badge v0.7.20](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.20/total.svg) 58 | - ![badge 
v0.7.19](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.19/total.svg) 59 | - ![badge v0.7.18](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.18/total.svg) 60 | - ![badge v0.7.17](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.17/total.svg) 61 | - ![badge v0.7.16](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.16/total.svg) 62 | - ![badge v0.7.15](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.15/total.svg) 63 | - ![badge v0.7.14](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.14/total.svg) 64 | - ![badge v0.7.13](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.13/total.svg) 65 | - ![badge v0.7.12](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.12/total.svg) 66 | - ![badge v0.7.11](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.11/total.svg) 67 | - ![badge v0.7.10](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.10/total.svg) 68 | - ![badge v0.7.8](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.8/total.svg) 69 | - ![badge v0.7.0](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.0/total.svg) 70 | - ![badge v0.5.11](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.5.11/total.svg) 71 | - ![badge v0.5.10](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.5.10/total.svg) 72 | - ![badge v0.5.9](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.5.9/total.svg) 73 | - ![badge v0.5.8](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.5.8/total.svg) 74 | - ![badge v0.5.7](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.5.7/total.svg) 75 | - ![badge v0.5.6](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.5.6/total.svg) 76 | - ![badge v0.5.5](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.5.5/total.svg) 77 | - ![badge 
v0.5.4](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.5.4/total.svg) 78 | - ![badge v0.5.3](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.5.3/total.svg) 79 | - ![badge v0.5.0](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.5.0/total.svg) 80 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/groups.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import logging 4 | from typing import Any 5 | 6 | from . import utils as u 7 | from .params import INTERNAL_PARAMS as p 8 | 9 | LOGGER = logging.getLogger(__name__) 10 | 11 | 12 | async def get_groups( 13 | app, listener, ieee, cmd, data, service, params, event_data 14 | ): 15 | if ieee is None: 16 | LOGGER.error("missing ieee") 17 | return 18 | 19 | src_dev = app.get_device(ieee=ieee) 20 | 21 | groups: dict[int, dict[str, Any]] = {} 22 | endpoint_id = params[p.EP_ID] 23 | 24 | event_data["result"] = [] 25 | for ep_id, ep in src_dev.endpoints.items(): 26 | if ep_id == 0 or (endpoint_id is not None and ep_id != endpoint_id): 27 | continue 28 | try: 29 | ep_info: dict[str, Any] = {} 30 | res = await u.retry_wrapper( 31 | ep.groups.read_attributes, 32 | ["name_support"], 33 | tries=params[p.TRIES], 34 | ) 35 | event_data["result"].append(res) 36 | 37 | name_support = res[0]["name_support"] 38 | ep_info["name_support"] = int(name_support) 39 | LOGGER.debug( 40 | "Group on 0x%04X EP %u name support: %s", 41 | src_dev.nwk, 42 | ep_id, 43 | name_support, 44 | ) 45 | 46 | all_groups = await u.retry_wrapper( 47 | ep.groups.get_membership, [], tries=params[p.TRIES] 48 | ) 49 | LOGGER.debug( 50 | "Groups on 0x%04X EP %u : %s", src_dev.nwk, ep_id, all_groups 51 | ) 52 | ep_info["groups"] = all_groups[1] 53 | groups[ep_id] = ep_info 54 | except AttributeError: 55 | LOGGER.debug( 56 | "0x%04X/EP %u: no group cluster found", src_dev.nwk, ep_id 
57 | ) 58 | 59 | event_data["groups"] = groups 60 | 61 | 62 | async def add_group( 63 | app, listener, ieee, cmd, data, service, params, event_data 64 | ): 65 | if ieee is None or not data: 66 | raise ValueError("ieee and command_data required") 67 | 68 | src_dev = app.get_device(ieee=ieee) 69 | 70 | group_id = u.str2int(data) 71 | endpoint_id = params[p.EP_ID] 72 | 73 | result = [] 74 | for ep_id, ep in src_dev.endpoints.items(): 75 | if ep_id == 0 or (endpoint_id is not None and ep_id != endpoint_id): 76 | # Skip ZDO or endpoints that are not selected 77 | continue 78 | try: 79 | res = await u.retry_wrapper( 80 | ep.groups.add, 81 | group_id, 82 | f"group {group_id}", 83 | tries=params[p.TRIES], 84 | ) 85 | result.append(res) 86 | LOGGER.debug( 87 | "0x%04x EP %u: Setting group 0x%04x: %s", 88 | src_dev.nwk, 89 | ep_id, 90 | group_id, 91 | res, 92 | ) 93 | except AttributeError: 94 | LOGGER.debug( 95 | "0x%04x EP %u : no group cluster found", src_dev.nwk, ep_id 96 | ) 97 | 98 | event_data["result"] = result 99 | 100 | 101 | async def remove_group( 102 | app, listener, ieee, cmd, data, service, params, event_data 103 | ): 104 | if ieee is None or not data: 105 | raise ValueError("ieee and command_data required") 106 | 107 | src_dev = app.get_device(ieee=ieee) 108 | 109 | group_id = u.str2int(data) 110 | endpoint_id = params[p.EP_ID] 111 | 112 | result = [] 113 | for ep_id, ep in src_dev.endpoints.items(): 114 | if ep_id == 0 or (endpoint_id is not None and ep_id != endpoint_id): 115 | # Skip ZDO or endpoints that are not selected 116 | continue 117 | try: 118 | res = await ep.groups.remove(group_id) 119 | result.append(res) 120 | LOGGER.debug( 121 | "0x%04x EP %u: Removing group 0x%04x: %s", 122 | src_dev.nwk, 123 | ep_id, 124 | group_id, 125 | res, 126 | ) 127 | except AttributeError: 128 | LOGGER.debug( 129 | "0x%04x EP %u: no group cluster found", src_dev.nwk, ep_id 130 | ) 131 | 132 | event_data["result"] = result 133 | 134 | 135 | async def remove_all_groups( 
136 | app, listener, ieee, cmd, data, service, params, event_data 137 | ): 138 | LOGGER.debug("running 'remove all group' command: %s", service) 139 | if ieee is None: 140 | return 141 | 142 | src_dev = app.get_device(ieee=ieee) 143 | endpoint_id = params[p.EP_ID] 144 | result = [] 145 | 146 | for ep_id, ep in src_dev.endpoints.items(): 147 | if ep_id == 0 or (endpoint_id is not None and ep_id != endpoint_id): 148 | continue 149 | try: 150 | res = await ep.groups.remove_all() 151 | result.append(res) 152 | LOGGER.debug("0x%04x: Removing all groups: %s", src_dev.nwk, res) 153 | except AttributeError: 154 | LOGGER.debug( 155 | "0x%04x: no group cluster on endpoint #%d", src_dev.nwk, ep_id 156 | ) 157 | 158 | event_data["result"] = result 159 | 160 | 161 | async def add_to_group( 162 | app, listener, ieee, cmd, data, service, params, event_data 163 | ): 164 | if data is None or ieee is None: 165 | LOGGER.error("invalid arguments for subscribe_group()") 166 | return 167 | 168 | dev = app.get_device(ieee=ieee) 169 | 170 | grp_id = u.str2int(data) 171 | endpoint_id = params[p.EP_ID] 172 | 173 | result = [] 174 | for ep_id, ep in dev.endpoints.items(): 175 | if ep_id == 0 or (endpoint_id is not None and ep_id != endpoint_id): 176 | continue 177 | LOGGER.debug("Subscribing %s EP %u to group: %s", ieee, ep_id, grp_id) 178 | res = await ep.add_to_group(grp_id, f"Group {data}") 179 | result.append(res) 180 | LOGGER.info( 181 | "Subscribed %s EP %u to group: %s Result: %r", 182 | ieee, 183 | ep_id, 184 | grp_id, 185 | res, 186 | ) 187 | 188 | event_data["result"] = result 189 | 190 | 191 | async def remove_from_group( 192 | app, listener, ieee, cmd, data, service, params, event_data 193 | ): 194 | if data is None or ieee is None: 195 | raise ValueError("ieee and command_data required") 196 | 197 | dev = app.get_device(ieee) 198 | 199 | grp_id = u.str2int(data) 200 | endpoint_id = params[p.EP_ID] 201 | 202 | result = [] 203 | for ep_id, ep in dev.endpoints.items(): 204 | if 
ep_id == 0 or (endpoint_id is not None and ep_id != endpoint_id): 205 | continue 206 | LOGGER.debug( 207 | "Unsubscribing %s EP %u from group: %s", ieee, ep_id, grp_id 208 | ) 209 | res = await ep.remove_from_group(grp_id) 210 | result.append(res) 211 | LOGGER.info( 212 | "Unsubscribed %s EP %u from group: %s Result: %r", 213 | ieee, 214 | ep_id, 215 | grp_id, 216 | res, 217 | ) 218 | 219 | event_data["result"] = result 220 | 221 | 222 | async def get_zll_groups( 223 | app, listener, ieee, cmd, data, service, params, event_data 224 | ): 225 | from zigpy.zcl.clusters.lightlink import LightLink 226 | 227 | if ieee is None: 228 | LOGGER.error("missing ieee") 229 | return 230 | 231 | dev = app.get_device(ieee=ieee) 232 | 233 | clusters = [ 234 | ep.in_clusters[LightLink.cluster_id] 235 | for epid, ep in dev.endpoints.items() 236 | if epid and LightLink.cluster_id in ep.in_clusters 237 | ] 238 | zll_cluster = None 239 | try: 240 | zll_cluster = next(iter(clusters)) 241 | except Exception: 242 | LOGGER.warning("No cluster in clusters") 243 | 244 | if not zll_cluster: 245 | msg = f"Couldn't find ZLL Commissioning cluster on {dev.ieee}" 246 | event_data["warning"] = msg 247 | LOGGER.warning(msg) 248 | return 249 | 250 | res = await zll_cluster.get_group_identifiers(0) 251 | groups = [g.group_id for g in res[2]] 252 | LOGGER.debug("Get group identifiers response: %s", groups) 253 | 254 | event_data["groups"] = groups 255 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/ota.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | from glob import glob 5 | 6 | import aiohttp 7 | import zigpy 8 | 9 | from . import DEFAULT_OTAU 10 | from . 
LOGGER = logging.getLogger(__name__)
KOENKK_LIST_URL = (
    "https://raw.githubusercontent.com/Koenkk/zigbee-OTA/master/index.json"
)

SONOFF_LIST_URL = "https://zigbee-ota.sonoff.tech/releases/upgrade.json"

# Glob patterns matching firmware image files already downloaded.
# The files usually have the FW version in their name, making them unique.
OTA_GLOB_EXPR = [
    "*.ZIGBEE",
    "*.OTA",
    "*.sbl-ota",
    "*.bin",
    "*.ota",
    "*.zigbee",
]


def _ota_files_on_disk(ota_dir):
    """Return a dict keyed by basenames of OTA images already in ota_dir.

    A dict is used (instead of a list) for efficient membership lookups.
    """
    LOGGER.debug("List OTA files available on file system")
    files = {}
    for glob_expr in OTA_GLOB_EXPR:
        for path in glob(os.path.join(ota_dir, glob_expr)):
            files[os.path.basename(path)] = True
    return files


def _device_manf_codes(listener):
    """Return a dict keyed by the manufacturer codes of all known devices."""
    manfs = {}
    for device in listener.devices.values():
        manfs[device.zha_device_info["manufacturer_code"]] = True
    return manfs


async def _download_fw_file(session, url, out_filename):
    """Best-effort download of url to out_filename (log, don't raise)."""
    try:
        LOGGER.info("Download '%s' to '%s'", url, out_filename)
        async with session.get(url) as rsp:
            data = await rsp.read()

        with open(out_filename, "wb") as ota_file:
            LOGGER.debug("Try to write '%s'", out_filename)
            ota_file.write(data)
    except Exception as e:
        # Deliberately broad: one failed download must not abort the rest.
        LOGGER.warning("Exception getting '%s': %s", url, e)


async def download_koenkk_ota(listener, ota_dir):
    """Fetch new FW images from the Koenkk zigbee-OTA index into ota_dir.

    Only images for manufacturers of joined devices that are not already
    present on disk are downloaded.
    """
    ota_files_on_disk = _ota_files_on_disk(ota_dir)
    manfs = _device_manf_codes(listener)

    LOGGER.debug(f"Get Koenkk FW list and check for manfs {manfs.keys()!r}")
    new_fw_info = {}
    # One session for the index fetch and all downloads (the original
    # opened a new ClientSession for every single file).
    async with aiohttp.ClientSession() as req:
        async with req.get(KOENKK_LIST_URL) as rsp:
            data = json.loads(await rsp.read())
        for fw_info in data:
            if fw_info["url"]:
                filename = fw_info["url"].split("/")[-1]
                # Try to get fw corresponding to device manufacturers
                fw_manf = fw_info["manufacturerCode"]

                if fw_manf in manfs and filename not in ota_files_on_disk:
                    LOGGER.debug(
                        "OTA file to download for manf %u (0x%04X): '%s'",
                        fw_manf,
                        fw_manf,
                        filename,
                    )
                    new_fw_info[filename] = fw_info

        for filename, fw_info in new_fw_info.items():
            await _download_fw_file(
                req, fw_info["url"], os.path.join(ota_dir, filename)
            )


async def download_sonoff_ota(listener, ota_dir):
    """Fetch new FW images from the SONOFF OTA index into ota_dir.

    Only images for manufacturers of joined devices that are not already
    present on disk are downloaded.
    """
    ota_files_on_disk = _ota_files_on_disk(ota_dir)
    manfs = _device_manf_codes(listener)

    LOGGER.debug(f"Get SONOFF FW list and check for manfs {manfs.keys()!r}")
    new_fw_info = {}
    async with aiohttp.ClientSession() as req:
        async with req.get(SONOFF_LIST_URL) as rsp:
            data = json.loads(await rsp.read())
        for fw_info in data:
            if fw_info["fw_binary_url"]:
                filename = fw_info["fw_binary_url"].split("/")[-1]
                # Try to get fw corresponding to device manufacturers
                fw_manf = fw_info["fw_manufacturer_id"]
                fw_model_id = fw_info["model_id"]

                # Note: could check against model id in the future
                if fw_manf in manfs and filename not in ota_files_on_disk:
                    LOGGER.debug(
                        "OTA file to download for manf %u (0x%04X)"
                        " Model:'%s': '%s'",
                        fw_manf,
                        fw_manf,
                        fw_model_id,
                        filename,
                    )
                    new_fw_info[filename] = fw_info

        for filename, fw_info in new_fw_info.items():
            await _download_fw_file(
                req, fw_info["fw_binary_url"], os.path.join(ota_dir, filename)
            )


async def download_zigpy_ota(app, listener):
    """Make zigpy's registered OTA providers refresh and fetch their images."""
    LOGGER.debug("Zigpy download procedure starting")
    for _, (ota, _) in app.ota._listeners.items():
        if isinstance(ota, zigpy.ota.provider.FileStore):
            # Skip files provider
            continue
        await ota.refresh_firmware_list()
        for image_key, image in ota._cache.items():
            url = getattr(image, "url", None)
            # Was LOGGER.error in the original - informational only.
            LOGGER.debug("Try getting %r, %r, %r", image_key, url, image)
            try:
                img = await app.ota.get_ota_image(
                    image_key.manufacturer_id, image_key.image_type, model=None
                )
                LOGGER.info("Got image %r", getattr(img, "header", None))
            except Exception as e:
                LOGGER.error("%r while getting %r - %s", e, image_key, url)


async def ota_update_images(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Refresh the firmware lists of all registered OTA providers."""
    for _, (ota, _) in app.ota._listeners.items():
        await ota.refresh_firmware_list()


async def ota_notify(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Notify a device that an OTA image may be available.

    Optionally downloads images first (params[p.DOWNLOAD]), then binds the
    Basic cluster and configures reporting of `sw_build_id` so the upgrade
    can be observed, and finally sends an Image Notify on the OTA cluster.
    """
    # Was LOGGER.error in the original - informational only.
    LOGGER.debug("OTA_notify")
    event_data["PAR"] = params
    if params[p.DOWNLOAD]:
        if params[p.PATH]:
            ota_dir = params[p.PATH]
        else:
            ota_dir = DEFAULT_OTAU

        LOGGER.debug(
            "OTA image download to '%s' (Default dir is:'%s')",
            ota_dir,
            DEFAULT_OTAU,
        )

        await download_zigpy_ota(app, listener)
        await download_koenkk_ota(listener, ota_dir)
        await download_sonoff_ota(listener, ota_dir)

    # Get tries
    tries = params[p.TRIES]

    # Update internal image database
    await ota_update_images(
        app, listener, ieee, cmd, data, service, params, event_data
    )

    if ieee is None:
        LOGGER.error("missing ieee")
        return

    LOGGER.debug("running 'image_notify' command: %s", service)

    device = app.get_device(ieee=ieee)

    # Find the first non-ZDO endpoint with an OTA (0x0019) client cluster.
    cluster = None
    for epid, ep in device.endpoints.items():
        if epid == 0:
            continue
        if 0x0019 in ep.out_clusters:
            cluster = ep.out_clusters[0x0019]
            break
    if cluster is None:
        LOGGER.debug("No OTA cluster found")
        return
    basic = device.endpoints[cluster.endpoint.endpoint_id].basic
    await u.retry_wrapper(basic.bind, tries=tries)
    ret = await u.retry_wrapper(
        basic.configure_reporting, "sw_build_id", 0, 1800, 1, tries=tries
    )
    LOGGER.debug("Configured reporting: %s", ret)

    ret = None
    if not u.is_zigpy_ge("0.45.0"):
        ret = await cluster.image_notify(0, 100)
    else:
        cmd_args = [0, 100]
        ret = await u.retry_wrapper(
            cluster.client_command,
            0,  # cmd_id
            *cmd_args,
            # expect_reply = True,
            tries=tries,
        )

    LOGGER.debug("Sent image notify command to 0x%04x: %s", device.nwk, ret)
    event_data["result"] = ret
LOGGER = logging.getLogger(__name__)


async def get_routes(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Log, for every known device, the relays (routes) zigpy knows about."""
    LOGGER.debug("getting routes command: %s", service)

    for dev in app.devices.values():
        if hasattr(dev, "relays"):
            status = f"has routes: {dev.relays}"
        else:
            status = "doesn't have routes"
        LOGGER.debug("Device %s/%s %s", dev.nwk, dev.model, status)

    LOGGER.debug("finished device get_routes")


async def backup(app, listener, ieee, cmd, data, service, params, event_data):
    """Backup Coordinator Configuration.

    Dispatches to the radio-specific backup implementation (ZNP or EZSP).
    Raises for unsupported radio types.
    """
    radio_type = u.get_radiotype(app)

    if radio_type == u.RadioType.ZNP:
        from . import znp

        await znp.znp_backup(
            app,
            listener,
            ieee,
            cmd,
            data,
            service,
            event_data=event_data,
            params=params,
        )
        await znp.znp_nvram_backup(
            app,
            listener,
            ieee,
            cmd,
            data,
            service,
            event_data=event_data,
            params=params,
        )
    elif radio_type == u.RadioType.EZSP:
        from . import ezsp

        await ezsp.ezsp_backup(
            app,
            listener,
            ieee,
            cmd,
            data,
            service,
            event_data=event_data,
            params=params,
        )
    else:
        raise Exception(f"Radio type {radio_type} not supported for backup")


async def handle_join(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Rediscover a device.
    ieee -- ieee of the device
    data -- nwk of the device in decimal format
    """
    LOGGER.debug("running 'handle_join' command: %s", service)
    if ieee is None:
        LOGGER.debug("Provide 'ieee' parameter for %s", cmd)
        raise ValueError("ieee parameter missing")

    dev = app.get_device(ieee=ieee)

    if data is None:
        if dev is None:
            LOGGER.debug(
                f"Device {ieee!r} missing in device table, provide NWK address"
            )
            raise Exception(f"Missing NWK for unknown device '{ieee}'")

        data = dev.nwk

    # Handle join will initialize the device if it isn't yet, otherwise
    # only scan groups
    # misc_reinitialise is more complete
    event_data["result"] = app.handle_join(u.str2int(data), ieee, None)


async def misc_reinitialize(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Reinitialize a device, rediscover endpoints
    ieee -- ieee of the device
    """
    if ieee is None:
        msg = f"Provide 'ieee' parameter for {cmd}"
        LOGGER.debug(msg)
        # Fix: the original raised ValueError(ieee), i.e. ValueError(None),
        # discarding the informative message it had just built.
        raise ValueError(msg)

    dev = app.get_device(ieee=ieee)
    LOGGER.debug(f"{ieee!r} - Set initialisations=False, call handle_join")
    # dev.has_non_zdo_endpoints = False  # Force rescan
    # Can't set: dev.non_zdo_endpoints = False  # Force rescan
    dev.endpoints = {0: dev.zdo}  # Force rescan

    # dev._znp = u.get_radio(app)
    # dev.node_desc = None  # Force rescan

    dev.all_endpoint_init = False  # Force rescan
    dev.model = None  # Force rescan
    dev.manufacturer = None  # Force rescan
    # event_data["result"] = await dev.schedule_initialize()
    event_data["result"] = await dev.initialize()


async def rejoin(app, listener, ieee, cmd, data, service, params, event_data):
    """Leave and rejoin command.
    data -- device ieee to allow joining through
    ieee -- ieee of the device to leave and rejoin
    """
    if ieee is None:
        LOGGER.error("missing ieee")
        return
    LOGGER.debug("running 'rejoin' command: %s", service)
    src = app.get_device(ieee=ieee)

    if data is None:
        await app.permit()
    else:
        await app.permit(node=t.EUI64.convert_ieee(data))

    method = 1
    res = None

    if method == 0:
        # Works on HA 2021.12.10 & ZNP - rejoin is 1:
        # Fix: `tries` is a keyword of retry_wrapper (as used elsewhere in
        # this project); passing it positionally made it an extra argument
        # to zdo.request.
        res = await u.retry_wrapper(
            src.zdo.request, 0x0034, src.ieee, 0x01, tries=params[p.TRIES]
        )
    elif method == 1:
        # Works on ZNP but apparently not on bellows:
        triesToGo = params[p.TRIES]
        tryIdx = 0
        event_data["success"] = False
        while triesToGo >= 1:
            triesToGo = triesToGo - 1
            tryIdx += 1
            try:
                LOGGER.debug(f"Leave with rejoin - try {tryIdx}")
                res = await src.zdo.leave(remove_children=False, rejoin=True)
                event_data["success"] = True
                triesToGo = 0  # Stop loop
                # event_data["success"] = (
                #     resf[0][0].status == f.Status.SUCCESS
                # )
            except (
                DeliveryError,
                ControllerException,
                asyncio.TimeoutError,
            ) as d:
                event_data["errors"].append(repr(d))
                continue
            except Exception as e:  # Catch all others
                triesToGo = 0  # Stop loop
                LOGGER.debug("Leave with rejoin exception %s", e)
                event_data["errors"].append(repr(e))

    elif method == 2:
        # Results in rejoin bit 0 on ZNP
        LOGGER.debug("Using Method 2 for Leave")
        res = await u.retry_wrapper(
            src.zdo.request, 0x0034, src.ieee, 0x80, tries=params[p.TRIES]
        )
    elif method == 3:
        # Results in rejoin and leave children bit set on ZNP
        LOGGER.debug("Using Method 3 for Leave")
        res = await u.retry_wrapper(
            src.zdo.request, 0x0034, src.ieee, 0xFF, tries=params[p.TRIES]
        )
    elif method == 4:
        # Results in rejoin and leave children bit set on ZNP
        LOGGER.debug("Using Method 4 for Leave")
        res = await u.retry_wrapper(
            src.zdo.request, 0x0034, src.ieee, 0x83, tries=params[p.TRIES]
        )
    else:
        res = "Not executed, no valid 'method' defined in code"

    event_data["result"] = res
    LOGGER.debug("%s -> %s: leave and rejoin result: %s", src, ieee, res)


async def misc_settime(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Write current time, timezone and DST window to a device Time cluster.

    Computes the surrounding DST transition pair from the local pytz
    timezone, then writes Time (0x0000), Timezone (0x0002), DstStart
    (0x0003), DstEnd (0x0004) and DstShift (0x0005).
    """
    from bisect import bisect
    from datetime import datetime

    import pytz
    from homeassistant.util.dt import DEFAULT_TIME_ZONE, utcnow

    LOGGER.debug(f"Default time zone {DEFAULT_TIME_ZONE}")
    tz = pytz.timezone(str(DEFAULT_TIME_ZONE))

    utc_time = utcnow().astimezone(pytz.UTC).replace(tzinfo=None)
    # NOTE(review): relies on pytz's private `_utc_transition_times` table;
    # bisect() always returns an int, so the `is not None` guard below can
    # never be False - kept for behavioral compatibility.
    index = bisect(
        tz._utc_transition_times, utc_time  # type:ignore[union-attr]
    )

    if index is not None:
        if (
            tz._utc_transition_times[index]  # type:ignore[union-attr]
            .replace(tzinfo=pytz.UTC)
            .astimezone(tz)
            .dst()
            .total_seconds()
            == 0
        ):
            # First date must be start of dst period
            index = index - 1

        dst1_obj = tz._utc_transition_times[index]  # type:ignore[union-attr]
        dst2_obj = tz._utc_transition_times[  # type:ignore[union-attr]
            index + 1
        ]
        epoch2000 = datetime(2000, 1, 1, tzinfo=None)
        dst1 = (dst1_obj - epoch2000).total_seconds()
        dst2 = (dst2_obj - epoch2000).total_seconds()
        dst1_aware = tz._utc_transition_times[  # type:ignore[union-attr]
            index
        ].replace(tzinfo=pytz.UTC)
        dst2_aware = tz._utc_transition_times[  # type:ignore[union-attr]
            index + 1
        ].replace(tzinfo=pytz.UTC)

        dst1_local = dst1_aware.astimezone(tz)
        dst2_local = dst2_aware.astimezone(tz)

        dst_shift = dst1_local.dst().total_seconds()
        utc_offset = dst2_local.utcoffset().total_seconds()

        LOGGER.debug(
            f"Next dst changes {dst1_obj} .. {dst2_obj}"
            f" EPOCH 2000 {dst1} .. {dst2}"
        )
        LOGGER.debug(
            f"Local {dst1_local} {dst2_local} in {tz}"
            f" {dst1_local.dst().total_seconds()}"
            f" {dst2_local.dst().total_seconds()}"
        )
        LOGGER.debug(f"UTC OFFSET: {utc_offset} DST OFFSET: {dst_shift}")

    dev = app.get_device(ieee=ieee)
    params[p.CLUSTER_ID] = 0x000A  # Time Cluster
    cluster = u.get_cluster_from_params(dev, params, event_data)

    # Prepare read and write lists
    attr_read_list = [
        0,
        1,
        2,
        3,
        4,
        5,
    ]  # Time, Timestatus, Timezone, DstStart, DstEnd, DstShift

    if params[p.READ_BEFORE_WRITE]:
        event_data["read_before"] = await cluster.read_attributes(
            attr_read_list
        )

    # Zigbee time is seconds since 2000-01-01 00:00:00 UTC.
    EPOCH2000_TIMESTAMP = 946684800
    utctime_towrite = utcnow().timestamp() - EPOCH2000_TIMESTAMP
    attr_write_list = {
        0x0000: utctime_towrite,  # Time
        0x0002: utc_offset,  # Timezone - int32
        0x0003: dst1,  # DstStart - uint32
        0x0004: dst2,  # DstEnd - uint32
        0x0005: dst_shift,  # DstShift - uint32
    }

    event_data["result_write"] = await cluster.write_attributes(
        attr_write_list
    )

    if params[p.READ_AFTER_WRITE]:
        event_data["read_after"] = await cluster.read_attributes(
            attr_read_list
        )
LOGGER = logging.getLogger(__name__)


async def ezsp_set_channel(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Change the coordinator's radio channel (EZSP only).

    data -- the new channel number (11..26).  Broadcasts a
    Mgmt_NWK_Update_req so routers follow, then switches the local radio.
    """
    ch = t.uint8_t(data)
    # Fix: the original had `assert 11 << ch << 26` - shift operators
    # instead of comparisons, which is always truthy for ch > 0.
    assert 11 <= ch <= 26
    ch_mask = zigpy.types.Channels(1 << ch)

    LOGGER.info("Setting EZSP channel to: %s/%s", ch, ch_mask)

    aps_frame = bellows.types.EmberApsFrame(
        profileId=0x0000,
        clusterId=zigpy.zdo.types.ZDOCmd.Mgmt_NWK_Update_req,
        sourceEndpoint=0x00,
        destinationEndpoint=0x00,
        options=bellows.types.EmberApsOption.APS_OPTION_NONE,
        groupId=0x0000,
        sequence=0xDE,
    )

    status, _, network_params = await app._ezsp.getNetworkParameters()
    if status != bellows.types.EmberStatus.SUCCESS:
        msg = (
            f"Couldn't get network parameters, abort channel change: {status}"
        )
        event_data["errors"].append(msg)
        raise Exception(msg)

    event_data["nwk_params"] = network_params

    payload = b"\xDE" + ch_mask.serialize() + b"\xFE"
    payload += network_params.nwkUpdateId.serialize()

    status, _ = await app._ezsp.sendBroadcast(
        zigpy.types.BroadcastAddress.ALL_DEVICES,
        aps_frame,
        0x00,
        0x01,
        payload,
    )
    success = status == bellows.types.EmberStatus.SUCCESS
    event_data["success"] = success

    if not success:
        return

    res = await app._ezsp.setRadioChannel(ch)
    event_data["result"] = res
    LOGGER.info("Set channel status: %s", res)


async def ezsp_get_token(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Dump EZSP tokens 0..30 (status and raw data) into event_data."""
    # NOTE: the original converted `data` to a token id but immediately
    # shadowed it with the loop variable - `data` is unused here.
    event_data["tokens_info"] = {}
    for token in range(0, 31):
        LOGGER.info(f"Getting {token} token...")
        res = await app._ezsp.getToken(token)
        tkInfo = {
            "status": res[0],
            "data": binascii.hexlify(res[1].serialize()),
        }
        event_data["tokens_info"][token] = tkInfo
        LOGGER.info(f"Getting token {token} status: {res[0]}")
        LOGGER.info(f"Getting token {token} data: {res[1]}")
        LOGGER.info(
            # Fix: second part was missing the f-prefix, so the literal
            # "{binascii.hexlify(...)}" was logged instead of the value.
            f"Getting token {token} data: "
            f"{binascii.hexlify(res[1].serialize())}"
        )


async def ezsp_start_mfg(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Briefly start the EZSP manufacturing library, set channel 11, stop."""
    event_data["results"] = []
    LOGGER.info("Starting mfg lib")
    res = await app._ezsp.mfglibStart(True)
    event_data["results"].append(res)
    LOGGER.info("starting mfg lib result: %s", res)

    channel = 11
    res = await app._ezsp.mfglibSetChannel(channel)
    event_data["results"].append(res)
    LOGGER.info("mfg lib change channel: %s", res)

    res = await app._ezsp.mfglibEnd()
    event_data["results"].append(res)
    # Fix: the original logged "mfg lib change channel" here (copy-paste).
    LOGGER.info("mfg lib end: %s", res)


async def ezsp_get_keys(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Read the whole EZSP link-key table; optionally erase entries.

    data -- truthy to erase each successfully read key entry.
    """
    LOGGER.info("getting all keys")
    result = {}
    erase = data is not None and data
    warnings = []

    for idx in range(0, 192):
        LOGGER.debug("Getting key index %s", idx)
        (status, key_struct) = await app._ezsp.getKeyTableEntry(idx)
        if status == app._ezsp.types.EmberStatus.SUCCESS:
            result[idx] = key_struct
            if key_struct.partnerEUI64 not in app.devices:
                warn = "Partner {} for key {} is not present".format(
                    key_struct.partnerEUI64,
                    idx,
                )
                warnings.append(warn)
                LOGGER.warning(warn)
            if erase:
                await app._ezsp.eraseKeyTableEntry(idx)
        elif status == app._ezsp.types.EmberStatus.INDEX_OUT_OF_RANGE:
            # Past the end of the key table - stop scanning.
            break
        else:
            warn = f"No key at {idx} idx: {status}"
            warnings.append(warn)
            LOGGER.warning(warn)

    event_data["warnings"] = warnings
    event_data["result"] = result
    for idx, item in result.items():
        LOGGER.info("EZSP %s key: %s", idx, item)
    _, _, nwkParams = await app._ezsp.getNetworkParameters()
    LOGGER.info("Current network: %s", nwkParams)
    event_data["network"] = nwkParams


async def ezsp_add_transient_key(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Install the well-known 'ZigbeeAlliance09' link key for a device."""
    LOGGER.info("adding well known link key as transient key")
    if ieee is None:
        msg = "No ieee to install transient key for"
        LOGGER.error(msg)
        raise Exception(msg)

    (status,) = await app._ezsp.addTransientLinkKey(ieee, b"ZigbeeAlliance09")
    LOGGER.debug("Installed key for %s: %s", ieee, status)
    event_data["result"] = status


async def ezsp_get_ieee_by_nwk(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Resolve a NWK (short) address to an IEEE (EUI64) address."""
    LOGGER.info("Lookup IEEE by nwk")
    nwk = u.str2int(data)
    status, eui64 = await app._ezsp.lookupEui64ByNodeId(nwk)
    LOGGER.debug("nwk: 0x%04x, ieee: %s, status: %s", nwk, eui64, status)
    event_data["nwk"] = nwk
    event_data["ieee"] = repr(eui64)
    event_data["status"] = status


async def ezsp_get_policy(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Read an EZSP policy value.  data -- the policy id (int)."""
    policy = int(data)

    LOGGER.info("Getting EZSP %s policy id", policy)
    _status, value = await app._ezsp.getPolicy(policy)
    LOGGER.debug(
        "policy: %s, value: %s", app._ezsp.types.EzspPolicyId(policy), value
    )
    event_data["policy"] = repr(app._ezsp.types.EzspPolicyId(policy))
    event_data["policy_value"] = repr(value)


async def ezsp_clear_keys(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Clear the EZSP key table."""
    LOGGER.info("Clear key table")
    (status,) = await app._ezsp.clearKeyTable()
    LOGGER.info("Cleared key table: %s", status)
    event_data["status"] = status


async def ezsp_get_config_value(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Read an EZSP configuration value.  data -- the config id."""
    if data is None:
        msg = "Need EZSP config value"
        LOGGER.error(msg)
        raise Exception(msg)

    cfg_id = app._ezsp.types.EzspConfigId(data)
    LOGGER.info("Getting EZSP configuration value: %s", cfg_id)
    (status, value) = await app._ezsp.getConfigurationValue(cfg_id)
    if status != app._ezsp.types.EzspStatus.SUCCESS:
        msg = f"Couldn't get {status} configuration value: {cfg_id}"
        LOGGER.error(msg)
        raise Exception(msg)

    LOGGER.info("%s = %s", cfg_id.name, value)
    event_data["result"] = value


async def ezsp_get_value(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Read an EZSP value.  data -- the value id."""
    if data is None:
        msg = "Need EZSP value id"
        LOGGER.error(msg)
        raise Exception(msg)

    value_id = app._ezsp.types.EzspValueId(data)
    LOGGER.info("Getting EZSP value: %s", value_id)
    (status, value) = await app._ezsp.getValue(value_id)
    if status != app._ezsp.types.EzspStatus.SUCCESS:
        msg = f"Couldn't get {status} value: {value_id}"
        LOGGER.error(msg)
        raise Exception(msg)

    LOGGER.info("%s = %s", value_id.name, value)
    event_data["ezsp_" + value_id.name] = repr(value)


# Legacy implementation
#
# See https://github.com/zigpy/bellows/tree/dev/bellows/cli
#
# Code essentially from
# https://github.com/zigpy/bellows/blob/dev/bellows/cli/backup.py
#
async def ezsp_backup_legacy(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Write a network backup JSON file using bellows' legacy cli helpers.

    data -- optional suffix for the output file name.
    """
    if u.get_radiotype(app) != u.RadioType.EZSP:
        msg = f"'{cmd}' is only available for BELLOWS/EZSP"
        LOGGER.debug(msg)
        raise Exception(msg)

    # Import stuff we need
    import json

    from bellows.cli.backup import (  # isort:skip
        ATTR_NODE_TYPE,
        ATTR_NODE_ID,
        ATTR_NODE_EUI64,
        ATTR_PAN_ID,
        ATTR_EXT_PAN_ID,
        ATTR_RADIO_CHANNEL,
        ATTR_RADIO_TX_PWR,
        ATTR_NWK_UPDATE_ID,
        ATTR_CHANNELS,
        ATTR_KEY_GLOBAL,
        ATTR_KEY_NWK,
        ATTR_KEY_PARTNER,
        ATTR_KEY_TABLE,
        _backup_keys,
    )

    (status, node_type, network) = await app._ezsp.getNetworkParameters()
    assert status == bt.EmberStatus.SUCCESS
    assert node_type == app._ezsp.types.EmberNodeType.COORDINATOR
    LOGGER.debug("Network params: %s", network)

    (node_id,) = await app._ezsp.getNodeId()
    (ieee,) = await app._ezsp.getEui64()

    result = {
        ATTR_NODE_TYPE: node_type.value,
        ATTR_NODE_ID: node_id,
        ATTR_NODE_EUI64: str(ieee),
        ATTR_PAN_ID: network.panId,
        ATTR_EXT_PAN_ID: str(network.extendedPanId),
        ATTR_RADIO_CHANNEL: network.radioChannel,
        ATTR_RADIO_TX_PWR: network.radioTxPower,
        ATTR_NWK_UPDATE_ID: network.nwkUpdateId,
        ATTR_CHANNELS: network.channels,
    }

    for key_name, key_type in (
        (ATTR_KEY_GLOBAL, app._ezsp.types.EmberKeyType.TRUST_CENTER_LINK_KEY),
        (ATTR_KEY_NWK, app._ezsp.types.EmberKeyType.CURRENT_NETWORK_KEY),
    ):
        (status, key) = await app._ezsp.getKey(key_type)
        assert status == bt.EmberStatus.SUCCESS
        LOGGER.debug("%s key: %s", key_name, key)
        result[key_name] = key.as_dict()
        #
        result[key_name][ATTR_KEY_PARTNER] = str(key.partnerEUI64)

    keys = await _backup_keys(app._ezsp)
    result[ATTR_KEY_TABLE] = keys

    # Store backup information to file

    # Set name with regards to local path
    out_dir = u.get_local_dir()

    # Ensure that data is an empty string when not set
    if data is None:
        data = ""

    fname = out_dir + "nwk_backup" + str(data) + ".json"

    with open(fname, "w", encoding="utf_8") as jsonfile:
        jsonfile.write(json.dumps(result, indent=4))


async def ezsp_dummy_networkInit():
    """Fake networkInit result - the network is already initialised."""
    return (bellows.types.EmberStatus.SUCCESS,)


async def ezsp_click_get_echo(s):
    """Capture bellows cli 'echo' output into bellows.cli._result."""
    LOGGER.error(f"GET_ECHO: {s}")
    bellows.cli._result = s


async def ezsp_backup(
    app, listener, ieee, cmd, data, service, params, event_data
):
    """Write a network backup JSON file using bellows' backup function.

    Temporarily replaces `networkInit` with a stub because the network is
    already initialised when running inside Home Assistant.
    data -- optional suffix for the output file name.
    """
    if u.get_radiotype(app) != u.RadioType.EZSP:
        msg = f"'{cmd}' is only available for BELLOWS/EZSP"
        LOGGER.debug(msg)
        raise Exception(msg)

    # Import stuff we need
    import io
    import json
    from contextlib import redirect_stdout

    from bellows.cli import backup as bellows_backup

    try:
        # Network is already initialised, fake result for backup function
        org_network_init = app._ezsp.networkInit
        app._ezsp.networkInit = ezsp_dummy_networkInit
        f = io.StringIO()
        with redirect_stdout(f):
            await bellows_backup._backup(app._ezsp)
        result = f.getvalue()
    finally:
        app._ezsp.networkInit = org_network_init  # pylint: disable=E0601

    # Store backup information to file

    # Set name with regards to local path
    out_dir = u.get_local_dir()

    # Ensure that data is an empty string when not set
    if data is None:
        data = ""

    fname = out_dir + "nwk_backup" + str(data) + ".json"

    with open(fname, "w", encoding="utf_8") as jsonfile:
        jsonfile.write(json.dumps(json.loads(result), indent=4))
import utils as u 12 | from .params import INTERNAL_PARAMS as p 13 | 14 | LOGGER = logging.getLogger(__name__) 15 | 16 | 17 | @u.retryable( 18 | ( 19 | DeliveryError, 20 | ControllerException, 21 | asyncio.CancelledError, 22 | asyncio.TimeoutError, 23 | ), 24 | tries=3, 25 | ) 26 | async def read_attr(cluster, attrs, manufacturer=None): 27 | return await cluster.read_attributes( 28 | attrs, allow_cache=False, manufacturer=manufacturer 29 | ) 30 | 31 | 32 | async def scan_results(device, endpoints=None, manufacturer=None, tries=3): 33 | """Construct scan results from information available in device""" 34 | result: dict[str, str | list | None] = { 35 | "ieee": str(device.ieee), 36 | "nwk": f"0x{device.nwk:04x}", 37 | } 38 | 39 | LOGGER.debug("Scanning device 0x{%04x}", device.nwk) 40 | 41 | # Get list of endpoints 42 | # None -> all endpoints 43 | # List or id -> Provided endpoints 44 | if endpoints is not None and isinstance(endpoints, int): 45 | endpoints = [endpoints] 46 | 47 | if ( 48 | endpoints is None 49 | or not isinstance(endpoints, list) 50 | or len(endpoints) == 0 51 | ): 52 | endpoints = [] 53 | for epid, _ep in device.endpoints.items(): 54 | endpoints.append(epid) 55 | 56 | LOGGER.debug("Endpoints %s", endpoints) 57 | 58 | ep_result = [] 59 | for epid in endpoints: 60 | if epid == 0: 61 | continue 62 | if epid in device.endpoints: 63 | LOGGER.debug("scanning endpoint #%i", epid) 64 | ep = device.endpoints[epid] 65 | result["model"] = ep.model 66 | result["manufacturer"] = ep.manufacturer 67 | if u.isManf(ep.manufacturer_id): 68 | result["manufacturer_id"] = f"0x{ep.manufacturer_id}" 69 | else: 70 | result["manufacturer_id"] = None 71 | endpoint = { 72 | "id": epid, 73 | "device_type": f"0x{ep.device_type:04x}", 74 | "profile": f"0x{ep.profile_id:04x}", 75 | } 76 | if epid != 242: 77 | LOGGER.debug( 78 | "Scanning endpoint #%i with manf '%r'", epid, manufacturer 79 | ) 80 | endpoint.update( 81 | await scan_endpoint(ep, manufacturer, tries=tries) 82 | ) 83 
| if not u.isManf(manufacturer) and u.isManf(ep.manufacturer_id): 84 | LOGGER.debug( 85 | "Scanning endpoint #%i with manf '%r'", 86 | epid, 87 | ep.manufacturer_id, 88 | ) 89 | endpoint.update( 90 | await scan_endpoint( 91 | ep, ep.manufacturer_id, tries=tries 92 | ) 93 | ) 94 | ep_result.append(endpoint) 95 | 96 | result["endpoints"] = ep_result 97 | return result 98 | 99 | 100 | async def scan_endpoint(ep, manufacturer=None, tries=3): 101 | result = {} 102 | clusters = {} 103 | for cluster in ep.in_clusters.values(): 104 | LOGGER.debug( 105 | "Scanning input cluster 0x{:04x}/'{}' ".format( 106 | cluster.cluster_id, cluster.ep_attribute 107 | ) 108 | ) 109 | key = f"0x{cluster.cluster_id:04x}" 110 | clusters[key] = await scan_cluster( 111 | cluster, is_server=True, manufacturer=manufacturer, tries=tries 112 | ) 113 | result["in_clusters"] = dict(sorted(clusters.items(), key=lambda k: k[0])) 114 | 115 | clusters = {} 116 | for cluster in ep.out_clusters.values(): 117 | LOGGER.debug( 118 | "Scanning output cluster 0x{:04x}/'{}'".format( 119 | cluster.cluster_id, cluster.ep_attribute 120 | ) 121 | ) 122 | key = f"0x{cluster.cluster_id:04x}" 123 | clusters[key] = await scan_cluster(  # NOTE(review): is_server=True here too, yet these are out_clusters; scan_cluster's is_server flag exists to swap the commands_received/commands_generated labels — confirm whether is_server=False was intended 124 | cluster, is_server=True, manufacturer=manufacturer, tries=tries 125 | ) 126 | result["out_clusters"] = dict(sorted(clusters.items(), key=lambda k: k[0])) 127 | return result 128 | 129 | 130 | async def scan_cluster(cluster, is_server=True, manufacturer=None, tries=3): 131 | if is_server: 132 | cmds_gen = "commands_generated" 133 | cmds_rec = "commands_received" 134 | else: 135 | cmds_rec = "commands_generated" 136 | cmds_gen = "commands_received" 137 | attributes = await discover_attributes_extended(cluster, None, tries=tries) 138 | LOGGER.debug("scan_cluster attributes (none): %s", attributes) 139 | if u.isManf(manufacturer): 140 | LOGGER.debug( 141 | "scan_cluster attributes (none) with manf '%s': %s", 142 | manufacturer, 143 | attributes, 144 | ) 145 | attributes.update(
await discover_attributes_extended( 147 | cluster, manufacturer, tries=tries 148 | ) 149 | ) 150 | 151 | # LOGGER.debug("scan_cluster attributes: %s", attributes) 152 | 153 | return { 154 | "cluster_id": f"0x{cluster.cluster_id:04x}", 155 | "title": cluster.name, 156 | "name": cluster.ep_attribute, 157 | "attributes": attributes, 158 | cmds_rec: await discover_commands_received( 159 | cluster, is_server, tries=tries 160 | ), 161 | cmds_gen: await discover_commands_generated( 162 | cluster, is_server, tries=tries 163 | ), 164 | } 165 | 166 | 167 | async def discover_attributes_extended(cluster, manufacturer=None, tries=3): 168 | LOGGER.debug("Discovering attributes extended") 169 | result = {} 170 | to_read = [] 171 | attr_id = 0 # Start discovery at attr_id 0 172 | done = False 173 | 174 | while not done: # Repeat until all attributes are discovered or timeout 175 | try: 176 | done, rsp = await u.retry_wrapper( 177 | cluster.discover_attributes_extended, 178 | attr_id, # Start attribute identifier 179 | 16, # Number of attributes to discover in this request 180 | manufacturer=manufacturer, 181 | tries=tries, 182 | ) 183 | await asyncio.sleep(0.2) 184 | except (ValueError, DeliveryError, asyncio.TimeoutError) as ex: 185 | LOGGER.error( 186 | ( 187 | "Failed 'discover_attributes_extended'" 188 | " starting 0x%04x/0x%04x." 
189 | " Error: %s" 190 | ), 191 | cluster.cluster_id, 192 | attr_id, 193 | ex, 194 | ) 195 | break 196 | if isinstance(rsp, foundation.Status): 197 | LOGGER.error( 198 | "got %s status for discover_attribute starting 0x%04x/0x%04x", 199 | rsp, 200 | cluster.cluster_id, 201 | attr_id, 202 | ) 203 | break 204 | LOGGER.debug("Cluster %s attr_recs: %s", cluster.cluster_id, rsp) 205 | for attr_rec in rsp: # Get attribute information from response 206 | attr_id = attr_rec.attrid 207 | attr_id = attr_rec.attrid 208 | attr_def = cluster.attributes.get( 209 | attr_rec.attrid, (str(attr_rec.attrid), None) 210 | ) 211 | if u.is_zigpy_ge("0.50.0") and isinstance( 212 | attr_def, foundation.ZCLAttributeDef 213 | ): 214 | attr_name = attr_def.name 215 | else: 216 | attr_name = attr_def[0] 217 | attr_type = foundation.DATA_TYPES.get(attr_rec.datatype) 218 | access_acl = t.uint8_t(attr_rec.acl) 219 | 220 | if attr_rec.datatype not in [0x48] and ( 221 | access_acl & foundation.AttributeAccessControl.READ != 0 222 | ): 223 | to_read.append(attr_id) 224 | 225 | attr_type_hex = f"0x{attr_rec.datatype:02x}" 226 | if attr_type: 227 | attr_type = [ 228 | attr_type_hex, 229 | attr_type[1].__name__, 230 | attr_type[2].__name__, 231 | ] 232 | else: 233 | attr_type = attr_type_hex 234 | try: 235 | access = re.sub( 236 | "^.*\\.", 237 | "", 238 | str(foundation.AttributeAccessControl(access_acl)), 239 | ) 240 | except ValueError: 241 | access = "undefined" 242 | 243 | result[attr_id] = { 244 | "attribute_id": f"0x{attr_id:04x}", 245 | "attribute_name": attr_name, 246 | "value_type": attr_type, 247 | "access": access, 248 | "access_acl": access_acl, 249 | } 250 | if u.isManf(manufacturer): 251 | result[attr_id]["manf_id"] = manufacturer 252 | attr_id += 1 253 | await asyncio.sleep(0.2) 254 | 255 | LOGGER.debug("Reading attrs: %s", to_read) 256 | chunk, to_read = to_read[:4], to_read[4:] 257 | while chunk: 258 | try: 259 | chunk = sorted(chunk) 260 | success, failed = await read_attr( 261 | 
cluster, chunk, manufacturer, tries=tries 262 | ) 263 | LOGGER.debug( 264 | "Reading attr success: %s, failed %s", success, failed 265 | ) 266 | for attr_id, value in success.items(): 267 | if isinstance(value, bytes): 268 | try: 269 | value = value.split(b"\x00")[0].decode().strip() 270 | except UnicodeDecodeError: 271 | value = value.hex() 272 | result[attr_id]["attribute_value"] = value 273 | except (DeliveryError, asyncio.TimeoutError) as ex: 274 | LOGGER.error( 275 | "Couldn't read 0x%04x/0x%04x: %s", 276 | cluster.cluster_id, 277 | attr_id, 278 | ex, 279 | ) 280 | chunk, to_read = to_read[:4], to_read[4:] 281 | await asyncio.sleep(0.2) 282 | 283 | return {f"0x{a_id:04x}": result[a_id] for a_id in sorted(result)} 284 | 285 | 286 | async def discover_commands_received( 287 | cluster, is_server, manufacturer=None, tries=3 288 | ): 289 | from zigpy.zcl.foundation import Status 290 | 291 | LOGGER.debug("Discovering commands received") 292 | # direction = "received" if is_server else "generated" # noqa: F841 293 | result = {} 294 | cmd_id = 0 # Discover commands starting from 0 295 | done = False 296 | 297 | while not done: 298 | try: 299 | done, rsp = await u.retry_wrapper( 300 | cluster.discover_commands_received, 301 | cmd_id, # Start index of commands to discover 302 | 16, # Number of commands to discover 303 | manufacturer=manufacturer, 304 | tries=tries, 305 | ) 306 | await asyncio.sleep(0.2) 307 | except (ValueError, DeliveryError, asyncio.TimeoutError) as ex: 308 | LOGGER.error( 309 | "Failed to discover 0x%04x commands starting %s. 
Error: %s", 310 | cluster.cluster_id, 311 | cmd_id, 312 | ex, 313 | ) 314 | break 315 | if isinstance(rsp, Status): 316 | LOGGER.error( 317 | "got %s status for discover_commands starting %s", rsp, cmd_id 318 | ) 319 | break 320 | for cmd_id in rsp: 321 | cmd_def = cluster.server_commands.get( 322 | cmd_id, (str(cmd_id), "not_in_zcl", None) 323 | ) 324 | if u.is_zigpy_ge("0.50.0") and isinstance( 325 | cmd_def, foundation.ZCLCommandDef 326 | ): 327 | cmd_name = cmd_def.name 328 | cmd_args = cmd_def.schema 329 | else: 330 | cmd_name, cmd_args, _ = cmd_def 331 | 332 | if not isinstance(cmd_args, str): 333 | try: 334 | cmd_args = [arg.__name__ for arg in cmd_args] 335 | except TypeError: 336 | # Unexpected type, get repr to make sure it is ok for json 337 | cmd_args = f"{cmd_args!r}" 338 | 339 | key = f"0x{cmd_id:02x}" 340 | result[key] = { 341 | "command_id": f"0x{cmd_id:02x}", 342 | "command_name": cmd_name, 343 | "command_arguments": cmd_args, 344 | } 345 | cmd_id += 1 346 | await asyncio.sleep(0.2) 347 | return dict(sorted(result.items(), key=lambda k: k[0])) 348 | 349 | 350 | async def discover_commands_generated( 351 | cluster, is_server, manufacturer=None, tries=3 352 | ): 353 | from zigpy.zcl.foundation import Status 354 | 355 | LOGGER.debug("Discovering commands generated") 356 | # direction = "generated" if is_server else "received" # noqa: F841 357 | result = {} 358 | cmd_id = 0 # Initial index of commands to discover 359 | done = False 360 | 361 | while not done: 362 | try: 363 | done, rsp = await u.retry_wrapper( 364 | cluster.discover_commands_generated, 365 | cmd_id, # Start index of commands to discover 366 | 16, # Number of commands to discover this run 367 | manufacturer=manufacturer, 368 | tries=tries, 369 | ) 370 | await asyncio.sleep(0.2) 371 | except (ValueError, DeliveryError, asyncio.TimeoutError) as ex: 372 | LOGGER.error( 373 | "Failed to discover generated 0x%04X commands" 374 | " starting %s. 
Error: %s", 375 | cluster.cluster_id, 376 | cmd_id, 377 | ex, 378 | ) 379 | break 380 | if isinstance(rsp, Status): 381 | LOGGER.error( 382 | "got %s status for discover_commands starting %s", rsp, cmd_id 383 | ) 384 | break 385 | for cmd_id in rsp: 386 | cmd_def = cluster.client_commands.get( 387 | cmd_id, (str(cmd_id), "not_in_zcl", None) 388 | ) 389 | if u.is_zigpy_ge("0.50.0") and isinstance( 390 | cmd_def, foundation.ZCLCommandDef 391 | ): 392 | cmd_name = cmd_def.name 393 | cmd_args = cmd_def.schema 394 | else: 395 | cmd_name, cmd_args, _ = cmd_def 396 | 397 | if not isinstance(cmd_args, str): 398 | try: 399 | cmd_args = [arg.__name__ for arg in cmd_args] 400 | except (TypeError, AttributeError): 401 | # Unexpected type, get repr to make sure it is ok for json 402 | cmd_args = f"{cmd_args!r}" 403 | 404 | key = f"0x{cmd_id:02x}" 405 | result[key] = { 406 | "command_id": f"0x{cmd_id:02x}", 407 | "command_name": cmd_name, 408 | "command_args": cmd_args, 409 | } 410 | cmd_id += 1 411 | await asyncio.sleep(0.2) 412 | return dict(sorted(result.items(), key=lambda k: k[0])) 413 | 414 | 415 | async def scan_device( 416 | app, listener, ieee, cmd, data, service, params, event_data 417 | ): 418 | if ieee is None: 419 | LOGGER.error("missing ieee") 420 | raise Exception("missing ieee") 421 | 422 | LOGGER.debug("Running 'scan_device'") 423 | 424 | device = app.get_device(ieee) 425 | 426 | endpoints = params[p.EP_ID] 427 | manf = params[p.MANF] 428 | tries = params[p.TRIES] 429 | 430 | if endpoints is None: 431 | endpoints = [] 432 | elif isinstance(endpoints, int): 433 | endpoints = [endpoints] 434 | elif not isinstance(endpoints, list): 435 | raise ValueError("Endpoint must be int or list of int") 436 | 437 | endpoints = sorted(set(endpoints)) # Uniqify and sort 438 | 439 | scan = await scan_results( 440 | device, endpoints, manufacturer=manf, tries=tries 441 | ) 442 | 443 | event_data["scan"] = scan 444 | 445 | model = scan.get("model") 446 | manufacturer = 
scan.get("manufacturer") 447 | 448 | if len(endpoints) == 0: 449 | ep_str = "" 450 | else: 451 | ep_str = "_" + ("_".join([f"{e:02x}" for e in endpoints])) 452 | 453 | postfix = f"{ep_str}_scan_results.txt" 454 | 455 | # Set a unique filename for each device, using the manf name and 456 | # the variable part of the device mac address 457 | if model is not None and u.isManf(manufacturer): 458 | ieee_tail = "".join([f"{o:02x}" for o in ieee[4::-1]]) 459 | file_name = f"{model}_{manufacturer}_{ieee_tail}{postfix}" 460 | else: 461 | ieee_tail = "".join([f"{o:02x}" for o in ieee[::-1]]) 462 | file_name = f"{ieee_tail}{postfix}" 463 | 464 | u.write_json_to_file( 465 | scan, 466 | subdir="scans", 467 | fname=file_name, 468 | desc="scan results", 469 | listener=listener, 470 | normalize_name=True, 471 | ) 472 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/binds.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import logging 4 | 5 | import zigpy.zcl.foundation as f 6 | from zigpy import types as t 7 | from zigpy.zdo.types import MultiAddress, ZDOCmd 8 | 9 | from . 
import utils as u 10 | from .params import INTERNAL_PARAMS as p 11 | 12 | LOGGER = logging.getLogger(__name__) 13 | 14 | BINDABLE_OUT_CLUSTERS = [ 15 | 0x0006, # OnOff 16 | 0x0008, # Level 17 | 0x0300, # Color Control 18 | ] 19 | BINDABLE_IN_CLUSTERS = [ 20 | 0x0402, # Temperature 21 | ] 22 | 23 | 24 | async def bind_group( 25 | app, listener, ieee, cmd, data, service, params, event_data 26 | ): 27 | LOGGER.debug("running 'bind group' command: %s", service) 28 | if ieee is None: 29 | LOGGER.error("missing ieee") 30 | return 31 | 32 | src_dev = app.get_device(ieee=ieee) 33 | 34 | # Get tries 35 | tries = params[p.TRIES] 36 | 37 | if not data: 38 | LOGGER.error("missing cmd_data") 39 | return 40 | 41 | group_id = u.str2int(data) 42 | zdo = src_dev.zdo 43 | src_out_cls = BINDABLE_OUT_CLUSTERS 44 | src_in_cls = BINDABLE_IN_CLUSTERS 45 | 46 | # find src ep_id 47 | dst_addr = MultiAddress() 48 | dst_addr.addrmode = t.uint8_t(1) 49 | dst_addr.nwk = t.uint16_t(group_id) 50 | u_epid = params[p.EP_ID] 51 | u_cluster_id = params[p.CLUSTER_ID] 52 | 53 | if u_cluster_id is not None: 54 | src_out_cls = [u_cluster_id] 55 | src_in_cls = [u_cluster_id] 56 | 57 | results: dict[int, list[dict[str, int]]] = {} 58 | for src_out_cluster in src_out_cls: 59 | src_epid = None 60 | for ep_id, ep in src_dev.endpoints.items(): 61 | if u_epid is not None and ep_id != u_epid: 62 | # Endpoint not selected 63 | continue 64 | if ep_id == 0: 65 | continue 66 | if src_out_cluster in ep.out_clusters: 67 | src_epid = ep_id 68 | break 69 | if not src_epid: 70 | LOGGER.debug( 71 | "0x%04x: skipping %s cluster as non present", 72 | src_dev.nwk, 73 | src_out_cluster, 74 | ) 75 | continue 76 | if src_epid not in results: 77 | results[src_epid] = [] 78 | LOGGER.debug( 79 | "0x%04x: binding %s, ep: %s, cluster: %s", 80 | src_dev.nwk, 81 | str(src_dev.ieee), 82 | src_epid, 83 | src_out_cluster, 84 | ) 85 | bind_result = {"endpoint_id": src_epid, "cluster_id": src_out_cluster} 86 | 87 | res = await 
u.retry_wrapper( 88 | zdo.request, 89 | ZDOCmd.Bind_req, 90 | src_dev.ieee, 91 | src_epid, 92 | src_out_cluster, 93 | dst_addr, 94 | tries=tries, 95 | ) 96 | bind_result["result"] = res 97 | results[src_epid].append(bind_result) 98 | LOGGER.debug( 99 | "0x%04x/0x%02x/0x%04x(OUT): binding group 0x%04x: %s", 100 | src_dev.nwk, 101 | src_epid, 102 | src_out_cluster, 103 | group_id, 104 | res, 105 | ) 106 | 107 | # find src ep_id 108 | dst_addr = MultiAddress() 109 | dst_addr.addrmode = t.uint8_t(1) 110 | dst_addr.nwk = t.uint16_t(group_id) 111 | 112 | for src_in_cluster in src_in_cls: 113 | src_epid = None 114 | for ep_id, ep in src_dev.endpoints.items(): 115 | if u_epid is not None and ep_id != u_epid: 116 | # Endpoint not selected 117 | continue 118 | if ep_id == 0: 119 | continue 120 | if src_in_cluster in ep.in_clusters: 121 | src_epid = ep_id 122 | break 123 | if not src_epid: 124 | LOGGER.debug( 125 | "0x%04x: skipping %s cluster (not present)", 126 | src_dev.nwk, 127 | src_in_cluster, 128 | ) 129 | continue 130 | if src_epid not in results: 131 | results[src_epid] = [] 132 | LOGGER.debug( 133 | "0x%04x: binding %s, ep: %s, cluster: %s(IN)", 134 | src_dev.nwk, 135 | str(src_dev.ieee), 136 | src_epid, 137 | src_in_cluster, 138 | ) 139 | bind_result = {"endpoint_id": src_epid, "cluster_id": src_in_cluster} 140 | 141 | res = await u.retry_wrapper( 142 | zdo.request, 143 | ZDOCmd.Bind_req, 144 | src_dev.ieee, 145 | src_epid, 146 | src_in_cluster, 147 | dst_addr, 148 | tries=tries, 149 | ) 150 | bind_result["result"] = res 151 | results[src_epid].append(bind_result) 152 | LOGGER.debug( 153 | "0x%04x/0x%02x/0x%04x(IN): binding group 0x%04x: %s", 154 | src_dev.nwk, 155 | src_epid, 156 | src_in_cluster, 157 | group_id, 158 | res, 159 | ) 160 | event_data["result"] = results 161 | 162 | 163 | async def unbind_group( 164 | app, listener, ieee, cmd, data, service, params, event_data 165 | ): 166 | LOGGER.debug("running 'unbind group' command: %s", service) 167 | if ieee is 
None: 168 | LOGGER.error("missing ieee") 169 | return 170 | if not data: 171 | LOGGER.error("missing data (destination ieee)") 172 | return 173 | 174 | src_dev = app.get_device(ieee=ieee) 175 | 176 | group_id = u.str2int(data) 177 | 178 | # Get tries 179 | tries = params[p.TRIES] 180 | 181 | zdo = src_dev.zdo 182 | src_out_cls = BINDABLE_OUT_CLUSTERS 183 | 184 | dst_addr = MultiAddress() 185 | dst_addr.addrmode = t.uint8_t(1) 186 | dst_addr.nwk = t.uint16_t(group_id) 187 | results: dict[int, list[dict[str, int]]] = {} 188 | for src_out_cluster in src_out_cls: 189 | src_ep = None 190 | for ep_id, ep in src_dev.endpoints.items(): 191 | if ep_id == 0: 192 | continue 193 | if src_out_cluster in ep.out_clusters: 194 | src_ep = ep_id 195 | break 196 | if not src_ep: 197 | LOGGER.debug( 198 | "0x%04x: skipping %s cluster as non present", 199 | src_dev.nwk, 200 | src_out_cluster, 201 | ) 202 | continue 203 | 204 | if src_ep not in results: 205 | results[src_ep] = [] 206 | 207 | LOGGER.debug( 208 | "0x%04x: unbinding %s, ep: %s, cluster: %s", 209 | src_dev.nwk, 210 | str(src_dev.ieee), 211 | src_ep, 212 | src_out_cluster, 213 | ) 214 | 215 | unbind_result = {"endpoint_id": src_ep, "cluster_id": src_out_cluster} 216 | res = await u.retry_wrapper( 217 | zdo.request, 218 | ZDOCmd.Unbind_req, 219 | src_dev.ieee, 220 | src_ep, 221 | src_out_cluster, 222 | dst_addr, 223 | tries=tries, 224 | ) 225 | unbind_result["result"] = res 226 | results[src_ep].append(unbind_result) 227 | LOGGER.debug( 228 | "0x%04x: unbinding group 0x%04x: %s", src_dev.nwk, group_id, res 229 | ) 230 | 231 | event_data["result"] = results 232 | 233 | 234 | async def bind_ieee( 235 | app, listener, ieee, cmd, data, service, params, event_data 236 | ): 237 | if ieee is None: 238 | raise ValueError("'ieee' required") 239 | 240 | src_dev = app.get_device(ieee=ieee) 241 | if data in [0, False, "0", None]: 242 | # when command_data is set to 0 or false, bind to coordinator 243 | data = app.ieee 244 | 245 | dst_dev 
= await u.get_device(app, listener, data) 246 | 247 | # Get tries 248 | tries = params[p.TRIES] 249 | 250 | # Coordinator has nwk address 0 251 | isCoordinatorTarget = dst_dev.nwk == 0x0000 252 | 253 | zdo = src_dev.zdo 254 | src_out_clusters = BINDABLE_OUT_CLUSTERS 255 | src_in_clusters = BINDABLE_IN_CLUSTERS 256 | 257 | u_epid = params[p.EP_ID] 258 | u_dst_epid = params[p.DST_EP_ID] 259 | 260 | u_cluster_id = params[p.CLUSTER_ID] 261 | if u_cluster_id is not None: 262 | src_out_clusters = [u_cluster_id] 263 | src_in_clusters = [u_cluster_id] 264 | 265 | # TODO: Filter according to params[p.CLUSTER_ID] 266 | 267 | results: dict[int, dict] = {} 268 | 269 | for src_out_cluster in src_out_clusters: 270 | src_endpoints = [ 271 | ep_id 272 | for ep_id, ep in src_dev.endpoints.items() 273 | if ep_id != 0 274 | and src_out_cluster in ep.out_clusters 275 | and (u_epid is None or u_epid == ep_id) 276 | ] 277 | LOGGER.debug( 278 | "0x%04X: got endpoints %s for out-cluster 0x%04X", 279 | src_dev.nwk, 280 | src_endpoints, 281 | src_out_cluster, 282 | ) 283 | 284 | if not src_endpoints: 285 | LOGGER.debug( 286 | "0x%04X: skipping out-cluster 0x%04X as non present", 287 | src_dev.nwk, 288 | src_out_cluster, 289 | ) 290 | continue 291 | dst_addr = MultiAddress() 292 | dst_addr.addrmode = t.uint8_t(3) 293 | dst_addr.ieee = dst_dev.ieee 294 | 295 | # find dest ep 296 | dst_epid = None 297 | for ep_id, ep in dst_dev.endpoints.items(): 298 | if ep_id == 0: 299 | continue 300 | if ( 301 | isCoordinatorTarget or (src_out_cluster in ep.in_clusters) 302 | ) and (u_dst_epid is None or u_dst_epid == ep_id): 303 | dst_epid = ep_id 304 | break 305 | if not dst_epid: 306 | continue 307 | dst_addr.endpoint = t.uint8_t(dst_epid) 308 | 309 | for src_ep in src_endpoints: 310 | LOGGER.debug( 311 | "0x%04x: binding %s/EP:%s, out-cluster 0x%04X to %s/EP:%s", 312 | src_dev.nwk, 313 | str(src_dev.ieee), 314 | src_ep, 315 | src_out_cluster, 316 | str(dst_dev.ieee), 317 | dst_epid, 318 | ) 319 | res = 
await u.retry_wrapper( 320 | zdo.request, 321 | ZDOCmd.Bind_req, 322 | src_dev.ieee, 323 | src_ep, 324 | src_out_cluster, 325 | dst_addr, 326 | tries=tries, 327 | ) 328 | LOGGER.debug( 329 | "0x%04x: binding ieee %s: %s", 330 | src_dev.nwk, 331 | str(dst_dev.ieee), 332 | res, 333 | ) 334 | 335 | for src_in_cluster in src_in_clusters: 336 | src_endpoints = [ 337 | ep_id 338 | for ep_id, ep in src_dev.endpoints.items() 339 | if ep_id != 0 340 | and src_in_cluster in ep.in_clusters 341 | and (u_epid is None or u_epid == ep_id) 342 | ] 343 | LOGGER.debug( 344 | "0x%04X: got endpoints %s for in cluster 0x%04X", 345 | src_dev.nwk, 346 | src_endpoints, 347 | src_in_cluster, 348 | ) 349 | 350 | if not src_endpoints: 351 | LOGGER.debug( 352 | "0x%04X: skipping in-cluster 0x%04X as non present", 353 | src_dev.nwk, 354 | src_in_cluster, 355 | ) 356 | continue 357 | dst_addr = MultiAddress() 358 | dst_addr.addrmode = t.uint8_t(3) 359 | dst_addr.ieee = dst_dev.ieee 360 | 361 | # Find dest ep, accept first EP if coordinator 362 | dst_epid = None 363 | for ep_id, ep in dst_dev.endpoints.items(): 364 | if ep_id == 0: 365 | continue 366 | if isCoordinatorTarget or (src_in_cluster in ep.out_clusters): 367 | dst_epid = ep_id 368 | break 369 | if not dst_epid: 370 | continue 371 | dst_addr.endpoint = t.uint8_t(dst_epid) 372 | 373 | for src_ep in src_endpoints: 374 | LOGGER.debug( 375 | "0x%04X: binding %s/EP:%s, in-cluster: 0x%04X to %s/EP:%s", 376 | src_dev.nwk, 377 | str(src_dev.ieee), 378 | src_ep, 379 | src_in_cluster, 380 | str(dst_dev.ieee), 381 | dst_epid, 382 | ) 383 | if src_ep not in results: 384 | results[src_ep] = {} 385 | 386 | bind_result = { 387 | "src_endpoint_id": src_ep, 388 | "dst_endpoint_id": dst_epid, 389 | "cluster_id": src_in_cluster, 390 | } 391 | res = await u.retry_wrapper( 392 | zdo.request, 393 | ZDOCmd.Bind_req, 394 | src_dev.ieee, 395 | src_ep, 396 | src_in_cluster, 397 | dst_addr, 398 | tries=tries, 399 | ) 400 | bind_result["result"] = res 401 | 
results[src_ep] = bind_result 402 | LOGGER.debug( 403 | "0x%04X: binding ieee %s: %s", 404 | src_dev.nwk, 405 | str(dst_dev.ieee), 406 | res, 407 | ) 408 | 409 | event_data["result"] = results 410 | event_data["success"] = len(results) != 0 411 | 412 | 413 | async def unbind_coordinator( 414 | app, listener, ieee, cmd, data, service, params, event_data 415 | ): 416 | # Unbind bindings towards the coordinator: 417 | data = app.ieee 418 | 419 | # Use binds_remove_all with parameters 420 | await binds_remove_all( 421 | app, listener, ieee, cmd, data, service, params, event_data 422 | ) 423 | 424 | 425 | async def binds_remove_all( 426 | app, listener, ieee, cmd, data, service, params, event_data 427 | ): 428 | if ieee is None: 429 | LOGGER.error("missing ieee") 430 | return 431 | src_dev = app.get_device(ieee=ieee) 432 | zdo = src_dev.zdo 433 | 434 | # Get target ieee filter 435 | tgt_ieee = None 436 | if data is not None and data != "": 437 | try: 438 | tgt_ieee = t.EUI64.convert(data) 439 | # Get destination device if set 440 | except (ValueError, AttributeError): 441 | pass 442 | 443 | if tgt_ieee is None: 444 | # Conversion did not succeed, try other method 445 | # If this fails, then we do not catch the exception 446 | # as the field is not ok. 
447 | tgt_ieee = (await u.get_device(app, listener, data)).ieee 448 | 449 | # Determine endpoints to unbind 450 | endpoints = [] 451 | 452 | u_endpoint_id = params[p.EP_ID] 453 | if u_endpoint_id is not None and u_endpoint_id != "": 454 | if not isinstance(u_endpoint_id, list): 455 | u_endpoint_id = [u_endpoint_id] 456 | 457 | # unbind user provided endpoints instead 458 | endpoints = u_endpoint_id 459 | 460 | # Determine clusters to unbind 461 | clusters = [] 462 | 463 | u_cluster_id = params[p.CLUSTER_ID] 464 | if u_cluster_id is not None and u_cluster_id != "": 465 | if not isinstance(u_cluster_id, list): 466 | u_cluster_id = [u_cluster_id] 467 | 468 | # unbind user provided clusters instead 469 | clusters = u_cluster_id 470 | 471 | await binds_get( 472 | app, listener, ieee, cmd, data, service, params, event_data 473 | ) 474 | # Bindings in event_data["result"] 475 | 476 | errors: list[str] = [] 477 | bindings_removed = [] 478 | bindings_skipped = [] 479 | try: 480 | for _i, binding in event_data["result"].items(): 481 | LOGGER.debug(f"Remove bind {binding!r}") 482 | addr_mode = binding["dst"]["addrmode"] 483 | 484 | res = None 485 | # Note, the code below is essentially two times the same 486 | # but the goal is to make a distincion between group 487 | # and ieee addressing for testing/evolutions. 
488 | if addr_mode == 1: 489 | # group 490 | src_ieee = t.EUI64.convert(binding["src"]) 491 | ep_id = u.str2int(binding["src_ep"]) 492 | cluster_id = u.str2int(binding["cluster_id"]) 493 | 494 | dst_addr = MultiAddress() 495 | dst_addr.addrmode = addr_mode 496 | dst_addr.nwk = t.uint16_t(u.str2int(binding["dst"]["group"])) 497 | if "dst_ieee" in binding["dst"]: 498 | # Probably not useful, but for backward "compatibility" 499 | dst_ieee = t.EUI64.convert(binding["dst"]["dst_ieee"]) 500 | dst_addr.ieee = dst_ieee 501 | 502 | match_filter = ( 503 | (tgt_ieee is None or dst_ieee == tgt_ieee) 504 | and (len(endpoints) == 0 or ep_id in endpoints) 505 | and (len(clusters) == 0 or cluster_id in clusters) 506 | ) 507 | 508 | if match_filter: 509 | res = await u.retry_wrapper( 510 | zdo.request, 511 | ZDOCmd.Unbind_req, 512 | src_ieee, 513 | ep_id, 514 | cluster_id, 515 | dst_addr, 516 | tries=params[p.TRIES], 517 | ) 518 | # TODO: check success status 519 | bindings_removed.append(binding) 520 | event_data["replies"].append(res) 521 | elif addr_mode == 3: 522 | # direct 523 | src_ieee = t.EUI64.convert(binding["src"]) 524 | dst_ieee = t.EUI64.convert(binding["dst"]["dst_ieee"]) 525 | dst_addr = MultiAddress() 526 | dst_addr.addrmode = addr_mode 527 | dst_addr.ieee = dst_ieee 528 | dst_addr.endpoint = t.uint8_t(binding["dst"]["dst_ep"]) 529 | ep_id = u.str2int(binding["src_ep"]) 530 | cluster_id = u.str2int(binding["cluster_id"]) 531 | # LOGGER.debug( 532 | # f"filter {tgt_ieee} {dst_ieee} {clusters} {cluster_id}" 533 | # ) 534 | 535 | match_filter = ( 536 | (tgt_ieee is None or dst_ieee == tgt_ieee) 537 | and (len(endpoints) == 0 or ep_id in endpoints) 538 | and (len(clusters) == 0 or cluster_id in clusters) 539 | ) 540 | 541 | if match_filter: 542 | res = await u.retry_wrapper( 543 | zdo.request, 544 | ZDOCmd.Unbind_req, 545 | src_ieee, 546 | ep_id, 547 | cluster_id, 548 | dst_addr, 549 | tries=params[p.TRIES], 550 | ) 551 | # TODO: check success status 552 | 
bindings_removed.append(binding) 553 | event_data["replies"].append(res) 554 | 555 | if res is None: 556 | msg = f"Binding not supported or not selected: {binding!r}" 557 | bindings_skipped.append(binding) 558 | LOGGER.error(msg) 559 | errors.append(msg) 560 | except Exception as e: 561 | event_data["result"] = { 562 | "removed": bindings_removed, 563 | "skipped": bindings_skipped, 564 | } 565 | raise e 566 | 567 | event_data["result"] = { 568 | "removed": bindings_removed, 569 | "skipped": bindings_skipped, 570 | } 571 | event_data["success"] = len(bindings_skipped) == 0 572 | 573 | 574 | async def binds_get( 575 | app, listener, ieee, cmd, data, service, params, event_data 576 | ): 577 | """ 578 | Get bindings from device. 579 | """ 580 | 581 | if ieee is None: 582 | LOGGER.error("missing ieee") 583 | return 584 | src_dev = app.get_device(ieee=ieee) 585 | zdo = src_dev.zdo 586 | 587 | # Get tries 588 | tries = params[p.TRIES] 589 | 590 | idx = 0 591 | done = False 592 | 593 | event_data["replies"] = [] 594 | bindings = {} 595 | success = False 596 | 597 | while not done: 598 | # Todo: continue when reply is incomplete (update start index) 599 | reply = await u.retry_wrapper( 600 | zdo.request, ZDOCmd.Mgmt_Bind_req, idx, tries=tries 601 | ) 602 | event_data["replies"].append(reply) 603 | 604 | if ( 605 | isinstance(reply, list) 606 | and len(reply) >= 3 607 | and reply[0] == f.Status.SUCCESS 608 | ): 609 | total = reply[1] 610 | next_idx = reply[2] 611 | for binding in reply[3]: 612 | if binding.DstAddress.addrmode == 1: 613 | dst_info = { 614 | "addrmode": binding.DstAddress.addrmode, 615 | "group": f"0x{binding.DstAddress.nwk}", 616 | } 617 | elif binding.DstAddress.addrmode == 3: 618 | dst_info = { 619 | "addrmode": binding.DstAddress.addrmode, 620 | "dst_ieee": repr(binding.DstAddress.ieee), 621 | "dst_ep": binding.DstAddress.endpoint, 622 | } 623 | else: 624 | dst_info = binding.DstAddress 625 | 626 | bind_info = { 627 | "src": repr(binding.SrcAddress), 628 | 
"src_ep": binding.SrcEndpoint, 629 | "cluster_id": f"0x{binding.ClusterId:04X}", 630 | "dst": dst_info, 631 | } 632 | bindings[next_idx] = bind_info 633 | next_idx += 1 634 | 635 | if next_idx >= total: 636 | done = True 637 | success = True 638 | else: 639 | if idx >= next_idx: 640 | # Not progressing in list 641 | success = False 642 | done = True 643 | else: 644 | # Continue with next offset 645 | idx = next_idx 646 | else: 647 | event_data["warning"] = "Unexpected reply format or failure" 648 | done = True 649 | 650 | event_data["success"] = success 651 | event_data["result"] = bindings 652 | 653 | LOGGER.debug("Bindings for ieee {ieee!r}: %s", bindings) 654 | --------------------------------------------------------------------------------