├── .github ├── CODEOWNERS ├── dependabot.yml ├── release-drafter.yml ├── scripts │ ├── gen_stats.sh │ └── update_hacs_manifest.py └── workflows │ ├── codeql-analysis.yml │ ├── lint_python.yml.disabled │ ├── pre-commit.yml │ ├── release-drafter.yml │ ├── release.yml │ ├── stats.yaml │ └── validate-hacs.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .prettierignore ├── .prettierrc.yml ├── Contributing.md ├── LICENSE ├── README.md ├── STATS.md ├── blueprints ├── README.md ├── backup.yaml ├── backup_znp.yaml ├── blueprint_danfoss_ally_configure_script.yaml ├── danfoss_ally_remote_temperature.yaml ├── danfoss_ally_remote_temperature_min_delay.yaml └── script_Thermometer_setReporting.yaml ├── custom_components └── zha_toolkit │ ├── .gitignore │ ├── __init__.py │ ├── _user.py │ ├── binds.py │ ├── config_flow.py │ ├── const.py │ ├── default.py │ ├── ezsp.py │ ├── ezsp_backup.py │ ├── groups.py │ ├── ha.py │ ├── manifest.json │ ├── misc.py │ ├── neighbours.py │ ├── ota.py │ ├── params.py │ ├── scan_device.py │ ├── services.yaml │ ├── translations │ └── en.json │ ├── tuya.py │ ├── utils.py │ ├── zcl_attr.py │ ├── zcl_cmd.py │ ├── zdo.py │ ├── zha.py │ └── znp.py ├── examples ├── README.md ├── fetchOTAfw.sh ├── images │ ├── service_basic_cluster.png │ └── state_basic_cluster.png ├── script_TRV_setTemperatureReporting.yaml ├── script_Thermometer_setReporting.yaml ├── script_configure_Lixee_reporting.yaml ├── script_danfoss_ally_adaptation_run_init.yaml ├── script_danfoss_ally_configure.yaml ├── script_danfoss_ally_settime.yaml ├── script_read_basic_cluster.yaml ├── script_request_all_light_states.yaml ├── script_use_zha_devices.yaml ├── script_use_zha_devices_response.yaml └── service_call_read_basic_cluster.yaml ├── hacs.json ├── icon ├── icon.png ├── icon.svg └── icon@2x.png ├── images ├── ServiceResponse.png └── service-config-ui.png ├── pyproject.toml ├── requirements_test.txt ├── scripts ├── installNoHacsFromZip.sh └── installNoHacsWithGit.sh └── setup.cfg /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @mdeweerd 2 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: pip 4 | directory: "/" 5 | schedule: 6 | interval: weekly 7 | time: "01:30" 8 | open-pull-requests-limit: 10 9 | -------------------------------------------------------------------------------- /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name-template: v$RESOLVED_VERSION 🧰 3 | tag-template: v$RESOLVED_VERSION 4 | change-template: '- #$NUMBER $TITLE @$AUTHOR' 5 | sort-direction: ascending 6 | categories: 7 | - title: 🚀 Features 8 | labels: 9 | - feature 10 | - enhancement 11 | 12 | - title: 🐛 Bug Fixes 13 | labels: 14 | - fix 15 | - bugfix 16 | - bug 17 | 18 | - title: 🧰 Maintenance 19 | label: chore 20 | 21 | version-resolver: 22 | major: 23 | labels: 24 | - major 25 | minor: 26 | labels: 27 | - minor 28 | patch: 29 | labels: 30 | - patch 31 | default: patch 32 | template: | 33 | ## Changes 34 | 35 | $CHANGES 36 | 37 | ## ⭐️ Thank you so much for helping out to keep this integration awesome 38 | $CONTRIBUTORS 39 | autolabeler: 40 | - label: bug 41 | branch: 42 | - /fix\/.+/ 43 | - label: enhancement 44 | branch: 45 | - /feature\/.+/ 46 | 
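# Illustration of the configuration above (hypothetical branch names): a PR
# from a branch `fix/read-timeout` is auto-labeled `bug` and grouped under
# "🐛 Bug Fixes"; a PR from `feature/new-command` gets `enhancement` under
# "🚀 Features". Unless a PR carries a `major` or `minor` label, the next
# draft resolves to a patch version bump (the configured default).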
-------------------------------------------------------------------------------- /.github/scripts/gen_stats.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DEST=$(dirname "$0")/../../STATS.md 4 | 5 | TEMPLATE='- ![badge VERSION](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/VERSION/total.svg)' 6 | 7 | # Exclude stuff that results in invalid badges 8 | EXCLUDES="v0.7.9 v0.7.7 v0.7.6 v0.7.5 v0.7.3 v0.7.2 v0.7.1 v0.7.23 v0.7.24 v0.8.30 v0.9.6 v0.9.8 v0.5.2 v0.5.0" 9 | 10 | ( 11 | echo '# Badges showing number of downloads per version' 12 | echo 13 | for tag in latest $(git tag -l --sort=-creatordate v*[0-9]) ; do 14 | if [[ "$EXCLUDES" != *"$tag"* ]] ; then 15 | echo "${TEMPLATE//VERSION/$tag}" 16 | fi 17 | done 18 | ) > "${DEST}" 19 | -------------------------------------------------------------------------------- /.github/scripts/update_hacs_manifest.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python3 2 | # 3 | # Takes --version X.Y.Z or -V X.Y.Z option and sets version in manifest.json. 4 | # Must be launched from the root of the repository. 5 | # 6 | # Modified from : https://raw.githubusercontent.com/bramstroker/homeassistant-zha-toolkit/master/.github/scripts/update_hacs_manifest.py # noqa: E501 7 | # 8 | # MIT License 9 | # 10 | # Copyright (c) 2021 Bram Gerritsen 11 | # Copyright (c) 2022-2024 Mario DE WEERD 12 | # 13 | # Permission is hereby granted, free of charge, to any person obtaining a copy 14 | # of this software and associated documentation files (the "Software"), to deal 15 | # in the Software without restriction, including without limitation the rights 16 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 17 | # copies of the Software, and to permit persons to whom the Software is 18 | # furnished to do so, subject to the following conditions: 19 | # 20 | # The above copyright notice and this permission notice shall be included in 21 | # all copies or substantial portions of the Software. 22 | # 23 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 26 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 27 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 28 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 29 | # SOFTWARE. 
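# Usage, from the repository root -- this is how .github/workflows/release.yml
# invokes the script, passing the release tag (e.g. a tag such as v1.2.3)
# as the version:
#
#   python3 .github/scripts/update_hacs_manifest.py --version v1.2.3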
30 | 31 | """Update the files with new version.""" 32 | import json 33 | import os 34 | import re 35 | import sys 36 | 37 | 38 | def update_manifest(path=None, version=None): 39 | """Update the manifest file.""" 40 | if path is None: 41 | return 42 | 43 | with open( 44 | path, 45 | encoding="utf_8", 46 | ) as manifestfile: 47 | manifest = json.load(manifestfile) 48 | 49 | manifest["version"] = version 50 | 51 | with open( 52 | path, 53 | "w", 54 | encoding="utf_8", 55 | ) as manifestfile: 56 | manifestfile.write(json.dumps(manifest, indent=4, sort_keys=True)) 57 | 58 | 59 | def replace_version_in_file(path: str, regex: str, version: str): 60 | # Remove any leading 'v' from the provided version 61 | new_version = version.lstrip("v") 62 | 63 | # Compile the regex pattern 64 | pattern = re.compile(regex) 65 | 66 | # Function to replace the version part in the match 67 | def version_replacer(match): 68 | 69 | # Extract the original version from the match 70 | original_version = match.group("version") 71 | 72 | # Determine if the original version started with 'v' 73 | if original_version.startswith("v"): 74 | replacement_version = f"v{new_version}" 75 | else: 76 | replacement_version = new_version 77 | 78 | # Replace the version in the matched string 79 | replacement_match = match.group(0).replace( 80 | original_version, replacement_version 81 | ) 82 | 83 | return replacement_match 84 | 85 | # Read the file content 86 | with open(path, encoding="utf_8") as file: 87 | content = file.read() 88 | 89 | # Replace the versions in the content 90 | new_content = pattern.sub(version_replacer, content) 91 | 92 | # Write the modified content back to the file 93 | with open(path, "w", encoding="utf_8") as file: 94 | file.write(new_content) 95 | 96 | 97 | newVersion = "0.0.0" 98 | for index, value in enumerate(sys.argv): 99 | if value in ["--version", "-V", "-v"]: 100 | newVersion = sys.argv[index + 1] 101 | 102 | 103 | filepath = f"{os.getcwd()}/custom_components/zha_toolkit/manifest.json" 104 | update_manifest(filepath, newVersion) 105 | 106 | # Example: 107 | # replace_version_in_file('file.txt', r'(?P<version>v?\d+\.\d+\.\d+)', '2.3.4') 108 | 109 | # filepath = f"{os.getcwd()}/README" 110 | # Example1: regex = r'toolkit version \[(?P<version>.*?)]' 111 | # Example2: regex= r'(?P<version>v?\d+\.\d+\.\d+)' 112 | # replace_version_in_file(filepath, regex, newVersion) 113 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # For most projects, this workflow file will not need changing; you simply need 3 | # to commit it to your repository. 4 | # 5 | # You may wish to alter this file to override the set of languages analyzed, 6 | # or to provide custom queries or build logic. 7 | # 8 | # ******** NOTE ******** 9 | # We have attempted to detect the languages in your repository. Please check 10 | # the `language` matrix defined below to confirm you have the correct set of 11 | # supported CodeQL languages.
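# Note: the `schedule` trigger below ("15 8 * * 6") runs the scan weekly,
# on Saturdays at 08:15 UTC.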
12 | # 13 | name: CodeQL 14 | 15 | on: 16 | push: 17 | branches: [dev] 18 | pull_request: 19 | # The branches below must be a subset of the branches above 20 | branches: [dev] 21 | schedule: 22 | - cron: 15 8 * * 6 23 | 24 | jobs: 25 | analyze: 26 | name: Analyze 27 | runs-on: ubuntu-latest 28 | permissions: 29 | actions: read 30 | contents: read 31 | security-events: write 32 | 33 | strategy: 34 | fail-fast: false 35 | matrix: 36 | language: [python] 37 | # CodeQL supports 38 | # ['cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby'] 39 | # Learn more about CodeQL language support at 40 | # https://git.io/codeql-language-support 41 | 42 | steps: 43 | - name: Checkout repository 44 | uses: actions/checkout@v4 45 | 46 | # Initializes the CodeQL tools for scanning. 47 | - name: Initialize CodeQL 48 | uses: github/codeql-action/init@v1 49 | with: 50 | languages: ${{ matrix.language }} 51 | # If you wish to specify custom queries, you can do so here 52 | # or in a config file. 53 | # By default, queries listed here will override any specified 54 | # in a config file. 55 | # Prefix the list here with "+" to use these queries and those 56 | # in the config file. 57 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 58 | 59 | # Autobuild attempts to build any compiled languages 60 | # (C/C++, C#, or Java). 61 | # If this step fails, then you should remove it and run the build 62 | # manually (see below) 63 | - name: Autobuild 64 | uses: github/codeql-action/autobuild@v1 65 | 66 | # ℹ️ Command-line programs to run using the OS shell. 67 | # 📚 https://git.io/JvXDl 68 | 69 | # ✏️ If the Autobuild fails above, remove it and uncomment the 70 | # following three lines and modify them (or add more) to build your 71 | # code if your project uses a compiled language 72 | 73 | # - run: | 74 | # make bootstrap 75 | # make release 76 | 77 | - name: Perform CodeQL Analysis 78 | uses: github/codeql-action/analyze@v1 79 | -------------------------------------------------------------------------------- /.github/workflows/lint_python.yml.disabled: -------------------------------------------------------------------------------- 1 | --- 2 | name: lint_python 3 | on: [pull_request, push] 4 | jobs: 5 | lint_python: 6 | runs-on: ubuntu-latest 7 | steps: 8 | - name: Extract branch name 9 | shell: bash 10 | # run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})" 11 | run: echo "branch=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_OUTPUT 12 | id: extract_branch 13 | - uses: actions/checkout@v4 14 | with: 15 | ref: ${{ steps.extract_branch.outputs.branch }} 16 | - uses: actions/setup-python@v4 17 | - run: pip install --upgrade pip wheel 18 | - run: >- 19 | pip install bandit black codespell flake8 flake8-2020 flake8-bugbear 20 | flake8-comprehensions mccabe pycodestyle pyflakes mypy pytest pyupgrade safety 21 | - run: bandit --recursive --skip B101,B311 . 22 | - run: black --check . || true 23 | - run: codespell --ignore-words-list="hass" 24 | - run: >- 25 | flake8 . --count --show-source --statistics 26 | - run: isort --check-only --profile black . || true 27 | - run: pip install -r requirements.txt || pip install --editable . || true 28 | - run: mkdir --parents --verbose .mypy_cache 29 | - run: >- 30 | mypy --ignore-missing-imports --install-types --non-interactive . || 31 | true 32 | - run: pytest . || true 33 | # - run: pytest --doctest-modules . 
|| true 34 | - run: shopt -s globstar && pyupgrade --py37-plus **/*.py || true 35 | # Safety check identifies issues in python packages - too much hassle. 36 | # - run: safety check 37 | # Not maintained: 38 | # - uses: pre-commit.ci/action@v3.0.0 39 | - uses: stefanzweifel/git-auto-commit-action@v4 40 | with: 41 | commit_message: '[Bot] lint_python - formatting updates!' 42 | # commit_user_name: lint_python 43 | # commit_user_email: lint_python@nill 44 | # commit_author: lint_python bot 45 | -------------------------------------------------------------------------------- /.github/workflows/pre-commit.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: pre-commit 3 | on: 4 | pull_request: 5 | push: 6 | jobs: 7 | pre-commit: 8 | runs-on: ubuntu-latest 9 | env: 10 | RAW_LOG: pre-commit.log 11 | CS_XML: pre-commit.xml 12 | steps: 13 | - run: sudo apt-get update && sudo apt-get install cppcheck 14 | if: false 15 | - uses: actions/checkout@v4 16 | - uses: actions/setup-python@v5 17 | if: false 18 | with: 19 | cache: pip 20 | python-version: 3.12.1 21 | - run: python -m pip install pre-commit 22 | - uses: actions/cache/restore@v4 23 | with: 24 | path: ~/.cache/pre-commit/ 25 | key: pre-commit-4|${{ env.pythonLocation }}|${{ hashFiles('.pre-commit-config.yaml') }} 26 | - name: Run pre-commit hooks 27 | env: 28 | SKIP: no-commit-to-branch 29 | run: | 30 | set -o pipefail 31 | pre-commit gc 32 | pre-commit run --show-diff-on-failure --color=always --all-files | tee ${RAW_LOG} 33 | - name: Convert Raw Log to Checkstyle format (launch action) 34 | uses: mdeweerd/logToCheckStyle@v2024.2.9 35 | if: ${{ failure() }} 36 | with: 37 | in: ${{ env.RAW_LOG }} 38 | # out: ${{ env.CS_XML }} 39 | - uses: actions/cache/save@v4 40 | if: ${{ ! cancelled() }} 41 | with: 42 | path: ~/.cache/pre-commit/ 43 | key: pre-commit-4|${{ env.pythonLocation }}|${{ hashFiles('.pre-commit-config.yaml') }} 44 | - name: Provide log as artifact 45 | uses: actions/upload-artifact@v4 46 | if: ${{ ! 
cancelled() }} 47 | with: 48 | name: precommit-logs 49 | path: | 50 | ${{ env.RAW_LOG }} 51 | ${{ env.CS_XML }} 52 | retention-days: 2 53 | -------------------------------------------------------------------------------- /.github/workflows/release-drafter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Release Drafter 3 | 4 | on: 5 | push: 6 | branches: [main] 7 | pull_request: 8 | types: [opened, reopened, synchronize] 9 | 10 | jobs: 11 | update_release_draft: 12 | name: Update release draft 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout 16 | uses: actions/checkout@v4 17 | with: 18 | fetch-depth: 0 19 | - name: Create Release 20 | uses: release-drafter/release-drafter@v5 21 | with: 22 | disable-releaser: github.ref != 'refs/heads/main' 23 | env: 24 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 25 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Release 3 | 4 | on: 5 | release: 6 | types: [published] 7 | workflow_dispatch: 8 | 9 | jobs: 10 | release_zip_file: 11 | name: Prepare release asset 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Check out repository 15 | uses: actions/checkout@v4 16 | 17 | # - name: Get version 18 | # id: version 19 | # uses: home-assistant/actions/helpers/version@master 20 | 21 | - name: Set version number 22 | run: | 23 | python3 ${{ github.workspace }}/.github/scripts/update_hacs_manifest.py --version ${{ github.ref_name }} 24 | 25 | - name: Create zip 26 | run: | 27 | cd custom_components/zha_toolkit 28 | zip zha-toolkit.zip -r ./ 29 | - name: Upload zip to release 30 | uses: svenstaro/upload-release-action@v1-release 31 | with: 32 | repo_token: ${{ secrets.GITHUB_TOKEN }} 33 | file: ./custom_components/zha_toolkit/zha-toolkit.zip 34 | asset_name: zha-toolkit.zip 35 | tag: ${{ github.ref }} 36 | overwrite: true 37 | -------------------------------------------------------------------------------- /.github/workflows/stats.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: stats 3 | on: [create, workflow_dispatch] 4 | jobs: 5 | gen_stats: 6 | if: ${{ startsWith(github.ref, 'refs/tags/v') }} 7 | runs-on: ubuntu-latest 8 | steps: 9 | # Credit: https://stackoverflow.com/questions/58033366/how-to-get-the-current-branch-within-github-actions 10 | - name: Extract branch name 11 | #run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})" 12 | run: echo "branch=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_OUTPUT 13 | id: extract_branch 14 | - uses: actions/checkout@v4 15 | with: 16 | ref: ${{ steps.extract_branch.outputs.branch }} 17 | - run: ${{ github.workspace }}/.github/scripts/gen_stats.sh 18 | - name: Commit changes 19 | run: |- 20 | for r in $(git remote) ; do git remote get-url --all $r ; done 21 | git config user.name github-actions 22 | git config user.email github-actions@github.com 23 | git commit -a -m '[Bot] stats - Update STATS.md' 24 | git push 25 | # commit_user_email: stats@nill 26 | # commit_author: STATS BOT 27 | - uses: dorny/paths-filter@v2 28 | id: changes 29 | with: 30 | filters: | 31 | man: 32 | - 'man.md' 33 | autotools: 34 | - 'aclocal.m4' 35 | - 'configure.ac' 36 | - uses: docker://pandoc/core:2.17 37 | if: ${{ github.event_name == 'workflow_dispatch' || steps.changes.outputs.man == 'true' }} 38 | with: 39 | args: -s man.md -t man -o shc.1
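# Second pandoc pass over the same source: the step above rendered man.md
# to a man page (shc.1); the step below renders it to HTML. Both are gated
# on a manual dispatch or a detected change to man.md.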
40 | - uses: docker://pandoc/core:2.17 41 | if: ${{ github.event_name == 'workflow_dispatch' || steps.changes.outputs.man == 'true' }} 42 | with: 43 | args: -s man.md -t html -o man.html 44 | - run: |- 45 | ./autogen.sh 46 | if: ${{ github.event_name == 'workflow_dispatch' || steps.changes.outputs.autotools == 'true' }} 47 | - name: Commit changes 48 | if: ${{ github.event_name == 'workflow_dispatch' || steps.changes.outputs.man == 'true' || steps.changes.outputs.autotools == 'true' }} 49 | run: |- 50 | for r in $(git remote) ; do git remote get-url --all $r ; done 51 | git config user.name github-actions 52 | git config user.email github-actions@github.com 53 | git commit -a -m "ci: Github Action Generate Files" 54 | git push 55 | -------------------------------------------------------------------------------- /.github/workflows/validate-hacs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Validate with hassfest 3 | 4 | on: 5 | push: 6 | pull_request: 7 | schedule: 8 | - cron: 0 0 * * * 9 | 10 | jobs: 11 | validate: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v4 15 | - uses: home-assistant/actions/hassfest@master 16 | hacs: 17 | name: HACS Action 18 | runs-on: ubuntu-latest 19 | steps: 20 | - uses: actions/checkout@v4 21 | - name: HACS Action 22 | uses: hacs/action@main 23 | with: 24 | category: integration 25 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | nwk_backup.json 3 | .mypy_cache 4 | **/*.swp 5 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | files: ^(.*\.(py|json|md|sh|yaml|cfg|txt))$ 3 | exclude: ^(\.[^/]*cache/.*|.*/_user.py)$ 4 | repos: 5 | - repo: https://github.com/verhovsky/pyupgrade-docs 6 | rev: v0.3.0 7 | hooks: 8 | - id: pyupgrade-docs 9 | - repo: https://github.com/executablebooks/mdformat 10 | # Do this before other tools "fixing" the line endings 11 | rev: 0.7.17 12 | hooks: 13 | - id: mdformat 14 | name: Format Markdown 15 | entry: mdformat # Executable to run, with fixed options 16 | language: python 17 | types: [markdown] 18 | args: [--wrap, '75', --number] 19 | additional_dependencies: 20 | - mdformat-toc 21 | - mdformat-beautysh 22 | - setuptools # workaround for beautysh 23 | # -mdformat-shfmt 24 | # -mdformat-tables 25 | - mdformat-config 26 | - mdformat-black 27 | - mdformat-web 28 | - mdformat-gfm 29 | - repo: https://github.com/asottile/blacken-docs 30 | rev: 1.16.0 31 | hooks: 32 | - id: blacken-docs 33 | additional_dependencies: [black==22.6.0] 34 | stages: [manual] # Manual because already done by mdformat-black 35 | - repo: https://github.com/pre-commit/pre-commit-hooks 36 | rev: v4.5.0 37 | hooks: 38 | - id: fix-byte-order-marker 39 | - id: mixed-line-ending 40 | - id: end-of-file-fixer 41 | - id: trailing-whitespace 42 | - id: no-commit-to-branch 43 | args: [--branch, main] 44 | - id: check-yaml 45 | args: [--unsafe] 46 | - id: debug-statements 47 | - id: check-json 48 | - id: check-builtin-literals 49 | - id: check-ast 50 | - id: check-merge-conflict 51 | - id: check-executables-have-shebangs 52 | - id: check-shebang-scripts-are-executable 53 | - id: check-docstring-first 54 | - id: check-case-conflict 55 | # - id: check-toml 56 | - repo: https://github.com/pre-commit/mirrors-prettier
57 | rev: v3.0.3 58 | hooks: 59 | - id: prettier 60 | - repo: https://github.com/adrienverge/yamllint.git 61 | rev: v1.32.0 62 | hooks: 63 | - id: yamllint 64 | args: 65 | - --no-warnings 66 | - -d 67 | - '{extends: relaxed, rules: {line-length: {max: 90}}}' 68 | - repo: https://github.com/lovesegfault/beautysh.git 69 | rev: v6.2.1 70 | hooks: 71 | - id: beautysh 72 | additional_dependencies: 73 | - setuptools # workaround for beautysh 74 | 75 | - repo: https://github.com/shellcheck-py/shellcheck-py 76 | rev: v0.10.0.1 77 | hooks: 78 | - id: shellcheck 79 | files: ^[^\.].*\.sh$ 80 | # add "shellcheck disable=SC2086" codes to files, rather than global excludes: 81 | # args: [-x,-e2086,-e2004,-e2207,-e2002,-e2116] 82 | # args: ["--severity=warning"] # Optionally only show errors and warnings 83 | - repo: https://github.com/astral-sh/ruff-pre-commit 84 | rev: v0.1.9 85 | hooks: 86 | # Note, added for future reference, not used 87 | - id: ruff 88 | stages: [manual] 89 | - id: ruff-format 90 | stages: [manual] 91 | - repo: https://github.com/asottile/pyupgrade 92 | rev: v3.15.0 93 | hooks: 94 | - id: pyupgrade 95 | args: 96 | - --py39-plus 97 | - repo: https://github.com/psf/black 98 | rev: 23.9.1 99 | hooks: 100 | - id: black 101 | args: 102 | - --safe 103 | - --quiet 104 | - -l 79 105 | - repo: https://github.com/Lucas-C/pre-commit-hooks-bandit 106 | rev: v1.0.6 107 | hooks: 108 | - id: python-bandit-vulnerability-check 109 | args: [--skip, 'B101,B311', --recursive, .] 110 | 111 | - repo: https://github.com/fsouza/autoflake8 112 | rev: v0.4.1 113 | hooks: 114 | - id: autoflake8 115 | args: 116 | - -i 117 | - -r 118 | - --expand-star-imports 119 | - custom_components 120 | - repo: https://github.com/PyCQA/flake8 121 | rev: 6.1.0 122 | hooks: 123 | - id: flake8 124 | additional_dependencies: 125 | # - pyproject-flake8>=0.0.1a5 126 | - flake8-bugbear>=22.7.1 127 | - flake8-comprehensions>=3.10.1 128 | - flake8-2020>=1.7.0 129 | - mccabe>=0.7.0 130 | - pycodestyle>=2.9.1 131 | - pyflakes>=2.5.0 132 | - repo: https://github.com/PyCQA/isort 133 | rev: 5.12.0 134 | hooks: 135 | - id: isort 136 | - repo: https://github.com/codespell-project/codespell 137 | rev: v2.2.6 138 | hooks: 139 | - id: codespell 140 | args: 141 | # - --builtin=clear,rare,informal,usage,code,names,en-GB_to_en-US 142 | - --builtin=clear,rare,informal,usage,code,names 143 | - --ignore-words-list=hass,master 144 | - --skip="./.*" 145 | - --quiet-level=2 146 | - repo: https://github.com/pylint-dev/pylint 147 | rev: v3.0.3 148 | hooks: 149 | - id: pylint 150 | args: 151 | - --reports=no 152 | - --py-version=3.10 153 | additional_dependencies: 154 | - aiofiles>=0.4.0 155 | # - pylint-blocking-calls 156 | # - homeassistant-stubs>=2023.1.7 157 | # exclude: ^$ 158 | - repo: https://github.com/pre-commit/mirrors-mypy 159 | rev: v1.14.0 160 | hooks: 161 | - id: mypy 162 | args: 163 | # - --verbose 164 | # - --config-file=setup.cfg 165 | - --ignore-missing-imports 166 | - --install-types 167 | - --non-interactive 168 | - --check-untyped-defs 169 | - --show-error-codes 170 | - --show-error-context 171 | additional_dependencies: 172 | - zigpy==0.61.0 173 | - types_aiofiles>=0.4.0 174 | - types_pytz>=2023.1 175 | # - cryptography==3.3.2 # Compatible/Available on cygwin 176 | #- homeassistant-stubs>=2023.1.7 177 | #- pydantic 178 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | # Keep workflow happy 2 | .* 3 
| #example** 4 | #hacs.json 5 | README.md 6 | -------------------------------------------------------------------------------- /.prettierrc.yml: -------------------------------------------------------------------------------- 1 | quoteProps: preserve 2 | -------------------------------------------------------------------------------- /Contributing.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Feel free to contribute to 4 | [zha-toolkit](https://github.com/mdeweerd/zha-toolkit). 5 | 6 | You can contribute to the documentation, examples, 7 | blueprints, and code. 8 | 9 | ## Documentation 10 | 11 | Not all commands are documented yet, and the existing documentation can be 12 | improved. 13 | 14 | The undocumented commands are mostly commands that were in 15 | [zha_custom](https://github.com/Adminiuga/zha_custom). 16 | 17 | Ideally, you install `pre-commit` (see below). 18 | 19 | ## Coding 20 | 21 | Because most of the code is reloaded on each call, you do not have to 22 | restart Home Assistant on each change. That makes it fairly practical to 23 | adjust existing functionality and to add new features. 24 | 25 | ## Adding commands 26 | 27 | Defining a new command requires several updates: 28 | 29 | - The main handler function.\ 30 | The ideal is to name it `<module>_<command>`. 31 | 32 | The next steps are not required to get started; you can do them once you're 33 | happy with the functionality of your new command. They are required to 34 | properly define the new command as a HA service command: 35 | 36 | - In `params.py`: Add the handler name as a constant. 37 | - In `__init__.py`: 38 | - `SERVICE_SCHEMAS`: Add definitions of mandatory and optional 39 | parameters. 40 | - `CMD_TO_INTERNAL_MAP`: Add a mapping if the method name is not like 41 | `<module>_<command>`. 42 | - In `services.yaml`: 43 | - Add a new entry (alphabetically located) to define the UI fields for 44 | service calls. 45 | 46 | You can check that these updates are correct by calling the service 47 | `zha_toolkit.register_services` which will reload `services.yaml` and 48 | `SERVICE_SCHEMAS` to add/redefine zha-toolkit services. 49 | 50 | ### Handler method definition 51 | 52 | The example below shows all the parameters you need to define for a new 53 | handler method. 54 | 55 | This example is located in `hello.py`. Therefore, the start of the function 56 | name (`hello`) matches the module name. 57 | 58 | ```python 59 | async def hello_world(app, listener, ieee, cmd, data, service, params, event_data): 60 | pass 61 | ``` 62 | 63 | Because of the naming, it is immediately available using the 64 | `zha_toolkit.execute` service: 65 | 66 | ```yaml 67 | service: zha_toolkit.execute 68 | data: 69 | command: hello_world 70 | param1: content1 71 | param2: content2 72 | ``` 73 | 74 | Once you have made the required updates to add the command as a service itself, 75 | you can call it as: 76 | 77 | ```yaml 78 | service: zha_toolkit.hello_world 79 | data: 80 | param1: content1 81 | param2: content2 82 | ``` 83 | 84 | ### `pre-commit` 85 | 86 | `pre-commit` is a tool that helps execute a set of other tools prior to git 87 | activity. 88 | 89 | The repository is set up to format the files you're about to submit, warn 90 | about potential errors, and prevent direct check-ins to the main branch. 91 | 92 | To do so, you need to set up `pre-commit`, which is easy in itself. 93 | `pre-commit` will set up the other tools.
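Once installed (see the steps just below), you can run all hooks against
the whole tree at any time; the `pre-commit.yml` workflow of this
repository runs essentially the same command:

```sh
pre-commit run --all-files
```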
94 | 95 | Setting up is as simple as: 96 | 97 | - `pip install pre-commit` 98 | - `pre-commit install` from the base of your repository clone. 99 | 100 | That will run automatic corrections and verifications on the code that you 101 | are committing. If you want to skip automatic checks at some point to be 102 | able to check in, just run `pre-commit uninstall` at the base of the 103 | repository. Don't forget to install it again once you have committed. 104 | -------------------------------------------------------------------------------- /STATS.md: -------------------------------------------------------------------------------- 1 | # Badges showing number of downloads per version 2 | 3 | - ![badge latest](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/latest/total.svg) 4 | - ![badge v1.1.25](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.25/total.svg) 5 | - ![badge v1.1.24](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.24/total.svg) 6 | - ![badge v1.1.23](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.23/total.svg) 7 | - ![badge v1.1.22](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.22/total.svg) 8 | - ![badge v1.1.21](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.21/total.svg) 9 | - ![badge v1.1.20](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.20/total.svg) 10 | - ![badge v1.1.19](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.19/total.svg) 11 | - ![badge v1.1.18](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.18/total.svg) 12 | - ![badge v1.1.17](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.17/total.svg) 13 | - ![badge v1.1.16](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.16/total.svg) 14 | - ![badge v1.1.15](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.15/total.svg) 15 | - ![badge v1.1.14](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.14/total.svg) 16 | - ![badge v1.1.13](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.13/total.svg) 17 | - ![badge v1.1.12](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.12/total.svg) 18 | - ![badge v1.1.11](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.11/total.svg) 19 | - ![badge v1.1.10](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.10/total.svg) 20 | - ![badge v1.1.9](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.9/total.svg) 21 | - ![badge v1.1.8](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.8/total.svg) 22 | - ![badge v1.1.7](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.7/total.svg) 23 | - ![badge v1.1.6](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.6/total.svg) 24 | - ![badge v1.1.5](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.5/total.svg) 25 | - ![badge v1.1.4](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.4/total.svg) 26 | - ![badge v1.1.3](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.3/total.svg) 27 | - ![badge v1.1.2](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.1.2/total.svg) 28 | - ![badge v1.0.0](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v1.0.0/total.svg) 29 | - ![badge v0.9.9](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.9.9/total.svg) 30 | - ![badge 
v0.9.7](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.9.7/total.svg) 31 | - ![badge v0.9.5](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.9.5/total.svg) 32 | - ![badge v0.9.4](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.9.4/total.svg) 33 | - ![badge v0.9.3](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.9.3/total.svg) 34 | - ![badge v0.9.2](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.9.2/total.svg) 35 | - ![badge v0.9.1](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.9.1/total.svg) 36 | - ![badge v0.8.40](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.40/total.svg) 37 | - ![badge v0.8.39](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.39/total.svg) 38 | - ![badge v0.8.38](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.38/total.svg) 39 | - ![badge v0.8.37](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.37/total.svg) 40 | - ![badge v0.8.36](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.36/total.svg) 41 | - ![badge v0.8.35](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.35/total.svg) 42 | - ![badge v0.8.34](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.34/total.svg) 43 | - ![badge v0.8.33](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.33/total.svg) 44 | - ![badge v0.8.32](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.32/total.svg) 45 | - ![badge v0.8.31](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.31/total.svg) 46 | - ![badge v0.8.29](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.29/total.svg) 47 | - ![badge v0.8.28](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.28/total.svg) 48 | - ![badge v0.8.27](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.27/total.svg) 49 | - ![badge v0.8.26](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.26/total.svg) 50 | - ![badge v0.8.25](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.25/total.svg) 51 | - ![badge v0.8.24](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.24/total.svg) 52 | - ![badge v0.8.23](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.23/total.svg) 53 | - ![badge v0.8.22](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.22/total.svg) 54 | - ![badge v0.8.21](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.21/total.svg) 55 | - ![badge v0.8.20](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.20/total.svg) 56 | - ![badge v0.8.19](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.19/total.svg) 57 | - ![badge v0.8.18](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.18/total.svg) 58 | - ![badge v0.8.17](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.17/total.svg) 59 | - ![badge v0.8.16](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.16/total.svg) 60 | - ![badge v0.8.15](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.15/total.svg) 61 | - ![badge v0.8.14](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.14/total.svg) 62 | - ![badge v0.8.13](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.13/total.svg) 63 | - ![badge v0.8.12](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.12/total.svg) 64 | - ![badge 
v0.8.11](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.11/total.svg) 65 | - ![badge v0.8.10](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.10/total.svg) 66 | - ![badge v0.8.9](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.9/total.svg) 67 | - ![badge v0.8.8](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.8/total.svg) 68 | - ![badge v0.8.7](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.7/total.svg) 69 | - ![badge v0.8.6](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.6/total.svg) 70 | - ![badge v0.8.5](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.5/total.svg) 71 | - ![badge v0.8.4](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.4/total.svg) 72 | - ![badge v0.8.2](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.2/total.svg) 73 | - ![badge v0.8.1](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.1/total.svg) 74 | - ![badge v0.8.0](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.8.0/total.svg) 75 | - ![badge v0.7.27](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.27/total.svg) 76 | - ![badge v0.7.26](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.26/total.svg) 77 | - ![badge v0.7.25](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.25/total.svg) 78 | - ![badge v0.7.22](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.22/total.svg) 79 | - ![badge v0.7.21](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.21/total.svg) 80 | - ![badge v0.7.20](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.20/total.svg) 81 | - ![badge v0.7.19](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.19/total.svg) 82 | - ![badge v0.7.18](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.18/total.svg) 83 | - ![badge v0.7.17](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.17/total.svg) 84 | - ![badge v0.7.16](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.16/total.svg) 85 | - ![badge v0.7.15](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.15/total.svg) 86 | - ![badge v0.7.14](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.14/total.svg) 87 | - ![badge v0.7.13](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.13/total.svg) 88 | - ![badge v0.7.12](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.12/total.svg) 89 | - ![badge v0.7.11](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.11/total.svg) 90 | - ![badge v0.7.10](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.10/total.svg) 91 | - ![badge v0.7.8](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.8/total.svg) 92 | - ![badge v0.7.0](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.7.0/total.svg) 93 | - ![badge v0.5.11](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.5.11/total.svg) 94 | - ![badge v0.5.10](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.5.10/total.svg) 95 | - ![badge v0.5.9](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.5.9/total.svg) 96 | - ![badge v0.5.8](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.5.8/total.svg) 97 | - ![badge v0.5.7](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.5.7/total.svg) 98 | - ![badge 
v0.5.6](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.5.6/total.svg) 99 | - ![badge v0.5.5](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.5.5/total.svg) 100 | - ![badge v0.5.4](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.5.4/total.svg) 101 | - ![badge v0.5.3](https://img.shields.io/github/downloads/mdeweerd/zha-toolkit/v0.5.3/total.svg) 102 | -------------------------------------------------------------------------------- /blueprints/README.md: -------------------------------------------------------------------------------- 1 | - `backup.yaml`:\ 2 | Script for daily backup of supported zigbee coordinators. 3 | - `backup_znp.yaml`:\ 4 | Script for daily backup of ZNP coordinator. 5 | - `blueprint_danfoss_ally_configure_script.yaml`:\ 6 | Sample blueprint script 7 | to configure Danfoss Ally (see other script example for a more complete 8 | configuration) 9 | - `danfoss_ally_remote_temperature.yaml`:\ 10 | Send temperature to Danfoss Ally 11 | TRV at most every X minutes and at least every Y minutes. Uses restart mode to 12 | interrupt the long wait ("Y minutes"). 13 | - `danfoss_ally_remote_temperature_min_delay.yaml`:\ 14 | Send temperature to 15 | Danfoss Ally at most every X minutes. Uses single mode to block overly frequent 16 | updates. If the temperature is stable over a very long time, you 17 | should ensure that HA still considers it updated on every change. 18 | - `danfoss_ally_remote_temperature_min_delay_fake_change.yaml`:\ 19 | Same as 20 | `..._min_delay.yaml`. Work in progress - needs update of 21 | `home-assistant-variables`. Uses 22 | [snarky-snark/home-assistant-variables](https://github.com/snarky-snark/home-assistant-variables) 23 | to fake a temperature update even when it is stable, by applying a slight change 24 | in temperature at the end of the minimum delay. So even a stable temperature 25 | is still seen as a change. 26 | - `script_Thermometer_setReporting.yaml`:\ 27 | Blueprint Script to configure 28 | reporting of a zigbee device with Temperature Measurement Cluster 0x0402. 29 | -------------------------------------------------------------------------------- /blueprints/backup.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | blueprint: 3 | name: Daily Coordinator Backup - Monthly rotation 4 | description: >- 5 | Backup Zigbee Coordinator Configuration (ZNP/ezsp(bellows)), 6 | monthly rotation 7 | domain: automation 8 | input: 9 | backup_time: 10 | name: Backup time 11 | description: >- 12 | Time at which the daily backup should be made. 
13 | selector: 14 | time: 15 | trigger: 16 | - platform: time 17 | at: !input backup_time 18 | condition: [] 19 | action: 20 | - service: zha_toolkit.execute 21 | data: 22 | command: backup 23 | command_data: '{{ now().strftime("_%d") }}' 24 | event_success: zha_coordinator_backup_success 25 | event_fail: zha_coordinator_backup_failed 26 | event_done: zha_coordinator_backup_done 27 | mode: restart 28 | -------------------------------------------------------------------------------- /blueprints/backup_znp.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | blueprint: 3 | name: Daily ZNP Backup - Monthly rotation 4 | description: Backup ZNP Zigbee configuration, monthly rotation 5 | domain: automation 6 | input: 7 | backup_time: 8 | name: Backup time 9 | description: >- 10 | Time at which the daily backup should be made 11 | selector: 12 | time: 13 | trigger: 14 | - platform: time 15 | at: !input backup_time 16 | condition: [] 17 | action: 18 | - service: zha_toolkit.execute 19 | data: 20 | command: znp_backup 21 | command_data: '{{ now().strftime("_%d") }}' 22 | event_success: znp_backup_success 23 | event_fail: znp_backup_failed 24 | event_done: znp_backup_done 25 | - service: zha_toolkit.execute 26 | data: 27 | command: znp_nvram_backup 28 | command_data: '{{ now().strftime("_%d") }}' 29 | event_success: znp_nvram_backup_success 30 | event_fail: znp_nvram_backup_failed 31 | event_done: znp_nvram_backup_done 32 | mode: restart 33 | -------------------------------------------------------------------------------- /blueprints/blueprint_danfoss_ally_configure_script.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | blueprint: 3 | domain: script 4 | name: Danfoss Ally TRV configuration 5 | description: "IMPORTANT NOTE: This blueprint is provided as an example. In practice\ \ having a blueprint for a script does not seem of much use; if you think it is\ \ useful, discuss it in a GitHub discussion or issue!\nUse the script in\ \ the examples directory instead.\nA script that configures the reporting of a Danfoss\ \ Ally TRV. You can listen on the 'zha_done' event to see\ \ some of the configuration results. Sets report configuration and enables window\ \ open function." 
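# Note: `manf: 4678` in the attribute write below is 0x1246, the Danfoss
# Zigbee manufacturer code; the remote-temperature blueprints in this
# directory write the same code in hexadecimal form (manf: 0x1246).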
13 | source_url: https://github.com/mdeweerd/zha-toolkit/blob/master/blueprints/blueprint_danfoss_ally_configure_script.yaml 14 | input: 15 | device_ref: 16 | name: Ally TRV Device 17 | description: A Danfoss Ally Thermostatic Regulation Valve (TRV) to configure 18 | selector: 19 | device: 20 | manufacturer: Danfoss 21 | entity: 22 | domain: climate 23 | integration: zha 24 | variables: 25 | device: !input device_ref 26 | ieee: "{{(device_attr(device, 'identifiers')|list)[0][1]}}" 27 | sequence: 28 | - alias: Configure reporting of local_temperature in Thermostat cluster 29 | service: zha_toolkit.conf_report 30 | data: 31 | ieee: "{{ ieee }}" 32 | cluster: 0x0201 33 | attribute: 0 34 | tries: 100 35 | event_done: zha_done 36 | reportable_change: 20 37 | max_interval: 300 38 | min_interval: 19 39 | - alias: Read back reporting configuration, for debugging 40 | service: zha_toolkit.conf_report_read 41 | data: 42 | ieee: "{{ ieee }}" 43 | cluster: 0x0201 44 | attribute: 0 45 | tries: 100 46 | event_done: zha_done 47 | - alias: Enable window open functionality 48 | service: zha_toolkit.attr_write 49 | data: 50 | ieee: "{{ ieee }}" 51 | cluster: 513 52 | attribute: 16387 53 | attr_val: 0 54 | manf: 4678 55 | mode: restart 56 | icon: mdi:thermometer-check 57 | description: >- 58 | This script configures the selected Danfoss Ally TRV. 59 | Report temperature at least every 5 minutes or every 0.2°C, whichever occurs first. 60 | Enable the window open detection setting. 61 | -------------------------------------------------------------------------------- /blueprints/danfoss_ally_remote_temperature.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | blueprint: 3 | domain: automation 4 | name: Ally Temp Update 5 | description: "Update Danfoss Ally TRV external temperature with min/max refresh 7 | rate. Original source: https://community.home-assistant.io/t/danfoss-ally-trv-working-with-remote-temp-sensor/276686/149" 8 | source_url: https://github.com/mdeweerd/zha-toolkit/blob/master/blueprints/danfoss_ally_remote_temperature.yaml 9 | input: 10 | ally_device: 11 | name: Ally TRV Device 12 | description: Temperature reading will be sent to this device 13 | selector: 14 | device: 15 | manufacturer: Danfoss 16 | entity: 17 | domain: climate 18 | temp_sensor_id: 19 | name: Temperature Sensor 20 | description: 21 | External sensor from which the temperature will be read. Expects 22 | data format 12.3 (corresponding to °C) 23 | selector: 24 | entity: 25 | device_class: temperature 26 | min_update_minutes: 27 | name: Minimum update interval 28 | description: > 29 | Updates will not be sent if time from last update is less than minimum interval. 30 | Normally 30 min for uncovered, 5 min for covered. 31 | default: 5 32 | selector: 33 | number: 34 | max: 360 35 | min: 1 36 | unit_of_measurement: minutes 37 | mode: box 38 | max_update_minutes: 39 | name: Maximum update interval 40 | description: > 41 | Updates must be sent at least every 30 minutes for covered radiators, 42 | and 3 hours for uncovered radiators. 43 | Set to 30 min or 150 min. 44 | default: 150 45 | selector: 46 | number: 47 | max: 180 48 | min: 1 49 | unit_of_measurement: minutes 50 | mode: box 51 | temperature_offset: 52 | name: Temperature offset to apply to temperature measured by sensor 53 | description: > 54 | When the offset is -1.5 and the value measured by the sensor is 20 °C, then 55 | the temperature provided to the TRV will be 18.5 °C. 
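# The write action below sends the offset-corrected reading to the
# manufacturer-specific attribute 0x4015 (the Danfoss remote/external
# temperature); the x100 scaling in its template suggests units of 0.01 °C.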
56 | default: 0 57 | selector: 58 | number: 59 | max: 4.0 60 | min: -4.0 61 | step: 0.1 62 | unit_of_measurement: °C 63 | mode: box 64 | variables: 65 | device: !input ally_device 66 | ieee: "{{(device_attr(device, 'identifiers')|list)[0][1]}}" 67 | min_update_minutes: !input min_update_minutes 68 | temp_sensor_id: !input temp_sensor_id 69 | temp_offset: !input temperature_offset 70 | trigger: 71 | - platform: state 72 | entity_id: 73 | - !input temp_sensor_id 74 | - platform: homeassistant 75 | event: start 76 | condition: 77 | - condition: template 78 | value_template: > 79 | {{ as_timestamp(now()) - as_timestamp(state_attr(this.entity_id,'last_triggered'),0)|int 80 | >= (60 * min_update_minutes) }} 81 | action: 82 | - alias: Repeat reporting the temperature until restarted, or until the max_update delay expires 83 | repeat: 84 | while: "{{ 1 == 1 }}" 85 | sequence: 86 | - alias: Write remote temperature to Danfoss Ally 87 | service: zha_toolkit.attr_write 88 | data: 89 | ieee: "{{ ieee }}" 90 | cluster: 0x0201 91 | attribute: 0x4015 92 | attr_val: "{{ (((states(temp_sensor_id)|float) + temp_offset) * 100) | round(0) }}" 93 | manf: 4678 94 | - alias: 95 | Wait until the maximum update delay expires (the automation restarts 96 | when the temperature changes before then) 97 | delay: 98 | minutes: !input max_update_minutes 99 | mode: restart 100 | -------------------------------------------------------------------------------- /blueprints/danfoss_ally_remote_temperature_min_delay.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | blueprint: 3 | domain: automation 4 | name: Ally Temp Update Min Delay 5 | description: Update Danfoss Ally TRV external temperature with min refresh rate 6 | source_url: https://github.com/mdeweerd/zha-toolkit/blob/master/blueprints/danfoss_ally_remote_temperature_min_delay.yaml 7 | input: 8 | ally_device: 9 | name: Ally TRV Device 10 | description: Temperature reading will be sent to this device 11 | selector: 12 | device: 13 | manufacturer: Danfoss 14 | entity: 15 | domain: climate 16 | temp_sensor_id: 17 | name: Temperature Sensor 18 | description: 19 | External sensor from which the temperature will be read. Expects 20 | data format 12.3 (corresponding to °C) 21 | selector: 22 | entity: 23 | device_class: temperature 24 | min_update_minutes: 25 | name: Minimum update interval 26 | description: > 27 | Updates will not be sent if time from last update is less than minimum interval. 28 | Normally 30 min for uncovered, 5 min for covered. 29 | default: 30 30 | selector: 31 | number: 32 | max: 299 33 | min: 1 34 | unit_of_measurement: minutes 35 | mode: box 36 | temperature_offset: 37 | name: Temperature offset to apply to temperature measured by sensor 38 | description: > 39 | When the offset is -1.5 and the value measured by the sensor is 20 °C, then 40 | the temperature provided to the TRV will be 18.5 °C. 
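# In the condition and final step further below, readings equal to -32768
# are treated as invalid: that value is the int16 rendering of 0x8000, the
# ZCL temperature cluster's "invalid measurement" sentinel.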
41 | default: 0 42 | selector: 43 | number: 44 | max: 4.0 45 | min: -4.0 46 | step: 0.1 47 | unit_of_measurement: °C 48 | mode: box 49 | variables: 50 | device: !input ally_device 51 | ieee: "{{(device_attr(device, 'identifiers')|list)[0][1]}}" 52 | min_update_minutes: !input min_update_minutes 53 | temp_sensor_id: !input temp_sensor_id 54 | temp_offset: !input temperature_offset 55 | temperature: "{{ states(temp_sensor_id) }}" 56 | trigger: 57 | - platform: state 58 | entity_id: 59 | - !input temp_sensor_id 60 | - platform: event 61 | event_type: homeassistant_start 62 | id: ha_restart 63 | condition: 64 | - condition: template 65 | value_template: "{{ temperature != -32768 }}" 66 | action: 67 | - alias: Store ZHA reported temperature in state attribute 68 | service: zha_toolkit.ha_set_state 69 | data: 70 | state_id: "{{ temp_sensor_id }}" 71 | state_attr: best_val 72 | attr_val: "{{ (temperature|round(2)) }}" 73 | - alias: 74 | Try to get more precise temperature (should work if zigbee temperature 75 | sensor) 76 | service: zha_toolkit.attr_read 77 | data: 78 | ieee: "{{ temp_sensor_id }}" 79 | use_cache: true 80 | cluster: 1026 81 | attribute: 0 82 | state_id: "{{ temp_sensor_id }}" 83 | state_attr: best_val 84 | state_value_template: value/100 85 | - alias: 86 | Fake small change in temperature so that the next sensor update triggers 87 | an update/change event in case the write fails 88 | service: zha_toolkit.ha_set_state 89 | data: 90 | state_id: "{{ temp_sensor_id }}" 91 | attr_val: "{{ (temperature|round(2)) - 0.001 }}" 92 | - alias: Write remote temperature to Danfoss Ally 93 | service: zha_toolkit.attr_write 94 | data: 95 | ieee: "{{ ieee }}" 96 | cluster: 0x0201 97 | attribute: 0x4015 98 | manf: 0x1246 99 | attr_val: 100 | '{{ (((state_attr(temp_sensor_id, "best_val")|float) + temp_offset) 101 | * 100) | round(0) }}' 102 | read_before_write: false 103 | write_if_equal: true 104 | fail_exception: true 105 | tries: 3 106 | - alias: 107 | Wait until the minimum update delay expires (the automation blocks itself 108 | because it is in single mode) 109 | delay: 110 | minutes: !input min_update_minutes 111 | - alias: 112 | Set slightly changed temperature if it is valid to force update. Otherwise, 113 | a valid temperature will trigger anyway. 114 | if: 115 | - condition: not 116 | conditions: 117 | - condition: state 118 | entity_id: !input temp_sensor_id 119 | state: "-32768" 120 | then: 121 | - alias: 122 | Fake small change in temperature so that the next sensor update triggers 123 | an update/change event 124 | service: zha_toolkit.ha_set_state 125 | data: 126 | state_id: "{{ temp_sensor_id }}" 127 | attr_val: "{{ (states(temp_sensor_id)|round(2)) + 0.001 }}" 128 | mode: single 129 | max_exceeded: silent 130 | -------------------------------------------------------------------------------- /blueprints/script_Thermometer_setReporting.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | blueprint: 3 | domain: script 4 | name: Zigbee Thermometer Configure Reporting 5 | description: A script that configures the reporting of a zigbee thermometer. 
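# Cluster 1026 (0x0402, Temperature Measurement) reports in units of
# 0.01 °C, so in the sequence below reportable_change: 20 corresponds to
# 0.2 °C, and max_interval: 300 (seconds) to the "every 5 minutes" in the
# description at the end of this file.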
6 | source_url: https://github.com/mdeweerd/zha-toolkit/blob/master/blueprints/script_Thermometer_setReporting.yaml 7 | input: 8 | entity_name: 9 | name: entity_name 10 | description: 11 | A Zigbee Entity (all entities of the device resolve to the same 12 | address) 13 | selector: 14 | entity: 15 | integration: zha 16 | sequence: 17 | - service: zha_toolkit.conf_report 18 | data: 19 | ieee: "{{ entity_name }}" 20 | cluster: 1026 21 | attribute: 0 22 | tries: 100 23 | event_done: zha_done 24 | reportable_change: 20 25 | max_interval: 300 26 | min_interval: 19 27 | - service: zha_toolkit.conf_report_read 28 | data: 29 | ieee: "{{ entity_name }}" 30 | cluster: 1026 31 | attribute: 0 32 | tries: 100 33 | event_done: zha_done 34 | mode: restart 35 | icon: mdi:thermometer-check 36 | description: >- 37 | This script configures the selected Zigbee Thermometer to report its 38 | temperature at least every 5 minutes or every 0.2°C, whichever occurs first. 39 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/.gitignore: -------------------------------------------------------------------------------- 1 | local 2 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/_user.py: -------------------------------------------------------------------------------- 1 | # 2 | # Sample 'user.py' script 3 | # 4 | # 'user.py' should be located in the 'local' directory of the 5 | # zha_toolkit custom component. 6 | # 7 | import logging 8 | 9 | from zigpy import types as t 10 | 11 | from custom_components.zha_toolkit import utils as u 12 | from custom_components.zha_toolkit.params import INTERNAL_PARAMS as p 13 | 14 | LOGGER = logging.getLogger(__name__) 15 | 16 | 17 | async def user_test( 18 | app, listener, ieee, cmd, data, service, params, event_data 19 | ): 20 | # To be called as a service: 21 | # 22 | # ```yaml 23 | # service: zha_toolkit.execute 24 | # data: 25 | # command: user_test 26 | # ``` 27 | 28 | # Just a stub, does nothing special 29 | LOGGER.debug("User test called") 30 | 31 | 32 | async def user_sinope_write_test( 33 | app, listener, ieee, cmd, data, service, params, event_data 34 | ): 35 | # To be called as a service: 36 | # 37 | # ```yaml 38 | # service: zha_toolkit.execute 39 | # data: 40 | # command: user_sinope_write_test 41 | # ``` 42 | 43 | # The user code ignores all parameters from the service and uses local values. 44 | # This user-specific example writes attributes to a specific 45 | # Sinope thermostat. 
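    # Note: EUI64.deserialize() consumes the little-endian wire format, so
    # the human-readable form of the address below is the byte sequence
    # reversed: 50:0b:91:40:00:01:09:ae.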
46 | 47 | ieee = t.EUI64.deserialize(b"\xae\x09\x01\x00\x40\x91\x0b\x50")[0] 48 | 49 | dev = await u.get_device(app, listener, ieee) 50 | 51 | cluster = dev.endpoints[1].thermostat 52 | 53 | res = await cluster.read_attributes([9]) 54 | LOGGER.info("Reading attr status: %s", res) 55 | 56 | attrs = {0x0009: 0b00001000, 0x0012: 1400, 0x001C: 0xFF} 57 | LOGGER.debug("Writing test attrs to thermostat cluster: %s", attrs) 58 | res = await cluster.write_attributes(attrs) 59 | event_data["result"] = res 60 | LOGGER.info("Writing attrs status: %s", res) 61 | 62 | 63 | async def user_zigpy_deconz( 64 | app, listener, ieee, cmd, data, service, params, event_data 65 | ): 66 | # To be called as a service: 67 | # 68 | # ```yaml 69 | # service: zha_toolkit.execute 70 | # data: 71 | # command: user_zigpy_deconz 72 | # ``` 73 | 74 | # User changes channel of EZSP 75 | LOGGER.debug("Changing EZSP channel") 76 | res = await app._ezsp.setRadioChannel(20) 77 | LOGGER.debug("Set channel %s", res) 78 | return 79 | 80 | # User skipped this previous custom code (due to return above) 81 | # pylint: disable=unreachable 82 | LOGGER.debug("Getting model from iris: %s", service) 83 | 84 | ieee = t.EUI64(b"\x00\x0d\x6f\x00\x0f\x3a\xf6\xa6") 85 | dev = await u.get_device(app, listener, ieee) 86 | 87 | cluster = dev.endpoints[2].basic 88 | res = await cluster.read_attributes( 89 | ["model", "manufacturer"], allow_cache=False 90 | ) 91 | LOGGER.info("Iris 2nd ep attr read: %s", res) 92 | 93 | 94 | async def user_tuya_magic( 95 | app, listener, ieee, cmd, data, service, params, event_data 96 | ): 97 | """ 98 | Send Tuya 'magic spell' sequence to device 99 | to try to get 'normal' behavior. 100 | """ 101 | 102 | dev = await u.get_device(app, listener, ieee) 103 | basic_cluster = dev.endpoints[1].in_clusters[0] 104 | 105 | # The magic spell is needed only once. 106 | # TODO: Improve by doing this only once (successfully). 107 | 108 | # Magic spell - part 1 109 | attr_to_read = [4, 0, 1, 5, 7, 0xFFFE] 110 | res = await u.cluster_read_attributes( 111 | basic_cluster, attr_to_read, tries=params[p.TRIES] 112 | ) 113 | 114 | event_data["result"] = res 115 | 116 | # Magic spell - part 2 (skipped - does not seem to be needed) 117 | # attr_to_write={0xffde:13} 118 | # basic_cluster.write_attributes(attr_to_write, tries=3) 119 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/config_flow.py: -------------------------------------------------------------------------------- 1 | # Inspired from https://aarongodfrey.dev/home%20automation 2 | # /building_a_home_assistant_custom_component_part_3/ 3 | # from copy import deepcopy 4 | import logging 5 | from typing import Any, Optional 6 | 7 | from homeassistant import config_entries 8 | 9 | from . 
10 | from .const import DOMAIN
11 | 
12 | # from homeassistant.const import CONF_ACCESS_TOKEN,CONF_NAME
13 | # from homeassistant.const import CONF_PATH,CONF_URL
14 | # from homeassistant.core import callback
15 | # from homeassistant.helpers.aiohttp_client import async_get_clientsession
16 | # import homeassistant.helpers.config_validation as cv
17 | # from homeassistant.helpers.entity_registry import (
18 | #     async_entries_for_config_entry,
19 | #     async_get_registry,
20 | # )
21 | # import voluptuous as vol
22 | 
23 | 
24 | _LOGGER = logging.getLogger(__name__)
25 | 
26 | # INITIAL_CONFIG_SCHEMA = vol.Schema(
27 | #     #{vol.Required(CONF_SKEY): cv.string, vol.Optional(CONF_O_KEY): cv.string}
28 | # )
29 | # EXTRA_CONF_SCHEMA = vol.Schema(
30 | #     {
31 | #         #vol.Required(CONF_PATH): cv.string,
32 | #         #vol.Optional(CONF_NAME): cv.string,
33 | #         #vol.Optional("add_another"): cv.boolean,
34 | #     }
35 | # )
36 | 
37 | # OPTIONS_SCHEMA = vol.Schema({vol.Optional(CONF_NM, default="go"): cv.string})
38 | 
39 | 
40 | class ZhaToolkitCustomConfigFlow(
41 |     config_entries.ConfigFlow, domain=DOMAIN
42 | ):  # type:ignore[call-arg]
43 |     """Zha Toolkit Custom config flow."""
44 | 
45 |     VERSION = 0
46 |     MINOR_VERSION = 1
47 | 
48 |     data: Optional[dict[str, Any]]
49 | 
50 |     async def my_async_create_entry(self):
51 |         if self.data is None:
52 |             self.data = {}
53 |         self.data["VERSION"] = await u.getVersion()
54 |         # Create the configuration entry
55 |         return self.async_create_entry(title="ZHA Toolkit", data=self.data)
56 | 
57 |     async def async_step_user(
58 |         self, user_input: Optional[dict[str, Any]] = None
59 |     ):
60 |         """Invoked when a user initiates a flow via the user interface."""
61 |         # errors: dict[str, str] = {}
62 |         # Nothing special to configure, end configuration step
63 |         return await self.my_async_create_entry()
64 | 
65 | 
66 | #         if user_input is not None:
67 | #             # Initially None, but not None when user entered data.
68 | #             try:
69 | #                 await validate_something(
70 | #                     user_input[CONF_ACCESS_TOKEN], self.hass
71 | #                 )
72 | #             except ValueError:
73 | #                 errors["base"] = "error_message"  # key in `strings.json`
74 | #             if not errors:
75 | #                 # Input is valid, set data.
76 | #                 self.data = user_input
77 | #                 self.data[CONF_SOME_KEY] = []
78 | #                 # Return the form of the next step.
79 | #                 return await self.async_step_repo()
80 | 
81 | #         return self.async_show_form(
82 | #             step_id="user", data_schema=INITIAL_CONFIG_SCHEMA, errors=errors
83 | #         )
84 | 
85 | #     async def async_step_repo(
86 | #         self, user_input: Optional[Dict[str, Any]] = None
87 | #     ):
88 | #         """Second step in config flow to add a repo to watch."""
89 | #         errors: Dict[str, str] = {}
90 | #         if user_input is not None:
91 | #             # Validate the path.
92 | #             try:
93 | #                 await validate_path(
94 | #                     user_input[CONF_PATH],
95 | #                     self.data[CONF_ACCESS_TOKEN],
96 | #                     self.hass,
97 | #                 )
98 | #             except ValueError:
99 | #                 errors["base"] = "invalid_path"
100 | 
101 | #             if not errors:
102 | #                 # Input is valid, set data.
103 | #                 self.data[CONF_REPOS].append(
104 | #                     {
105 | #                         "path": user_input[CONF_PATH],
106 | #                         "name": user_input.get(
107 | #                             CONF_NAME, user_input[CONF_PATH]
108 | #                         ),
109 | #                     }
110 | #                 )
111 | #                 # If user ticked the box show this form again so they can add
112 | #                 # an additional repo.
113 | #                 if user_input.get("add_another", False):
114 | #                     return await self.async_step_repo()
115 | 
116 | #             # User is done adding repos, create the config entry.
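#             # (Assumed standard HA behavior, for context: async_create_entry
#             # ends the flow and persists whatever is passed as 'data'; it is
#             # later available on the created entry as config_entry.data.)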
117 | # return self.async_create_entry( 118 | # title="GitHub Custom", data=self.data 119 | # ) 120 | 121 | # return self.async_show_form( 122 | # step_id="repo", data_schema=EXTRA_CONF_SCHEMA, errors=errors 123 | # ) 124 | 125 | # @staticmethod 126 | # @callback 127 | # def async_get_options_flow(config_entry): 128 | # """Get the options flow for this handler.""" 129 | # return OptionsFlowHandler(config_entry) 130 | 131 | 132 | # class OptionsFlowHandler(config_entries.OptionsFlow): 133 | # """Handles options flow for the component.""" 134 | 135 | # def __init__(self, config_entry: config_entries.ConfigEntry) -> None: 136 | # self.config_entry = config_entry 137 | 138 | # async def async_step_init( 139 | # self, user_input: Dict[str, Any] = None 140 | # ) -> Dict[str, Any]: 141 | # """Manage the options for the custom component.""" 142 | # errors: Dict[str, str] = {} 143 | # # Grab all configured repos from the entity registry so we can populate 144 | # # the multi-select dropdown that will allow a user to remove a repo. 145 | # entity_registry = await async_get_registry(self.hass) 146 | # entries = async_entries_for_config_entry( 147 | # entity_registry, self.config_entry.entry_id 148 | # ) 149 | # # Default value for our multi-select. 150 | # all_repos = {e.entity_id: e.original_name for e in entries} 151 | # repo_map = {e.entity_id: e for e in entries} 152 | 153 | # if user_input is not None: 154 | # updated_repos = deepcopy(self.config_entry.data[CONF_REPOS]) 155 | 156 | # # Remove any unchecked repos. 157 | # removed_entities = [ 158 | # entity_id 159 | # for entity_id in repo_map.keys() 160 | # if entity_id not in user_input["repos"] 161 | # ] 162 | # for entity_id in removed_entities: 163 | # # Unregister from HA 164 | # entity_registry.async_remove(entity_id) 165 | # # Remove from our configured repos. 166 | # entry = repo_map[entity_id] 167 | # entry_path = entry.unique_id 168 | # updated_repos = [ 169 | # e for e in updated_repos if e["path"] != entry_path 170 | # ] 171 | 172 | # if user_input.get(CONF_PATH): 173 | # # Validate the path. 174 | # access_token = self.hass.data[DOMAIN][ 175 | # self.config_entry.entry_id 176 | # ][CONF_ACCESS_TOKEN] 177 | # try: 178 | # await validate_path( 179 | # user_input[CONF_PATH], access_token, self.hass 180 | # ) 181 | # except ValueError: 182 | # errors["base"] = "invalid_path" 183 | 184 | # if not errors: 185 | # # Add the new repo. 186 | # updated_repos.append( 187 | # { 188 | # "path": user_input[CONF_PATH], 189 | # "name": user_input.get( 190 | # CONF_NAME, user_input[CONF_PATH] 191 | # ), 192 | # } 193 | # ) 194 | 195 | # if not errors: 196 | # # Value of data will be set on the options property of our 197 | # # config_entry instance. 
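#             # (Note, assuming standard HA semantics: in an options flow,
#             # async_create_entry does not create a new entry; the 'data'
#             # dict is stored on the existing entry's .options and any
#             # registered update listeners are invoked, hence title="".)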
198 | # return self.async_create_entry( 199 | # title="", 200 | # data={CONF_REPOS: updated_repos}, 201 | # ) 202 | 203 | # options_schema = vol.Schema( 204 | # { 205 | # vol.Optional( 206 | # "repos", default=list(all_repos.keys()) 207 | # ): cv.multi_select(all_repos), 208 | # vol.Optional(CONF_PATH): cv.string, 209 | # vol.Optional(CONF_NAME): cv.string, 210 | # } 211 | # ) 212 | # return self.async_show_form( 213 | # step_id="init", data_schema=options_schema, errors=errors 214 | # ) 215 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/const.py: -------------------------------------------------------------------------------- 1 | DOMAIN = "zha_toolkit" 2 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/default.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import importlib 4 | import logging 5 | import sys 6 | from typing import TYPE_CHECKING 7 | 8 | if TYPE_CHECKING: 9 | from types import ModuleType 10 | 11 | LOGGER = logging.getLogger(__name__) 12 | 13 | 14 | async def default(app, listener, ieee, cmd, data, service, params, event_data): 15 | """Default handler that delegates CORE_ACTION to CORE.py/ACTION""" 16 | 17 | # This defaults handler enables adding new handler methods 18 | # by adding a file such as "CORE.py" containing the 19 | # ACTION. The corresponding service name is "CORE_ACTION". 20 | # 21 | # This avoids having to add the mapping in __init__.py 22 | # and also allows the user to freely add new services. 23 | 24 | # get our package name to know where to load from 25 | package_name = vars(sys.modules[__name__])["__package__"] 26 | 27 | # The module name is before the '_' and the command 28 | # is the entire string 29 | if isinstance(cmd, str): 30 | module_name = cmd[: cmd.index("_")] 31 | else: 32 | # When cmd is not a string, it must be a list [ MODULE, CMD ] 33 | module_name = cmd[0] 34 | cmd = cmd[1] 35 | 36 | def _reload_command_module() -> ModuleType: 37 | LOGGER.debug( 38 | f"Trying to import {package_name}.{module_name} to call {cmd}" 39 | ) 40 | m = importlib.import_module(f".{module_name}", package=package_name) 41 | 42 | importlib.reload(m) 43 | return m 44 | 45 | m = await listener.hass.async_add_import_executor_job( 46 | _reload_command_module 47 | ) 48 | # Get handler (cmd) in loaded module. 49 | handler = getattr(m, cmd) 50 | # Call the handler 51 | await handler(app, listener, ieee, cmd, data, service, params, event_data) 52 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/ezsp.py: -------------------------------------------------------------------------------- 1 | import binascii 2 | import logging 3 | 4 | import bellows 5 | import bellows.types as bt 6 | import zigpy.zdo.types 7 | from zigpy import types as t 8 | 9 | from . 
import utils as u
10 | 
11 | LOGGER = logging.getLogger(__name__)
12 | 
13 | 
14 | async def ezsp_set_channel(
15 |     app, listener, ieee, cmd, data, service, params, event_data
16 | ):
17 |     ch = t.uint8_t(data)
18 |     assert 11 <= ch <= 26
19 |     ch_mask = zigpy.types.Channels(1 << ch)
20 | 
21 |     LOGGER.info("Setting EZSP channel to: %s/%s", ch, ch_mask)
22 | 
23 |     aps_frame = bellows.types.EmberApsFrame(
24 |         profileId=0x0000,
25 |         clusterId=zigpy.zdo.types.ZDOCmd.Mgmt_NWK_Update_req,
26 |         sourceEndpoint=0x00,
27 |         destinationEndpoint=0x00,
28 |         options=bellows.types.EmberApsOption.APS_OPTION_NONE,
29 |         groupId=0x0000,
30 |         sequence=0xDE,
31 |     )
32 | 
33 |     status, _, network_params = await app._ezsp.getNetworkParameters()
34 |     if status != bellows.types.EmberStatus.SUCCESS:
35 |         msg = (
36 |             f"Couldn't get network parameters, abort channel change: {status}"
37 |         )
38 |         event_data["errors"].append(msg)
39 |         raise RuntimeError(msg)
40 | 
41 |     event_data["nwk_params"] = network_params
42 | 
43 |     payload = b"\xDE" + ch_mask.serialize() + b"\xFE"
44 |     payload += network_params.nwkUpdateId.serialize()
45 | 
46 |     status, _ = await app._ezsp.sendBroadcast(
47 |         zigpy.types.BroadcastAddress.ALL_DEVICES,
48 |         aps_frame,
49 |         0x00,
50 |         0x01,
51 |         payload,
52 |     )
53 |     success = status == bellows.types.EmberStatus.SUCCESS
54 |     event_data["success"] = success
55 | 
56 |     if not success:
57 |         return
58 | 
59 |     res = await app._ezsp.setRadioChannel(ch)
60 |     event_data["result"] = res
61 |     LOGGER.info("Set channel status: %s", res)
62 | 
63 | 
64 | async def ezsp_get_token(
65 |     app, listener, ieee, cmd, data, service, params, event_data
66 | ):
67 |     token = t.uint8_t(data)
68 |     event_data["tokens_info"] = {}
69 |     for token in range(0, 31):
70 |         LOGGER.info(f"Getting {token} token...")
71 |         res = await app._ezsp.getToken(token)
72 |         tkInfo = {
73 |             "status": res[0],
74 |             "data": binascii.hexlify(res[1].serialize()),
75 |         }
76 |         event_data["tokens_info"][token] = tkInfo
77 |         LOGGER.info(f"Getting token {token} status: {res[0]}")
78 |         LOGGER.info(f"Getting token {token} data: {res[1]}")
79 |         LOGGER.info(
80 |             f"Getting token {token} data: "
81 |             f"{binascii.hexlify(res[1].serialize())}"
82 |         )
83 | 
84 | 
85 | async def ezsp_start_mfg(
86 |     app, listener, ieee, cmd, data, service, params, event_data
87 | ):
88 |     event_data["results"] = []
89 |     LOGGER.info("Starting mfg lib")
90 |     res = await app._ezsp.mfglibStart(True)
91 |     event_data["results"].append(res)
92 |     LOGGER.info("starting mfg lib result: %s", res)
93 | 
94 |     channel = 11
95 |     res = await app._ezsp.mfglibSetChannel(channel)
96 |     event_data["results"].append(res)
97 |     LOGGER.info("mfg lib change channel: %s", res)
98 | 
99 |     res = await app._ezsp.mfglibEnd()
100 |     event_data["results"].append(res)
101 |     LOGGER.info("mfg lib end: %s", res)
102 | 
103 | 
104 | async def ezsp_get_keys(
105 |     app, listener, ieee, cmd, data, service, params, event_data
106 | ):
107 |     LOGGER.info("getting all keys")
108 |     result = {}
109 |     erase = data is not None and data
110 |     warnings = []
111 | 
112 |     for idx in range(0, 192):
113 |         LOGGER.debug("Getting key index %s", idx)
114 |         (status, key_struct) = await app._ezsp.getKeyTableEntry(idx)
115 |         if status == app._ezsp.types.EmberStatus.SUCCESS:
116 |             result[idx] = key_struct
117 |             if key_struct.partnerEUI64 not in app.devices:
118 |                 warn = "Partner {} for key {} is not present".format(
119 |                     key_struct.partnerEUI64,
120 |                     idx,
121 |                 )
122 |                 warnings.append(warn)
123 |                 LOGGER.warning(warn)
124 |                 if erase:
125 |                     await 
app._ezsp.eraseKeyTableEntry(idx) 126 | elif status == app._ezsp.types.EmberStatus.INDEX_OUT_OF_RANGE: 127 | break 128 | else: 129 | warn = f"No key at {idx} idx: {status}" 130 | warnings.append(warn) 131 | LOGGER.warning(warn) 132 | 133 | event_data["warnings"] = warnings 134 | event_data["result"] = result 135 | for idx, item in result.items(): 136 | LOGGER.info("EZSP %s key: %s", idx, item) 137 | _, _, nwkParams = await app._ezsp.getNetworkParameters() 138 | LOGGER.info("Current network: %s", nwkParams) 139 | event_data["network"] = nwkParams 140 | 141 | 142 | async def ezsp_add_transient_key( 143 | app, listener, ieee, cmd, data, service, params, event_data 144 | ): 145 | LOGGER.info("adding well known link key as transient key") 146 | if ieee is None: 147 | msg = "No ieee to install transient key for" 148 | LOGGER.error(msg) 149 | raise ValueError(msg) 150 | 151 | (status,) = await app._ezsp.addTransientLinkKey(ieee, b"ZigbeeAlliance09") 152 | LOGGER.debug("Installed key for %s: %s", ieee, status) 153 | event_data["result"] = status 154 | 155 | 156 | async def ezsp_get_ieee_by_nwk( 157 | app, listener, ieee, cmd, data, service, params, event_data 158 | ): 159 | LOGGER.info("Lookup IEEE by nwk") 160 | nwk = u.str2int(data) 161 | status, eui64 = await app._ezsp.lookupEui64ByNodeId(nwk) 162 | LOGGER.debug("nwk: 0x%04x, ieee: %s, status: %s", nwk, eui64, status) 163 | event_data["nwk"] = nwk 164 | event_data["ieee"] = repr(eui64) 165 | event_data["status"] = status 166 | 167 | 168 | async def ezsp_get_policy( 169 | app, listener, ieee, cmd, data, service, params, event_data 170 | ): 171 | policy = int(data) 172 | 173 | LOGGER.info("Getting EZSP %s policy id", policy) 174 | _status, value = await app._ezsp.getPolicy(policy) 175 | LOGGER.debug( 176 | "policy: %s, value: %s", app._ezsp.types.EzspPolicyId(policy), value 177 | ) 178 | event_data["policy"] = repr(app._ezsp.types.EzspPolicyId(policy)) 179 | event_data["policy_value"] = repr(value) 180 | 181 | 182 | async def ezsp_clear_keys( 183 | app, listener, ieee, cmd, data, service, params, event_data 184 | ): 185 | LOGGER.info("Clear key table") 186 | (status,) = await app._ezsp.clearKeyTable() 187 | LOGGER.info("Cleared key table: %s", status) 188 | event_data["status"] = status 189 | 190 | 191 | async def ezsp_get_config_value( 192 | app, listener, ieee, cmd, data, service, params, event_data 193 | ): 194 | if data is None: 195 | msg = "Need EZSP config value" 196 | LOGGER.error(msg) 197 | raise ValueError(msg) 198 | 199 | cfg_id = app._ezsp.types.EzspConfigId(data) 200 | LOGGER.info("Getting EZSP configuration value: %s", cfg_id) 201 | (status, value) = await app._ezsp.getConfigurationValue(cfg_id) 202 | if status != app._ezsp.types.EzspStatus.SUCCESS: 203 | msg = f"Couldn't get {status} configuration value: {cfg_id}" 204 | LOGGER.error(msg) 205 | raise RuntimeError(msg) 206 | 207 | LOGGER.info("%s = %s", cfg_id.name, value) 208 | event_data["result"] = value 209 | 210 | 211 | async def ezsp_get_value( 212 | app, listener, ieee, cmd, data, service, params, event_data 213 | ): 214 | if data is None: 215 | msg = "Need EZSP value id" 216 | LOGGER.error(msg) 217 | raise ValueError(msg) 218 | 219 | value_id = app._ezsp.types.EzspValueId(data) 220 | LOGGER.info("Getting EZSP value: %s", value_id) 221 | (status, value) = await app._ezsp.getValue(value_id) 222 | if status != app._ezsp.types.EzspStatus.SUCCESS: 223 | msg = f"Couldn't get {status} value: {value_id}" 224 | LOGGER.error(msg) 225 | raise RuntimeError(msg) 226 | 227 | LOGGER.info("%s 
= %s", value_id.name, value) 228 | event_data["ezsp_" + value_id.name] = repr(value) 229 | 230 | 231 | # Legacy implementation 232 | # 233 | # See https://github.com/zigpy/bellows/tree/dev/bellows/cli 234 | # 235 | # Code essentially from 236 | # https://github.com/zigpy/bellows/blob/dev/bellows/cli/backup.py 237 | # 238 | async def ezsp_backup_legacy( 239 | app, listener, ieee, cmd, data, service, params, event_data 240 | ): 241 | if u.get_radiotype(app) != u.RadioType.EZSP: 242 | msg = f"'{cmd}' is only available for BELLOWS/EZSP" 243 | LOGGER.debug(msg) 244 | raise ValueError(msg) 245 | 246 | # Import stuff we need 247 | import json 248 | 249 | from bellows.cli.backup import ( # isort:skip 250 | ATTR_NODE_TYPE, 251 | ATTR_NODE_ID, 252 | ATTR_NODE_EUI64, 253 | ATTR_PAN_ID, 254 | ATTR_EXT_PAN_ID, 255 | ATTR_RADIO_CHANNEL, 256 | ATTR_RADIO_TX_PWR, 257 | ATTR_NWK_UPDATE_ID, 258 | ATTR_CHANNELS, 259 | ATTR_KEY_GLOBAL, 260 | ATTR_KEY_NWK, 261 | ATTR_KEY_PARTNER, 262 | ATTR_KEY_TABLE, 263 | _backup_keys, 264 | ) 265 | 266 | (status, node_type, network) = await app._ezsp.getNetworkParameters() 267 | assert status == bt.EmberStatus.SUCCESS 268 | assert node_type == app._ezsp.types.EmberNodeType.COORDINATOR 269 | LOGGER.debug("Network params: %s", network) 270 | 271 | (node_id,) = await app._ezsp.getNodeId() 272 | (ieee,) = await app._ezsp.getEui64() 273 | 274 | result = { 275 | ATTR_NODE_TYPE: node_type.value, 276 | ATTR_NODE_ID: node_id, 277 | ATTR_NODE_EUI64: str(ieee), 278 | ATTR_PAN_ID: network.panId, 279 | ATTR_EXT_PAN_ID: str(network.extendedPanId), 280 | ATTR_RADIO_CHANNEL: network.radioChannel, 281 | ATTR_RADIO_TX_PWR: network.radioTxPower, 282 | ATTR_NWK_UPDATE_ID: network.nwkUpdateId, 283 | ATTR_CHANNELS: network.channels, 284 | } 285 | 286 | for key_name, key_type in ( 287 | (ATTR_KEY_GLOBAL, app._ezsp.types.EmberKeyType.TRUST_CENTER_LINK_KEY), 288 | (ATTR_KEY_NWK, app._ezsp.types.EmberKeyType.CURRENT_NETWORK_KEY), 289 | ): 290 | (status, key) = await app._ezsp.getKey(key_type) 291 | assert status == bt.EmberStatus.SUCCESS 292 | LOGGER.debug("%s key: %s", key_name, key) 293 | result[key_name] = key.as_dict() 294 | # 295 | result[key_name][ATTR_KEY_PARTNER] = str(key.partnerEUI64) 296 | 297 | keys = await _backup_keys(app._ezsp) 298 | result[ATTR_KEY_TABLE] = keys 299 | 300 | # Store backup information to file 301 | 302 | # Set name with regards to local path 303 | out_dir = u.get_local_dir() 304 | 305 | # Ensure that data is an empty string when not set 306 | if data is None: 307 | data = "" 308 | 309 | fname = out_dir + "nwk_backup" + str(data) + ".json" 310 | 311 | with open(fname, "w", encoding="utf_8") as jsonfile: 312 | jsonfile.write(json.dumps(result, indent=4)) 313 | 314 | 315 | async def ezsp_dummy_networkInit(): 316 | return (bellows.types.EmberStatus.SUCCESS,) 317 | 318 | 319 | async def ezsp_click_get_echo(s): 320 | LOGGER.error(f"GET_ECHO: {s}") 321 | bellows.cli._result = s 322 | 323 | 324 | async def ezsp_backup( 325 | app, listener, ieee, cmd, data, service, params, event_data 326 | ): 327 | if u.get_radiotype(app) != u.RadioType.EZSP: 328 | msg = f"'{cmd}' is only available for BELLOWS/EZSP" 329 | LOGGER.debug(msg) 330 | raise ValueError(msg) 331 | 332 | # Import stuff we need 333 | import io 334 | import json 335 | from contextlib import redirect_stdout 336 | 337 | from bellows.cli import backup as bellows_backup 338 | 339 | try: 340 | # Network is already initialised, fake result for backup function 341 | org_network_init = app._ezsp.networkInit 342 | 
app._ezsp.networkInit = ezsp_dummy_networkInit 343 | f = io.StringIO() 344 | with redirect_stdout(f): 345 | await bellows_backup._backup(app._ezsp) 346 | result = f.getvalue() 347 | finally: 348 | app._ezsp.networkInit = org_network_init # pylint: disable=E0601 349 | 350 | # Store backup information to file 351 | 352 | # Set name with regards to local path 353 | out_dir = u.get_local_dir() 354 | 355 | # Ensure that data is an empty string when not set 356 | if data is None: 357 | data = "" 358 | 359 | fname = out_dir + "nwk_backup" + str(data) + ".json" 360 | 361 | with open(fname, "w", encoding="utf_8") as jsonfile: 362 | jsonfile.write(json.dumps(json.loads(result), indent=4)) 363 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/ezsp_backup.py: -------------------------------------------------------------------------------- 1 | # Code from 2 | # https://raw.githubusercontent.com/puddly/bellows/puddly/open-coordinator-backup/bellows/cli/backup.py 3 | # slightly adapted 4 | # 5 | 6 | import datetime 7 | import logging 8 | 9 | import bellows 10 | import bellows.types as t 11 | 12 | LOGGER = logging.getLogger(__name__) 13 | 14 | 15 | EMBER_TABLE_ENTRY_UNUSED_NODE_ID = 0xFFFF 16 | EMBER_UNKNOWN_NODE_ID = 0xFFFD 17 | EMBER_DISCOVERY_ACTIVE_NODE_ID = 0xFFFC 18 | 19 | 20 | async def _backup(ezsp): 21 | # (status,) = await ezsp.networkInit() 22 | # assert status == t.EmberStatus.SUCCESS 23 | 24 | (status, node_type, network) = await ezsp.getNetworkParameters() 25 | assert status == t.EmberStatus.SUCCESS 26 | assert node_type == ezsp.types.EmberNodeType.COORDINATOR 27 | 28 | (ieee,) = await ezsp.getEui64() 29 | 30 | (status, nwk_key) = await ezsp.getKey( 31 | ezsp.types.EmberKeyType.CURRENT_NETWORK_KEY 32 | ) 33 | assert status == t.EmberStatus.SUCCESS 34 | 35 | (status, security_level) = await ezsp.getConfigurationValue( 36 | ezsp.types.EzspConfigId.CONFIG_SECURITY_LEVEL 37 | ) 38 | assert status == t.EmberStatus.SUCCESS 39 | 40 | (status, _tclk) = await ezsp.getKey( 41 | ezsp.types.EmberKeyType.TRUST_CENTER_LINK_KEY 42 | ) 43 | assert status == t.EmberStatus.SUCCESS 44 | 45 | addresses = {} 46 | 47 | for idx in range(0, 255 + 1): 48 | (nwk,) = await ezsp.getAddressTableRemoteNodeId(idx) 49 | (eui64,) = await ezsp.getAddressTableRemoteEui64(idx) 50 | 51 | if nwk == EMBER_TABLE_ENTRY_UNUSED_NODE_ID: 52 | continue 53 | if nwk == EMBER_UNKNOWN_NODE_ID: 54 | LOGGER.warning("NWK address for %s is unknown!", eui64) 55 | continue 56 | if nwk == EMBER_DISCOVERY_ACTIVE_NODE_ID: 57 | LOGGER.warning( 58 | "NWK address discovery for %s is currently ongoing", eui64 59 | ) 60 | continue 61 | 62 | LOGGER.debug("NWK for %s is %s", eui64, nwk) 63 | addresses[eui64] = nwk 64 | 65 | keys = {} 66 | 67 | for idx in range(0, 192): 68 | (status, key_struct) = await ezsp.getKeyTableEntry(idx) 69 | LOGGER.debug( 70 | "Got key at index %s status: %s key_struct: %s", 71 | idx, 72 | status, 73 | key_struct, 74 | ) 75 | 76 | if status == t.EmberStatus.SUCCESS: 77 | keys[key_struct.partnerEUI64] = key_struct 78 | elif status == t.EmberStatus.INDEX_OUT_OF_RANGE: 79 | break 80 | 81 | now = datetime.datetime.now().astimezone() 82 | result = { 83 | "metadata": { 84 | "version": 1, 85 | "format": "zigpy/open-coordinator-backup", 86 | "source": f"bellows@{bellows.__version__}", 87 | "internal": { 88 | "creation_time": now.isoformat(timespec="seconds"), 89 | }, 90 | }, 91 | "coordinator_ieee": ieee.serialize()[::-1].hex(), 92 | "pan_id": 
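        # (The [::-1] reversals here and below are because zigpy serializes
        # these fields little-endian, while the open-coordinator-backup
        # format stores them as big-endian hex strings.)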
network.panId.serialize()[::-1].hex(), 93 | "extended_pan_id": network.extendedPanId.serialize()[::-1].hex(), 94 | "nwk_update_id": network.nwkUpdateId, 95 | "security_level": security_level, 96 | "channel": network.radioChannel, 97 | "channel_mask": list(network.channels), 98 | "network_key": { 99 | "key": nwk_key.key.serialize().hex(), 100 | "sequence_number": nwk_key.sequenceNumber, 101 | "frame_counter": nwk_key.outgoingFrameCounter, 102 | }, 103 | "devices": [ 104 | { 105 | "ieee_address": ieee.serialize()[::-1].hex(), 106 | "link_key": { 107 | "key": key.key.serialize().hex(), 108 | "rx_counter": key.incomingFrameCounter, 109 | "tx_counter": key.outgoingFrameCounter, 110 | }, 111 | "nwk_address": addresses[ieee].serialize()[::-1].hex(), 112 | } 113 | for ieee, key in keys.items() 114 | if ieee in addresses 115 | ], 116 | } 117 | return result 118 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/groups.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import logging 4 | from typing import Any 5 | 6 | from . import utils as u 7 | from .params import INTERNAL_PARAMS as p 8 | 9 | LOGGER = logging.getLogger(__name__) 10 | 11 | 12 | async def get_groups( 13 | app, listener, ieee, cmd, data, service, params, event_data 14 | ): 15 | if ieee is None: 16 | LOGGER.error("missing ieee") 17 | return 18 | 19 | src_dev = await u.get_device(app, listener, ieee) 20 | 21 | groups: dict[int, dict[str, Any]] = {} 22 | endpoint_id = params[p.EP_ID] 23 | 24 | event_data["result"] = [] 25 | for ep_id, ep in src_dev.endpoints.items(): 26 | if ep_id == 0 or (endpoint_id is not None and ep_id != endpoint_id): 27 | continue 28 | try: 29 | ep_info: dict[str, Any] = {} 30 | res = await u.retry_wrapper( 31 | ep.groups.read_attributes, 32 | ["name_support"], 33 | tries=params[p.TRIES], 34 | ) 35 | event_data["result"].append(res) 36 | 37 | name_support = res[0]["name_support"] 38 | ep_info["name_support"] = int(name_support) 39 | LOGGER.debug( 40 | "Group on 0x%04X EP %u name support: %s", 41 | src_dev.nwk, 42 | ep_id, 43 | name_support, 44 | ) 45 | 46 | all_groups = await u.retry_wrapper( 47 | ep.groups.get_membership, [], tries=params[p.TRIES] 48 | ) 49 | LOGGER.debug( 50 | "Groups on 0x%04X EP %u : %s", src_dev.nwk, ep_id, all_groups 51 | ) 52 | ep_info["groups"] = all_groups[1] 53 | groups[ep_id] = ep_info 54 | except AttributeError: 55 | LOGGER.debug( 56 | "0x%04X/EP %u: no group cluster found", src_dev.nwk, ep_id 57 | ) 58 | 59 | event_data["groups"] = groups 60 | 61 | 62 | async def add_group( 63 | app, listener, ieee, cmd, data, service, params, event_data 64 | ): 65 | if ieee is None or not data: 66 | raise ValueError("ieee and command_data required") 67 | 68 | src_dev = await u.get_device(app, listener, ieee) 69 | 70 | group_id = u.str2int(data) 71 | endpoint_id = params[p.EP_ID] 72 | 73 | result = [] 74 | for ep_id, ep in src_dev.endpoints.items(): 75 | if ep_id == 0 or (endpoint_id is not None and ep_id != endpoint_id): 76 | # Skip ZDO or endpoints that are not selected 77 | continue 78 | try: 79 | res = await u.retry_wrapper( 80 | ep.groups.add, 81 | group_id, 82 | f"group {group_id}", 83 | tries=params[p.TRIES], 84 | ) 85 | result.append(res) 86 | LOGGER.debug( 87 | "0x%04x EP %u: Setting group 0x%04x: %s", 88 | src_dev.nwk, 89 | ep_id, 90 | group_id, 91 | res, 92 | ) 93 | except AttributeError: 94 | LOGGER.debug( 95 | "0x%04x EP %u : no group cluster 
found", src_dev.nwk, ep_id 96 | ) 97 | 98 | event_data["result"] = result 99 | 100 | 101 | async def remove_group( 102 | app, listener, ieee, cmd, data, service, params, event_data 103 | ): 104 | if ieee is None or not data: 105 | raise ValueError("ieee and command_data required") 106 | 107 | src_dev = await u.get_device(app, listener, ieee) 108 | 109 | group_id = u.str2int(data) 110 | endpoint_id = params[p.EP_ID] 111 | 112 | result = [] 113 | for ep_id, ep in src_dev.endpoints.items(): 114 | if ep_id == 0 or (endpoint_id is not None and ep_id != endpoint_id): 115 | # Skip ZDO or endpoints that are not selected 116 | continue 117 | try: 118 | res = await ep.groups.remove(group_id) 119 | result.append(res) 120 | LOGGER.debug( 121 | "0x%04x EP %u: Removing group 0x%04x: %s", 122 | src_dev.nwk, 123 | ep_id, 124 | group_id, 125 | res, 126 | ) 127 | except AttributeError: 128 | LOGGER.debug( 129 | "0x%04x EP %u: no group cluster found", src_dev.nwk, ep_id 130 | ) 131 | 132 | event_data["result"] = result 133 | 134 | 135 | async def remove_all_groups( 136 | app, listener, ieee, cmd, data, service, params, event_data 137 | ): 138 | LOGGER.debug("running 'remove all group' command: %s", service) 139 | if ieee is None: 140 | return 141 | 142 | src_dev = await u.get_device(app, listener, ieee) 143 | endpoint_id = params[p.EP_ID] 144 | result = [] 145 | 146 | for ep_id, ep in src_dev.endpoints.items(): 147 | if ep_id == 0 or (endpoint_id is not None and ep_id != endpoint_id): 148 | continue 149 | try: 150 | res = await ep.groups.remove_all() 151 | result.append(res) 152 | LOGGER.debug("0x%04x: Removing all groups: %s", src_dev.nwk, res) 153 | except AttributeError: 154 | LOGGER.debug( 155 | "0x%04x: no group cluster on endpoint #%d", src_dev.nwk, ep_id 156 | ) 157 | 158 | event_data["result"] = result 159 | 160 | 161 | async def add_to_group( 162 | app, listener, ieee, cmd, data, service, params, event_data 163 | ): 164 | if data is None or ieee is None: 165 | LOGGER.error("invalid arguments for subscribe_group()") 166 | return 167 | 168 | dev = await u.get_device(app, listener, ieee) 169 | 170 | grp_id = u.str2int(data) 171 | endpoint_id = params[p.EP_ID] 172 | 173 | result = [] 174 | for ep_id, ep in dev.endpoints.items(): 175 | if ep_id == 0 or (endpoint_id is not None and ep_id != endpoint_id): 176 | continue 177 | LOGGER.debug("Subscribing %s EP %u to group: %s", ieee, ep_id, grp_id) 178 | res = await ep.add_to_group(grp_id, f"Group {data}") 179 | result.append(res) 180 | LOGGER.info( 181 | "Subscribed %s EP %u to group: %s Result: %r", 182 | ieee, 183 | ep_id, 184 | grp_id, 185 | res, 186 | ) 187 | 188 | event_data["result"] = result 189 | 190 | 191 | async def remove_from_group( 192 | app, listener, ieee, cmd, data, service, params, event_data 193 | ): 194 | if data is None or ieee is None: 195 | raise ValueError("ieee and command_data required") 196 | 197 | dev = await u.get_device(app, listener, ieee) 198 | 199 | grp_id = u.str2int(data) 200 | endpoint_id = params[p.EP_ID] 201 | 202 | result = [] 203 | for ep_id, ep in dev.endpoints.items(): 204 | if ep_id == 0 or (endpoint_id is not None and ep_id != endpoint_id): 205 | continue 206 | LOGGER.debug( 207 | "Unsubscribing %s EP %u from group: %s", ieee, ep_id, grp_id 208 | ) 209 | res = await ep.remove_from_group(grp_id) 210 | result.append(res) 211 | LOGGER.info( 212 | "Unsubscribed %s EP %u from group: %s Result: %r", 213 | ieee, 214 | ep_id, 215 | grp_id, 216 | res, 217 | ) 218 | 219 | event_data["result"] = result 220 | 221 | 222 | async 
def get_zll_groups(
223 |     app, listener, ieee, cmd, data, service, params, event_data
224 | ):
225 |     from zigpy.zcl.clusters.lightlink import LightLink
226 | 
227 |     if ieee is None:
228 |         LOGGER.error("missing ieee")
229 |         return
230 | 
231 |     dev = await u.get_device(app, listener, ieee)
232 | 
233 |     clusters = [
234 |         ep.in_clusters[LightLink.cluster_id]
235 |         for epid, ep in dev.endpoints.items()
236 |         if epid and LightLink.cluster_id in ep.in_clusters
237 |     ]
238 |     zll_cluster = None
239 |     try:
240 |         zll_cluster = next(iter(clusters))
241 |     except StopIteration:
242 |         LOGGER.warning("No LightLink cluster found on any endpoint")
243 | 
244 |     if not zll_cluster:
245 |         msg = f"Couldn't find ZLL Commissioning cluster on {dev.ieee}"
246 |         event_data["warning"] = msg
247 |         LOGGER.warning(msg)
248 |         return
249 | 
250 |     res = await zll_cluster.get_group_identifiers(0)
251 |     groups = [g.group_id for g in res[2]]
252 |     LOGGER.debug("Get group identifiers response: %s", groups)
253 | 
254 |     event_data["groups"] = groups
--------------------------------------------------------------------------------
/custom_components/zha_toolkit/ha.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | 
3 | import logging
4 | 
5 | from homeassistant.helpers.template import Template
6 | from homeassistant.util import dt as dt_util
7 | 
8 | from . import utils as u
9 | from .params import INTERNAL_PARAMS as p
10 | 
11 | LOGGER = logging.getLogger(__name__)
12 | 
13 | 
14 | async def ha_set_state(  # noqa: C901
15 |     app, listener, ieee, cmd, data, service, params, event_data
16 | ):
17 |     success = True
18 | 
19 |     val = params[p.ATTR_VAL]
20 |     state_field = None
21 | 
22 |     state_template_str = params[p.STATE_VALUE_TEMPLATE]
23 |     if state_template_str is not None:
24 |         template = Template(
25 |             "{{ " + state_template_str + " }}", u.get_hass(listener)
26 |         )
27 |         new_value = template.async_render(value=val, attr_val=val)
28 |         val = new_value
29 | 
30 |     # Write value to provided state or state attribute
31 |     if params[p.STATE_ID] is None:
32 |         raise ValueError("'state_id' is required")
33 | 
34 |     if params[p.STATE_ATTR] is not None:
35 |         state_field = f"{params[p.STATE_ID]}[{params[p.STATE_ATTR]}]"
36 |     else:
37 |         state_field = f"{params[p.STATE_ID]}"
38 | 
39 |     LOGGER.debug(
40 |         "Set state '%s' -> %s",
41 |         state_field,
42 |         val,
43 |     )
44 |     u.set_state(
45 |         u.get_hass(listener),
46 |         params[p.STATE_ID],
47 |         val,
48 |         key=params[p.STATE_ATTR],
49 |         allow_create=params[p.ALLOW_CREATE],
50 |     )
51 | 
52 |     event_data["success"] = success
53 | 
54 |     if success and (params[p.CSV_FILE] is not None):
55 |         fields = []
56 |         label = params[p.CSV_LABEL]
57 | 
58 |         fields.append(dt_util.utcnow().isoformat())
59 |         fields.append(state_field)
60 |         fields.append(val)
61 |         fields.append(label)
62 | 
63 |         u.append_to_csvfile(
64 |             fields,
65 |             "csv",
66 |             params[p.CSV_FILE],
67 |             f"{state_field}={val}",
68 |             listener=listener,
69 |         )
70 |         LOGGER.debug(f"ha_set_state info written to CSV {params[p.CSV_FILE]}")
71 | 
72 |     if not u.isJsonable(val):
73 |         val = repr(val)
74 | 
75 |     # For internal use
76 |     return success
--------------------------------------------------------------------------------
/custom_components/zha_toolkit/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 |   "domain": "zha_toolkit",
3 |   "name": "ZHA \ud83e\uddf0 Toolkit",
4 |   "codeowners": ["@mdeweerd"],
5 |   "dependencies": ["zha"],
6 |   "documentation": 
"https://github.com/mdeweerd/zha-toolkit", 7 | "iot_class": "local_polling", 8 | "issue_tracker": "https://github.com/mdeweerd/zha-toolkit/issues", 9 | "requirements": ["aiofiles>=0.4.0", "pytz>=2016.10"], 10 | "version": "1.0.0" 11 | } 12 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/misc.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | 4 | import zigpy.types as t 5 | from zigpy.exceptions import ControllerException, DeliveryError 6 | 7 | from . import utils as u 8 | from .params import INTERNAL_PARAMS as p 9 | 10 | LOGGER = logging.getLogger(__name__) 11 | 12 | 13 | async def get_routes( 14 | app, listener, ieee, cmd, data, service, params, event_data 15 | ): 16 | LOGGER.debug("getting routes command: %s", service) 17 | 18 | for dev in app.devices.values(): 19 | if hasattr(dev, "relays"): 20 | status = f"has routes: {dev.relays}" 21 | else: 22 | status = "doesn't have routes" 23 | LOGGER.debug("Device %s/%s %s", dev.nwk, dev.model, status) 24 | 25 | LOGGER.debug("finished device get_routes") 26 | 27 | 28 | async def backup(app, listener, ieee, cmd, data, service, params, event_data): 29 | """Backup Coordinator Configuration.""" 30 | 31 | radio_type = u.get_radiotype(app) 32 | 33 | if radio_type == u.RadioType.ZNP: 34 | from . import znp 35 | 36 | await znp.znp_backup( 37 | app, 38 | listener, 39 | ieee, 40 | cmd, 41 | data, 42 | service, 43 | event_data=event_data, 44 | params=params, 45 | ) 46 | await znp.znp_nvram_backup( 47 | app, 48 | listener, 49 | ieee, 50 | cmd, 51 | data, 52 | service, 53 | event_data=event_data, 54 | params=params, 55 | ) 56 | elif radio_type == u.RadioType.EZSP: 57 | from . import ezsp 58 | 59 | await ezsp.ezsp_backup( 60 | app, 61 | listener, 62 | ieee, 63 | cmd, 64 | data, 65 | service, 66 | event_data=event_data, 67 | params=params, 68 | ) 69 | else: 70 | raise ValueError(f"Radio type {radio_type} not supported for backup") 71 | 72 | 73 | async def handle_join( 74 | app, listener, ieee, cmd, data, service, params, event_data 75 | ): 76 | """Rediscover a device. 
77 |     ieee -- ieee of the device
78 |     data -- nwk of the device in decimal format
79 |     """
80 |     LOGGER.debug("running 'handle_join' command: %s", service)
81 |     if ieee is None:
82 |         LOGGER.debug("Provide 'ieee' parameter for %s", cmd)
83 |         raise ValueError("ieee parameter missing")
84 | 
85 |     dev = await u.get_device(app, listener, ieee)
86 | 
87 |     if data is None:
88 |         if dev is None:
89 |             LOGGER.debug(
90 |                 f"Device {ieee!r} missing in device table, provide NWK address"
91 |             )
92 |             raise ValueError(f"Missing NWK for unknown device '{ieee}'")
93 | 
94 |         data = dev.nwk
95 | 
96 |     # handle_join will initialize the device if it isn't yet; otherwise
97 |     # it only scans groups.
98 |     # misc_reinitialise is more complete.
99 | 
100 |     event_data["result"] = app.handle_join(u.str2int(data), ieee, None)
101 | 
102 | 
103 | async def misc_reinitialize(
104 |     app, listener, ieee, cmd, data, service, params, event_data
105 | ):
106 |     """Reinitialize a device, rediscover endpoints
107 |     ieee -- ieee of the device
108 |     """
109 |     if ieee is None:
110 |         msg = f"Provide 'ieee' parameter for {cmd}"
111 |         LOGGER.debug(msg)
112 |         raise ValueError(msg)
113 | 
114 |     dev = await u.get_device(app, listener, ieee)
115 |     LOGGER.debug(f"{ieee!r} - Set initialisations=False, call handle_join")
116 |     # dev.has_non_zdo_endpoints = False  # Force rescan
117 |     # Can't set: dev.non_zdo_endpoints = False  # Force rescan
118 |     dev.endpoints = {0: dev.zdo}  # Force rescan
119 | 
120 |     # dev._znp = u.get_radio(app)
121 |     # dev.node_desc = None  # Force rescan
122 | 
123 |     dev.all_endpoint_init = False  # Force rescan
124 |     dev.model = None  # Force rescan
125 |     dev.manufacturer = None  # Force rescan
126 |     # event_data["result"] = await dev.schedule_initialize()
127 |     event_data["result"] = await dev.initialize()
128 | 
129 | 
130 | async def rejoin(app, listener, ieee, cmd, data, service, params, event_data):
131 |     """Leave and rejoin command.
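
    Illustrative service call (assumed exposure as 'zha_toolkit.rejoin';
    values are examples):

      service: zha_toolkit.rejoin
      data:
        ieee: 00:12:4b:00:01:02:03:04
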
132 | data -- device ieee to allow joining through 133 | ieee -- ieee of the device to leave and rejoin 134 | """ 135 | if ieee is None: 136 | LOGGER.error("missing ieee") 137 | return 138 | LOGGER.debug("running 'rejoin' command: %s", service) 139 | src = await u.get_device(app, listener, ieee) 140 | 141 | if data is None: 142 | await app.permit() 143 | else: 144 | await app.permit(node=t.EUI64.convert_ieee(data)) 145 | 146 | method = 1 147 | res = None 148 | 149 | if method == 0: 150 | # Works on HA 2021.12.10 & ZNP - rejoin is 1: 151 | res = await u.retry_wrapper( 152 | src.zdo.request, 0x0034, src.ieee, 0x01, params[p.TRIES] 153 | ) 154 | elif method == 1: 155 | # Works on ZNP but apparently not on bellows: 156 | triesToGo = params[p.TRIES] 157 | tryIdx = 0 158 | event_data["success"] = False 159 | while triesToGo >= 1: 160 | triesToGo = triesToGo - 1 161 | tryIdx += 1 162 | try: 163 | LOGGER.debug(f"Leave with rejoin - try {tryIdx}") 164 | res = await src.zdo.leave(remove_children=False, rejoin=True) 165 | event_data["success"] = True 166 | triesToGo = 0 # Stop loop 167 | # event_data["success"] = ( 168 | # resf[0][0].status == f.Status.SUCCESS 169 | # ) 170 | except ( 171 | DeliveryError, 172 | ControllerException, 173 | asyncio.TimeoutError, 174 | ) as d: 175 | event_data["errors"].append(repr(d)) 176 | continue 177 | except Exception as e: # Catch all others 178 | triesToGo = 0 # Stop loop 179 | LOGGER.debug("Leave with rejoin exception %s", e) 180 | event_data["errors"].append(repr(e)) 181 | 182 | elif method == 2: 183 | # Results in rejoin bit 0 on ZNP 184 | LOGGER.debug("Using Method 2 for Leave") 185 | res = await u.retry_wrapper( 186 | src.zdo.request, 0x0034, src.ieee, 0x80, params[p.TRIES] 187 | ) 188 | elif method == 3: 189 | # Results in rejoin and leave children bit set on ZNP 190 | LOGGER.debug("Using Method 3 for Leave") 191 | res = await u.retry_wrapper( 192 | src.zdo.request, 0x0034, src.ieee, 0xFF, params[p.TRIES] 193 | ) 194 | elif method == 4: 195 | # Results in rejoin and leave children bit set on ZNP 196 | LOGGER.debug("Using Method 4 for Leave") 197 | res = await u.retry_wrapper( 198 | src.zdo.request, 0x0034, src.ieee, 0x83, params[p.TRIES] 199 | ) 200 | else: 201 | res = "Not executed, no valid 'method' defined in code" 202 | 203 | event_data["result"] = res 204 | LOGGER.debug("%s -> %s: leave and rejoin result: %s", src, ieee, res) 205 | 206 | 207 | async def misc_settime( 208 | app, listener, ieee, cmd, data, service, params, event_data 209 | ): 210 | from bisect import bisect 211 | from datetime import datetime 212 | 213 | import pytz 214 | from homeassistant.util.dt import DEFAULT_TIME_ZONE, utcnow 215 | 216 | LOGGER.debug(f"Default time zone {DEFAULT_TIME_ZONE}") 217 | tz = pytz.timezone(str(DEFAULT_TIME_ZONE)) 218 | 219 | utc_time = utcnow().astimezone(pytz.UTC).replace(tzinfo=None) 220 | index = bisect( 221 | tz._utc_transition_times, utc_time # type:ignore[union-attr] 222 | ) 223 | 224 | if index is None: 225 | event_data["success"] = False 226 | event_data[ 227 | "msg" 228 | ] = "misc_settime expects DST changes, needs update if None" 229 | 230 | try: 231 | if ( 232 | tz._utc_transition_times[index] # type:ignore[union-attr] 233 | .replace(tzinfo=pytz.UTC) 234 | .astimezone(tz) 235 | .dst() 236 | .total_seconds() 237 | == 0 238 | ): 239 | # First date must be start of dst period 240 | index = index - 1 241 | 242 | dst1_obj = tz._utc_transition_times[index] # type:ignore[union-attr] 243 | dst2_obj = tz._utc_transition_times[ # type:ignore[union-attr] 
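            # (pytz internals, for orientation: _utc_transition_times is the
            # sorted list of UTC instants at which the zone's offset changes;
            # bisect() above returned the position of the next transition, so
            # entries [index] and [index + 1] bracket the DST period used
            # below.)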
244 |             index + 1
245 |         ]
246 |         epoch2000 = datetime(2000, 1, 1, tzinfo=None)
247 |         dst1 = (dst1_obj - epoch2000).total_seconds()
248 |         dst2 = (dst2_obj - epoch2000).total_seconds()
249 |         dst1_aware = tz._utc_transition_times[  # type:ignore[union-attr]
250 |             index
251 |         ].replace(tzinfo=pytz.UTC)
252 |         dst2_aware = tz._utc_transition_times[  # type:ignore[union-attr]
253 |             index + 1
254 |         ].replace(tzinfo=pytz.UTC)
255 | 
256 |         dst1_local = dst1_aware.astimezone(tz)
257 |         dst2_local = dst2_aware.astimezone(tz)
258 | 
259 |         dst_shift = dst1_local.dst().total_seconds()
260 |         utc_offset = dst2_local.utcoffset().total_seconds()
261 | 
262 |         LOGGER.debug(
263 |             f"Next dst changes {dst1_obj} .. {dst2_obj}"
264 |             f" EPOCH 2000 {dst1} .. {dst2}"
265 |         )
266 |         LOGGER.debug(
267 |             f"Local {dst1_local} {dst2_local} in {tz}"
268 |             f" {dst1_local.dst().total_seconds()}"
269 |             f" {dst2_local.dst().total_seconds()}"
270 |         )
271 |         LOGGER.debug(f"UTC OFFSET: {utc_offset} DST OFFSET: {dst_shift}")
272 | 
273 |         dev = await u.get_device(app, listener, ieee)
274 |         params[p.CLUSTER_ID] = 0x000A  # Time Cluster
275 |         cluster = u.get_cluster_from_params(dev, params, event_data)
276 | 
277 |         # Prepare read and write lists
278 |         attr_read_list = [
279 |             0,
280 |             1,
281 |             2,
282 |             3,
283 |             4,
284 |             5,
285 |         ]  # Time, TimeStatus, TimeZone, DstStart, DstEnd, DstShift
286 | 
287 |         if params[p.READ_BEFORE_WRITE]:
288 |             read_resp = await cluster.read_attributes(attr_read_list)
289 |             event_data["read_before"] = (
290 |                 u.dict_to_jsonable(read_resp[0]),
291 |                 read_resp[1],
292 |             )
293 |             u.record_read_data(read_resp, cluster, params, listener)
294 | 
295 |         EPOCH2000_TIMESTAMP = 946684800
296 |         utctime_towrite = utcnow().timestamp() - EPOCH2000_TIMESTAMP
297 |         attr_write_list = {
298 |             0x0000: utctime_towrite,  # Time
299 |             0x0002: utc_offset,  # TimeZone - int32
300 |             0x0003: dst1,  # DstStart - uint32
301 |             0x0004: dst2,  # DstEnd - uint32
302 |             0x0005: dst_shift,  # DstShift - uint32
303 |         }
304 | 
305 |         event_data["result_write"] = await cluster.write_attributes(
306 |             attr_write_list
307 |         )
308 | 
309 |         if params[p.READ_AFTER_WRITE]:
310 |             read_resp = await cluster.read_attributes(attr_read_list)
311 |             event_data["read_after"] = (
312 |                 u.dict_to_jsonable(read_resp[0]),
313 |                 read_resp[1],
314 |             )
315 |             u.record_read_data(read_resp, cluster, params, listener)
316 | 
317 |         event_data["success"] = True
318 |     except DeliveryError as e:
319 |         event_data["success"] = False
320 |         event_data["msg"] = f"{e!r}"
321 | 
322 | 
323 | async def misc_energy_scan(
324 |     app, listener, ieee, cmd, data, service, params, event_data
325 | ):
326 |     """Run energy scan for each channel."""
327 |     # See https://github.com/zigpy/zigpy-cli/blob/dev/README.md#performing-an-energy-scan  # noqa: E501
328 |     # Lower value means less congestion.
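    # The radio reports one byte per channel (0..255); the code below
    # rescales it to a percentage. A quick sanity check of the formula,
    # with a made-up reading for illustration:
    #
    #   >>> round(100 * 64 / 255, 1)
    #   25.1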
329 | 330 | LOGGER.debug("Energy scan.") 331 | scan = await app.energy_scan( 332 | channels=t.Channels.ALL_CHANNELS, duration_exp=4, count=1 333 | ) 334 | event_data["energy_scan"] = { 335 | channel: 100 * energy / 255 for channel, energy in scan.items() 336 | } 337 | 338 | if params[p.CSV_FILE] is not None: 339 | # write CSV header 340 | u.append_to_csvfile( 341 | ["channel", "energy"], 342 | "csv", 343 | params[p.CSV_FILE], 344 | "Energy Scan", 345 | listener=listener, 346 | overwrite=True, 347 | ) 348 | # write CSV data 349 | for channel, energy in scan.items(): 350 | u.append_to_csvfile( 351 | [channel, 100 * energy / 255], 352 | "csv", 353 | params[p.CSV_FILE], 354 | str(channel), 355 | listener=listener, 356 | ) 357 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/neighbours.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import asyncio 4 | import enum 5 | import logging 6 | import os 7 | from random import uniform 8 | 9 | import zigpy.zdo.types as zdo_t 10 | from zigpy.exceptions import DeliveryError 11 | 12 | from . import utils as u 13 | 14 | LOGGER = logging.getLogger(__name__) 15 | 16 | 17 | async def get_routes_and_neighbours( 18 | app, listener, ieee, cmd, data, service, params, event_data 19 | ): 20 | if ieee is None: 21 | LOGGER.error("missing ieee") 22 | return 23 | 24 | LOGGER.debug("Getting routes and neighbours: %s", service) 25 | device = await u.get_device(app, listener, ieee) 26 | event_data["result"] = await _routes_and_neighbours(device, listener) 27 | 28 | ieee_tail = "".join([f"{o:02X}" for o in device.ieee]) 29 | 30 | fname = os.path.join( 31 | u.get_hass(listener).config.config_dir, 32 | "scans", 33 | f"routes_and_neighbours_{ieee_tail}.json", 34 | ) 35 | u.helper_save_json(fname, event_data["result"]) 36 | 37 | LOGGER.debug("Wrote scan results to '%s'", fname) 38 | 39 | 40 | async def _routes_and_neighbours(device, listener): 41 | try: 42 | routes = await asyncio.wait_for(async_get_routes(device), 180) 43 | except asyncio.TimeoutError: 44 | routes = [] 45 | await asyncio.sleep(uniform(1.0, 1.5)) 46 | try: 47 | nbns = await asyncio.wait_for(async_get_neighbours(device), 180) 48 | except asyncio.TimeoutError: 49 | nbns = [] 50 | 51 | return {"routes": routes, "neighbours": nbns} 52 | 53 | 54 | async def all_routes_and_neighbours( 55 | app, listener, ieee, cmd, data, service, params, event_data 56 | ): 57 | LOGGER.debug("Getting routes and neighbours for all devices: %s", service) 58 | 59 | counter = 1 60 | devs = [d for d in app.devices.values() if not d.node_desc.is_end_device] 61 | all_routes = {} 62 | for device in devs: 63 | LOGGER.debug( 64 | "%s: Querying routes and neighbours: %s out of %s", 65 | device.ieee, 66 | counter, 67 | len(devs), 68 | ) 69 | all_routes[str(device.ieee)] = await _routes_and_neighbours( 70 | device, listener 71 | ) 72 | LOGGER.debug("%s: Got %s out of %s", device.ieee, counter, len(devs)) 73 | counter += 1 74 | 75 | event_data["result"] = all_routes 76 | 77 | all_routes_name = os.path.join( 78 | u.get_hass(listener).config.config_dir, 79 | "scans", 80 | "all_routes_and_neighbours.json", 81 | ) 82 | u.helper_save_json(all_routes_name, all_routes) 83 | 84 | 85 | async def async_get_neighbours(device): 86 | """Pull neighbour table from a device.""" 87 | 88 | def _process_neighbour(nbg): 89 | """Return dict of a neighbour entry.""" 90 | 91 | # LOGGER.debug(f"NEIGHBOR: {nbg!r}") 92 | res = 
{} 93 | res["pan_id"] = str(nbg.extended_pan_id) 94 | res["ieee"] = str(nbg.ieee) 95 | res["nwk"] = str(nbg.nwk) 96 | res["device_type"] = nbg.device_type.name 97 | res["rx_on_when_idle"] = nbg.rx_on_when_idle.name 98 | res["relationship"] = nbg.relationship.name 99 | res["permit_joining"] = nbg.permit_joining.name 100 | res["depth"] = nbg.depth 101 | res["lqi"] = nbg.lqi 102 | return res 103 | 104 | result = [] 105 | idx = 0 106 | while True: 107 | try: 108 | status, val = await device.zdo.request( 109 | zdo_t.ZDOCmd.Mgmt_Lqi_req, idx 110 | ) 111 | LOGGER.debug( 112 | "%s: neighbour request Status: %s. Response: %r", 113 | device.ieee, 114 | status, 115 | val, 116 | ) 117 | if zdo_t.Status.SUCCESS != status: 118 | LOGGER.debug( 119 | "%s: device does not support 'Mgmt_Lqi_req'", device.ieee 120 | ) 121 | break 122 | except DeliveryError: 123 | LOGGER.debug("%s: Could not deliver 'Mgmt_Lqi_req'", device.ieee) 124 | break 125 | 126 | LOGGER.debug(f"NEIGHBORS: {val!r}") 127 | 128 | if hasattr(val, "neighbor_table_list"): 129 | neighbours = val.neighbor_table_list 130 | entries = val.entries 131 | else: 132 | neighbours = val.NeighborTableList 133 | entries = val.Entries 134 | 135 | for neighbour in neighbours: 136 | result.append(_process_neighbour(neighbour)) 137 | idx += 1 138 | 139 | if idx >= entries: 140 | break 141 | 142 | await asyncio.sleep(uniform(1.0, 1.5)) 143 | 144 | return sorted(result, key=lambda x: x["ieee"]) 145 | 146 | 147 | async def async_get_routes(device): 148 | """Pull routing table from a device.""" 149 | 150 | def _process_route(route): 151 | """Return a dict representing routing entry.""" 152 | 153 | class RouteStatus(enum.IntEnum): 154 | Active = 0x0 155 | Discovery_Underway = 0x1 156 | Discovery_Failed = 0x2 157 | Inactive = 0x3 158 | Validation_Underway = 0x4 159 | 160 | res: dict[str, str | bool | None | int] = {} 161 | res["destination"] = f"0x{route.DstNWK:04x}" 162 | res["next_hop"] = f"0x{route.NextHop:04x}" 163 | raw = route.RouteStatus & 0x07 164 | try: 165 | cooked = RouteStatus(raw).name 166 | except ValueError: 167 | cooked = f"reserved_{raw:02x}" 168 | res["status"] = cooked 169 | res["memory_constrained"] = bool((route.RouteStatus >> 3) & 0x01) 170 | res["many_to_one"] = bool((route.RouteStatus >> 4) & 0x01) 171 | res["route_record_required"] = bool((route.RouteStatus >> 5) & 0x01) 172 | return res 173 | 174 | routes = [] 175 | idx = 0 176 | while True: 177 | try: 178 | status, val = await device.zdo.request( 179 | zdo_t.ZDOCmd.Mgmt_Rtg_req, idx 180 | ) 181 | LOGGER.debug( 182 | "%s: route request Status:%s. Routes: %r", 183 | device.ieee, 184 | status, 185 | val, 186 | ) 187 | if zdo_t.Status.SUCCESS != status: 188 | LOGGER.debug( 189 | "%s: Does not support 'Mgmt_rtg_req': %s", 190 | device.ieee, 191 | status, 192 | ) 193 | break 194 | except DeliveryError: 195 | LOGGER.debug("%s: Could not deliver 'Mgmt_rtg_req'", device.ieee) 196 | break 197 | 198 | LOGGER.debug(f"Mgmt_Rtg_rsp: {val!r}") 199 | for route in val.RoutingTableList: 200 | routes.append(_process_route(route)) 201 | idx += 1 202 | if idx >= val.Entries: 203 | break 204 | await asyncio.sleep(uniform(1.0, 1.5)) 205 | 206 | return routes 207 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/ota.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | from glob import glob 5 | 6 | import aiohttp 7 | import zigpy 8 | 9 | from . 
import DEFAULT_OTAU 10 | from . import utils as u 11 | from .params import INTERNAL_PARAMS as p 12 | 13 | LOGGER = logging.getLogger(__name__) 14 | KOENKK_LIST_URL = ( 15 | "https://raw.githubusercontent.com/Koenkk/zigbee-OTA/master/index.json" 16 | ) 17 | 18 | SONOFF_LIST_URL = "https://zigbee-ota.sonoff.tech/releases/upgrade.json" 19 | 20 | 21 | async def download_koenkk_ota(listener, ota_dir): 22 | # Get all FW files that were already downloaded. 23 | # The files usually have the FW version in their name, making them unique. 24 | ota_glob_expr = [ 25 | "*.ZIGBEE", 26 | "*.OTA", 27 | "*.sbl-ota", 28 | "*.bin", 29 | "*.ota", 30 | "*.zigbee", 31 | ] 32 | 33 | # Dictionary to do more efficient lookups 34 | LOGGER.debug("List OTA files available on file system") 35 | ota_files_on_disk = {} 36 | for glob_expr in ota_glob_expr: 37 | for path in [ 38 | os.path.basename(x) for x in glob(os.path.join(ota_dir, glob_expr)) 39 | ]: 40 | ota_files_on_disk[path] = True 41 | 42 | # LOGGER.debug(f"OTA files on disk {ota_files_on_disk!r}") 43 | 44 | # Get manufacturers 45 | manfs = {} 46 | for info in [ 47 | device.zha_device_info for device in u.get_zha_devices(listener) 48 | ]: 49 | manfs[info["manufacturer_code"]] = True 50 | 51 | LOGGER.debug(f"Get Koenkk FW list and check for manfs {manfs.keys()!r}") 52 | new_fw_info = {} 53 | async with aiohttp.ClientSession() as req: 54 | async with req.get(KOENKK_LIST_URL) as rsp: 55 | data = json.loads(await rsp.read()) 56 | for fw_info in data: 57 | if fw_info["url"]: 58 | filename = fw_info["url"].split("/")[-1] 59 | # Try to get fw corresponding to device manufacturers 60 | fw_manf = fw_info["manufacturerCode"] 61 | 62 | if fw_manf in manfs and filename not in ota_files_on_disk: 63 | LOGGER.debug( 64 | "OTA file to download for manf %u (0x%04X): '%s'", 65 | fw_manf, 66 | fw_manf, 67 | filename, 68 | ) 69 | new_fw_info[filename] = fw_info 70 | 71 | for filename, fw_info in new_fw_info.items(): 72 | async with aiohttp.ClientSession() as req: 73 | url = fw_info["url"] 74 | try: 75 | out_filename = os.path.join(ota_dir, filename) 76 | 77 | LOGGER.debug("Download '%s' to '%s'", url, out_filename) 78 | async with req.get(url) as rsp: 79 | data = await rsp.read() 80 | 81 | with open(out_filename, "wb") as ota_file: 82 | LOGGER.debug("Try to write '%s'", out_filename) 83 | ota_file.write(data) 84 | except Exception as e: 85 | LOGGER.warning("Exception getting '%s': %s", url, e) 86 | 87 | 88 | async def download_sonoff_ota(listener, ota_dir): 89 | # Get all FW files that were already downloaded. 90 | # The files usually have the FW version in their name, making them unique. 
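    # The names found on disk go into a dict rather than a list so that the
    # 'filename not in ota_files_on_disk' test further below is an O(1)
    # lookup. Illustrative shape (hypothetical file name):
    #
    #   ota_files_on_disk = {"SNZB-02_v1.0.5.ota": True, ...}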
91 | ota_glob_expr = [ 92 | "*.ZIGBEE", 93 | "*.OTA", 94 | "*.sbl-ota", 95 | "*.bin", 96 | "*.ota", 97 | "*.zigbee", 98 | ] 99 | 100 | # Dictionary to do more efficient lookups 101 | LOGGER.debug("List OTA files available on file system") 102 | ota_files_on_disk = {} 103 | for glob_expr in ota_glob_expr: 104 | for path in [ 105 | os.path.basename(x) for x in glob(os.path.join(ota_dir, glob_expr)) 106 | ]: 107 | ota_files_on_disk[path] = True 108 | 109 | # LOGGER.debug(f"OTA files on disk {ota_files_on_disk!r}") 110 | 111 | # Get manufacturers 112 | manfs = {} 113 | for info in [ 114 | device.zha_device_info for device in u.get_zha_devices(listener) 115 | ]: 116 | manfs[info["manufacturer_code"]] = True 117 | 118 | LOGGER.debug(f"Get SONOFF FW list and check for manfs {manfs.keys()!r}") 119 | new_fw_info = {} 120 | async with aiohttp.ClientSession() as req: 121 | async with req.get(SONOFF_LIST_URL) as rsp: 122 | data = json.loads(await rsp.read()) 123 | for fw_info in data: 124 | if fw_info["fw_binary_url"]: 125 | filename = fw_info["fw_binary_url"].split("/")[-1] 126 | # Try to get fw corresponding to device manufacturers 127 | fw_manf = fw_info["fw_manufacturer_id"] 128 | fw_model_id = fw_info["model_id"] 129 | 130 | # Note: could check against model id in the future 131 | if fw_manf in manfs and filename not in ota_files_on_disk: 132 | LOGGER.debug( 133 | "OTA file to download for manf %u (0x%04X)" 134 | " Model:'%s': '%s'", 135 | fw_manf, 136 | fw_manf, 137 | fw_model_id, 138 | filename, 139 | ) 140 | new_fw_info[filename] = fw_info 141 | 142 | for filename, fw_info in new_fw_info.items(): 143 | async with aiohttp.ClientSession() as req: 144 | url = fw_info["fw_binary_url"] 145 | try: 146 | out_filename = os.path.join(ota_dir, filename) 147 | 148 | LOGGER.debug("Download '%s' to '%s'", url, out_filename) 149 | async with req.get(url) as rsp: 150 | data = await rsp.read() 151 | 152 | with open(out_filename, "wb") as ota_file: 153 | LOGGER.debug("Try to write '%s'", out_filename) 154 | ota_file.write(data) 155 | except Exception as e: 156 | LOGGER.warning("Exception getting '%s': %s", url, e) 157 | 158 | 159 | async def download_zigpy_ota(app, listener): 160 | LOGGER.debug("Zigpy download procedure starting") 161 | if hasattr(app, "ota") and hasattr(app.ota, "_listeners"): 162 | for _, (ota, _) in app.ota._listeners.items(): 163 | if isinstance(ota, zigpy.ota.provider.FileStore): 164 | # Skip files provider 165 | continue 166 | await ota.refresh_firmware_list() 167 | for image_key, image in ota._cache.items(): 168 | url = getattr(image, "url", None) 169 | LOGGER.error("Try getting %r, %r, %r", image_key, url, image) 170 | try: 171 | img = await app.ota.get_ota_image( 172 | image_key.manufacturer_id, 173 | image_key.image_type, 174 | model=None, 175 | ) 176 | LOGGER.info("Got image %r", getattr(img, "header", None)) 177 | except Exception as e: 178 | LOGGER.error("%r while getting %r - %s", e, image_key, url) 179 | else: 180 | LOGGER.warning( 181 | "Could not get ota object for download_zigpy_ota, try again" 182 | ) 183 | 184 | 185 | async def ota_update_images( 186 | app, listener, ieee, cmd, data, service, params, event_data 187 | ): 188 | if hasattr(app, "ota") and hasattr(app.ota, "_listeners"): 189 | for _, (ota, _) in app.ota._listeners.items(): 190 | await ota.refresh_firmware_list() 191 | else: 192 | LOGGER.warning( 193 | "Could not get ota object for ota_update_images, try again" 194 | ) 195 | 196 | 197 | async def ota_notify( 198 | app, listener, ieee, cmd, data, service, params, 
event_data 199 | ): 200 | LOGGER.debug("OTA_notify") 201 | event_data["PAR"] = params 202 | if params[p.DOWNLOAD]: 203 | if params[p.PATH]: 204 | ota_dir = params[p.PATH] 205 | else: 206 | ota_dir = DEFAULT_OTAU 207 | 208 | LOGGER.debug( 209 | "OTA image download to '%s' (Default dir is:'%s')", 210 | ota_dir, 211 | DEFAULT_OTAU, 212 | ) 213 | 214 | await download_zigpy_ota(app, listener) 215 | await download_koenkk_ota(listener, ota_dir) 216 | await download_sonoff_ota(listener, ota_dir) 217 | 218 | # Get tries 219 | tries = params[p.TRIES] 220 | 221 | # Update internal image database 222 | await ota_update_images( 223 | app, listener, ieee, cmd, data, service, params, event_data 224 | ) 225 | 226 | if ieee is None: 227 | LOGGER.error("missing ieee") 228 | return 229 | 230 | LOGGER.debug("running 'image_notify' command: %s", service) 231 | 232 | device = await u.get_device(app, listener, ieee) 233 | 234 | cluster = None 235 | for epid, ep in device.endpoints.items(): 236 | if epid == 0: 237 | continue 238 | if 0x0019 in ep.out_clusters: 239 | cluster = ep.out_clusters[0x0019] 240 | break 241 | if cluster is None: 242 | LOGGER.debug("No OTA cluster found") 243 | return 244 | basic = device.endpoints[cluster.endpoint.endpoint_id].basic 245 | await u.retry_wrapper(basic.bind, tries=tries) 246 | ret = await u.retry_wrapper( 247 | basic.configure_reporting, "sw_build_id", 0, 1800, 1, tries=tries 248 | ) 249 | LOGGER.debug("Configured reporting: %s", ret) 250 | 251 | ret = None 252 | if not u.is_zigpy_ge("0.45.0"): 253 | ret = await cluster.image_notify(0, 100) 254 | else: 255 | cmd_args = [0, 100] 256 | ret = await u.retry_wrapper( 257 | cluster.client_command, 258 | 0, # cmd_id 259 | *cmd_args, 260 | # expect_reply = True, 261 | tries=tries, 262 | ) 263 | 264 | LOGGER.debug("Sent image notify command to 0x%04x: %s", device.nwk, ret) 265 | event_data["result"] = ret 266 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/params.py: -------------------------------------------------------------------------------- 1 | # Constants related to parameters 2 | 3 | 4 | # Constants representing input parameter keys 5 | class USER_PARAMS_consts: # pylint: disable=too-few-public-methods 6 | __slots__ = () 7 | CMD = "cmd" 8 | ENDPOINT = "endpoint" 9 | DST_ENDPOINT = "dst_endpoint" 10 | CLUSTER = "cluster" 11 | ATTRIBUTE = "attribute" 12 | ATTR_TYPE = "attr_type" 13 | ATTR_VAL = "attr_val" 14 | CODE = "code" 15 | MIN_INTRVL = "min_interval" 16 | MAX_INTRVL = "max_interval" 17 | REPTBLE_CHG = "reportable_change" 18 | DIR = "dir" 19 | MANF = "manf" 20 | TRIES = "tries" 21 | EXPECT_REPLY = "expect_reply" 22 | ARGS = "args" 23 | KWARGS = "kwargs" 24 | STATE_ID = "state_id" 25 | STATE_ATTR = "state_attr" 26 | ALLOW_CREATE = "allow_create" 27 | EVENT_SUCCESS = "event_success" 28 | EVENT_FAIL = "event_fail" 29 | EVENT_DONE = "event_done" 30 | FORCE_UPDATE = "force_update" 31 | FAIL_EXCEPTION = "fail_exception" 32 | READ_BEFORE_WRITE = "read_before_write" 33 | READ_AFTER_WRITE = "read_after_write" 34 | STATE_VALUE_TEMPLATE = "state_value_template" 35 | WRITE_IF_EQUAL = "write_if_equal" 36 | OUTCSV = "csvout" 37 | CSVLABEL = "csvlabel" 38 | DOWNLOAD = "download" 39 | PATH = "path" 40 | USE_CACHE = "use_cache" 41 | JSON_OUT = "json_out" 42 | JSON_TIMESTAMP = "json_timestamp" 43 | 44 | 45 | class SERVICE_consts: # pylint: disable=too-few-public-methods 46 | __slots__ = () 47 | # General 48 | EXECUTE = "execute" 49 | # Specific 50 | ADD_GROUP = "add_group" 51 
| ADD_TO_GROUP = "add_to_group" 52 | ALL_ROUTES_AND_NEIGHBOURS = "all_routes_and_neighbours" 53 | ATTR_READ = "attr_read" 54 | ATTR_WRITE = "attr_write" 55 | BACKUP = "backup" 56 | BIND_GROUP = "bind_group" 57 | BIND_IEEE = "bind_ieee" 58 | BINDS_GET = "binds_get" 59 | BINDS_REMOVE_ALL = "binds_remove_all" 60 | CONF_REPORT = "conf_report" 61 | CONF_REPORT_READ = "conf_report_read" 62 | EZSP_ADD_KEY = "ezsp_add_key" 63 | EZSP_BACKUP = "ezsp_backup" 64 | EZSP_CLEAR_KEYS = "ezsp_clear_keys" 65 | EZSP_GET_CONFIG_VALUE = "ezsp_get_config_value" 66 | EZSP_GET_IEEE_BY_NWK = "ezsp_get_ieee_by_nwk" 67 | EZSP_GET_KEYS = "ezsp_get_keys" 68 | EZSP_GET_POLICY = "ezsp_get_policy" 69 | EZSP_GET_TOKEN = "ezsp_get_token" # nosec 70 | EZSP_GET_VALUE = "ezsp_get_value" 71 | EZSP_SET_CHANNEL = "ezsp_set_channel" 72 | EZSP_START_MFG = "ezsp_start_mfg" 73 | GET_GROUPS = "get_groups" 74 | GET_ROUTES_AND_NEIGHBOURS = "get_routes_and_neighbours" 75 | GET_ZLL_GROUPS = "get_zll_groups" 76 | ZHA_DEVICES = "zha_devices" 77 | HANDLE_JOIN = "handle_join" 78 | HA_SET_STATE = "ha_set_state" 79 | IEEE_PING = "ieee_ping" 80 | LEAVE = "leave" 81 | MISC_REINITIALIZE = "misc_reinitialize" 82 | MISC_SETTIME = "misc_settime" 83 | OTA_NOTIFY = "ota_notify" 84 | REJOIN = "rejoin" 85 | REGISTER_SERVICES = "register_services" 86 | REMOVE_ALL_GROUPS = "remove_all_groups" 87 | REMOVE_FROM_GROUP = "remove_from_group" 88 | REMOVE_GROUP = "remove_group" 89 | SCAN_DEVICE = "scan_device" 90 | STATE_VALUE_TEMPLATE = "state_value_template" 91 | TUYA_MAGIC = "tuya_magic" 92 | UNBIND_COORDINATOR = "unbind_coordinator" 93 | UNBIND_GROUP = "unbind_group" 94 | ZCL_CMD = "zcl_cmd" 95 | ZDO_FLOOD_PARENT_ANNCE = "zdo_flood_parent_annce" 96 | ZDO_JOIN_WITH_CODE = "zdo_join_with_code" 97 | ZDO_SCAN_NOW = "zdo_scan_now" 98 | ZDO_UPDATE_NWK_ID = "zdo_update_nwk_id" 99 | ZNP_BACKUP = "znp_backup" 100 | ZNP_NVRAM_BACKUP = "znp_nvram_backup" 101 | ZNP_NVRAM_RESET = "znp_nvram_reset" 102 | ZNP_NVRAM_RESTORE = "znp_nvram_restore" 103 | ZNP_RESTORE = "znp_restore" 104 | 105 | 106 | # Constants representing internal parameters keys 107 | class INTERNAL_PARAMS_consts: # pylint: disable=too-few-public-methods 108 | __slots__ = () 109 | ALLOW_CREATE = "allow_create" 110 | ARGS = "args" 111 | KWARGS = "kwargs" 112 | ATTR_ID = "attr_id" 113 | ATTR_TYPE = "attr_type" 114 | ATTR_VAL = "attr_val" 115 | CLUSTER_ID = "cluster_id" 116 | CMD_ID = "cmd_id" 117 | CODE = "code" 118 | DIR = "dir" 119 | EP_ID = "endpoint_id" 120 | DST_EP_ID = "dst_endpoint_id" 121 | EVT_DONE = "event_done" 122 | EVT_FAIL = "event_fail" 123 | EVT_SUCCESS = "event_success" 124 | EXPECT_REPLY = "expect_reply" 125 | FAIL_EXCEPTION = "fail_exception" 126 | FORCE_UPDATE = "force_update" 127 | MANF = "manf" 128 | MAX_INTERVAL = "max_interval" 129 | MIN_INTERVAL = "min_interval" 130 | READ_AFTER_WRITE = "read_after_write" 131 | READ_BEFORE_WRITE = "read_before_write" 132 | REPORTABLE_CHANGE = "reportable_change" 133 | STATE_ATTR = "state_attr" 134 | STATE_ID = "state_id" 135 | STATE_VALUE_TEMPLATE = "state_value_template" 136 | TRIES = "tries" 137 | WRITE_IF_EQUAL = "write_if_equal" 138 | CSV_FILE = "csvfile" 139 | CSV_LABEL = "csvlabel" 140 | JSON_OUT = "json_out" 141 | JSON_TIMESTAMP = "json_timestamp" 142 | DOWNLOAD = "download" 143 | PATH = "path" 144 | USE_CACHE = "use_cache" 145 | 146 | 147 | INTERNAL_PARAMS = INTERNAL_PARAMS_consts() 148 | USER_PARAMS = USER_PARAMS_consts() 149 | SERVICES = SERVICE_consts() 150 | -------------------------------------------------------------------------------- 
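The three singletons exported at the end of `params.py` are the lookup tables used by every service handler in the modules that follow: raw service-call keys come from `USER_PARAMS`, normalized values are fetched through `INTERNAL_PARAMS`, and service names are matched against `SERVICES`. A minimal sketch of the consumption pattern (the dict contents here are illustrative, not a real service call):

```python
from custom_components.zha_toolkit.params import INTERNAL_PARAMS as p
from custom_components.zha_toolkit.params import SERVICES as S

# A normalized parameter dict as the handlers receive it (illustrative values)
params = {p.TRIES: 3, p.MANF: None, p.CSV_FILE: None}

tries = params[p.TRIES]  # retry count, passed on to u.retry_wrapper
assert S.SCAN_DEVICE == "scan_device"  # service names match handler names
```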
/custom_components/zha_toolkit/scan_device.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import asyncio 4 | import logging 5 | 6 | from zigpy import types as t 7 | from zigpy.exceptions import ControllerException, DeliveryError 8 | from zigpy.zcl import foundation 9 | 10 | from . import utils as u 11 | from .params import INTERNAL_PARAMS as p 12 | 13 | LOGGER = logging.getLogger(__name__) 14 | 15 | 16 | ACCESS_CONTROL_MAP = {0x01: "READ", 0x02: "WRITE", 0x04: "REPORT"} 17 | 18 | 19 | @u.retryable( 20 | ( 21 | DeliveryError, 22 | ControllerException, 23 | asyncio.CancelledError, 24 | asyncio.TimeoutError, 25 | ), 26 | tries=3, 27 | ) 28 | async def read_attr(cluster, attrs, manufacturer=None): 29 | return await cluster.read_attributes( 30 | attrs, allow_cache=False, manufacturer=manufacturer 31 | ) 32 | 33 | 34 | async def scan_results(device, endpoints=None, manufacturer=None, tries=3): 35 | """Construct scan results from information available in device""" 36 | result: dict[str, str | list | None] = { 37 | "ieee": str(device.ieee), 38 | "nwk": f"0x{device.nwk:04x}", 39 | } 40 | 41 | LOGGER.debug("Scanning device 0x%04x", device.nwk) 42 | 43 | # Get list of endpoints 44 | # None -> all endpoints 45 | # List or id -> Provided endpoints 46 | if endpoints is not None and isinstance(endpoints, int): 47 | endpoints = [endpoints] 48 | 49 | if ( 50 | endpoints is None 51 | or not isinstance(endpoints, list) 52 | or len(endpoints) == 0 53 | ): 54 | endpoints = [] 55 | for epid, _ep in device.endpoints.items(): 56 | endpoints.append(epid) 57 | 58 | LOGGER.debug("Endpoints %s", endpoints) 59 | 60 | ep_result = [] 61 | for epid in endpoints: 62 | if epid == 0: 63 | continue 64 | if epid in device.endpoints: 65 | LOGGER.debug("scanning endpoint #%i", epid) 66 | ep = device.endpoints[epid] 67 | result["model"] = ep.model 68 | result["manufacturer"] = ep.manufacturer 69 | if u.isManf(ep.manufacturer_id): 70 | result["manufacturer_id"] = f"0x{ep.manufacturer_id:04x}" 71 | else: 72 | result["manufacturer_id"] = None 73 | endpoint = { 74 | "id": epid, 75 | "device_type": f"0x{ep.device_type:04x}", 76 | "profile": f"0x{ep.profile_id:04x}", 77 | } 78 | if epid != 242: 79 | LOGGER.debug( 80 | "Scanning endpoint #%i with manf '%r'", epid, manufacturer 81 | ) 82 | endpoint.update( 83 | await scan_endpoint(ep, manufacturer, tries=tries) 84 | ) 85 | if not u.isManf(manufacturer) and u.isManf(ep.manufacturer_id): 86 | LOGGER.debug( 87 | "Scanning endpoint #%i with manf '%r'", 88 | epid, 89 | ep.manufacturer_id, 90 | ) 91 | endpoint.update( 92 | await scan_endpoint( 93 | ep, ep.manufacturer_id, tries=tries 94 | ) 95 | ) 96 | ep_result.append(endpoint) 97 | 98 | result["endpoints"] = ep_result 99 | return result 100 | 101 | 102 | async def scan_endpoint(ep, manufacturer=None, tries=3): 103 | result = {} 104 | clusters = {} 105 | for cluster in ep.in_clusters.values(): 106 | LOGGER.debug( 107 | "Scanning input cluster 0x{:04x}/'{}' ".format( 108 | cluster.cluster_id, cluster.ep_attribute 109 | ) 110 | ) 111 | key = f"0x{cluster.cluster_id:04x}" 112 | clusters[key] = await scan_cluster( 113 | cluster, is_server=True, manufacturer=manufacturer, tries=tries 114 | ) 115 | result["in_clusters"] = dict(sorted(clusters.items(), key=lambda k: k[0])) 116 | 117 | clusters = {} 118 | for cluster in ep.out_clusters.values(): 119 | LOGGER.debug( 120 | "Scanning output cluster 0x{:04x}/'{}'".format( 121 | cluster.cluster_id, cluster.ep_attribute 122
| ) 123 | ) 124 | key = f"0x{cluster.cluster_id:04x}" 125 | clusters[key] = await scan_cluster( 126 | cluster, is_server=False, manufacturer=manufacturer, tries=tries 127 | ) 128 | result["out_clusters"] = dict(sorted(clusters.items(), key=lambda k: k[0])) 129 | return result 130 | 131 | 132 | async def scan_cluster(cluster, is_server=True, manufacturer=None, tries=3): 133 | if is_server: 134 | cmds_gen = "commands_generated" 135 | cmds_rec = "commands_received" 136 | else: 137 | cmds_rec = "commands_generated" 138 | cmds_gen = "commands_received" 139 | attributes = await discover_attributes_extended(cluster, None, tries=tries) 140 | LOGGER.debug("scan_cluster attributes (none): %s", attributes) 141 | if u.isManf(manufacturer): 142 | LOGGER.debug( 143 | "scan_cluster attributes (none) with manf '%s': %s", 144 | manufacturer, 145 | attributes, 146 | ) 147 | attributes.update( 148 | await discover_attributes_extended( 149 | cluster, manufacturer, tries=tries 150 | ) 151 | ) 152 | 153 | # LOGGER.debug("scan_cluster attributes: %s", attributes) 154 | 155 | return { 156 | "cluster_id": f"0x{cluster.cluster_id:04x}", 157 | "title": cluster.name, 158 | "name": cluster.ep_attribute, 159 | "attributes": attributes, 160 | cmds_rec: await discover_commands_received( 161 | cluster, is_server, tries=tries 162 | ), 163 | cmds_gen: await discover_commands_generated( 164 | cluster, is_server, tries=tries 165 | ), 166 | } 167 | 168 | 169 | async def discover_attributes_extended(cluster, manufacturer=None, tries=3): 170 | LOGGER.debug("Discovering attributes extended") 171 | result = {} 172 | to_read = [] 173 | attr_id = 0 # Start discovery at attr_id 0 174 | done = False 175 | 176 | while not done: # Repeat until all attributes are discovered or timeout 177 | try: 178 | done, rsp = await u.retry_wrapper( 179 | cluster.discover_attributes_extended, 180 | attr_id, # Start attribute identifier 181 | 16, # Number of attributes to discover in this request 182 | manufacturer=manufacturer, 183 | tries=tries, 184 | ) 185 | await asyncio.sleep(0.2) 186 | except (ValueError, DeliveryError, asyncio.TimeoutError) as ex: 187 | LOGGER.error( 188 | ( 189 | "Failed 'discover_attributes_extended'" 190 | " starting 0x%04x/0x%04x." 191 | " Error: %s" 192 | ), 193 | cluster.cluster_id, 194 | attr_id, 195 | ex, 196 | ) 197 | break 198 | if isinstance(rsp, foundation.Status): 199 | LOGGER.error( 200 | "got %s status for discover_attribute starting 0x%04x/0x%04x", 201 | rsp, 202 | cluster.cluster_id, 203 | attr_id, 204 | ) 205 | break 206 | LOGGER.debug("Cluster %s attr_recs: %s", cluster.cluster_id, rsp) 207 | for attr_rec in rsp: # Get attribute information from response 208 | attr_id = attr_rec.attrid 209 | 210 | attr_def = cluster.attributes.get( 211 | attr_rec.attrid, (str(attr_rec.attrid), None) 212 | ) 213 | if u.is_zigpy_ge("0.50.0") and isinstance( 214 | attr_def, foundation.ZCLAttributeDef 215 | ): 216 | attr_name = attr_def.name 217 | else: 218 | attr_name = attr_def[0] 219 | try: 220 | attr_type = foundation.DataType.from_type_id(attr_rec.datatype) 221 | except KeyError: 222 | attr_type = None 223 | access_acl = t.uint8_t(attr_rec.acl) 224 | 225 | # Note: reading back Array type was fixed in zigpy 0.58.1.
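# The test below gates the read-back: 0x48 is the ZCL 'Array' data
# type, which zigpy could not deserialize on reads before 0.58.1, and
# the attribute's ACL must advertise READ access (decoded later via
# ACCESS_CONTROL_MAP) before the attribute is queued for reading.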
226 | if ( 227 | u.is_zigpy_ge("0.58.1") or attr_rec.datatype not in [0x48] 228 | ) and (access_acl & foundation.AttributeAccessControl.READ != 0): 229 | to_read.append(attr_id) 230 | 231 | attr_type_hex = f"0x{attr_rec.datatype:02x}" 232 | if attr_type: 233 | attr_type = [ 234 | attr_type_hex, 235 | attr_type.python_type.__name__, 236 | attr_type.type_class.name, 237 | ] 238 | else: 239 | attr_type = attr_type_hex 240 | 241 | if access_acl != 0: 242 | access = "|".join( 243 | [ 244 | s 245 | for x, s in ACCESS_CONTROL_MAP.items() 246 | if x & access_acl != 0 247 | ] 248 | ) 249 | else: 250 | access = "undefined" 251 | 252 | result[attr_id] = { 253 | "attribute_id": f"0x{attr_id:04x}", 254 | "attribute_name": attr_name, 255 | "value_type": attr_type, 256 | "access": access, 257 | "access_acl": access_acl, 258 | } 259 | if u.isManf(manufacturer): 260 | result[attr_id]["manf_id"] = manufacturer 261 | attr_id += 1 262 | await asyncio.sleep(0.2) 263 | 264 | LOGGER.debug("Reading attrs: %s", to_read) 265 | chunk, to_read = to_read[:4], to_read[4:] 266 | while chunk: 267 | try: 268 | chunk = sorted(chunk) 269 | success, failed = await read_attr( 270 | cluster, chunk, manufacturer, tries=tries 271 | ) 272 | LOGGER.debug( 273 | "Reading attr success: %s, failed %s", success, failed 274 | ) 275 | for attr_id, value in success.items(): 276 | if isinstance(value, bytes): 277 | try: 278 | value = value.split(b"\x00")[0].decode().strip() 279 | except UnicodeDecodeError: 280 | value = value.hex() 281 | result[attr_id]["attribute_value"] = value 282 | else: 283 | result[attr_id]["attribute_value"] = u.value_to_jsonable( 284 | value 285 | ) 286 | except ( 287 | DeliveryError, 288 | asyncio.TimeoutError, 289 | ) as ex: 290 | LOGGER.error( 291 | "Couldn't read 0x%04x/0x%04x: %s", 292 | cluster.cluster_id, 293 | attr_id, 294 | ex, 295 | ) 296 | except Exception as ex_unexpected: 297 | LOGGER.error( 298 | "Unexpected Exception while reading 0x%04x/0x%04x: %s", 299 | cluster.cluster_id, 300 | attr_id, 301 | ex_unexpected, 302 | ) 303 | chunk, to_read = to_read[:4], to_read[4:] 304 | await asyncio.sleep(0.2) 305 | 306 | return {f"0x{a_id:04x}": result[a_id] for a_id in sorted(result)} 307 | 308 | 309 | async def discover_commands_received( 310 | cluster, is_server, manufacturer=None, tries=3 311 | ): 312 | from zigpy.zcl.foundation import Status 313 | 314 | LOGGER.debug("Discovering commands received") 315 | # direction = "received" if is_server else "generated" # noqa: F841 316 | result = {} 317 | cmd_id = 0 # Discover commands starting from 0 318 | done = False 319 | 320 | while not done: 321 | try: 322 | done, rsp = await u.retry_wrapper( 323 | cluster.discover_commands_received, 324 | cmd_id, # Start index of commands to discover 325 | 16, # Number of commands to discover 326 | manufacturer=manufacturer, 327 | tries=tries, 328 | ) 329 | await asyncio.sleep(0.2) 330 | except (ValueError, DeliveryError, asyncio.TimeoutError) as ex: 331 | LOGGER.error( 332 | "Failed to discover 0x%04x commands starting %s. 
Error: %s", 333 | cluster.cluster_id, 334 | cmd_id, 335 | ex, 336 | ) 337 | break 338 | if isinstance(rsp, Status): 339 | LOGGER.error( 340 | "got %s status for discover_commands starting %s", rsp, cmd_id 341 | ) 342 | break 343 | for cmd_id in rsp: 344 | cmd_def = cluster.server_commands.get( 345 | cmd_id, (str(cmd_id), "not_in_zcl", None) 346 | ) 347 | if u.is_zigpy_ge("0.50.0") and isinstance( 348 | cmd_def, foundation.ZCLCommandDef 349 | ): 350 | cmd_name = cmd_def.name 351 | cmd_args = cmd_def.schema 352 | else: 353 | cmd_name, cmd_args, _ = cmd_def 354 | 355 | if not isinstance(cmd_args, str): 356 | try: 357 | cmd_args = [arg.__name__ for arg in cmd_args] 358 | except TypeError: 359 | # Unexpected type, get repr to make sure it is ok for json 360 | cmd_args = f"{cmd_args!r}" 361 | 362 | key = f"0x{cmd_id:02x}" 363 | result[key] = { 364 | "command_id": f"0x{cmd_id:02x}", 365 | "command_name": cmd_name, 366 | "command_arguments": cmd_args, 367 | } 368 | cmd_id += 1 369 | await asyncio.sleep(0.2) 370 | return dict(sorted(result.items(), key=lambda k: k[0])) 371 | 372 | 373 | async def discover_commands_generated( 374 | cluster, is_server, manufacturer=None, tries=3 375 | ): 376 | from zigpy.zcl.foundation import Status 377 | 378 | LOGGER.debug("Discovering commands generated") 379 | # direction = "generated" if is_server else "received" # noqa: F841 380 | result = {} 381 | cmd_id = 0 # Initial index of commands to discover 382 | done = False 383 | 384 | while not done: 385 | try: 386 | done, rsp = await u.retry_wrapper( 387 | cluster.discover_commands_generated, 388 | cmd_id, # Start index of commands to discover 389 | 16, # Number of commands to discover this run 390 | manufacturer=manufacturer, 391 | tries=tries, 392 | ) 393 | await asyncio.sleep(0.2) 394 | except (ValueError, DeliveryError, asyncio.TimeoutError) as ex: 395 | LOGGER.error( 396 | "Failed to discover generated 0x%04X commands" 397 | " starting %s. 
Error: %s", 398 | cluster.cluster_id, 399 | cmd_id, 400 | ex, 401 | ) 402 | break 403 | if isinstance(rsp, Status): 404 | LOGGER.error( 405 | "got %s status for discover_commands starting %s", rsp, cmd_id 406 | ) 407 | break 408 | for cmd_id in rsp: 409 | cmd_def = cluster.client_commands.get( 410 | cmd_id, (str(cmd_id), "not_in_zcl", None) 411 | ) 412 | if u.is_zigpy_ge("0.50.0") and isinstance( 413 | cmd_def, foundation.ZCLCommandDef 414 | ): 415 | cmd_name = cmd_def.name 416 | cmd_args = cmd_def.schema 417 | else: 418 | cmd_name, cmd_args, _ = cmd_def 419 | 420 | if not isinstance(cmd_args, str): 421 | try: 422 | cmd_args = [arg.__name__ for arg in cmd_args] 423 | except (TypeError, AttributeError): 424 | # Unexpected type, get repr to make sure it is ok for json 425 | cmd_args = f"{cmd_args!r}" 426 | 427 | key = f"0x{cmd_id:02x}" 428 | result[key] = { 429 | "command_id": f"0x{cmd_id:02x}", 430 | "command_name": cmd_name, 431 | "command_args": cmd_args, 432 | } 433 | cmd_id += 1 434 | await asyncio.sleep(0.2) 435 | return dict(sorted(result.items(), key=lambda k: k[0])) 436 | 437 | 438 | async def scan_device( 439 | app, listener, ieee, cmd, data, service, params, event_data 440 | ): 441 | if ieee is None: 442 | LOGGER.error("missing ieee") 443 | raise ValueError("missing ieee") 444 | 445 | LOGGER.debug("Running 'scan_device'") 446 | 447 | device = await u.get_device(app, listener, ieee) 448 | 449 | endpoints = params[p.EP_ID] 450 | manf = params[p.MANF] 451 | tries = params[p.TRIES] 452 | 453 | if endpoints is None: 454 | endpoints = [] 455 | elif isinstance(endpoints, int): 456 | endpoints = [endpoints] 457 | elif not isinstance(endpoints, list): 458 | raise ValueError("Endpoint must be int or list of int") 459 | 460 | endpoints = sorted(set(endpoints)) # Uniqify and sort 461 | 462 | scan = await scan_results( 463 | device, endpoints, manufacturer=manf, tries=tries 464 | ) 465 | 466 | event_data["scan"] = scan 467 | 468 | model = scan.get("model") 469 | manufacturer = scan.get("manufacturer") 470 | 471 | if len(endpoints) == 0: 472 | ep_str = "" 473 | else: 474 | ep_str = "_" + ("_".join([f"{e:02x}" for e in endpoints])) 475 | 476 | postfix = f"{ep_str}_scan_results.txt" 477 | 478 | # Set a unique filename for each device, using the manf name and 479 | # the variable part of the device mac address 480 | if model is not None and u.isManf(manufacturer): 481 | ieee_tail = "".join([f"{o:02x}" for o in ieee[4::-1]]) 482 | file_name = f"{model}_{manufacturer}_{ieee_tail}{postfix}" 483 | else: 484 | ieee_tail = "".join([f"{o:02x}" for o in ieee[::-1]]) 485 | file_name = f"{ieee_tail}{postfix}" 486 | 487 | u.write_json_to_file( 488 | scan, 489 | subdir="scans", 490 | fname=file_name, 491 | desc="scan results", 492 | listener=listener, 493 | normalize_name=True, 494 | ) 495 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/tuya.py: -------------------------------------------------------------------------------- 1 | from custom_components.zha_toolkit import utils as u 2 | from custom_components.zha_toolkit.params import INTERNAL_PARAMS as p 3 | 4 | 5 | async def tuya_magic( 6 | app, listener, ieee, cmd, data, service, params, event_data 7 | ): 8 | """ 9 | Send 'magic spell' sequence to device to try to get 'normal' behavior. 10 | """ 11 | 12 | dev = await u.get_device(app, listener, ieee) 13 | basic_cluster = dev.endpoints[1].in_clusters[0] 14 | 15 | # The magic spell is needed only once. 
16 | # TODO: Improve by doing this only once (successfully). 17 | 18 | # Magic spell - part 1 19 | attr_to_read = [4, 0, 1, 5, 7, 0xFFFE] 20 | res = await u.cluster_read_attributes( 21 | basic_cluster, attr_to_read, tries=params[p.TRIES] 22 | ) 23 | 24 | event_data["result"] = res 25 | 26 | # Magic spell - part 2 (skipped - does not seem to be needed) 27 | # attr_to_write={0xffde:13} 28 | # basic_cluster.write_attributes(attr_to_write, tries=3) 29 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/zcl_cmd.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Any 3 | 4 | from . import utils as u 5 | from .params import INTERNAL_PARAMS as p 6 | from .params import USER_PARAMS as P 7 | 8 | LOGGER = logging.getLogger(__name__) 9 | 10 | 11 | ERR003_PARAMETER_MISSING = "Expecting parameter '{}'" 12 | ERR004_NOT_IN_CLUSTER = "In cluster 0x{:04X} not found for '{}', endpoint {}" 13 | ERR005_NOT_OUT_CLUSTER = "Out cluster 0x{:04X} not found for '{}', endpoint {}" 14 | 15 | 16 | async def zcl_cmd(app, listener, ieee, cmd, data, service, params, event_data): 17 | from zigpy import types as t 18 | from zigpy.zcl import foundation 19 | 20 | # Verify parameter presence 21 | 22 | if ieee is None: 23 | msg = ERR003_PARAMETER_MISSING.format("ieee") 24 | LOGGER.error(msg) 25 | raise ValueError(msg) 26 | 27 | dev = await u.get_device(app, listener, ieee) 28 | # The next line will also update the endpoint if it is not set 29 | cluster = u.get_cluster_from_params(dev, params, event_data) 30 | 31 | # Extract parameters 32 | 33 | # Endpoint to send command to 34 | ep_id = params[p.EP_ID] 35 | # Cluster to send command to 36 | cluster_id = params[p.CLUSTER_ID] 37 | # The command to send 38 | cmd_id = params[p.CMD_ID] 39 | if cmd_id is None: 40 | raise ValueError(ERR003_PARAMETER_MISSING.format(P.CMD)) 41 | 42 | # The direction (to in or out cluster) 43 | dir_int = params[p.DIR] 44 | 45 | # Get manufacturer 46 | manf = params[p.MANF] 47 | 48 | # Get tries 49 | tries = params[p.TRIES] 50 | 51 | # Get expect_reply 52 | expect_reply = params[p.EXPECT_REPLY] 53 | 54 | cmd_args = params[p.ARGS] 55 | kw_args = params[p.KWARGS] 56 | 57 | # Direction 0 = Client to Server, as in protocol bit 58 | is_in_cluster = dir_int == 0 59 | 60 | if ep_id not in dev.endpoints: 61 | msg = f"Endpoint {ep_id} not found for '{repr(ieee)}'" 62 | LOGGER.error(msg) 63 | raise ValueError(msg) 64 | 65 | endpoint = dev.endpoints[ep_id] 66 | 67 | org_cluster_cmd_defs = {} 68 | 69 | # Exception caught in the try/catch below to throw after 70 | # restoring cluster definitions 71 | caught_e = None 72 | 73 | try: 74 | if is_in_cluster: 75 | if cluster_id not in endpoint.in_clusters: 76 | msg = ERR004_NOT_IN_CLUSTER.format( 77 | cluster_id, repr(ieee), ep_id 78 | ) 79 | LOGGER.error(msg) 80 | raise ValueError(msg) 81 | 82 | # Cluster is found 83 | cluster = endpoint.in_clusters[cluster_id] 84 | 85 | # Change command specification ourselves ...
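# Two temporary overrides follow. For the Scenes cluster's Add Scene
# command (cluster 0x0005, command 0x00) the schema is replaced so that
# the extension field set can be passed as a raw byte list. For command
# ids unknown to zigpy, a dummy ZCLCommandDef with uint8 parameters is
# registered so the provided args can be serialized. The original
# definitions are restored in the 'finally' block further down.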
86 | 87 | if (cluster_id == 5) and (cmd_id == 0): 88 | org_cluster_cmd_defs[0] = cluster.server_commands[0] 89 | cluster.server_commands[0] = ( 90 | "add", 91 | ( 92 | t.uint16_t, 93 | t.uint8_t, 94 | t.uint16_t, 95 | t.CharacterString, 96 | t.Optional(t.List[t.uint8_t]), 97 | ), 98 | False, 99 | ) 100 | elif cmd_id not in cluster.server_commands: 101 | schema_dict: dict[str, Any] = {} 102 | 103 | if cmd_args is not None: 104 | schema_dict = { 105 | f"param{i + 1}": t.uint8_t 106 | for i in range(len(cmd_args)) 107 | } 108 | 109 | temp = foundation.ZCLCommandDef( 110 | schema=schema_dict, 111 | direction=foundation.Direction.Client_to_Server, 112 | id=cmd_id, 113 | name="schema", 114 | ) 115 | cmd_schema = temp.with_compiled_schema().schema 116 | cmd_def = foundation.ZCLCommandDef( 117 | name=f"zha_toolkit_dummy_cmd{cmd_id}", 118 | id=cmd_id, 119 | schema=cmd_schema, 120 | direction=foundation.Direction.Client_to_Server, 121 | is_manufacturer_specific=(manf is not None), 122 | ) 123 | 124 | org_cluster_cmd_defs[cmd_id] = None 125 | cluster.server_commands[cmd_id] = cmd_def 126 | 127 | event_data["cmd_reply"] = await u.retry_wrapper( 128 | cluster.command, 129 | cmd_id, 130 | *cmd_args, 131 | manufacturer=manf, 132 | expect_reply=expect_reply, 133 | tries=tries, 134 | **kw_args, 135 | ) 136 | else: 137 | if cluster_id not in endpoint.out_clusters: 138 | msg = ERR005_NOT_OUT_CLUSTER.format( 139 | cluster_id, repr(ieee), ep_id 140 | ) 141 | LOGGER.error(msg) 142 | raise ValueError(msg) 143 | 144 | # Found cluster 145 | cluster = endpoint.out_clusters[cluster_id] 146 | 147 | # Note: client_command not tested 148 | event_data["cmd_reply"] = await cluster.client_command( 149 | cmd_id, *cmd_args, manufacturer=manf, **kw_args 150 | ) 151 | except Exception as e: 152 | caught_e = e 153 | finally: 154 | # Restore replaced cluster command definitions 155 | # LOGGER.debug("replaced %s", org_cluster_cmd_defs) 156 | for key, cmd_def in org_cluster_cmd_defs.items(): 157 | if is_in_cluster: 158 | if cmd_def is not None: 159 | cluster.server_commands[key] = cmd_def 160 | else: 161 | del cluster.server_commands[key] 162 | 163 | else: 164 | if cmd_def is not None: 165 | cluster.client_commands[key] = cmd_def 166 | else: 167 | del cluster.client_commands[key] 168 | if caught_e is not None: 169 | raise caught_e 170 | 171 | # Could check cluster.client_command, cluster_server commands 172 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/zdo.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | 4 | import zigpy.device 5 | import zigpy.types as t 6 | import zigpy.zdo 7 | import zigpy.zdo.types as zdo_t 8 | 9 | from . import utils as u 10 | from .params import INTERNAL_PARAMS as p 11 | 12 | LOGGER = logging.getLogger(__name__) 13 | 14 | 15 | def add_task_info(event_data, task): 16 | event_data["task"] = {"name": task.get_name(), "done": task.done()} 17 | 18 | 19 | async def leave(app, listener, ieee, cmd, data, service, params, event_data): 20 | if ieee is None or not data: 21 | raise ValueError("Need 'ieee' and command_data'") 22 | 23 | LOGGER.debug( 24 | "running 'leave' command. 
Telling 0x%s to remove %s: %s", 25 | data, 26 | ieee, 27 | service, 28 | ) 29 | 30 | parent = await u.get_device(app, listener, data) 31 | 32 | # Get tries 33 | tries = params[p.TRIES] 34 | 35 | res = await u.retry_wrapper( 36 | parent.zdo.request, 37 | zdo_t.ZDOCmd.Mgmt_Leave_req, 38 | ieee, 39 | 0x02, 40 | tries=tries, 41 | ) 42 | event_data["result_leave"] = res 43 | LOGGER.debug("0x%04x: Mgmt_Leave_req: %s", parent.nwk, res) 44 | 45 | 46 | async def ieee_ping( 47 | app, listener, ieee, cmd, data, service, params, event_data 48 | ): 49 | if ieee is None: 50 | LOGGER.warning( 51 | "Incorrect parameters for 'ieee_ping' command: %s", service 52 | ) 53 | return 54 | 55 | # The device is the parent device 56 | dev = await u.get_device(app, listener, ieee) 57 | 58 | # Get tries 59 | tries = params[p.TRIES] 60 | 61 | LOGGER.debug("running 'ieee_ping' command to 0x%s", dev.nwk) 62 | 63 | res = await u.retry_wrapper( 64 | dev.zdo.request, 65 | zdo_t.ZDOCmd.IEEE_addr_req, 66 | dev.nwk, # nwk_addr_of_interest 67 | 0x00, # request_type (0=single device response) 68 | 0x00, # Start index 69 | tries=tries, 70 | ) 71 | event_data["result_ping"] = res 72 | LOGGER.debug("0x%04x: IEEE_addr_req: %s", dev.nwk, res) 73 | 74 | 75 | async def zdo_join_with_code( 76 | app, listener, ieee, cmd, data, service, params, event_data 77 | ): 78 | import bellows.types as bt 79 | 80 | node = ieee # Was: t.EUI64.convert("04:cf:8c:df:3c:75:e1:e7") 81 | 82 | # Original code: 83 | # 84 | # code = ( 85 | # b"\xA8\x16\x92\x7F\xB1\x9B\x78\x55\xC1" 86 | # + b"\xD7\x76\x0D\x5C\xAD\x63\x7F\x69\xCC" 87 | # ) 88 | code = params[p.CODE] 89 | # Note: Router is awake, there is no need for "tries" 90 | res = await app.permit_with_key(node, code, 60) 91 | link_key = bt.EmberKeyData(b"ZigBeeAlliance09") 92 | res = await app._ezsp.addTransientLinkKey(node, link_key) 93 | LOGGER.debug("permit with key: %s", res) 94 | res = await app.permit(60) 95 | 96 | 97 | async def zdo_update_nwk_id( 98 | app, listener, ieee, cmd, data, service, params, event_data 99 | ): 100 | """Update NWK id. 
data contains new NWK id.""" 101 | if data is None: 102 | LOGGER.error("Need NWK update id in the data") 103 | return 104 | 105 | nwk_upd_id = t.uint8_t(data) 106 | 107 | await zigpy.device.broadcast( 108 | app, 109 | 0, 110 | zdo_t.ZDOCmd.Mgmt_NWK_Update_req, 111 | 0, 112 | 0, 113 | 0x0000, 114 | 0x00, 115 | 0xEE, 116 | b"\xee" 117 | + t.Channels.ALL_CHANNELS.serialize() 118 | + b"\xFF" 119 | + nwk_upd_id.serialize() 120 | + b"\x00\x00", 121 | ) 122 | 123 | res = await app._ezsp.getNetworkParameters() 124 | event_data["result_update"] = res 125 | LOGGER.debug("Network params: %s", res) 126 | 127 | 128 | async def zdo_scan_now( 129 | app, listener, ieee, cmd, data, service, params, event_data 130 | ): 131 | """Scan topology""" 132 | 133 | LOGGER.debug("Scanning topology") 134 | task = asyncio.create_task(app.topology.scan()) 135 | add_task_info(event_data, task) 136 | 137 | 138 | async def zdo_flood_parent_annce( 139 | app, listener, ieee, cmd, data, service, params, event_data 140 | ): 141 | LOGGER.debug("flooding network with parent annce") 142 | 143 | flooder_task = getattr(app, "flooder_task", None) 144 | if flooder_task and not flooder_task.done(): 145 | flooder_task.cancel() 146 | LOGGER.debug("Stop flooding network with parent annce messages") 147 | app.flooder_task = None 148 | event_data["task"] = None 149 | return 150 | 151 | flooder_task = asyncio.create_task(_flood_with_parent_annce(app, listener)) 152 | add_task_info(event_data, flooder_task) 153 | app.flooder_task = flooder_task 154 | 155 | 156 | async def _flood_with_parent_annce(app, listener): 157 | coord = await u.get_device(app, listener, app.ieee) 158 | 159 | while True: 160 | children = [ 161 | nei.device.ieee 162 | for nei in coord.neighbors 163 | if nei.device.node_desc.is_end_device 164 | ] 165 | coord.debug("Have the following children: %s", children) 166 | await zigpy.zdo.broadcast( 167 | app, 168 | zigpy.zdo.types.ZDOCmd.Parent_annce, 169 | 0x0000, 170 | 0x00, 171 | children, 172 | broadcast_address=t.BroadcastAddress.ALL_ROUTERS_AND_COORDINATOR, 173 | ) 174 | await asyncio.sleep(0.1) 175 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/zha.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import logging 4 | from typing import Any 5 | 6 | from . import utils as u 7 | from .params import INTERNAL_PARAMS as p 8 | 9 | LOGGER = logging.getLogger(__name__) 10 | 11 | 12 | async def zha_devices( 13 | app, listener, ieee, cmd, data, service, params, event_data 14 | ): 15 | doGenerateCSV = params[p.CSV_FILE] is not None 16 | 17 | # Determine fields to render. 18 | # If the user provides a list, it is also used to 19 | # limit the contents of "devices" in the event_data. 
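# Example: calling the zha_toolkit.zha_devices service with
# data: ["ieee", "name", "lqi"] limits both the CSV columns and the
# per-device dictionaries returned in event_data["devices"].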
20 | if data is not None and isinstance(data, list): 21 | selectDeviceFields = True 22 | columns = data 23 | else: 24 | selectDeviceFields = False 25 | columns = [ 26 | "ieee", 27 | "nwk", 28 | "manufacturer", 29 | "model", 30 | "name", 31 | "quirk_applied", 32 | "quirk_class", 33 | "manufacturer_code", 34 | "power_source", 35 | "lqi", 36 | "rssi", 37 | "last_seen", 38 | "available", 39 | "device_type", 40 | "user_given_name", 41 | "device_reg_id", 42 | "area_id", 43 | ] 44 | # TODO: Skipped in columns, needs special handling 45 | # 'signature' 46 | # 'endpoints' 47 | 48 | devices = [ 49 | device.zha_device_info for device in listener.device_proxies.values() 50 | ] 51 | 52 | if ieee is not None: 53 | ieee = str(ieee) 54 | # Select only the device with the given address 55 | devices = [d for d in devices if str(d["ieee"]) == ieee] 56 | 57 | # Set default value for 'devices' in event_data, 58 | # may be slimmed down. Ensures that devices is set in case 59 | # an exception occurs. 60 | event_data["devices"] = devices 61 | event_data["selectDeviceFields"] = selectDeviceFields 62 | 63 | if params[p.CSV_LABEL] is not None and isinstance( 64 | params[p.CSV_LABEL], str 65 | ): 66 | try: 67 | # The sort key maps None to a tuple that sorts last, avoiding 68 | # None-vs-int comparison errors; strings compare case-insensitively 69 | devices = sorted( 70 | devices, 71 | key=lambda item: ( # pylint: disable=C3002 72 | lambda a: ( 73 | a is None, 74 | str.lower(a) if isinstance(a, str) else a, 75 | ) 76 | )(item[params[p.CSV_LABEL]]), 77 | ) 78 | except Exception: # nosec 79 | pass 80 | 81 | if doGenerateCSV or selectDeviceFields: 82 | if doGenerateCSV: 83 | # Write CSV header 84 | u.append_to_csvfile( 85 | columns, 86 | "csv", 87 | params[p.CSV_FILE], 88 | "device_dump['HEADER']", 89 | listener=listener, 90 | overwrite=True, 91 | ) 92 | 93 | slimmedDevices: list[Any] = [] 94 | for d in devices: 95 | # Fields for CSV 96 | csvFields: list[int | str | None] = [] 97 | # Fields for slimmed devices dict 98 | rawFields: dict[str, Any] = {} 99 | 100 | for c in columns: 101 | if c not in d.keys(): 102 | csvFields.append(None) 103 | else: 104 | val = d[c] 105 | rawFields[c] = val 106 | if c in ["manufacturer", "nwk"] and isinstance(val, int): 107 | val = f"0x{val:04X}" 108 | 109 | csvFields.append(val) 110 | 111 | slimmedDevices.append(rawFields) 112 | 113 | if doGenerateCSV: 114 | LOGGER.debug("Device %r", csvFields) 115 | u.append_to_csvfile( 116 | csvFields, 117 | "csv", 118 | params[p.CSV_FILE], 119 | f"device_dump[{d['ieee']}]", 120 | listener=listener, 121 | ) 122 | if selectDeviceFields: 123 | event_data["devices"] = slimmedDevices 124 | 125 | if params[p.JSON_OUT] is not None: 126 | timeStamp = None 127 | if params[p.JSON_TIMESTAMP] is not None: 128 | timeStamp = event_data["start_time"].split(".", 1)[0] 129 | u.write_json_to_file( 130 | event_data, 131 | subdir="json", 132 | fname=params[p.JSON_OUT], 133 | desc="zha_devices", 134 | listener=listener, 135 | normalize_name=False, 136 | ts=timeStamp, 137 | ) 138 | -------------------------------------------------------------------------------- /custom_components/zha_toolkit/znp.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | from datetime import datetime 4 | 5 | import aiofiles 6 | from zigpy import types as t 7 | 8 | from . 
import utils as u 9 | 10 | try: 11 | from zigpy_znp.tools.common import validate_backup_json 12 | from zigpy_znp.tools.network_backup import backup_network 13 | from zigpy_znp.tools.network_restore import json_backup_to_zigpy_state 14 | from zigpy_znp.tools.nvram_read import nvram_read 15 | from zigpy_znp.tools.nvram_reset import nvram_reset 16 | from zigpy_znp.tools.nvram_write import nvram_write 17 | except ImportError: 18 | backup_network = None 19 | nvram_read = None 20 | nvram_write = None 21 | nvram_reset = None 22 | validate_backup_json = None 23 | json_backup_to_zigpy_state = None 24 | 25 | LOGGER = logging.getLogger(__name__) 26 | 27 | 28 | async def znp_backup( 29 | app, listener, ieee, cmd, data, service, params, event_data 30 | ): 31 | """Backup ZNP network information.""" 32 | 33 | LOGGER.debug("ZNP_BACKUP") 34 | 35 | if u.get_radiotype(app) != u.RadioType.ZNP: 36 | msg = f"{cmd} is only for ZNP" 37 | LOGGER.debug(msg) 38 | raise ValueError(msg) 39 | 40 | if backup_network is None: 41 | msg = "ZNP tools not available (backup_network)" 42 | LOGGER.debug(msg) 43 | raise RuntimeError(msg) 44 | 45 | # Get backup information 46 | backup_obj = await backup_network(app._znp) 47 | 48 | # Store backup information to file 49 | 50 | # Set name with regards to local path 51 | out_dir = u.get_local_dir() 52 | 53 | # Ensure that data is an empty string when not set 54 | if data is None: 55 | data = "" 56 | 57 | fname = out_dir + "nwk_backup" + str(data) + ".json" 58 | 59 | event_data["backup_file"] = fname 60 | 61 | LOGGER.debug("Writing to %s", fname) 62 | async with aiofiles.open(fname, "w", encoding="utf_8") as f: 63 | await f.write(json.dumps(backup_obj, indent=4)) 64 | 65 | 66 | async def znp_restore( 67 | app, listener, ieee, cmd, data, service, params, event_data 68 | ): 69 | """Restore ZNP network information.""" 70 | 71 | if u.get_radiotype(app) != u.RadioType.ZNP: 72 | msg = f"'{cmd}' is only available for ZNP" 73 | LOGGER.debug(msg) 74 | raise ValueError(msg) 75 | 76 | if validate_backup_json is None or json_backup_to_zigpy_state is None: 77 | msg = "ZNP tools not available (validate_backup_json)" 78 | LOGGER.debug(msg) 79 | raise RuntimeError(msg) 80 | 81 | # Get/set parameters 82 | 83 | # command_data (data): 84 | # counter_increment (defaults to 2500) 85 | 86 | counter_increment = u.str2int(data) 87 | 88 | if not isinstance(counter_increment, int): 89 | counter_increment = 2500 90 | 91 | counter_increment = t.uint32_t(counter_increment) 92 | 93 | current_datetime = datetime.now().strftime("_%Y%m%d_%H%M%S") 94 | 95 | # Safety: backup current configuration 96 | await znp_backup( 97 | app, listener, ieee, cmd, current_datetime, service, params, event_data 98 | ) 99 | 100 | # Set name with regards to local path 101 | fname = u.get_local_dir() + "nwk_backup.json" 102 | LOGGER.info("Restore from '%s'", fname) 103 | 104 | event_data["restore_file"] = fname 105 | 106 | # Read backup file 107 | async with aiofiles.open(fname, encoding="utf_8") as f: 108 | backup = json.loads(await f.read()) 109 | 110 | # validate the backup file 111 | LOGGER.info("Validating backup contents") 112 | validate_backup_json(backup) 113 | LOGGER.info("Backup contents validated") 114 | 115 | network_info, node_info = json_backup_to_zigpy_state(backup) 116 | 117 | network_info.network_key.tx_counter += counter_increment 118 | 119 | # Network already formed in HA 120 | # app._znp.startup(force_form=True) 121 | 122 | # Write back information from backup 123 | LOGGER.info("Writing to device") 124 | await 
app._znp.write_network_info( 125 | network_info=network_info, node_info=node_info 126 | ) 127 | 128 | # LOGGER.debug("List of attributes/methods in app %s", dir(app)) 129 | LOGGER.debug("List of attributes/methods in znp %s", dir(app._znp)) 130 | 131 | # Shutdown znp? 132 | LOGGER.info( 133 | "Write done, call pre_shutdown(). Restart the device/HA after this." 134 | ) 135 | await app._znp.pre_shutdown() 136 | LOGGER.info("pre_shutdown() Done.") 137 | 138 | # TODO: restart znp, HA? 139 | 140 | 141 | async def znp_nvram_backup( 142 | app, listener, ieee, cmd, data, service, params, event_data 143 | ): 144 | """Save ZNP NVRAM to file for backup""" 145 | 146 | if u.get_radiotype(app) != u.RadioType.ZNP: 147 | msg = f"'{cmd}' is only available for ZNP" 148 | LOGGER.debug(msg) 149 | raise ValueError(msg) 150 | 151 | if nvram_read is None: 152 | msg = "ZNP tools not available (nvram_read)" 153 | LOGGER.debug(msg) 154 | raise RuntimeError(msg) 155 | 156 | # Set name with regards to local path 157 | out_dir = u.get_local_dir() 158 | 159 | LOGGER.info("Reading NVRAM from device") 160 | backup_obj = await nvram_read(app._znp) 161 | 162 | # Ensure that data is an empty string when not set 163 | if data is None: 164 | data = "" 165 | 166 | fname = out_dir + "nvram_backup" + str(data) + ".json" 167 | 168 | LOGGER.info("Saving NVRAM to '%s'", fname) 169 | async with aiofiles.open(fname, "w", encoding="utf_8") as f: 170 | await f.write(json.dumps(backup_obj, indent=4)) 171 | LOGGER.info("NVRAM backup saved to '%s'", fname) 172 | 173 | 174 | async def znp_nvram_restore( 175 | app, listener, ieee, cmd, data, service, params, event_data 176 | ): 177 | """Restore ZNP NVRAM from file""" 178 | 179 | if u.get_radiotype(app) != u.RadioType.ZNP: 180 | msg = f"'{cmd}' is only available for ZNP" 181 | LOGGER.debug(msg) 182 | raise ValueError(msg) 183 | 184 | if nvram_write is None: 185 | msg = "ZNP tools not available (nvram_write)" 186 | LOGGER.debug(msg) 187 | raise RuntimeError(msg) 188 | 189 | current_datetime = datetime.now().strftime("_%Y%m%d_%H%M%S") 190 | await znp_nvram_backup( 191 | app, listener, ieee, cmd, current_datetime, service, params, event_data 192 | ) 193 | 194 | # Restore NVRAM backup from file 195 | # Set name with regards to local path 196 | out_dir = u.get_local_dir() 197 | 198 | # Ensure that data is an empty string when not set 199 | if data is None: 200 | data = "" 201 | 202 | fname = out_dir + "nvram_backup" + str(data) + ".json" 203 | 204 | LOGGER.info("Restoring NVRAM from '%s'", fname) 205 | async with aiofiles.open(fname, "r", encoding="utf_8") as f: 206 | nvram_obj = json.loads(await f.read()) 207 | 208 | await nvram_write(app._znp, nvram_obj) 209 | LOGGER.info("Restored NVRAM from '%s'", fname) 210 | 211 | # TODO: restart znp, HA? 
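# Naming convention used by the helpers above and below: backups are
# JSON files in the local directory returned by u.get_local_dir(),
# named nwk_backup<data>.json (network) or nvram_backup<data>.json
# (NVRAM). The destructive operations (restore, reset) first call the
# matching backup helper with a _YYYYMMDD_HHMMSS suffix to create a
# safety copy before touching the radio.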
212 | 213 | 214 | async def znp_nvram_reset( 215 | app, listener, ieee, cmd, data, service, params, event_data 216 | ): 217 | """Reset ZNP NVRAM""" 218 | 219 | if u.get_radiotype(app) != u.RadioType.ZNP: 220 | msg = f"'{cmd}' is only available for ZNP" 221 | LOGGER.debug(msg) 222 | raise ValueError(msg) 223 | 224 | if nvram_reset is None: 225 | msg = "ZNP tools not available (nvram_reset)" 226 | LOGGER.debug(msg) 227 | raise RuntimeError(msg) 228 | 229 | current_datetime = datetime.now().strftime("_%Y%m%d_%H%M%S") 230 | 231 | # Safety: backup current configuration 232 | await znp_nvram_backup( 233 | app, listener, ieee, cmd, current_datetime, service, params, event_data 234 | ) 235 | 236 | # Write back information from backup 237 | LOGGER.info("Reset NVRAM") 238 | await nvram_reset(app._znp) 239 | 240 | # Shutdown znp? 241 | # LOGGER.info("Call pre_shutdown(). Restart the device/HA after this.") 242 | # await app._znp.pre_shutdown() 243 | # LOGGER.info("pre_shutdown() Done.") 244 | 245 | # TODO: restart znp, HA? 246 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | ## Read Attributes from Basic Cluster to CSV and state 2 | 3 | (Note: scripts can be called as a service) 4 | 5 | - `script_read_basic_cluster.yaml`:\ 6 | Script to add to HA (Configuration > 7 | Scripts): 8 | - `service_call_read_basic_cluster.yaml`:\ 9 | Example of service 10 | call.\ 11 | ![image](images/service_basic_cluster.png) 12 | - Values in state:\ 13 | ![image](images/state_basic_cluster.png) 14 | - Values in CSV: 15 | 16 | ```csv 17 | 2022-02-17T18:27:35.646226+00:00,Basic,zcl_version,3,0x0000,0x0000,1,60:a4:23:ff:fe:91:fc:9a, 18 | 2022-02-17T18:27:35.797180+00:00,Basic,app_version,80,0x0001,0x0000,1,60:a4:23:ff:fe:91:fc:9a, 19 | 2022-02-17T18:27:35.934612+00:00,Basic,stack_version,0,0x0002,0x0000,1,60:a4:23:ff:fe:91:fc:9a, 20 | 2022-02-17T18:27:36.071951+00:00,Basic,hw_version,1,0x0003,0x0000,1,60:a4:23:ff:fe:91:fc:9a, 21 | 2022-02-17T18:27:36.212760+00:00,Basic,manufacturer,_TZ3000_dbou1ap4,0x0004,0x0000,1,60:a4:23:ff:fe:91:fc:9a, 22 | 2022-02-17T18:27:36.352902+00:00,Basic,model,TS0505A,0x0005,0x0000,1,60:a4:23:ff:fe:91:fc:9a, 23 | 2022-02-17T18:27:36.488601+00:00,Basic,date_code,,0x0006,0x0000,1,60:a4:23:ff:fe:91:fc:9a, 24 | ``` 25 | 26 | ## Configure temperature reports by TRV or Thermometer 27 | 28 | (Note: scripts can be called as a service) 29 | 30 | - `script_TRV_setTemperatureReporting.yaml`:\ 31 | Script to configure a TRV to 32 | report every 5 minutes or when temperature changed by 0.2°C. 33 | - `script_Thermometer_setReporting.yaml`:\ 34 | Script to configure a 35 | Thermometer to report every 5 minutes or when temperature changed by 36 | 0.2°C. 37 | 38 | ## Download firmware from different sources. 39 | 40 | See `fetchOTAfw.sh` for instructions. The download functionality is now 41 | integrated in 42 | [ota_notify](https://github.com/mdeweerd/zha-toolkit#ota_notify) which is 43 | more selective. If you choose to use the script, you still need to trigger 44 | the OTA update (which can be done using ota_notify). 
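To check which firmware images a directory already holds, the patterns below mirror `ota_glob_expr` in `custom_components/zha_toolkit/ota.py`; a minimal sketch, where the directory path is an assumption taken from the `fetchOTAfw.sh` instructions and may differ on your install:

```python
import os
from glob import glob

# Same patterns as ota_glob_expr in custom_components/zha_toolkit/ota.py
OTA_GLOBS = ["*.ZIGBEE", "*.OTA", "*.sbl-ota", "*.bin", "*.ota", "*.zigbee"]
ota_dir = "/config/zb_ota"  # assumption: adjust to your otau_directory

for pattern in OTA_GLOBS:
    for path in sorted(glob(os.path.join(ota_dir, pattern))):
        print(os.path.basename(path))
```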
45 | 46 | ## FW resources 47 | 48 | - LEDVANCE/OSRAM: https://update.ledvance.com/firmware-overview 49 | -------------------------------------------------------------------------------- /examples/fetchOTAfw.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # NOTE: you can now download by using the HA Service `zha-toolkit/ota_notify`. 3 | # 4 | # Important: 5 | # Requires `jq` (https://stedolan.github.io/jq/) 6 | # If not available on your system, on alpine you just 7 | # need to add the [jq package](https://pkgs.alpinelinux.org/package/edge/main/x86/jq). 8 | # 9 | # In configuration.yaml, set the fw directory: 10 | # (Note: only the otau_directory option is shown) 11 | # 12 | # ```yaml 13 | # zha: 14 | # zigpy_config: 15 | # ota: 16 | # otau_directory: /config/zb_ota 17 | # ``` 18 | # 19 | # Create the directory you have chosen (`/config/zb_ota` 20 | # in the example). Then add this script in that directory. 21 | # Make the script executable (`chmod +x fetchOTAfw.sh`) and 22 | # run it. 23 | # 24 | # 25 | # If you find FW that is not in that list, check out the 26 | # [instructions](https://github.com/Koenkk/zigbee-OTA#adding-new-and-updating-existing-ota-files) 27 | # to add them. 28 | # 29 | 30 | # List all FW files that were already downloaded. 31 | # The files usually have the FW version in their name, making them unique. 32 | ls -- *.ZIGBEE *.OTA *.sbl-ota *.bin *.ota *.zigbee > existing.list 33 | 34 | # Get and filter the list from Koenk's list, download the files 35 | # shellcheck disable=SC2016 36 | curl https://raw.githubusercontent.com/Koenkk/zigbee-OTA/master/index.json |\ 37 | jq -r '.[] |.url' |\ 38 | grep -v -f existing.list |\ 39 | xargs bash -c 'for f do wget --no-clobber $f || rm ${f##*/} ; done' 40 | 41 | # Delete the helper file used to filter already downloaded files 42 | rm existing.list 43 | -------------------------------------------------------------------------------- /examples/images/service_basic_cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mdeweerd/zha-toolkit/b18b0dd24af541f145118f2c27366a544acb44cd/examples/images/service_basic_cluster.png -------------------------------------------------------------------------------- /examples/images/state_basic_cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mdeweerd/zha-toolkit/b18b0dd24af541f145118f2c27366a544acb44cd/examples/images/state_basic_cluster.png -------------------------------------------------------------------------------- /examples/script_TRV_setTemperatureReporting.yaml: -------------------------------------------------------------------------------- 1 | alias: Zigbee TRV Configure Temperature Reports 2 | sequence: 3 | - service: zha_toolkit.conf_report 4 | data: 5 | ieee: "{{ entity_name }}" 6 | cluster: 513 7 | attribute: 0 8 | tries: 100 9 | event_done: zha_done 10 | reportable_change: 20 11 | max_interval: 300 12 | min_interval: 19 13 | - service: zha_toolkit.conf_report_read 14 | data: 15 | ieee: "{{ entity_name }}" 16 | cluster: 513 17 | attribute: 0 18 | tries: 100 19 | event_done: zha_done 20 | fields: 21 | entity_name: 22 | name: entity_name 23 | description: A Zigbee Entity (all entities of the device resolve to the same address) 24 | required: true 25 | selector: 26 | entity: 27 | integration: zha 28 | mode: restart 29 | icon: mdi:home-thermometer 30 | description: >- 31 | This script 
configures the selected TRV (Thermostatic Radiator Valve) to 32 | report its temperature at least every 5 minutes or every 0.2°C whichever 33 | occurs first. 34 | -------------------------------------------------------------------------------- /examples/script_Thermometer_setReporting.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | alias: Zigbee Thermometer Configure Reporting 3 | fields: 4 | entity_name: 5 | name: entity_name 6 | description: A Zigbee Entity (all entities of the device resolve to the same address) 7 | required: true 8 | selector: 9 | entity: 10 | integration: zha 11 | sequence: 12 | - alias: 13 | Configure the temperature cluster of the device so that it reports 14 | changes of 0.2°C at most every 19 seconds, and at least every 5 minutes 15 | service: zha_toolkit.conf_report 16 | data: 17 | ieee: "{{ entity_name }}" 18 | cluster: 1026 19 | attribute: 0 20 | tries: 100 21 | event_done: zha_done 22 | reportable_change: 20 23 | max_interval: 300 24 | min_interval: 19 25 | - alias: 26 | Read back the report configuration so that it can be verified in the zha_done 27 | event data 28 | service: zha_toolkit.conf_report_read 29 | data: 30 | ieee: "{{ entity_name }}" 31 | cluster: 1026 32 | attribute: 0 33 | tries: 100 34 | event_done: zha_done 35 | - alias: Ensure that the cluster is bound to the coordinator 36 | service: zha_toolkit.bind_ieee 37 | data: 38 | ieee: 0 # 0 or false selects the coordinator in zha-toolkit 39 | cluster: 1026 40 | tries: 100 41 | event_done: zha_done 42 | mode: restart 43 | icon: mdi:thermometer-check 44 | description: >- 45 | This script configures the selected Zigbee Thermometer to report its 46 | temperature at least every 5 minutes or every 0.2°C whichever occurs first. 
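For reference, the reporting numbers in these scripts translate as follows: the ZCL temperature `measured_value` is expressed in hundredths of a degree Celsius, so `reportable_change: 20` means 0.2°C, while `min_interval: 19` and `max_interval: 300` are in seconds. A quick plain-Python check of the values copied from the script above:

```python
reportable_change = 20  # ZCL temperature unit is 0.01 degC
min_interval = 19  # seconds: reports are throttled to this rate
max_interval = 300  # seconds: a report is sent at least this often

print(f"report on change of {reportable_change * 0.01:.1f} degC")
print(f"at most every {min_interval} s, at least every {max_interval // 60} min")
```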
47 | -------------------------------------------------------------------------------- /examples/script_configure_Lixee_reporting.yaml: -------------------------------------------------------------------------------- 1 | alias: Configure Lixee Reporting to 1Wh change and max 5 minutes 2 | sequence: 3 | - variables: 4 | ieee: "{{(device_attr(device, 'identifiers')|list)[0][1]}}" 5 | default_tries: 3 6 | - service: zha_toolkit.conf_report 7 | data: 8 | ieee: "{{ ieee }}" 9 | cluster: 1794 10 | attribute: 0 11 | min_interval: 1 12 | max_interval: 300 13 | tries: "{{ default_tries}}" 14 | reportable_change: 1 15 | event_done: zha_done 16 | - service: zha_toolkit.conf_report_read 17 | alias: read report configuration (allows verification) 18 | data: 19 | ieee: "{{ ieee }}" 20 | cluster: 1794 21 | attribute: 0 22 | tries: "{{ default_tries}}" 23 | event_done: zha_done 24 | - service: zha_toolkit.conf_report 25 | data: 26 | ieee: "{{ ieee }}" 27 | cluster: 1794 28 | attribute: 0 29 | min_interval: 1 30 | max_interval: 300 31 | tries: "{{ default_tries}}" 32 | reportable_change: 1 33 | event_done: zha_done 34 | - service: zha_toolkit.conf_report_read 35 | alias: read report configuration (allows verification) 36 | data: 37 | ieee: "{{ ieee }}" 38 | endpoint: 1 39 | cluster: 1794 40 | attribute: 0 41 | tries: "{{ default_tries}}" 42 | event_done: zha_done 43 | - service: zha_toolkit.conf_report 44 | alias: Active power report configuration 45 | data: 46 | ieee: "{{ ieee }}" 47 | endpoint: 1 48 | cluster: 2820 49 | attribute: 1291 50 | min_interval: 1 51 | max_interval: 300 52 | tries: "{{ default_tries}}" 53 | reportable_change: 1 54 | event_done: zha_done 55 | - service: zha_toolkit.conf_report_read 56 | alias: Active power report configuration read back 57 | data: 58 | ieee: "{{ ieee }}" 59 | cluster: 2820 60 | attribute: 1291 61 | tries: "{{ default_tries}}" 62 | event_done: zha_done 63 | - service: zha_toolkit.conf_report 64 | alias: Mains voltage report configuration 65 | data: 66 | ieee: "{{ ieee }}" 67 | cluster: 1 68 | attribute: 0 69 | min_interval: 1 70 | max_interval: 3900 71 | tries: "{{ default_tries}}" 72 | reportable_change: 1 73 | event_done: zha_done 74 | - service: zha_toolkit.conf_report_read 75 | alias: Mains voltage report configuration read back 76 | data: 77 | ieee: "{{ ieee }}" 78 | cluster: 1 79 | attribute: 0 80 | tries: "{{ default_tries}}" 81 | event_done: zha_done 82 | fields: 83 | device: 84 | name: Lixee 85 | description: Lixee ZLinky device to configure 86 | required: true 87 | selector: 88 | device: 89 | manufacturer: LiXee 90 | model: ZLinky_TIC 91 | entity: 92 | integration: zha 93 | mode: single 94 | -------------------------------------------------------------------------------- /examples/script_danfoss_ally_adaptation_run_init.yaml: -------------------------------------------------------------------------------- 1 | alias: Danfoss Ally Start Adaptation Run 2 | description: >- 3 | This script resets the adaptation status of the valve by unmounting and 4 | remounting the valve. It then tries to initiate an adaptation run 5 | immediately. 
6 | sequence: 7 | - variables: 8 | ieee: "{{(device_attr(device, 'identifiers')|list)[0][1]}}" 9 | csv: danfoss_adaptation_run.csv 10 | default_tries: 3 11 | - alias: Set the valve in mounting mode 12 | service: zha_toolkit.attr_write 13 | data: 14 | ieee: "{{ ieee }}" 15 | cluster: 513 16 | attribute: 16403 17 | attr_val: 1 18 | manf: 4678 19 | read_before_write: false 20 | csvout: "{{ csv }}" 21 | tries: "{{ default_tries}}" 22 | event_done: zha_done 23 | - alias: Wait until the device is in mounting mode (short press) 24 | repeat: 25 | until: 26 | - condition: template 27 | value_template: "{{ is_state_attr('var.allyscript', device + 'mounting', 0) }}" 28 | sequence: 29 | - delay: 30 | hours: 0 31 | minutes: 0 32 | seconds: 2 33 | milliseconds: 0 34 | alias: Wait between successive reads 35 | - alias: Read mount status (should be false) 36 | service: zha_toolkit.attr_read 37 | data: 38 | ieee: "{{ ieee }}" 39 | cluster: 513 40 | attribute: 16402 41 | manf: 4678 42 | csvout: "{{ csv }}" 43 | tries: "{{ default_tries}}" 44 | event_done: zha_done 45 | state_id: var.allyscript 46 | state_attr: "{{ device + 'mounting' }}" 47 | allow_create: true 48 | - alias: Wait until the user mounts the device 49 | repeat: 50 | until: 51 | - condition: template 52 | value_template: "{{ is_state_attr('var.allyscript', device + 'mounting', 0) }}" 53 | sequence: 54 | - delay: 55 | hours: 0 56 | minutes: 0 57 | seconds: 2 58 | milliseconds: 0 59 | alias: Wait between successive reads 60 | - alias: Read mount status (should be false) 61 | service: zha_toolkit.attr_read 62 | data: 63 | ieee: "{{ ieee }}" 64 | cluster: 513 65 | attribute: 16402 66 | manf: 4678 67 | csvout: "{{ csv }}" 68 | tries: "{{ default_tries}}" 69 | event_done: zha_done 70 | state_id: var.allyscript 71 | state_attr: "{{ device + 'mounting' }}" 72 | allow_create: true 73 | - alias: Read the adaptation status (should not be 2) 74 | service: zha_toolkit.attr_read 75 | data: 76 | ieee: "{{ ieee }}" 77 | cluster: 513 78 | attribute: 16461 79 | manf: 4678 80 | tries: "{{ default_tries}}" 81 | csvout: "{{ csv }}" 82 | event_done: zha_done 83 | - alias: Set Adaptation Run control to automatic 84 | service: zha_toolkit.attr_write 85 | data: 86 | ieee: "{{ ieee }}" 87 | cluster: 513 88 | attribute: 16460 89 | attr_val: 1 90 | manf: 4678 91 | tries: "{{ default_tries}}" 92 | csvout: "{{ csv }}" 93 | event_done: zha_done 94 | - alias: Initiate Adaptation Run 95 | service: zha_toolkit.attr_write 96 | data: 97 | ieee: "{{ ieee }}" 98 | cluster: 513 99 | attribute: 16460 100 | attr_val: 1 101 | manf: 4678 102 | read_before_write: false 103 | tries: "{{ default_tries}}" 104 | csvout: "{{ csv }}" 105 | event_done: zha_done 106 | - alias: Wait a bit 107 | delay: 108 | hours: 0 109 | minutes: 0 110 | seconds: 10 111 | milliseconds: 0 112 | - alias: Read the adaptation status (Expected to be 1, but not observed as such). 
113 | service: zha_toolkit.attr_read 114 | data: 115 | ieee: "{{ ieee }}" 116 | cluster: 513 117 | attribute: 16461 118 | manf: 4678 119 | tries: "{{ default_tries}}" 120 | csvout: "{{ csv }}" 121 | event_done: zha_done 122 | mode: restart 123 | fields: 124 | device: 125 | name: Ally TRV Device 126 | description: A Danfoss Ally Thermostatic Regulation Valve (TRV) to configure 127 | required: true 128 | default: 7d16a871a8caa808d80e23f5d92ca65d 129 | selector: 130 | device: 131 | manufacturer: Danfoss 132 | entity: 133 | domain: climate 134 | integration: zha 135 | -------------------------------------------------------------------------------- /examples/script_danfoss_ally_configure.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | alias: Danfoss Ally TRV configuration 3 | sequence: 4 | - variables: 5 | ieee: "{{ (device_attr(device, 'identifiers')|list)[0][1] }}" 6 | default_tries: 3 7 | - alias: Configure reporting of local_temperature in Thermostat cluster 8 | service: zha_toolkit.conf_report 9 | data: 10 | ieee: "{{ ieee }}" 11 | cluster: 513 12 | attribute: 0 13 | tries: "{{ default_tries }}" 14 | event_done: zha_done 15 | reportable_change: 20 16 | max_interval: 300 17 | min_interval: 19 18 | - alias: Read back reporting configuration, for debugging 19 | service: zha_toolkit.conf_report_read 20 | data: 21 | ieee: "{{ ieee }}" 22 | cluster: 513 23 | attribute: 0 24 | tries: "{{ default_tries }}" 25 | event_done: zha_done 26 | - alias: Set lower limit for setpoint 27 | service: zha_toolkit.attr_write 28 | data: 29 | ieee: "{{ ieee }}" 30 | cluster: 513 31 | attribute: 21 32 | attr_val: "{{ ( set_min_temperature | float * 100) | int }}" 33 | tries: "{{ default_tries }}" 34 | csvout: danfoss_config.csv 35 | - alias: Set upper limit for setpoint 36 | service: zha_toolkit.attr_write 37 | data: 38 | ieee: "{{ ieee }}" 39 | cluster: 513 40 | attribute: 22 41 | attr_val: "{{ ( set_max_temperature | float * 100) | int }}" 42 | tries: "{{ default_tries }}" 43 | csvout: danfoss_config.csv 44 | - alias: Set Display rotation 45 | service: zha_toolkit.attr_write 46 | data: 47 | ieee: "{{ ieee }}" 48 | cluster: 516 49 | attribute: 16384 50 | attr_val: "{{ 0 if view_direction else 1 }}" 51 | manf: 4678 52 | event_done: zha_done 53 | tries: "{{ default_tries }}" 54 | csvout: danfoss_config.csv 55 | - alias: Set open window detection 56 | service: zha_toolkit.attr_write 57 | data: 58 | ieee: "{{ ieee }}" 59 | cluster: 513 60 | attribute: 16465 61 | attr_val: "{{ 1 if enable_open_window else 0 }}" 62 | manf: 4678 63 | event_done: zha_done 64 | tries: "{{ default_tries }}" 65 | csvout: danfoss_config.csv 66 | - alias: Check if window open reporting is configured, for debugging 67 | service: zha_toolkit.conf_report_read 68 | data: 69 | ieee: "{{ ieee }}" 70 | cluster: 513 71 | attribute: 16384 72 | manf: 4678 73 | tries: "{{ default_tries }}" 74 | event_done: zha_done 75 | - alias: Set TRV orientation (horizontal/vertical) 76 | service: zha_toolkit.attr_write 77 | data: 78 | ieee: "{{ ieee }}" 79 | cluster: 513 80 | attribute: 16404 81 | attr_val: "{{ 1 if orientation else 0 }}" 82 | manf: 4678 83 | event_done: zha_done 84 | tries: "{{ default_tries }}" 85 | csvout: danfoss_config.csv 86 | - alias: Set time 87 | service: zha_toolkit.misc_settime 88 | data: 89 | ieee: "{{ ieee }}" 90 | event_done: zha_done 91 | tries: "{{ default_tries }}" 92 | csvout: danfoss_config.csv 93 | - alias: Set time status to synchronised 94 | service: zha_toolkit.attr_write 95 | 
data: 96 | ieee: "{{ ieee }}" 97 | cluster: 10 98 | attribute: 1 99 | attr_val: 2 100 | tries: "{{ default_tries }}" 101 | csvout: danfoss_config.csv 102 | - alias: Set covered mode 103 | service: zha_toolkit.attr_write 104 | data: 105 | ieee: "{{ ieee }}" 106 | cluster: 513 107 | attribute: 16406 108 | attr_val: "{{ 1 if covered else 0 }}" 109 | manf: 4678 110 | tries: "{{ default_tries }}" 111 | csvout: danfoss_config.csv 112 | - alias: Check heat request reporting configuration 113 | service: zha_toolkit.conf_report_read 114 | data: 115 | ieee: "{{ ieee }}" 116 | cluster: 513 117 | attribute: 16433 118 | manf: 4678 119 | tries: "{{ default_tries }}" 120 | event_done: zha_done 121 | - alias: Read Heat Supply Request 122 | service: zha_toolkit.attr_read 123 | data: 124 | ieee: "{{ ieee }}" 125 | cluster: 513 126 | attribute: 16433 127 | manf: 4678 128 | tries: "{{ default_tries }}" 129 | csvout: danfoss_config.csv 130 | description: >- 131 | A script that configures a Danfoss Ally TRV Zigbee thermostat. You can listen 132 | for the 'zha_done' event to see some of the configuration results. Sets the 133 | report configuration and enables the open window detection function. 134 | fields: 135 | device: 136 | name: Ally TRV Device 137 | description: A Danfoss Ally Thermostatic Regulation Valve (TRV) to configure 138 | required: true 139 | selector: 140 | device: 141 | manufacturer: Danfoss 142 | entity: 143 | domain: climate 144 | integration: zha 145 | set_min_temperature: 146 | name: Min user temperature 147 | description: The minimum temperature a user can set 148 | default: 8 149 | example: 8 150 | required: true 151 | selector: 152 | number: 153 | min: 8 154 | max: 22 155 | step: 0.5 156 | unit_of_measurement: °C 157 | mode: box 158 | set_max_temperature: 159 | name: Max user temperature 160 | description: The maximum temperature a user can set 161 | default: 22 162 | example: 22 163 | required: true 164 | selector: 165 | number: 166 | min: 8 167 | max: 22 168 | step: 0.5 169 | unit_of_measurement: °C 170 | mode: box 171 | enable_open_window: 172 | name: Enable open window detection 173 | description: When true, the valve detects an open window and stops heating 174 | default: true 175 | example: true 176 | required: true 177 | selector: 178 | boolean: 179 | view_direction: 180 | name: Viewing direction/display rotation 181 | description: >- 182 | When true, the text can be read when looking towards the valve (factory 183 | default); when false, the text can be read when looking from the valve. 184 | default: true 185 | example: true 186 | required: true 187 | selector: 188 | boolean: 189 | orientation: 190 | name: TRV orientation 191 | description: >- 192 | When false, the TRV is mounted horizontally; when true, vertically. This 193 | selects the temperature gradient measured by the valve on the radiator. 194 | default: false 195 | example: false 196 | required: true 197 | selector: 198 | boolean: 199 | covered: 200 | name: TRV covered setting 201 | description: >- 202 | When true, the radiator is covered (you should then use the automation 203 | that sends the temperature from an external thermometer). 
204 | default: false 205 | example: false 206 | required: true 207 | selector: 208 | boolean: 209 | mode: single 210 | icon: mdi:thermostat 211 | -------------------------------------------------------------------------------- /examples/script_danfoss_ally_settime.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | alias: Set Danfoss TRV times 3 | sequence: 4 | - variables: 5 | default_tries: 3 6 | - repeat: 7 | sequence: 8 | - alias: Set time 9 | service: zha_toolkit.misc_settime 10 | data: 11 | ieee: "{{ repeat.item }}" 12 | event_done: zha_done 13 | tries: "{{ default_tries }}" 14 | csvout: danfoss_config.csv 15 | - alias: Set time status to synchronised 16 | service: zha_toolkit.attr_write 17 | data: 18 | ieee: "{{ repeat.item }}" 19 | cluster: 10 20 | attribute: 1 21 | attr_val: 2 22 | tries: "{{ default_tries }}" 23 | csvout: danfoss_config.csv 24 | for_each: |- 25 | {% set ns = namespace(trvs=[]) %}{%- for s in states 26 | if device_id(s.entity_id) is not none 27 | and device_attr(s.entity_id, 'manufacturer') == 'Danfoss' 28 | and device_attr(s.entity_id, 'model') == 'eTRV0100' 29 | %}{% set ns.trvs=(ns.trvs +[device_attr(s.entity_id, 'id')])|unique|list 30 | %}{% endfor %}{{ ns.trvs }} 31 | description: >- 32 | A script that sets/updates the time on Danfoss TRVs. 33 | This is best called from an automation executed on a regular basis, helping to 34 | ensure that the time on the TRVs stays correct (an example automation is 35 | sketched at the end of this document). It could also be called when a TRV is 36 | detected to have restarted, or when its synchronised value is set to 0. 37 | mode: restart 38 | icon: mdi:clock-outline 39 | -------------------------------------------------------------------------------- /examples/script_read_basic_cluster.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | alias: Read Basic Cluster 3 | description: 4 | Read the main attributes of the Basic cluster to a CSV file and to the 5 | 'sensor.basic_cluster' state. 6 | fields: 7 | entity_name: 8 | name: entity_name 9 | description: A Zigbee Entity (all entities of the device resolve to the same address) 10 | required: true 11 | selector: 12 | entity: 13 | integration: zha 14 | csv: 15 | name: csv 16 | description: >- 17 | CSV filename. With '../www/basic.csv', the file can be downloaded from 18 | YOURINSTANCEURL/local/basic.csv. 19 | example: ../www/basic.csv 20 | required: true 21 | selector: 22 | text: 23 | sequence: 24 | - repeat: 25 | count: "7" 26 | sequence: 27 | - variables: 28 | current: "{{ ( repeat.index - 1 ) }}" 29 | - service: system_log.write 30 | data: 31 | logger: entity_name.read_basic_cluster_script 32 | level: warning 33 | message: "{{ 'Read Attribute %u' % (repeat.index, ) }}" 34 | - service: zha_toolkit.attr_read 35 | data: 36 | ieee: "{{ entity_name }}" 37 | cluster: 0 38 | attribute: "{{ current }}" 39 | tries: 3 40 | state_id: sensor.basic_cluster 41 | state_attr: '{{ "%s%04X" % (entity_name, current|int) }}' 42 | allow_create: true 43 | csvout: "{{ csv }}" 44 | - service: system_log.write 45 | data: 46 | logger: entity_name.basic_cluster_read 47 | level: warning 48 | message: Basic cluster read done 49 | mode: restart 50 | -------------------------------------------------------------------------------- /examples/script_request_all_light_states.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | alias: "[Lighting] Update on/off states" 3 | description: >- 4 | Request the on/off state of all ZHA lights. 
5 | This makes it possible to cope with lights that do not notify their state as expected. 6 | In particular, when batch/group on/off commands are executed, the light states 7 | are not updated in Home Assistant. 8 | The result of the read requests will "force" Home Assistant to update. 9 | You can call this script in an automation triggered by the group action, or on a 10 | regular basis. 11 | (Original script by @HarvsG in https://github.com/mdeweerd/zha-toolkit/issues/113#issuecomment-1335616201) 12 | trigger: [] 13 | condition: [] 14 | action: 15 | - repeat: 16 | for_each: >- 17 | {{states.light | map(attribute='entity_id') | select('in', 18 | integration_entities('zha')) | list }} 19 | sequence: 20 | - continue_on_error: true 21 | service: zha_toolkit.attr_read 22 | data: 23 | ieee: "{{ repeat.item }}" 24 | cluster: 6 25 | attribute: 0 26 | fail_exception: false 27 | tries: 3 28 | mode: single 29 | -------------------------------------------------------------------------------- /examples/script_use_zha_devices.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | alias: Loop over zha_devices, extract some device data 3 | sequence: 4 | - parallel: 5 | - sequence: 6 | - wait_for_trigger: 7 | - platform: event 8 | event_type: zha_devices_ready 9 | - service: system_log.write 10 | data: 11 | logger: zha_devices 12 | level: error 13 | message: '{{ "Got event %s" % ( wait.trigger.event.data.devices ) }}' 14 | - service: system_log.write 15 | alias: List unavailable only 16 | data: 17 | logger: zha_devices 18 | level: error 19 | message: > 20 | {% set ns = namespace(names=[]) %} 21 | {% for item in wait.trigger.event.data.devices if not item.available %} 22 | {% set ns.names = ns.names + [ "'%s'" % (item.name) ] %} 23 | {% endfor %} 24 | Items: {{ ns.names | join(', ') }} 25 | - repeat: 26 | for_each: "{{ wait.trigger.event.data.devices }}" 27 | sequence: 28 | - service: system_log.write 29 | data: 30 | logger: zha_devices 31 | level: error 32 | message: >- 33 | {{ "Item '%s' Power: %s dBm Available: %s" % ( 34 | repeat.item.name, repeat.item.rssi, repeat.item.available 35 | ) }} 36 | - service: zha_toolkit.zha_devices 37 | data: 38 | event_done: zha_devices_ready 39 | mode: single 40 | -------------------------------------------------------------------------------- /examples/script_use_zha_devices_response.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | alias: Loop over zha_devices, extract some device data. 
For HA>=2023.7 3 | sequence: 4 | - service: zha_toolkit.zha_devices 5 | response_variable: dev_data 6 | - service: system_log.write 7 | data: 8 | logger: zha_devices 9 | level: error 10 | message: '{{ "Got device_data %s" % ( dev_data.devices ) }}' 11 | - service: system_log.write 12 | alias: List unavailable only 13 | data: 14 | logger: zha_devices 15 | level: error 16 | message: > 17 | {% set ns = namespace(names=[]) %} 18 | {% for item in dev_data.devices if not item.available %} 19 | {% set ns.names = ns.names + [ "'%s'" % (item.name) ] %} 20 | {% endfor %} 21 | Items: {{ ns.names | join(', ') }} 22 | - repeat: 23 | for_each: "{{ dev_data.devices }}" 24 | sequence: 25 | - service: system_log.write 26 | data: 27 | logger: zha_devices 28 | level: error 29 | message: >- 30 | {{ "Item '%s' Power: %s dBm Available: %s" % ( 31 | repeat.item.name, repeat.item.rssi, repeat.item.available 32 | ) }} 33 | mode: single 34 | -------------------------------------------------------------------------------- /examples/service_call_read_basic_cluster.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Example of service call to read basic cluster 3 | # Needs: script_read_basic_cluster.yaml 4 | # 5 | service: script.1645121662206 6 | data: 7 | entity_name: button.bureau_identify 8 | csv: ../www/lidl_basic_cluster.csv 9 | -------------------------------------------------------------------------------- /hacs.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "🧰 ZHA Toolkit - Service for advanced Zigbee Usage", 3 | "content_in_root": false, 4 | "zip_release": true, 5 | "filename": "zha-toolkit.zip", 6 | "render_readme": true, 7 | "persistent_directory": "local", 8 | "homeassistant": "2024.9.0" 9 | } 10 | -------------------------------------------------------------------------------- /icon/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mdeweerd/zha-toolkit/b18b0dd24af541f145118f2c27366a544acb44cd/icon/icon.png -------------------------------------------------------------------------------- /icon/icon.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /icon/icon@2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mdeweerd/zha-toolkit/b18b0dd24af541f145118f2c27366a544acb44cd/icon/icon@2x.png -------------------------------------------------------------------------------- /images/ServiceResponse.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mdeweerd/zha-toolkit/b18b0dd24af541f145118f2c27366a544acb44cd/images/ServiceResponse.png -------------------------------------------------------------------------------- /images/service-config-ui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mdeweerd/zha-toolkit/b18b0dd24af541f145118f2c27366a544acb44cd/images/service-config-ui.png -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.yamlfix] 2 | # allow_duplicate_keys = true 3 | #line_length = 280 4 | line_length = 80 5 | # none_representation = "null" 6 | 
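# Annotation: line_length = 80 presumably mirrors the 79-character Python
# line limit configured for flake8/pylint in setup.cfg, keeping YAML and
# Python formatting consistent across the repository.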
-------------------------------------------------------------------------------- /requirements_test.txt: -------------------------------------------------------------------------------- 1 | # For tests 2 | pytest-homeassistant-custom-component>=0.4.8 3 | -------------------------------------------------------------------------------- /scripts/installNoHacsFromZip.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # To update / install : 3 | cd config || exit 4 | ( 5 | mkdir -p custom_components/zha_toolkit 6 | cd custom_components/zha_toolkit || exit 7 | rm zha-toolkit.zip >& /dev/null 8 | curl -s https://api.github.com/repos/mdeweerd/zha-toolkit/releases/latest \ 9 | | grep "browser_download_url.*/zha-toolkit.zip" \ 10 | | cut -d : -f 2,3 \ 11 | | tr -d \" \ 12 | | wget -qi - 13 | unzip -o zha-toolkit.zip 14 | rm zha-toolkit.zip 15 | ) 16 | -------------------------------------------------------------------------------- /scripts/installNoHacsWithGit.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd config/ || exit 3 | ( 4 | git clone -n --depth=1 --filter=tree:0 https://github.com/mdeweerd/zha-toolkit.git 5 | cd zha-toolkit || exit 6 | git sparse-checkout set --no-cone custom_components 7 | git checkout 8 | ) 9 | ( 10 | [[ -r custom_components ]] && cd custom_components && ln -s ../zha-toolkit/custom_components/zha_toolkit . 11 | ) 12 | # To update: 13 | ( 14 | cd zha-toolkit || exit 15 | git pull 16 | ) 17 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [tool:pytest] 2 | asyncio_mode=strict 3 | 4 | [flake8] 5 | exclude = .venv,.git,.tox 6 | # To work with Black 7 | max-line-length = 79 8 | # B028 is manually surrounded by quotes, consider using the `!r` 9 | # W503 line break before binary operator 10 | # E501 line too long 11 | ignore = 12 | B028, 13 | W503 14 | 15 | per-file-ignores = 16 | custom_components/zha_toolkit/__init__.py:E501 17 | 18 | # per-file-ignores = 19 | # example/*:F811,F401,F403 20 | 21 | [isort] 22 | profile = black 23 | line_length = 79 24 | 25 | [pylint.MESSAGES CONTROL] 26 | disable = invalid-name, unused-argument, broad-except, missing-docstring, fixme, 27 | consider-using-f-string, 28 | too-many-branches, too-many-statements, too-many-arguments, protected-access, 29 | import-error, too-many-locals, import-outside-toplevel, 30 | logging-fstring-interpolation, line-too-long, duplicate-code 31 | 32 | [pylint.FORMAT] 33 | max-line-length = 79 34 | 35 | [codespell] 36 | builtin=clear,rare,informal,usage,code,names 37 | ignore-words-list=hass,master,weerd,uint 38 | skip=./.* 39 | quiet-level=2 40 | 41 | [mypy] 42 | mypy_path = $MYPY_CONFIG_FILE_DIR 43 | explicit_package_bases = yes 44 | --------------------------------------------------------------------------------
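The 'Set Danfoss TRV times' script earlier in this document notes that it is best called from an automation executed on a regular basis. A minimal sketch of such an automation follows; it assumes the script is registered under the entity id script.set_danfoss_trv_times, which is an assumption here since the actual id depends on how the script was stored in Home Assistant.

---
# Sketch: call the time synchronisation script once per day.
# script.set_danfoss_trv_times is an assumed entity id; replace it with the
# id under which script_danfoss_ally_settime.yaml is actually registered.
alias: Daily Danfoss TRV time synchronisation
trigger:
  - platform: time
    at: "03:00:00"
condition: []
action:
  - service: script.turn_on
    target:
      entity_id: script.set_danfoss_trv_times
mode: single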