├── .hadolint.yaml
├── tests
│   ├── pytest.ini
│   ├── requirements.txt
│   ├── .gitignore
│   ├── run_tests.sh
│   ├── qemu-strategy.yaml
│   ├── conftest.py
│   ├── qemu_shell_strategy.py
│   ├── smoke_test
│   │   ├── test_offline.py
│   │   ├── test_os_update.py
│   │   └── test_basic.py
│   └── supervisor_test
│       └── test_supervisor.py
├── .dockerignore
├── .gitmodules
├── .github
│   ├── dependabot.yml
│   ├── workflows
│   │   ├── label-actions.yml
│   │   ├── release-drafter.yml
│   │   ├── lock.yml
│   │   ├── pr-checks.yml
│   │   ├── matrix.json
│   │   ├── stale.yml
│   │   ├── test.yaml
│   │   ├── artifacts-index.yaml
│   │   └── build.yaml
│   ├── actions
│   │   ├── haos-builder-command
│   │   │   └── action.yml
│   │   └── bump-rpi-imager-version
│   │       └── action.yml
│   ├── release-drafter.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── config.yml
│   │   └── bug_report.yml
│   └── label-actions.yml
├── scripts
│   ├── update-dtb.sh
│   ├── check-kernel-patches.sh
│   ├── enter.sh
│   ├── entry.sh
│   ├── update-firmware-rpi.sh
│   ├── update-kernel-rpi.sh
│   └── update-kernel-upstream.sh
├── .gitignore
├── Documentation
│   ├── kernel.md
│   └── README.md
├── Dockerfile
├── Makefile
├── .os-artifacts
│   └── index.html
├── README.md
└── LICENSE
/.hadolint.yaml: -------------------------------------------------------------------------------- 1 | ignored: 2 | - DL3008 3 | -------------------------------------------------------------------------------- /tests/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | timeout_method = signal 3 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # Ignore everything except what we really need 2 | * 3 | !scripts/ 4 | -------------------------------------------------------------------------------- /tests/requirements.txt: -------------------------------------------------------------------------------- 1 | labgrid==24.0.1 2 | pytest-dependency==0.6.0 3 | pytest-timeout==2.3.1 4 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "buildroot"] 2 | path = buildroot 3 | url = https://github.com/home-assistant/buildroot.git 4 | branch = 2024.02.x-haos 5 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "github-actions" 4 | directory: "/" 5 | schedule: 6 | interval: "weekly" 7 | -------------------------------------------------------------------------------- /scripts/update-dtb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | ## OVA 4 | dtc -@ -I dts -O dtb -o buildroot-external/bootloader/barebox-state-efi.dtb buildroot-external/bootloader/barebox-state-efi.dts 5 | 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # OS generated files 2 | .DS_Store 3 | 4 | # Build output dirs 5 | /release 6 | output*/ 7 | 8 | # Certificates 9 | *.pem 10 | 11 | # vscode generated files 12 | .vscode* 13 | -------------------------------------------------------------------------------- /tests/.gitignore: -------------------------------------------------------------------------------- 1 | # QEMU images 2 | *.qcow2 3 | 4 | # Generated logs 5 | /junit_reports 6 | /lg_logs
7 | 8 | # Byte-compiled / optimized / DLL files 9 | __pycache__/ 10 | *.py[cod] 11 | *$py.class 12 | 13 | # Virtualenv 14 | venv -------------------------------------------------------------------------------- /scripts/check-kernel-patches.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | kernel_patches_with_version=$(find buildroot-external -type d -regextype sed -regex ".*/linux/[0-9\.]*") 4 | 5 | if [ -n "$kernel_patches_with_version" ]; then 6 | echo "" 7 | echo "WARNING: Kernel patch directories with kernel version found! Check if updates are needed." 8 | fi -------------------------------------------------------------------------------- /.github/workflows/label-actions.yml: -------------------------------------------------------------------------------- 1 | name: 'Label Actions' 2 | 3 | on: 4 | issues: 5 | types: [labeled, unlabeled] 6 | 7 | permissions: 8 | contents: read 9 | issues: write 10 | pull-requests: write 11 | discussions: write 12 | 13 | jobs: 14 | action: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: dessant/label-actions@9e5fd757ffe1e065abf55e9f74d899dbe012922a # v5.0.0 18 | -------------------------------------------------------------------------------- /tests/run_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | cd "$(dirname "$0")" 6 | 7 | if [ -z "$GITHUB_ACTIONS" ] && [ -z "$VIRTUAL_ENV" ]; then 8 | # Environment should be set up in separate GHA steps - which can also 9 | # handle caching of the dependencies, etc. 10 | python3 -m venv venv 11 | # shellcheck disable=SC1091 12 | source venv/bin/activate 13 | pip3 install -r requirements.txt 14 | fi 15 | 16 | pytest --lg-env qemu-strategy.yaml --lg-log=lg_logs --junitxml=junit_reports/tests.xml "$@" 17 | -------------------------------------------------------------------------------- /Documentation/kernel.md: -------------------------------------------------------------------------------- 1 | 2 | # Kernel Version 3 | 4 | | Board | Version | 5 | |-------|---------| 6 | | Open Virtual Appliance | 6.12.62 | 7 | | Raspberry Pi 3 | 6.12.47 | 8 | | Raspberry Pi 4 | 6.12.47 | 9 | | Raspberry Pi 5 | 6.12.47 | 10 | | Home Assistant Yellow | 6.12.47 | 11 | | Home Assistant Green | 6.12.62 | 12 | | ODROID-C2 | 6.12.62 | 13 | | ODROID-C4 | 6.12.62 | 14 | | ODROID-M1 | 6.12.62 | 15 | | ODROID-M1S | 6.12.62 | 16 | | ODROID-N2 | 6.12.62 | 17 | | Generic aarch64 | 6.12.62 | 18 | | Generic x86-64 | 6.12.62 | 19 | | Khadas VIM3 | 6.12.62 | 20 | -------------------------------------------------------------------------------- /.github/workflows/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name: Release Drafter 2 | 3 | on: 4 | push: 5 | branches: 6 | - dev 7 | - rel-* 8 | 9 | jobs: 10 | update_release_draft: 11 | permissions: 12 | contents: write # for release-drafter/release-drafter to create a github release 13 | pull-requests: read # for release-drafter/release-drafter to read PR content and labels 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: release-drafter/release-drafter@b1476f6e6eb133afa41ed8589daba6dc69b4d3f5 # v6.1.0 17 | env: 18 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 19 | -------------------------------------------------------------------------------- /Documentation/README.md: -------------------------------------------------------------------------------- 1 | # Documentation 2 | 3 | Documentation for the
Home Assistant Operating System is available at [developers.home-assistant.io/docs/operating-system/][docs]. 4 | If you want to contribute to this documentation, please refer to the [home-assistant/developers.home-assistant][docs-repo] repository. 5 | 6 | For the list of Linux kernel versions used currently in this branch of Home Assistant Operating System, see [kernel.md](./kernel.md). 7 | 8 | [docs]: https://developers.home-assistant.io/docs/operating-system/ 9 | [docs-repo]: https://github.com/home-assistant/developers.home-assistant/ 10 | -------------------------------------------------------------------------------- /.github/actions/haos-builder-command/action.yml: -------------------------------------------------------------------------------- 1 | name: "Run command in HAOS build container" 2 | inputs: 3 | image: 4 | description: "HAOS builder image to use" 5 | required: true 6 | command: 7 | description: "Command to run in the container" 8 | required: true 9 | runs: 10 | using: 'composite' 11 | steps: 12 | - name: "Run command in HAOS build container" 13 | shell: bash 14 | run: | 15 | docker run --rm --privileged \ 16 | -e BUILDER_UID="$(id -u)" \ 17 | -e BUILDER_GID="$(id -g)" \ 18 | -v "${GITHUB_WORKSPACE}:/build" \ 19 | -v "/mnt/cache:/cache" \ 20 | -v "/mnt/output:/build/output" \ 21 | ${{ inputs.image }} \ 22 | ${{ inputs.command }} 23 | -------------------------------------------------------------------------------- /.github/workflows/lock.yml: -------------------------------------------------------------------------------- 1 | name: Lock 2 | 3 | on: 4 | schedule: 5 | - cron: "45 5 * * *" 6 | 7 | permissions: 8 | discussions: write 9 | issues: write 10 | pull-requests: write 11 | 12 | jobs: 13 | lock: 14 | if: github.repository_owner == 'home-assistant' 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: dessant/lock-threads@7266a7ce5c1df01b1c6db85bf8cd86c737dadbe7 # v6.0.0 18 | with: 19 | github-token: ${{ github.token }} 20 | issue-inactive-days: "30" 21 | exclude-issue-created-before: "2025-01-01T00:00:00Z" 22 | issue-lock-reason: "" 23 | pr-inactive-days: "7" 24 | exclude-pr-created-before: "2025-01-01T00:00:00Z" 25 | pr-lock-reason: "" 26 | -------------------------------------------------------------------------------- /tests/qemu-strategy.yaml: -------------------------------------------------------------------------------- 1 | targets: 2 | main: 3 | resources: [] 4 | 5 | drivers: 6 | - QEMUDriver: 7 | qemu_bin: qemu-x86_64 8 | machine: pc 9 | cpu: qemu64 10 | memory: 1G 11 | extra_args: "-snapshot -accel kvm" 12 | nic: user,model=virtio-net-pci 13 | disk: disk-image 14 | bios: bios 15 | - CustomTimeoutShellDriver: 16 | login_prompt: 'homeassistant login: ' 17 | username: 'root' 18 | prompt: '# ' 19 | login_timeout: 300 20 | command_timeout: 300 21 | - QEMUShellStrategy: {} 22 | 23 | tools: 24 | qemu-x86_64: /usr/bin/qemu-system-x86_64 25 | 26 | images: 27 | disk-image: ./haos.qcow2 28 | bios: /usr/share/ovmf/OVMF.fd 29 | 30 | 31 | imports: 32 | - qemu_shell_strategy.py 33 | -------------------------------------------------------------------------------- /scripts/enter.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | BUILDER_UID="$(id -u)" 5 | BUILDER_GID="$(id -g)" 6 | CACHE_DIR="${CACHE_DIR:-$HOME/hassos-cache}" 7 | 8 | if [ "$BUILDER_UID" -eq "0" ] || [ "$BUILDER_GID" == "0" ]; then 9 | echo "ERROR: Please run this script as a regular (non-root) user with sudo privileges." 
10 | exit 1 11 | fi 12 | 13 | mkdir -p "${CACHE_DIR}" 14 | docker build -t hassos:local . 15 | 16 | if [ ! -f buildroot/Makefile ]; then 17 | # Initialize git submodule 18 | git submodule update --init 19 | fi 20 | 21 | if command -v losetup >/dev/null && [ ! -e /dev/loop0 ]; then 22 | # Make sure loop devices are present before starting the container 23 | sudo losetup -f > /dev/null 24 | fi 25 | 26 | docker run -it --rm --privileged \ 27 | -v "$(pwd):/build" -v "${CACHE_DIR}:/cache" \ 28 | -e BUILDER_UID="${BUILDER_UID}" -e BUILDER_GID="${BUILDER_GID}" \ 29 | hassos:local "${@:-bash}" 30 | -------------------------------------------------------------------------------- /scripts/entry.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | USER="root" 5 | 6 | # Run dockerd 7 | dockerd -s vfs &> /dev/null & 8 | 9 | 10 | # Setup local group, if not existing 11 | if [ "${BUILDER_GID:-0}" -ne 0 ] && ! getent group "${BUILDER_GID:-0}"; then 12 | groupadd -g "${BUILDER_GID}" builder 13 | fi 14 | 15 | # Setup local user 16 | if [ "${BUILDER_UID:-0}" -ne 0 ]; then 17 | useradd -m -u "${BUILDER_UID}" -g "${BUILDER_GID}" -G docker,sudo builder 18 | echo "builder ALL=(ALL:ALL) NOPASSWD: ALL" >> /etc/sudoers 19 | # Make sure cache is accessible by builder 20 | chown "${BUILDER_UID}:${BUILDER_GID}" /cache 21 | # Make sure output is accessible by builder (if anonymous volume is used) 22 | chown "${BUILDER_UID}:${BUILDER_GID}" /build/output || true 23 | USER="builder" 24 | fi 25 | 26 | if CMD="$(command -v "$1")"; then 27 | shift 28 | sudo -H -u ${USER} "$CMD" "$@" 29 | else 30 | echo "Command not found: $1" 31 | exit 1 32 | fi 33 | -------------------------------------------------------------------------------- /scripts/update-firmware-rpi.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | if [ -z "$1" ]; then 5 | echo "Need a commit ID!" 6 | exit 1 7 | fi 8 | echo "Use firmware: https://github.com/raspberrypi/firmware/archive/$1.tar.gz" 9 | 10 | if [ -z "$2" ] || ! [ -f "$2" ]; then 11 | echo "Need buildroot patch file!" 12 | exit 1 13 | fi 14 | 15 | patch -Rf -d buildroot -p 1 < "$2" 16 | 17 | rm -rf /tmp/rpi-firmware.tar.gz 18 | curl -Lo /tmp/rpi-firmware.tar.gz "https://github.com/raspberrypi/firmware/archive/$1.tar.gz" 19 | checksum="$(sha256sum /tmp/rpi-firmware.tar.gz | cut -d' ' -f 1)" 20 | rm -rf /tmp/rpi-firmware.tar.gz 21 | 22 | 23 | sed -i "s/+RPI_FIRMWARE_VERSION = [a-f0-9]*/+RPI_FIRMWARE_VERSION = $1/g" "$2" 24 | sed -i "s/+sha256\s*[a-f0-9]*\s*rpi-firmware-[a-f0-9]*.tar.gz/+sha256 $checksum rpi-firmware-$1.tar.gz/g" "$2" 25 | 26 | patch -d buildroot -p 1 < "$2" 27 | git commit -m "RaspberryPi: Update firmware $1" "$2" buildroot/package/rpi-firmware 28 | -------------------------------------------------------------------------------- /scripts/update-kernel-rpi.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | if [ -z "$1" ]; then 5 | echo "Need a commit ID!" 6 | exit 1 7 | fi 8 | 9 | if [ -z "$2" ]; then 10 | echo "Need a kernel version!" 
11 | exit 1 12 | fi 13 | 14 | defconfigs=(buildroot-external/configs/{rpi*,yellow}_defconfig) 15 | sed -i "s|BR2_LINUX_KERNEL_CUSTOM_TARBALL_LOCATION=\"https://github.com/raspberrypi/linux/.*\"|BR2_LINUX_KERNEL_CUSTOM_TARBALL_LOCATION=\"https://github.com/raspberrypi/linux/archive/$1.tar.gz\"|g" "${defconfigs[@]}" 16 | sed -i "s/| \(Raspberry Pi.*\|Home Assistant Yellow\) | .* |/| \1 | $2 |/g" Documentation/kernel.md 17 | git commit -m "RaspberryPi: Update kernel to $2 - $1" "${defconfigs[@]}" Documentation/kernel.md 18 | 19 | ./scripts/check-kernel-patches.sh 20 | 21 | echo 22 | echo "WARNING: bumping RPi kernel usually requires bump of rpi-firmware" 23 | echo "package to version from the corresponding branch in raspberrypi/firmware" 24 | echo "repository (which is usually the stable branch), namely because the DT" 25 | echo "overlays are copied from this repository" 26 | -------------------------------------------------------------------------------- /.github/workflows/pr-checks.yml: -------------------------------------------------------------------------------- 1 | # Home Assistant Operating System pull-request checks 2 | 3 | name: PR checks 4 | 5 | on: [pull_request] 6 | 7 | jobs: 8 | linters: 9 | runs-on: ubuntu-22.04 10 | steps: 11 | - name: Install additional dependencies 12 | run: | 13 | sudo apt-get update 14 | sudo apt-get -y install \ 15 | python3-flake8 16 | 17 | - name: Check out code 18 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 19 | with: 20 | submodules: true 21 | 22 | - name: Check Dockerfile 23 | uses: hadolint/hadolint-action@2332a7b74a6de0dda2e2221d575162eba76ba5e5 # v3.3.0 24 | with: 25 | dockerfile: Dockerfile 26 | 27 | - name: Check shell scripts 28 | uses: ludeeus/action-shellcheck@00cae500b08a931fb5698e11e79bfbd38e612a38 # 2.0.0 29 | with: 30 | ignore_paths: buildroot 31 | 32 | - name: Check buildroot-external packages 33 | run: | 34 | buildroot/utils/check-package --exclude PackageHeader --exclude Upstream --br2-external buildroot-external/package/*/* 35 | -------------------------------------------------------------------------------- /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name-template: Home Assistant OS $RESOLVED_VERSION 2 | tag-template: $RESOLVED_VERSION 3 | version-template: "$MAJOR.$MINOR" 4 | categories: 5 | - title: 'Home Assistant Operating System' 6 | label: 'os' 7 | - title: 'Raspberry Pi' 8 | label: 'board/raspberrypi' 9 | - title: 'Home Assistant Yellow' 10 | label: 'board/yellow' 11 | - title: 'Home Assistant Green' 12 | label: 'board/green' 13 | - title: 'Open Virtual Appliance' 14 | label: 'board/ova' 15 | - title: 'Generic x86-64' 16 | label: 'board/generic-x86-64' 17 | - title: 'Hardkernel ODROID' 18 | label: 'board/odroid' 19 | - title: 'Khadas VIM Series' 20 | label: 'board/khadas' 21 | - title: 'Generic aarch64' 22 | label: 'board/generic-aarch64' 23 | - title: 'Documentation' 24 | label: 'documentation' 25 | - title: 'Build System' 26 | label: 'build' 27 | - title: 'Dependencies' 28 | label: 'dependencies' 29 | filter-by-commitish: true 30 | template: | 31 | ## Changes 32 | 33 | $CHANGES 34 | version-resolver: 35 | major: 36 | labels: 37 | - 'major' 38 | default: minor 39 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: 
Report incorrect or missing information on our documentation 4 | url: https://github.com/home-assistant/home-assistant.io/issues 5 | about: Our documentation has its own issue tracker. Please report issues with the website there. 6 | 7 | - name: Report incorrect or missing information on our developer documentation 8 | url: https://github.com/home-assistant/developers.home-assistant/issues 9 | about: Our developer documentation has its own issue tracker. Please report issues with the website there. 10 | 11 | - name: Request a feature for the Operating System 12 | url: https://github.com/orgs/home-assistant/discussions 13 | about: Request a new feature for the Operating System. 14 | 15 | - name: I have a question or need support 16 | url: https://www.home-assistant.io/help 17 | about: We use GitHub for tracking bugs, check our website for resources on getting help. 18 | 19 | - name: I'm unsure where to go? 20 | url: https://www.home-assistant.io/join-chat 21 | about: If you are unsure where to go, then joining our chat is recommended; just ask! 22 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:bullseye 2 | 3 | # Set shell 4 | SHELL ["/bin/bash", "-o", "pipefail", "-c"] 5 | 6 | # Docker 7 | RUN apt-get update && apt-get install -y --no-install-recommends \ 8 | apt-transport-https \ 9 | ca-certificates \ 10 | curl \ 11 | gpg-agent \ 12 | gpg \ 13 | dirmngr \ 14 | software-properties-common \ 15 | && curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/trusted.gpg.d/docker.gpg \ 16 | && echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/trusted.gpg.d/docker.gpg] \ 17 | https://download.docker.com/linux/debian $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list \ 18 | && apt-get update && apt-get install -y --no-install-recommends \ 19 | docker-ce \ 20 | && rm -rf /var/lib/apt/lists/* 21 | 22 | # Build tools 23 | RUN apt-get update && apt-get install -y --no-install-recommends \ 24 | automake \ 25 | bash \ 26 | bc \ 27 | binutils \ 28 | build-essential \ 29 | bzip2 \ 30 | cpio \ 31 | file \ 32 | git \ 33 | graphviz \ 34 | help2man \ 35 | jq \ 36 | make \ 37 | ncurses-dev \ 38 | openssh-client \ 39 | patch \ 40 | perl \ 41 | pigz \ 42 | python3 \ 43 | python3-matplotlib \ 44 | python-is-python3 \ 45 | qemu-utils \ 46 | rsync \ 47 | skopeo \ 48 | sudo \ 49 | texinfo \ 50 | unzip \ 51 | vim \ 52 | wget \ 53 | zip \ 54 | && rm -rf /var/lib/apt/lists/* 55 | 56 | # Init entry 57 | COPY scripts/entry.sh /usr/sbin/ 58 | ENTRYPOINT ["/usr/sbin/entry.sh"] 59 | 60 | # Get buildroot 61 | WORKDIR /build 62 | -------------------------------------------------------------------------------- /scripts/update-kernel-upstream.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | if [ -z "$1" ]; then 5 | echo "Need a kernel version!" 6 | exit 1 7 | fi 8 | 9 | # assume the version is the same in all defconfigs, take ova as the reference 10 | current_version=$(grep 'BR2_LINUX_KERNEL_CUSTOM_VERSION_VALUE' buildroot-external/configs/ova_defconfig | cut -d '"' -f 2) 11 | 12 | # get X.Y.Z tokens of the current and new version 13 | IFS='.' read -r -a current_version_parts <<< "$current_version" 14 | IFS='.'
read -r -a new_version_parts <<< "$1" 15 | 16 | 17 | defconfigs=(buildroot-external/configs/{generic_aarch64,generic_x86_64,ova,odroid_*,khadas_vim3,green}_defconfig) 18 | sed -i "s/BR2_LINUX_KERNEL_CUSTOM_VERSION_VALUE=\".*\"/BR2_LINUX_KERNEL_CUSTOM_VERSION_VALUE=\"$1\"/g" "${defconfigs[@]}" 19 | sed -i "s/| \(Open Virtual Appliance\|Generic aarch64\|Generic x86-64\|ODROID-.*\|Khadas VIM3\|Home Assistant Green\) | .* |/| \1 | $1 |/g" Documentation/kernel.md 20 | 21 | commit_message="Linux: Update kernel to $1" 22 | 23 | # get links to changelog if we're only updating the Z part of the version 24 | if [ "${current_version_parts[0]}" == "${new_version_parts[0]}" ] && \ 25 | [ "${current_version_parts[1]}" == "${new_version_parts[1]}" ] && \ 26 | [ "${current_version_parts[2]}" -lt "${new_version_parts[2]}" ]; then 27 | 28 | commit_message="$commit_message"$'\n\n' 29 | 30 | # loop from the current Z + 1 to the new Z 31 | for (( z = current_version_parts[2] + 1; z <= new_version_parts[2]; z++ )); do 32 | next_version="${current_version_parts[0]}.${current_version_parts[1]}.$z" 33 | commit_message="${commit_message}* https://cdn.kernel.org/pub/linux/kernel/v${current_version_parts[0]}.x/ChangeLog-${next_version}"$'\n' 34 | done 35 | 36 | # remove trailing newline 37 | commit_message=$(echo -n "$commit_message") 38 | fi 39 | 40 | git commit -m "$commit_message" "${defconfigs[@]}" Documentation/kernel.md 41 | 42 | ./scripts/check-kernel-patches.sh 43 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | 5 | from labgrid.driver import ShellDriver 6 | import pytest 7 | 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | 12 | @pytest.fixture(scope="function") 13 | def without_internet(strategy): 14 | default_nic = strategy.qemu.nic 15 | if strategy.status.name == "shell": 16 | strategy.transition("off") 17 | strategy.qemu.nic = "user,net=192.168.76.0/24,dhcpstart=192.168.76.10,restrict=yes" 18 | strategy.transition("shell") 19 | yield 20 | strategy.transition("off") 21 | strategy.qemu.nic = default_nic 22 | 23 | 24 | @pytest.fixture(autouse=True, scope="module") 25 | def restart_qemu(strategy): 26 | """Use fresh QEMU instance for each module.""" 27 | if strategy.status.name == "shell": 28 | logger.info("Restarting QEMU before %s module tests.", strategy.target.name) 29 | strategy.transition("off") 30 | strategy.transition("shell") 31 | 32 | 33 | @pytest.hookimpl 34 | def pytest_runtest_setup(item): 35 | log_dir = item.config.option.lg_log 36 | 37 | if not log_dir: 38 | return 39 | 40 | logging_plugin = item.config.pluginmanager.get_plugin("logging-plugin") 41 | log_name = item.nodeid.replace(".py::", "/") 42 | logging_plugin.set_log_path(os.path.join(log_dir, f"{log_name}.log")) 43 | 44 | 45 | @pytest.fixture 46 | def shell(target, strategy) -> ShellDriver: 47 | """Fixture for accessing shell.""" 48 | strategy.transition("shell") 49 | shell = target.get_driver("ShellDriver") 50 | return shell 51 | 52 | 53 | @pytest.fixture 54 | def shell_json(target, strategy) -> callable: 55 | """Fixture for running CLI commands returning JSON string as output.""" 56 | strategy.transition("shell") 57 | shell = target.get_driver("ShellDriver") 58 | 59 | def get_json_response(command, *, timeout=None) -> dict: 60 | return json.loads("\n".join(shell.run_check(command, timeout=timeout))) 61 | 62 | return get_json_response 63 | 
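# Usage sketch (illustrative only, not part of the repository sources): a test
# module under smoke_test/ or supervisor_test/ can request the fixtures above
# directly; the test name and assertions here are assumptions for illustration.

def test_fixture_usage_sketch(shell, shell_json):
    # run a plain command in the QEMU guest through the shell fixture;
    # run_check returns the command output as a list of lines
    hostname = shell.run_check("hostname")
    assert "homeassistant" in " ".join(hostname)

    # run a CLI command that emits JSON and work with the parsed dict
    os_info = shell_json("ha os info --no-progress --raw-json")
    assert "data" in os_info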
-------------------------------------------------------------------------------- /.github/workflows/matrix.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "id": "ova", 4 | "defconfig": "ova", 5 | "architecture": "x86-64", 6 | "label": "board/ova" 7 | }, 8 | { 9 | "id": "generic-x86-64", 10 | "defconfig": "generic_x86_64", 11 | "architecture": "x86-64", 12 | "label": "board/generic-x86-64" 13 | }, 14 | { 15 | "id": "generic-aarch64", 16 | "defconfig": "generic_aarch64", 17 | "architecture": "aarch64", 18 | "label": "board/generic-aarch64" 19 | }, 20 | { 21 | "id": "khadas-vim3", 22 | "defconfig": "khadas_vim3", 23 | "architecture": "aarch64", 24 | "label": "board/khadas" 25 | }, 26 | { 27 | "id": "odroid-c2", 28 | "defconfig": "odroid_c2", 29 | "architecture": "aarch64", 30 | "label": "board/odroid" 31 | }, 32 | { 33 | "id": "odroid-c4", 34 | "defconfig": "odroid_c4", 35 | "architecture": "aarch64", 36 | "label": "board/odroid" 37 | }, 38 | { 39 | "id": "odroid-m1", 40 | "defconfig": "odroid_m1", 41 | "architecture": "aarch64", 42 | "label": "board/odroid" 43 | }, 44 | { 45 | "id": "odroid-m1s", 46 | "defconfig": "odroid_m1s", 47 | "architecture": "aarch64", 48 | "label": "board/odroid" 49 | }, 50 | { 51 | "id": "odroid-n2", 52 | "defconfig": "odroid_n2", 53 | "architecture": "aarch64", 54 | "label": "board/odroid" 55 | }, 56 | { 57 | "id": "rpi3-64", 58 | "defconfig": "rpi3_64", 59 | "architecture": "aarch64", 60 | "label": "board/raspberrypi" 61 | }, 62 | { 63 | "id": "rpi4-64", 64 | "defconfig": "rpi4_64", 65 | "architecture": "aarch64", 66 | "label": "board/raspberrypi" 67 | }, 68 | { 69 | "id": "rpi5-64", 70 | "defconfig": "rpi5_64", 71 | "architecture": "aarch64", 72 | "label": "board/raspberrypi" 73 | }, 74 | { 75 | "id": "yellow", 76 | "defconfig": "yellow", 77 | "architecture": "aarch64", 78 | "label": "board/yellow" 79 | }, 80 | { 81 | "id": "green", 82 | "defconfig": "green", 83 | "architecture": "aarch64", 84 | "label": "board/green" 85 | } 86 | ] 87 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: Stale 2 | 3 | # yamllint disable-line rule:truthy 4 | on: 5 | schedule: 6 | - cron: "40 5 * * *" 7 | workflow_dispatch: 8 | 9 | jobs: 10 | stale: 11 | permissions: 12 | issues: write # for actions/stale to close stale issues 13 | pull-requests: write # for actions/stale to close stale PRs 14 | runs-on: ubuntu-latest 15 | steps: 16 | # The 90 day stale policy 17 | # Used for: 18 | # - Issues & PRs 19 | # - No PRs marked as no-stale or pinned 20 | # - No issues marked as no-stale, help-wanted or pinned 21 | - name: 90 days stale issues & PRs policy 22 | uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1 23 | with: 24 | repo-token: ${{ secrets.GITHUB_TOKEN }} 25 | days-before-stale: 90 26 | days-before-close: 7 27 | operations-per-run: 150 28 | remove-stale-when-updated: true 29 | stale-issue-label: "stale" 30 | exempt-issue-labels: "no-stale,help-wanted,pinned,enhancement" 31 | stale-issue-message: > 32 | There hasn't been any activity on this issue recently. To keep our 33 | backlog manageable we have to clean old issues, as many of them have 34 | already been resolved with the latest updates. 35 | 36 | Please make sure to update to the latest Home Assistant OS version and 37 | check if that solves the issue. 
Let us know if that works for you by 38 | adding a comment 👍 39 | 40 | This issue has now been marked as stale and will be closed if no 41 | further activity occurs. Thank you for your contributions. 42 | 43 | stale-pr-label: "stale" 44 | exempt-pr-labels: "no-stale,pinned" 45 | stale-pr-message: > 46 | There hasn't been any activity on this pull request recently. This 47 | pull request has been automatically marked as stale because of that 48 | and will be closed if no further activity occurs within 7 days. 49 | 50 | Thank you for your contributions. 51 | -------------------------------------------------------------------------------- /.github/label-actions.yml: -------------------------------------------------------------------------------- 1 | assume-fixed: 2 | comment: > 3 | :wave: @{issue-author}, thanks for reporting an issue! 4 | 5 | 6 | This issue is assumed to be fixed in the latest stable release. Please 7 | reopen in case you can still reproduce the issue with the latest stable 8 | release. You can find the latest stable release at 9 | https://github.com/home-assistant/operating-system/releases/latest 10 | close: true 11 | close-reason: not planned 12 | 13 | core-issue: 14 | comment: > 15 | :wave: @{issue-author}, thanks for reporting an issue! 16 | 17 | 18 | It looks like this issue is related to Home Assistant Core. Please check 19 | the [Home Assistant Core](https://github.com/home-assistant/core/issues) 20 | repository, the issue might have been reported already. Open a new issue 21 | in that repository if you can't find a matching issue. 22 | close: true 23 | close-reason: not planned 24 | 25 | frontend-issue: 26 | comment: > 27 | :wave: @{issue-author}, thanks for reporting an issue! 28 | 29 | 30 | It looks like this issue is related to Home Assistant Frontend. Please 31 | check the [Home Assistant Frontend](https://github.com/home-assistant/frontend/issues) 32 | repository, the issue might have been reported already. Open a new issue 33 | in that repository if you can't find a matching issue. 34 | close: true 35 | close-reason: not planned 36 | 37 | supervisor-issue: 38 | comment: > 39 | :wave: @{issue-author}, thanks for reporting an issue! 40 | 41 | 42 | It looks like this issue is related to Home Assistant Supervisor. Please 43 | check the [Home Assistant Supervisor](https://github.com/home-assistant/supervisor/issues) 44 | repository, the issue might have been reported already. Open a new issue 45 | in that repository if you can't find a matching issue. 46 | close: true 47 | close-reason: not planned 48 | 49 | new-feature: 50 | comment: > 51 | :wave: @{issue-author}, thanks for your input! 52 | 53 | We use this issue tracker to track issues of currently supported features. 54 | Your request appears to request a new feature. We track potential new 55 | features in the [Feature Request section of our Community Forum](https://community.home-assistant.io/c/feature-requests/13). 56 | Please check if someone already requested a similar feature, or create 57 | a new feature request with the "haos" tag in that forum. Thank you! 
58 | close: true 59 | close-reason: not planned 60 | -------------------------------------------------------------------------------- /tests/qemu_shell_strategy.py: -------------------------------------------------------------------------------- 1 | import enum 2 | import os 3 | 4 | import attr 5 | 6 | from labgrid import target_factory, step 7 | from labgrid.driver import ShellDriver 8 | from labgrid.strategy import Strategy, StrategyError 9 | 10 | 11 | class Status(enum.Enum): 12 | unknown = 0 13 | off = 1 14 | shell = 2 15 | 16 | 17 | @target_factory.reg_driver 18 | @attr.s(eq=False) 19 | class CustomTimeoutShellDriver(ShellDriver): 20 | """ShellDriver with a config-customizable timeout for run and run_check.""" 21 | command_timeout = attr.ib(default=30, validator=attr.validators.instance_of(int)) 22 | 23 | def run(self, cmd: str, *, timeout=None, codec="utf-8", decodeerrors="strict"): 24 | return super().run(cmd, timeout=timeout or self.command_timeout, codec=codec, decodeerrors=decodeerrors) 25 | 26 | def run_check(self, cmd: str, *, timeout=None, codec="utf-8", decodeerrors="strict"): 27 | return super().run_check(cmd, timeout=timeout or self.command_timeout, codec=codec, decodeerrors=decodeerrors) 28 | 29 | 30 | @target_factory.reg_driver 31 | @attr.s(eq=False) 32 | class QEMUShellStrategy(Strategy): 33 | """Strategy for starting a QEMU VM and running shell commands within it.""" 34 | 35 | bindings = { 36 | "qemu": "QEMUDriver", 37 | "shell": "CustomTimeoutShellDriver", 38 | } 39 | 40 | status = attr.ib(default=Status.unknown) 41 | 42 | def __attrs_post_init__(self): 43 | super().__attrs_post_init__() 44 | if "-accel kvm" in self.qemu.extra_args and os.environ.get("NO_KVM"): 45 | self.qemu.extra_args = self.qemu.extra_args.replace( 46 | "-accel kvm", "" 47 | ).strip() 48 | 49 | @step(args=["status"]) 50 | def transition(self, status, *, step): # pylint: disable=redefined-outer-name 51 | if not isinstance(status, Status): 52 | status = Status[status] 53 | if status == Status.unknown: 54 | raise StrategyError(f"can not transition to {status}") 55 | elif status == self.status: 56 | step.skip("nothing to do") 57 | return # nothing to do 58 | elif status == Status.off: 59 | self.target.deactivate(self.qemu) 60 | self.target.deactivate(self.shell) 61 | elif status == Status.shell: 62 | self.target.activate(self.qemu) 63 | self.qemu.on() 64 | self.target.activate(self.shell) 65 | else: 66 | raise StrategyError(f"no transition found from {self.status} to {status}") 67 | self.status = status 68 | -------------------------------------------------------------------------------- /tests/smoke_test/test_offline.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from time import sleep 3 | 4 | import pytest 5 | from labgrid.driver import ExecutionError 6 | 7 | _LOGGER = logging.getLogger(__name__) 8 | 9 | 10 | def _check_connectivity(shell, *, connected): 11 | for target in ["home-assistant.io", "1.1.1.1"]: 12 | try: 13 | output = shell.run_check(f"ping {target}") 14 | if f"{target} is alive!" 
in output: 15 | if connected: 16 | return True 17 | else: 18 | raise AssertionError(f"expecting disconnected but {target} is alive") 19 | except ExecutionError as exc: 20 | if not connected: 21 | stdout = "\n".join(exc.stdout) 22 | assert ("Network is unreachable" in stdout 23 | or "bad address" in stdout 24 | or "No response" in stdout) 25 | 26 | if connected: 27 | raise AssertionError("expecting connected but all targets are down") 28 | 29 | 30 | @pytest.mark.timeout(120) 31 | @pytest.mark.usefixtures("without_internet") 32 | def test_ha_runs_offline(shell): 33 | def check_container_running(container_name): 34 | out = shell.run_check( 35 | f"docker container inspect -f '{{{{.State.Status}}}}' {container_name} || true" 36 | ) 37 | return "running" in out 38 | 39 | # wait for supervisor to create network 40 | while True: 41 | if check_container_running("hassio_supervisor"): 42 | nm_conns = shell.run_check('nmcli con show') 43 | if "Supervisor" in " ".join(nm_conns): 44 | break 45 | sleep(1) 46 | 47 | # To simulate a situation where HAOS is not connected to the internet, we need 48 | # to add a default gateway to the Supervisor connection. So we add a default route 49 | # to a non-existing IP address in the VM's subnet. Maybe there is a better way? 50 | shell.run_check('nmcli con modify "Supervisor enp0s3" ipv4.addresses "192.168.76.10/24" ' 51 | '&& nmcli con modify "Supervisor enp0s3" ipv4.gateway 192.168.76.1 ' 52 | '&& nmcli device reapply enp0s3') 53 | 54 | _check_connectivity(shell, connected=False) 55 | 56 | for _ in range(60): 57 | if check_container_running("homeassistant") and check_container_running("hassio_cli"): 58 | break 59 | sleep(1) 60 | else: 61 | shell.run_check("docker logs hassio_supervisor") 62 | raise AssertionError("homeassistant or hassio_cli not running after 60s") 63 | 64 | web_index = shell.run_check("curl http://localhost:8123") 65 | assert "<title>Home Assistant</title>" in " ".join(web_index) -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | BUILDDIR:=$(shell pwd) 2 | 3 | BUILDROOT=$(BUILDDIR)/buildroot 4 | BUILDROOT_EXTERNAL=$(BUILDDIR)/buildroot-external 5 | DEFCONFIG_DIR = $(BUILDROOT_EXTERNAL)/configs 6 | 7 | TARGETS := $(notdir $(patsubst %_defconfig,%,$(wildcard $(DEFCONFIG_DIR)/*_defconfig))) 8 | TARGETS_CONFIG := $(notdir $(patsubst %_defconfig,%-config,$(wildcard $(DEFCONFIG_DIR)/*_defconfig))) 9 | 10 | # Set O variable if not already done on the command line 11 | ifneq ("$(origin O)", "command line") 12 | O := $(BUILDDIR)/output 13 | else 14 | override O := $(BUILDDIR)/$(O) 15 | endif 16 | 17 | ################################################################################ 18 | 19 | SILENT := $(findstring s,$(word 1, $(MAKEFLAGS))) 20 | 21 | define print 22 | $(if $(SILENT),,$(info $1)) 23 | endef 24 | 25 | COLOR_STEP := $(shell tput smso 2>/dev/null) 26 | COLOR_WARN := $(shell (tput setab 3; tput setaf 0) 2>/dev/null) 27 | TERM_RESET := $(shell tput sgr0 2>/dev/null) 28 | 29 | ################################################################################ 30 | 31 | .NOTPARALLEL: $(TARGETS) $(TARGETS_CONFIG) default 32 | 33 | .PHONY: $(TARGETS) $(TARGETS_CONFIG) default buildroot-help help 34 | 35 | # fallback target used when a target not defined here is given 36 | .DEFAULT: 37 | $(call print,$(COLOR_STEP)=== Falling back to Buildroot target '$@' ===$(TERM_RESET)) 38 | $(MAKE) -C $(BUILDROOT) O=$(O) BR2_EXTERNAL=$(BUILDROOT_EXTERNAL) "$@" 39 | 40 | # default
target when no target is given - must be first in Makefile 41 | default: 42 | $(MAKE) -C $(BUILDROOT) O=$(O) BR2_EXTERNAL=$(BUILDROOT_EXTERNAL) 43 | 44 | $(TARGETS_CONFIG): %-config: 45 | @if [ -f $(O)/.config ] && ! grep -q 'BR2_DEFCONFIG="$(DEFCONFIG_DIR)/$*_defconfig"' $(O)/.config; then \ 46 | echo "$(COLOR_WARN)WARNING: Output directory '$(O)' already contains files for another target!$(TERM_RESET)"; \ 47 | echo " Before running build for a different target, run 'make distclean' first."; \ 48 | echo ""; \ 49 | bash -c 'read -t 10 -p "Waiting 10s, press enter to continue or Ctrl-C to abort..."' || true; \ 50 | fi 51 | $(call print,$(COLOR_STEP)=== Using $*_defconfig ===$(TERM_RESET)) 52 | $(MAKE) -C $(BUILDROOT) O=$(O) BR2_EXTERNAL=$(BUILDROOT_EXTERNAL) "$*_defconfig" 53 | 54 | $(TARGETS): %: %-config 55 | $(call print,$(COLOR_STEP)=== Building $@ ===$(TERM_RESET)) 56 | $(MAKE) -C $(BUILDROOT) O=$(O) BR2_EXTERNAL=$(BUILDROOT_EXTERNAL) 57 | 58 | buildroot-help: 59 | $(MAKE) -C $(BUILDROOT) O=$(O) BR2_EXTERNAL=$(BUILDROOT_EXTERNAL) help 60 | 61 | help: 62 | @echo "Run 'make <target>' to build a target image." 63 | @echo "Run 'make <target>-config' to configure buildroot for a target." 64 | @echo "" 65 | @echo "Supported targets: $(TARGETS)" 66 | @echo "" 67 | @echo "Unknown Makefile targets fall back to Buildroot make - for details run 'make buildroot-help'" 68 | -------------------------------------------------------------------------------- /.os-artifacts/index.html: -------------------------------------------------------------------------------- [static HTML page; the markup was stripped during extraction. Only the page title and heading "Home Assistant OS - development builds" are recoverable; the inline style block (original lines 6-15) and the script block (original lines 21-80) are elided.]

-------------------------------------------------------------------------------- /.github/workflows/test.yaml: -------------------------------------------------------------------------------- 1 | name: Test HAOS image 2 | run-name: "Test HAOS ${{ inputs.version || format('(OS build #{0})', github.event.workflow_run.run_number) }}" 3 | 4 | on: 5 | # Manual test of specified version 6 | workflow_dispatch: 7 | inputs: 8 | version: 9 | description: Version of HAOS to test 10 | required: true 11 | type: string 12 | 13 | # Called by other workflows (e.g. build.yaml) 14 | workflow_call: 15 | inputs: 16 | use-artifact: 17 | # Workaround for GH weirdness: https://github.com/actions/runner/discussions/1884 18 | description: Download OS image using actions/download-artifact 19 | required: false 20 | type: boolean 21 | default: true 22 | version: 23 | description: Version of HAOS to test (as used in the name of the qcow2 image artifact) 24 | required: true 25 | type: string 26 | 27 | jobs: 28 | test: 29 | if: ${{ github.event_name != 'workflow_run' || github.event.workflow_run.conclusion == 'success' }} 30 | 31 | name: Test in QEMU 32 | runs-on: ubuntu-22.04 33 | steps: 34 | - name: Checkout source 35 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 36 | with: 37 | persist-credentials: false 38 | 39 | - name: Install system dependencies 40 | run: | 41 | sudo apt update 42 | sudo apt install -y qemu-system-x86 ovmf 43 | 44 | - name: Setup Python 45 | uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 46 | with: 47 | python-version: 3.12 48 | 49 | - name: Install Python requirements 50 | run: 51 | pip install -r tests/requirements.txt 52 | 53 | - name: Download HAOS image 54 | if: ${{ !inputs.use-artifact }} 55 | run: | 56 | curl -sfL -o haos.qcow2.xz https://os-artifacts.home-assistant.io/${{github.event.inputs.version}}/haos_ova-${{github.event.inputs.version}}.qcow2.xz 57 | 58 | - name: Get OS image artifact 59 | if: ${{ inputs.use-artifact }} 60 | uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 61 | with: 62 | name: haos_ova-${{ inputs.version }}.qcow2.xz 63 | 64 | - name: Extract OS image 65 | run: | 66 | xz -dc haos*.qcow2.xz > tests/haos.qcow2 67 | rm haos*.qcow2.xz 68 | 69 | - name: Enable KVM group perms 70 | run: | 71 | echo 'KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm"' | sudo tee /etc/udev/rules.d/99-kvm4all.rules 72 | sudo udevadm control --reload-rules 73 | sudo udevadm trigger --name-match=kvm 74 | 75 | - name: Run tests 76 | run: | 77 | ./tests/run_tests.sh --durations=0 --durations-min=5.0 78 | 79 | - name: Archive logs 80 | uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 81 | if: always() 82 | with: 83 | name: logs 84 | path: | 85 | tests/lg_logs/** 86 | 87 | - name: Archive JUnit reports 88 | uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 89 | if: always() 90 | with: 91 | name: junit_reports 92 | path: | 93 | tests/junit_reports/*.xml 94 | 95 | - name: Publish test report 96 | uses: mikepenz/action-junit-report@e08919a3b1fb83a78393dfb775a9c37f17d8eea6 # v6.0.1 97 | if: always() 98 | with: 99 | report_paths: 'tests/junit_reports/*.xml' 100 | annotate_only: true 101 | detailed_summary: true 102 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Home Assistant Operating
System 2 | 3 | Home Assistant Operating System (formerly HassOS) is a Linux-based operating system optimized to host [Home Assistant](https://www.home-assistant.io) and its [Add-ons](https://www.home-assistant.io/addons/). 4 | 5 | Home Assistant Operating System uses Docker as its container engine. By default it deploys the Home Assistant Supervisor as a container. Home Assistant Supervisor in turn uses the Docker container engine to control Home Assistant Core and Add-Ons in separate containers. Home Assistant Operating System is **not** based on a regular Linux distribution like Ubuntu. It is built using [Buildroot](https://buildroot.org/) and it is optimized to run Home Assistant. It targets single-board computer (SBC) devices like the Raspberry Pi or ODROID but also supports x86-64 systems with UEFI. 6 | 7 | [![Home Assistant - A project from the Open Home Foundation](https://www.openhomefoundation.org/badges/home-assistant.png)](https://www.openhomefoundation.org/) 8 | 9 | ## Features 10 | 11 | - Lightweight and memory-efficient 12 | - Minimized I/O 13 | - Over The Air (OTA) updates 14 | - Offline updates 15 | - Modular using Docker container engine 16 | 17 | ## Supported hardware 18 | 19 | The list of supported hardware is defined by [ADR-0015](https://github.com/home-assistant/architecture/blob/master/adr/0015-home-assistant-os.md). 20 | Every new hardware addition must meet at least the requirements defined in [ADR-0017](https://github.com/home-assistant/architecture/blob/master/adr/0017-hardware-screening-os.md) and pass through an architecture design proposal. 21 | 22 | For documentation explaining details of the individual supported boards, see the [Board support](https://developers.home-assistant.io/docs/operating-system/boards/overview) section of the Home Assistant Developer Docs. 23 | 24 | ## Getting Started 25 | 26 | If you just want to use Home Assistant, the official [getting started guide](https://www.home-assistant.io/getting-started/) and [installation instructions](https://www.home-assistant.io/hassio/installation/) take you through how to download Home Assistant Operating System and get it running on your machine. 27 | 28 | If you're interested in finding out more about Home Assistant Operating System and how it works, read on... 29 | 30 | ## Development 31 | 32 | If you don't have experience with embedded systems, Buildroot or the build process for Linux distributions, it is recommended to read up on these topics first (e.g. [Bootlin](https://bootlin.com/docs/) has excellent resources). 33 | 34 | The Home Assistant Operating System documentation can be found on the [Home Assistant Developer Docs website](https://developers.home-assistant.io/docs/operating-system).
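A typical local build session looks roughly like this (a minimal sketch; `ova` is just one example target, any board with a `<board>_defconfig` under `buildroot-external/configs` works the same way):

```sh
# Enter the containerized build environment: builds the hassos:local image
# and mounts the checkout at /build (see scripts/enter.sh)
./scripts/enter.sh

# Inside the container, configure and build one target, e.g. the Open
# Virtual Appliance image; build output lands in output/ by default
make ova
```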
35 | 36 | ### Components 37 | 38 | - **Bootloader:** 39 | - [GRUB](https://www.gnu.org/software/grub/) for devices that support UEFI 40 | - [U-Boot](https://www.denx.de/wiki/U-Boot) for devices that don't support UEFI 41 | - **Operating System:** 42 | - [Buildroot](https://buildroot.org/) LTS Linux 43 | - **File Systems:** 44 | - [SquashFS](https://www.kernel.org/doc/Documentation/filesystems/squashfs.txt) for read-only file systems (using LZ4 compression) 45 | - [ZRAM](https://www.kernel.org/doc/Documentation/blockdev/zram.txt) for `/tmp`, `/var` and swap (using LZ4 compression) 46 | - **Container Platform:** 47 | - [Docker Engine](https://docs.docker.com/engine/) for running Home Assistant components in containers 48 | - **Updates:** 49 | - [RAUC](https://rauc.io/) for Over The Air (OTA) and USB updates 50 | - **Security:** 51 | - [AppArmor](https://apparmor.net/) Linux kernel security module 52 | 53 | ### Development builds 54 | 55 | The Development build GitHub Action Workflow is a manually triggered workflow 56 | which creates Home Assistant OS development builds. The development builds are 57 | available at [https://os-artifacts.home-assistant.io/index.html](https://os-artifacts.home-assistant.io/index.html). 58 | -------------------------------------------------------------------------------- /.github/actions/bump-rpi-imager-version/action.yml: -------------------------------------------------------------------------------- 1 | name: 'Bump RPi Imager OS version' 2 | description: 'Bump version of Home Assistant OS in RPi Imager' 3 | inputs: 4 | version: 5 | required: true 6 | description: "Version of Home Assistant OS to bump to." 7 | release-date: 8 | required: true 9 | description: "Release date as ISO 8601 date string." 10 | runs: 11 | using: "composite" 12 | steps: 13 | - shell: bash 14 | id: validate-input 15 | env: 16 | INPUTS_DATE: ${{ inputs.release-date }} 17 | run: | 18 | if [[ -z "$INPUTS_DATE" ]] || [[ ! "$INPUTS_DATE" =~ ^([0-9]{4})-([0-9]{2})-([0-9]{2})T([0-9]{2}):([0-9]{2}):([0-9]{2})Z$ ]]; then 19 | echo "::error::Argument 'release-date' must be an ISO 8601 date string." 20 | exit 1 21 | else 22 | echo "date=$(date --date=${INPUTS_DATE} +'%Y-%m-%d')" >> "$GITHUB_OUTPUT" 23 | fi 24 | 25 | - shell: bash 26 | run: git clone --depth 1 https://github.com/home-assistant/version.git /tmp/version 27 | 28 | - shell: bash 29 | env: 30 | INPUTS_VERSION: ${{ inputs.version }} 31 | run: | 32 | function bump_entry() { 33 | json=$1 34 | version=$2 35 | release_date=$3 36 | image_id=$4 37 | image_name=$5 38 | url="https://github.com/home-assistant/operating-system/releases/download/${version}/haos_${image_id}-${version}.img.xz" 39 | temp_image=$(mktemp --suffix=.img.xz) 40 | temp_out=$(mktemp) 41 | 42 | curl -fsL -o "$temp_image" "$url" 43 | image_download_size=$(stat --printf="%s" "$temp_image") 44 | image_download_sha256=$(sha256sum "$temp_image" | awk '{print $1}') 45 | unxz "$temp_image" 46 | temp_unpacked="${temp_image%.*}" 47 | extract_size=$(stat --printf="%s" "$temp_unpacked") 48 | extract_sha256=$(sha256sum "$temp_unpacked" | awk '{print $1}') 49 | 50 | entry_name="Home Assistant OS ${version} (${image_name})" 51 | 52 | jq ' 53 | . 
as $data 54 | | $data 55 | | .os_list = [ 56 | .os_list[] 57 | | if .name | test("Home Assistant OS .* \\(" + $image_name + "\\)") then 58 | .name = "Home Assistant OS " + $version + " (" + $image_name + ")" 59 | | .url = $url 60 | | .extract_size = ($extract_size | tonumber) 61 | | .extract_sha256 = $extract_sha256 62 | | .release_date = $release_date 63 | | .image_download_size = ($image_download_size | tonumber) 64 | | .image_download_sha256 = $image_download_sha256 65 | else . 66 | end 67 | ]' \ 68 | --arg version "$version" \ 69 | --arg image_name "$image_name" \ 70 | --arg entry_name "$entry_name" \ 71 | --arg release_date "$release_date" \ 72 | --arg url "$url" \ 73 | --arg image_download_size "$image_download_size" \ 74 | --arg image_download_sha256 "$image_download_sha256" \ 75 | --arg extract_size "$extract_size" \ 76 | --arg extract_sha256 "$extract_sha256" \ 77 | "$json" > "$temp_out" 78 | 79 | mv "$temp_out" "$json" 80 | rm -rf "$temp_unpacked" "$temp_out" 81 | } 82 | 83 | bump_entry /tmp/version/rpi-imager-haos.json "$INPUTS_VERSION" "${{ steps.validate-input.outputs.date }}" "rpi3-64" "RPi 3" 84 | bump_entry /tmp/version/rpi-imager-haos.json "$INPUTS_VERSION" "${{ steps.validate-input.outputs.date }}" "rpi4-64" "RPi 4/400" 85 | bump_entry /tmp/version/rpi-imager-haos.json "$INPUTS_VERSION" "${{ steps.validate-input.outputs.date }}" "rpi5-64" "RPi 5" 86 | bump_entry /tmp/version/rpi-imager-haos.json "$INPUTS_VERSION" "${{ steps.validate-input.outputs.date }}" "yellow" "Yellow" 87 | 88 | - shell: bash 89 | env: 90 | INPUTS_VERSION: ${{ inputs.version }} 91 | run: | 92 | cd /tmp/version 93 | git commit -am "Bump Home Assistant OS to ${INPUTS_VERSION} for RPi Imager" 94 | git push 95 | 96 | - shell: bash 97 | run: rm -rf /tmp/version 98 | -------------------------------------------------------------------------------- /.github/workflows/artifacts-index.yaml: -------------------------------------------------------------------------------- 1 | name: Update artifacts index 2 | 3 | on: 4 | # Manual run for specified version 5 | workflow_dispatch: 6 | inputs: 7 | version: 8 | description: Version of HAOS to build index for 9 | required: true 10 | type: string 11 | 12 | # Called by other workflows (e.g. 
build.yaml) 13 | workflow_call: 14 | inputs: 15 | version: 16 | description: Version of HAOS to build index for 17 | required: true 18 | type: string 19 | secrets: 20 | R2_OS_ARTIFACTS_ID: 21 | required: true 22 | R2_OS_ARTIFACTS_KEY: 23 | required: true 24 | R2_OS_ARTIFACTS_BUCKET: 25 | required: true 26 | R2_OS_ARTIFACTS_ENDPOINT: 27 | required: true 28 | CF_ZONE: 29 | required: true 30 | CF_PURGE_TOKEN: 31 | required: true 32 | 33 | env: 34 | PYTHON_VERSION: "3.13" 35 | 36 | jobs: 37 | build-index: 38 | name: Build Home Assistant OS artifacts index 39 | runs-on: ubuntu-22.04 40 | steps: 41 | - name: Checkout source 42 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 43 | with: 44 | persist-credentials: false 45 | 46 | - name: Setup Python version ${{ env.PYTHON_VERSION }} 47 | uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 48 | with: 49 | python-version: ${{ env.PYTHON_VERSION }} 50 | 51 | - name: Install AWS CLI 52 | run: pip install 'awscli<1.37.0' 53 | 54 | - name: Create build index 55 | env: 56 | AWS_ACCESS_KEY_ID: ${{ secrets.R2_OS_ARTIFACTS_ID }} 57 | AWS_SECRET_ACCESS_KEY: ${{ secrets.R2_OS_ARTIFACTS_KEY }} 58 | run: | 59 | aws s3api list-objects-v2 \ 60 | --bucket "${{ secrets.R2_OS_ARTIFACTS_BUCKET }}" \ 61 | --endpoint-url "${{ secrets.R2_OS_ARTIFACTS_ENDPOINT }}" \ 62 | --prefix "${{ inputs.version }}/" \ 63 | --query 'Contents[].Key' | jq 'map(split("/")[1]) | sort' > "${{ inputs.version }}.json" 64 | aws s3 cp \ 65 | "${{ inputs.version }}.json" \ 66 | s3://${{ secrets.R2_OS_ARTIFACTS_BUCKET }}/indexes/ \ 67 | --endpoint-url "${{ secrets.R2_OS_ARTIFACTS_ENDPOINT }}" 68 | 69 | - name: Regenerate artifacts index 70 | env: 71 | AWS_ACCESS_KEY_ID: ${{ secrets.R2_OS_ARTIFACTS_ID }} 72 | AWS_SECRET_ACCESS_KEY: ${{ secrets.R2_OS_ARTIFACTS_KEY }} 73 | run: | 74 | aws s3api list-objects-v2 \ 75 | --bucket "${{ secrets.R2_OS_ARTIFACTS_BUCKET }}" \ 76 | --endpoint-url "${{ secrets.R2_OS_ARTIFACTS_ENDPOINT }}" \ 77 | --prefix "indexes/" \ 78 | --query 'Contents[].Key' | jq 'map(capture("indexes/(?<version>[[:digit:]].+).json").version) | sort' > .os-artifacts/index.json 79 | aws s3 sync \ 80 | .os-artifacts/ \ 81 | s3://${{ secrets.R2_OS_ARTIFACTS_BUCKET }}/ \ 82 | --endpoint-url "${{ secrets.R2_OS_ARTIFACTS_ENDPOINT }}" 83 | 84 | 85 | - name: Flush CloudFlare cache 86 | run: | 87 | # Create purge list of all artifacts 88 | jq -r '. | map("https://os-artifacts.home-assistant.io/${{ inputs.version }}/" + .)
| join("\n")' < "${{ inputs.version }}.json" > purge_list 88 | # Add indexes to purge list too 89 | echo "https://os-artifacts.home-assistant.io/indexes/${{ inputs.version }}.json" >> purge_list 90 | echo "https://os-artifacts.home-assistant.io/index.html" >> purge_list 91 | echo "https://os-artifacts.home-assistant.io/index.json" >> purge_list 92 | # Split to chunks of 30 files (limit of CF API) 93 | split -d -l30 purge_list purge_list_chunked 94 | # Convert chunked lists to JSON arrays and call CF purge API 95 | for f in purge_list_chunked*; do 96 | files=$(jq -R -s 'split("\n")[:-1]' < "$f") 97 | curl --silent --show-error --fail -X POST \ 98 | "https://api.cloudflare.com/client/v4/zones/${{ secrets.CF_ZONE }}/purge_cache" \ 99 | -H "Authorization: Bearer ${{ secrets.CF_PURGE_TOKEN }}" \ 100 | -H "Content-Type: application/json" \ 101 | --data "{\"files\": ${files}}" 102 | done 103 | -------------------------------------------------------------------------------- /tests/smoke_test/test_os_update.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | from time import sleep 4 | 5 | import pytest 6 | 7 | _LOGGER = logging.getLogger(__name__) 8 | 9 | 10 | @pytest.mark.dependency() 11 | @pytest.mark.timeout(120) 12 | def test_init(shell, shell_json): 13 | def check_container_running(container_name): 14 | out = shell.run_check( 15 | f"docker container inspect -f '{{{{.State.Status}}}}' {container_name} || true" 16 | ) 17 | return "running" in out 18 | 19 | # wait for important containers first 20 | while True: 21 | if check_container_running("homeassistant") and check_container_running("hassio_supervisor"): 22 | break 23 | 24 | sleep(1) 25 | 26 | # wait for the system ready and Supervisor at the latest version 27 | while True: 28 | supervisor_info = "\n".join(shell.run_check("ha supervisor info --no-progress --raw-json || true")) 29 | # make sure not to fail when Supervisor is restarting 30 | supervisor_info = json.loads(supervisor_info) if supervisor_info.startswith("{") else None 31 | # make sure not to fail when Supervisor is in setup state 32 | supervisor_data = supervisor_info.get("data") if supervisor_info else None 33 | if supervisor_data and supervisor_data["version"] == supervisor_data["version_latest"]: 34 | output = "\n".join(shell.run_check("ha os info || true")) 35 | if "System is not ready" not in output: 36 | break 37 | 38 | sleep(5) 39 | 40 | 41 | @pytest.mark.dependency(depends=["test_init"]) 42 | @pytest.mark.timeout(600) # TODO: reduce to 300 after 17.0 release 43 | def test_os_update(shell, shell_json, target): 44 | def check_container_running(container_name): 45 | out = shell.run_check( 46 | f"docker container inspect -f '{{{{.State.Status}}}}' {container_name} || true" 47 | ) 48 | return "running" in out 49 | 50 | # fetch version info and OTA URL 51 | shell.run_check("ha su reload --no-progress") 52 | 53 | # update OS to latest stable - in tests it should never be the same version 54 | stable_version = shell_json("curl -sSL https://version.home-assistant.io/stable.json")["hassos"]["ova"] 55 | 56 | # Core (and maybe Supervisor) might be downloaded at this point, so we need to keep trying 57 | while True: 58 | output = "\n".join(shell.run_check(f"ha os update --no-progress --version {stable_version} || true", timeout=120)) 59 | if "Don't have an URL for OTA updates" in output: 60 | shell.run_check("ha su reload --no-progress") 61 | elif "Command completed successfully" in output: 62 | break 63 | 64 | 
sleep(5) 65 | 66 | shell.console.expect("Booting `Slot ", timeout=60) 67 | 68 | # reactivate ShellDriver to handle login again 69 | target.deactivate(shell) 70 | target.activate(shell) 71 | 72 | # temporarily needed for OS 17.0 -> 16.x path, where all containers must be re-downloaded 73 | while True: 74 | if check_container_running("hassio_supervisor") and check_container_running("hassio_cli"): 75 | break 76 | 77 | sleep(1) 78 | 79 | # wait for the system to be ready after update 80 | while True: 81 | output = "\n".join(shell.run_check("ha os info || true")) 82 | if "System is not ready" not in output: 83 | break 84 | 85 | sleep(1) 86 | 87 | # check the updated version 88 | os_info = shell_json("ha os info --no-progress --raw-json") 89 | assert os_info["data"]["version"] == stable_version, "OS did not update successfully" 90 | 91 | 92 | @pytest.mark.dependency(depends=["test_os_update"]) 93 | @pytest.mark.timeout(180) 94 | def test_boot_other_slot(shell, shell_json, target): 95 | # switch to the other slot 96 | os_info = shell_json("ha os info --no-progress --raw-json") 97 | other_version = os_info["data"]["boot_slots"]["A"]["version"] 98 | 99 | # as we sometimes don't get another shell prompt after the boot slot switch, 100 | # use plain sendline instead of the run_check method 101 | shell.console.sendline("ha os boot-slot other --no-progress || true") 102 | 103 | shell.console.expect("Booting `Slot ", timeout=60) 104 | 105 | # reactivate ShellDriver to handle login again 106 | target.deactivate(shell) 107 | target.activate(shell) 108 | 109 | # wait for the system to be ready after switching slots 110 | while True: 111 | output = "\n".join(shell.run_check("ha os info || true")) 112 | if "System is not ready" not in output: 113 | break 114 | 115 | sleep(1) 116 | 117 | # check that the boot slot has changed 118 | os_info = shell_json("ha os info --no-progress --raw-json") 119 | assert os_info["data"]["version"] == other_version 120 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.yml: -------------------------------------------------------------------------------- 1 | name: Report an issue with Home Assistant Operating System 2 | description: Report an issue related to the Home Assistant Operating System. 3 | body: 4 | - type: markdown 5 | attributes: 6 | value: | 7 | Make sure to test with the latest version of the Operating System before reporting a bug. 8 | If the bug appears to be a regression, check whether the bug is indeed absent 9 | in the previous version. Use the `ha os update --version x.y` command to downgrade. 10 | - type: textarea 11 | validations: 12 | required: true 13 | attributes: 14 | label: Describe the issue you are experiencing 15 | description: Provide a clear and concise description of what the bug is. 16 | - type: markdown 17 | attributes: 18 | value: | 19 | ## Environment 20 | - type: dropdown 21 | validations: 22 | required: true 23 | attributes: 24 | label: What operating system image do you use?
25 | default: 0 26 | options: 27 | - generic-x86-64 (Generic UEFI capable x86-64 systems) 28 | - generic-aarch64 (Generic UEFI capable aarch64 systems) 29 | - khadas-vim3 (Khadas VIM3) 30 | - odroid-c2 (Hardkernel ODROID-C2) 31 | - odroid-c4 (Hardkernel ODROID-C4) 32 | - odroid-m1 (Hardkernel ODROID-M1) 33 | - odroid-m1s (Hardkernel ODROID-M1S) 34 | - odroid-n2 (Hardkernel ODROID-N2/N2+) 35 | - ova (for Virtual Machines) 36 | - rpi3-64 (Raspberry Pi 3 64-bit OS) 37 | - rpi4-64 (Raspberry Pi 4/400 64-bit OS) 38 | - rpi5-64 (Raspberry Pi 5 64-bit OS) 39 | - yellow (Home Assistant Yellow) 40 | - green (Home Assistant Green) 41 | description: > 42 | Can be found in [Settings -> System -> Repairs -> System Information](https://my.home-assistant.io/redirect/system_health/). It is listed as the `Board` value. 43 | 44 | [![Open your Home Assistant instance and show health information about your system.](https://my.home-assistant.io/badges/system_health.svg)](https://my.home-assistant.io/redirect/system_health/) 45 | - type: input 46 | validations: 47 | required: true 48 | attributes: 49 | label: What version of Home Assistant Operating System is installed? 50 | placeholder: "6.6" 51 | description: > 52 | Can be found in [Settings -> System -> Repairs -> System Information (top right menu)](https://my.home-assistant.io/redirect/system_health/). It is listed as the `Host Operating System` value. 53 | - type: dropdown 54 | validations: 55 | required: true 56 | attributes: 57 | label: Did the problem occur after upgrading the Operating System? 58 | default: 0 59 | options: 60 | - "No" 61 | - "Yes" 62 | - type: textarea 63 | validations: 64 | required: true 65 | attributes: 66 | label: Hardware details 67 | description: > 68 | Provide details about the hardware used for your install. 69 | This is especially important for bare-metal x86 installations. 70 | If you have any USB devices attached, please list them here. 71 | For VMs, include the hypervisor type and version. 72 | - type: textarea 73 | validations: 74 | required: true 75 | attributes: 76 | label: Steps to reproduce the issue 77 | description: | 78 | Please tell us exactly how to reproduce your issue. 79 | Provide clear and concise step by step instructions and add code snippets if needed. 80 | value: | 81 | 1. 82 | 2. 83 | 3. 84 | ... 85 | - type: textarea 86 | validations: 87 | required: true 88 | attributes: 89 | label: Anything in the Supervisor logs that might be useful for us? 90 | description: > 91 | Supervisor Logs can be found in [Settings -> System -> Logs](https://my.home-assistant.io/redirect/logs/?provider=supervisor) 92 | then choose `Supervisor` in the top right. Alternatively enter `ha supervisor logs` in the Home Assistant CLI. 93 | 94 | [![Open your Home Assistant instance and show your Supervisor system logs.](https://my.home-assistant.io/badges/supervisor_logs.svg)](https://my.home-assistant.io/redirect/logs/?provider=supervisor) 95 | render: txt 96 | - type: textarea 97 | validations: 98 | required: true 99 | attributes: 100 | label: Anything in the Host logs that might be useful for us? 101 | description: > 102 | Host Logs can be found in [Settings -> System -> Logs](https://my.home-assistant.io/redirect/logs/?provider=host) 103 | then choose `Host` in the top right. Alternatively enter `ha host logs` in the Home Assistant CLI. 104 | render: txt 105 | - type: textarea 106 | attributes: 107 | label: System information 108 | description: > 109 | **Optional** Copy the full System Health in this text area. 
110 | 111 | System information can be found in [Settings -> System -> Repairs -> System Information (top right menu)](https://my.home-assistant.io/redirect/system_health/). 112 | Click the copy button at the bottom of the pop-up and paste it here. 113 | 114 | [![Open your Home Assistant instance and show health information about your system.](https://my.home-assistant.io/badges/system_health.svg)](https://my.home-assistant.io/redirect/system_health/) 115 | - type: textarea 116 | attributes: 117 | label: Additional information 118 | description: > 119 | **Optional** If you have any additional information for us, use the field below. 120 | Please note, you can attach screenshots or screen recordings here, by 121 | dragging and dropping files in the field below. 122 | -------------------------------------------------------------------------------- /tests/smoke_test/test_basic.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from time import sleep 3 | 4 | import pytest 5 | 6 | 7 | _LOGGER = logging.getLogger(__name__) 8 | 9 | 10 | @pytest.mark.dependency() 11 | @pytest.mark.timeout(120) 12 | def test_init(shell): 13 | def check_container_running(container_name): 14 | out = shell.run_check( 15 | f"docker container inspect -f '{{{{.State.Status}}}}' {container_name} || true" 16 | ) 17 | return "running" in out 18 | 19 | # wait for important containers first 20 | while True: 21 | if check_container_running("homeassistant") and check_container_running("hassio_supervisor"): 22 | break 23 | 24 | sleep(1) 25 | 26 | # wait for the system to be ready 27 | while True: 28 | output = "\n".join(shell.run_check("ha os info || true")) 29 | if "System is not ready" not in output: 30 | break 31 | 32 | sleep(1) 33 | 34 | output = shell.run_check("ha os info") 35 | _LOGGER.info("%s", "\n".join(output)) 36 | 37 | 38 | def test_rauc_status(shell, shell_json): 39 | rauc_status = shell.run_check("rauc status --output-format=shell --detailed") 40 | # RAUC_BOOT_PRIMARY won't be set if correct grub env is missing 41 | assert "RAUC_BOOT_PRIMARY='kernel.0'" in rauc_status 42 | assert "rauc-WARNING" not in "\n".join(rauc_status) 43 | 44 | os_info = shell_json("ha os info --no-progress --raw-json") 45 | expected_version = os_info.get("data", {}).get("version") 46 | assert expected_version is not None and expected_version != "" 47 | 48 | boot_slots = filter(lambda x: "RAUC_SYSTEM_SLOTS=" in x, rauc_status) 49 | boot_slots = next(boot_slots, "").replace("RAUC_SYSTEM_SLOTS='", "").replace("'", "") 50 | assert boot_slots != "" 51 | booted_idx = boot_slots.split(" ").index("kernel.0") 52 | assert booted_idx >= 0 53 | 54 | assert f"RAUC_SLOT_STATUS_BUNDLE_VERSION_{booted_idx + 1}='{expected_version}'" in rauc_status 55 | 56 | 57 | def test_dmesg(shell): 58 | output = shell.run_check("dmesg") 59 | _LOGGER.info("%s", "\n".join(output)) 60 | 61 | 62 | @pytest.mark.dependency(depends=["test_init"]) 63 | def test_supervisor_logs(shell): 64 | output = shell.run_check("ha su logs") 65 | _LOGGER.info("%s", "\n".join(output)) 66 | 67 | 68 | @pytest.mark.dependency(depends=["test_init"]) 69 | def test_landing_page(shell): 70 | web_index = shell.run_check("curl http://localhost:8123") 71 | assert "<title>Home Assistant</title>" in " ".join(web_index) 72 | 73 | 74 | def test_systemctl_status(shell): 75 | output = shell.run_check("systemctl --no-pager -l status -a || true") 76 | _LOGGER.info("%s", "\n".join(output)) 77 | 78 | 79 | def test_systemctl_check_no_failed(shell): 80 | output = shell.run_check("systemctl 
--no-pager -l list-units --state=failed") 81 | assert "0 loaded units listed." in output, f"Some units failed:\n{"\n".join(output)}" 82 | 83 | 84 | def test_systemctl_no_cycles(shell): 85 | # we don't have systemd-analyze available, so check it naively using grep 86 | output = shell.run_check("journalctl -b0 | grep 'ordering cycle' || true") 87 | assert not output, f"Found Systemd dependency cycles:\n{"\n".join(output)}" 88 | 89 | 90 | def test_host_connectivity(shell): 91 | output = shell.run_check("curl -f https://checkonline.home-assistant.io/online.txt") 92 | assert "NetworkManager is online" in output 93 | output = shell.run_check("nmcli network connectivity check") 94 | assert "full" in output, f"Connectivity check failed, nmcli reports: {output}" 95 | 96 | 97 | @pytest.mark.dependency(depends=["test_init"]) 98 | @pytest.mark.timeout(10) 99 | def test_supervisor_connectivity(shell): 100 | # checks URL used by connectivity checks via docker0 bridge 101 | output = shell.run_check("docker exec -ti hassio_supervisor curl -f https://checkonline.home-assistant.io/online.txt") 102 | assert "NetworkManager is online" in output 103 | 104 | 105 | @pytest.mark.dependency(depends=["test_init"]) 106 | @pytest.mark.timeout(10) 107 | def test_hassio_connectivity(shell): 108 | # checks URL used by connectivity checks via hassio bridge 109 | output = shell.run_check("docker exec -ti hassio_cli curl -f https://checkonline.home-assistant.io/online.txt") 110 | assert "NetworkManager is online" in output 111 | 112 | 113 | @pytest.mark.dependency(depends=["test_init"]) 114 | def test_custom_swap_size(shell, target): 115 | output = shell.run_check("stat -c '%s' /mnt/data/swapfile") 116 | # set new swap size to half of the previous size - round to 4k blocks 117 | new_swap_size = (int(output[0]) // 2 // 4096) * 4096 118 | shell.console.sendline(f"echo 'SWAPSIZE={new_swap_size/1024/1024}M' > /etc/default/haos-swapfile; reboot") 119 | shell.console.expect("Booting `Slot ", timeout=60) 120 | # reactivate ShellDriver to handle login again 121 | target.deactivate(shell) 122 | target.activate(shell) 123 | output = shell.run_check("stat -c '%s' /mnt/data/swapfile") 124 | assert int(output[0]) == new_swap_size, f"Incorrect swap size {new_swap_size}B: {output}" 125 | 126 | 127 | @pytest.mark.dependency(depends=["test_custom_swap_size"]) 128 | def test_no_swap(shell, target): 129 | shell.console.sendline("echo 'SWAPSIZE=0' > /etc/default/haos-swapfile; reboot") 130 | shell.console.expect("Booting `Slot ", timeout=60) 131 | # reactivate ShellDriver to handle login again 132 | target.deactivate(shell) 133 | target.activate(shell) 134 | output = shell.run_check("systemctl --no-pager -l list-units --state=failed") 135 | assert "0 loaded units listed." 
in output, f"Some units failed:\n{"\n".join(output)}" 136 | swapon = shell.run_check("swapon --show") 137 | assert swapon == [], f"Swapfile still exists: {swapon}" 138 | 139 | 140 | def test_kernel_not_tainted(shell): 141 | """Check if the kernel is not tainted - do it at the end of the 142 | test suite to increase the chance of catching issues.""" 143 | output = shell.run_check("cat /proc/sys/kernel/tainted") 144 | assert "\n".join(output) == "0", f"Kernel tainted: {output}" 145 | -------------------------------------------------------------------------------- /tests/supervisor_test/test_supervisor.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from time import sleep 3 | 4 | import pytest 5 | from labgrid.driver import ExecutionError 6 | 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | 11 | @pytest.fixture(scope="module") 12 | def stash() -> dict: 13 | """Simple stash for sharing data between tests in this module.""" 14 | stash = {} 15 | return stash 16 | 17 | 18 | @pytest.mark.dependency() 19 | @pytest.mark.timeout(120) 20 | def test_start_supervisor(shell, shell_json): 21 | def check_container_running(container_name): 22 | out = shell.run_check(f"docker container inspect -f '{{{{.State.Status}}}}' {container_name} || true") 23 | return "running" in out 24 | 25 | while True: 26 | if check_container_running("homeassistant") and check_container_running("hassio_supervisor"): 27 | break 28 | 29 | sleep(1) 30 | 31 | supervisor_ip = "\n".join( 32 | shell.run_check("docker inspect --format='{{.NetworkSettings.Networks.bridge.IPAddress}}' hassio_supervisor") 33 | ) 34 | 35 | while True: 36 | try: 37 | if shell_json(f"curl -sSL http://{supervisor_ip}/supervisor/ping").get("result") == "ok": 38 | break 39 | except ExecutionError: 40 | pass # avoid failure when the container is restarting 41 | 42 | sleep(1) 43 | 44 | 45 | @pytest.mark.dependency(depends=["test_start_supervisor"]) 46 | def test_check_supervisor(shell_json): 47 | # check supervisor info 48 | supervisor_info = shell_json("ha supervisor info --no-progress --raw-json") 49 | assert supervisor_info.get("result") == "ok", "supervisor info failed" 50 | logger.info("Supervisor info: %s", supervisor_info) 51 | # check network info 52 | network_info = shell_json("ha network info --no-progress --raw-json") 53 | assert network_info.get("result") == "ok", "network info failed" 54 | logger.info("Network info: %s", network_info) 55 | 56 | 57 | @pytest.mark.dependency(depends=["test_check_supervisor"]) 58 | @pytest.mark.timeout(120) 59 | def test_update_supervisor(shell_json): 60 | supervisor_info = shell_json("ha supervisor info --no-progress --raw-json") 61 | supervisor_version = supervisor_info.get("data").get("version") 62 | supervisor_version_latest = supervisor_info.get("data").get("version_latest") 63 | assert supervisor_version_latest, "Missing latest supervisor version info" 64 | if supervisor_version == supervisor_version_latest: 65 | logger.info("Supervisor is already up to date") 66 | pytest.skip("Supervisor is already up to date") 67 | else: 68 | result = shell_json("ha supervisor update --no-progress --raw-json") 69 | if result.get("result") == "error" and "Another job is running" in result.get("message"): 70 | pass 71 | else: 72 | assert result.get("result") == "ok", f"Supervisor update failed: {result}" 73 | 74 | while True: 75 | try: 76 | supervisor_info = shell_json("ha supervisor info --no-progress --raw-json") 77 | data = supervisor_info.get("data") 78 | if data 
and data.get("version") == data.get("version_latest"): 79 | logger.info( 80 | "Supervisor updated from %s to %s: %s", 81 | supervisor_version, 82 | data.get("version"), 83 | supervisor_info, 84 | ) 85 | break 86 | except ExecutionError: 87 | pass # avoid failure when the container is restarting 88 | 89 | sleep(1) 90 | 91 | 92 | @pytest.mark.dependency(depends=["test_check_supervisor"]) 93 | def test_supervisor_is_updated(shell_json): 94 | supervisor_info = shell_json("ha supervisor info --no-progress --raw-json") 95 | data = supervisor_info.get("data") 96 | assert data and data.get("version") == data.get("version_latest") 97 | 98 | 99 | @pytest.mark.dependency(depends=["test_supervisor_is_updated"]) 100 | def test_addon_install(shell_json): 101 | # install Core SSH add-on 102 | assert ( 103 | shell_json("ha addons install core_ssh --no-progress --raw-json").get("result") == "ok" 104 | ), "Core SSH add-on install failed" 105 | # check Core SSH add-on is installed 106 | assert ( 107 | shell_json("ha addons info core_ssh --no-progress --raw-json").get("data", {}).get("version") is not None 108 | ), "Core SSH add-on not installed" 109 | # start Core SSH add-on 110 | assert ( 111 | shell_json("ha addons start core_ssh --no-progress --raw-json").get("result") == "ok" 112 | ), "Core SSH add-on start failed" 113 | # check Core SSH add-on is running 114 | ssh_info = shell_json("ha addons info core_ssh --no-progress --raw-json") 115 | assert ssh_info.get("data", {}).get("state") == "started", "Core SSH add-on not running" 116 | logger.info("Core SSH add-on info: %s", ssh_info) 117 | 118 | 119 | @pytest.mark.dependency(depends=["test_supervisor_is_updated"]) 120 | def test_supervisor_errors(shell_json): 121 | # run Supervisor health check 122 | health_check = shell_json("ha resolution healthcheck --no-progress --raw-json") 123 | assert health_check.get("result") == "ok", "Supervisor health check failed" 124 | logger.info("Supervisor health check result: %s", health_check) 125 | # get resolution center info 126 | resolution_info = shell_json("ha resolution info --no-progress --raw-json") 127 | logger.info("Resolution center info: %s", resolution_info) 128 | # check supervisor is healthy 129 | unhealthy = resolution_info.get("data").get("unhealthy") 130 | assert len(unhealthy) == 0, "Supervisor is unhealthy" 131 | # check for unsupported entries 132 | unsupported = resolution_info.get("data").get("unsupported") 133 | assert len(unsupported) == 0, "Unsupported entries found" 134 | 135 | 136 | @pytest.mark.dependency(depends=["test_supervisor_is_updated"]) 137 | def test_create_backup(shell_json, stash): 138 | result = shell_json("ha backups new --no-progress --raw-json") 139 | assert result.get("result") == "ok", f"Backup creation failed: {result}" 140 | slug = result.get("data", {}).get("slug") 141 | assert slug is not None 142 | stash.update(slug=slug) 143 | logger.info("Backup creation result: %s", result) 144 | 145 | 146 | @pytest.mark.dependency(depends=["test_addon_install"]) 147 | def test_addon_uninstall(shell_json): 148 | result = shell_json("ha addons uninstall core_ssh --no-progress --raw-json") 149 | assert result.get("result") == "ok", f"Core SSH add-on uninstall failed: {result}" 150 | logger.info("Core SSH add-on uninstall result: %s", result) 151 | 152 | 153 | @pytest.mark.dependency(depends=["test_supervisor_is_updated"]) 154 | @pytest.mark.timeout(120) 155 | def test_restart_supervisor(shell, shell_json): 156 | result = shell_json("ha supervisor restart --no-progress --raw-json") 157 | 
assert result.get("result") == "ok", f"Supervisor restart failed: {result}" 158 | 159 | supervisor_ip = "\n".join( 160 | shell.run_check("docker inspect --format='{{.NetworkSettings.Networks.bridge.IPAddress}}' hassio_supervisor") 161 | ) 162 | 163 | while True: 164 | try: 165 | if shell_json(f"curl -sSL http://{supervisor_ip}/supervisor/ping").get("result") == "ok": 166 | if shell_json("ha os info --no-progress --raw-json").get("result") == "ok": 167 | break 168 | except ExecutionError: 169 | pass # avoid failure when the container is restarting 170 | 171 | sleep(1) 172 | 173 | 174 | @pytest.mark.dependency(depends=["test_create_backup"]) 175 | def test_restore_backup(shell_json, stash): 176 | result = shell_json(f"ha backups restore {stash.get('slug')} --addons core_ssh --no-progress --raw-json") 177 | assert result.get("result") == "ok", f"Backup restore failed: {result}" 178 | logger.info("Backup restore result: %s", result) 179 | 180 | addon_info = shell_json("ha addons info core_ssh --no-progress --raw-json") 181 | assert addon_info.get("data", {}).get("version") is not None, "Core SSH add-on not installed" 182 | assert addon_info.get("data", {}).get("state") == "started", "Core SSH add-on not running" 183 | logger.info("Core SSH add-on info: %s", addon_info) 184 | 185 | 186 | @pytest.mark.dependency(depends=["test_create_backup"]) 187 | def test_restore_ssl_directory(shell_json, stash): 188 | result = shell_json(f"ha backups restore {stash.get('slug')} --folders ssl --no-progress --raw-json") 189 | assert result.get("result") == "ok", f"Backup restore failed: {result}" 190 | logger.info("Backup restore result: %s", result) 191 | 192 | 193 | @pytest.mark.dependency(depends=["test_start_supervisor"]) 194 | def test_no_apparmor_denies(shell): 195 | """Check there are no AppArmor denies in the logs raised during Supervisor tests.""" 196 | output = shell.run_check("journalctl -t audit | grep DENIED || true") 197 | assert not output, f"AppArmor denies found: {output}" 198 | 199 | 200 | @pytest.mark.dependency(depends=["test_start_supervisor"]) 201 | def test_kernel_not_tainted(shell): 202 | """Check if the kernel is not tainted - do it at the end of the 203 | test suite to increase the chance of catching issues.""" 204 | output = shell.run_check("cat /proc/sys/kernel/tainted") 205 | assert "\n".join(output) == "0", f"Kernel tainted: {output}" 206 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2017 Pascal Vizeli 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /.github/workflows/build.yaml: -------------------------------------------------------------------------------- 1 | # Home Assistant Operating System build workflow 2 | 3 | name: OS build 4 | 5 | on: 6 | release: 7 | types: [published] 8 | workflow_dispatch: 9 | inputs: 10 | boards: 11 | description: 'List of boards to build (comma separated identifiers)' 12 | required: false 13 | type: string 14 | publish: 15 | description: 'Publish build artifacts to R2 (not applicable to forks)' 16 | required: true 17 | type: boolean 18 | default: true 19 | run_tests: 20 | description: 'Run tests after build' 21 | required: true 22 | type: boolean 23 | default: true 24 | hassio_channel: 25 | description: 'Release channel to use (default: stable for GH releases, dev otherwise)' 26 | type: choice 27 | required: true 28 | default: default 29 | options: 30 | - default 31 | - stable 32 | - beta 33 | - dev 34 | 35 | env: 36 | PYTHON_VERSION: "3.13" 37 | 38 | jobs: 39 | prepare: 40 | name: Prepare build 41 | runs-on: ubuntu-22.04 42 | permissions: 43 | contents: read 44 | pull-requests: read 45 | packages: write 46 | outputs: 47 | version_dev: ${{ steps.version_dev.outputs.version_dev }} 48 | version_main: ${{ steps.version.outputs.version_main }} 49 | version_full: ${{ steps.version.outputs.version_full }} 50 | channel: ${{ steps.channel.outputs.channel }} 51 | hassio_channel_option: ${{ steps.channel.outputs.hassio_channel_option }} 52 | matrix: ${{ steps.generate_matrix.outputs.result }} 53 | build_container_image: ghcr.io/${{ github.repository_owner }}/haos-builder@${{ steps.build_haos_builder.outputs.digest }} 54 | publish_build: ${{ steps.check_publish.outputs.publish_build }} 55 | self_signed_cert: ${{ steps.generate_signing_key.outputs.self_signed_cert }} 56 | steps: 57 | - name: Checkout source 58 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 59 | with: 60 | persist-credentials: false 61 | 62 | - name: Check if build should be published 63 | id: check_publish 64 | env: 65 | PUBLISH_FLAG: ${{ inputs.publish }} 66 | run: | 67 | if [ "${{ github.repository }}" == "home-assistant/operating-system" ]; then 68 | if [ "${PUBLISH_FLAG}" != "true" ] && [ "${{ github.event_name }}" != "release" ]; then 69 | echo "publish_build=false" >> "$GITHUB_OUTPUT" 70 | else 71 | echo "publish_build=true" >> "$GITHUB_OUTPUT" 72 | fi 73 | else 74 | echo "publish_build=false" >> "$GITHUB_OUTPUT" 75 | fi 76 | 77 | - name: Generate development version 78 | shell: bash 79 | id: version_dev 80 | if: ${{ github.event_name != 'release' }} 81 | env: 82 | PUBLISH_BUILD: ${{ steps.check_publish.outputs.publish_build }} 83 | run: | 84 | version_dev="dev$(date --utc +'%Y%m%d')" 85 | if [ "${{ env.PUBLISH_BUILD }}" != "true" ] || [ "${{ github.ref }}" != "refs/heads/dev" ]; then 86 | version_dev="dev$(date +%s)" 87 | fi 88 | echo "Development version \"${version_dev}\"" 89 | echo "version_dev=${version_dev}" >> $GITHUB_OUTPUT 90 | 91 | - name: Set version suffix 92 | if: ${{ github.event_name != 'release' }} 93 | env: 94 | VERSION_DEV: ${{ steps.version_dev.outputs.version_dev }} 95 | run: | 96 | sed -i -E "s/(^VERSION_SUFFIX=\").*(\"$)/\1${VERSION_DEV}\2/" buildroot-external/meta 97 | 98 | - name: Get version 99 | id: version 100 | run: | 101 | . 
${GITHUB_WORKSPACE}/buildroot-external/meta 102 | echo "version_main=${VERSION_MAJOR}.${VERSION_MINOR}" >> $GITHUB_OUTPUT 103 | if [ -z "${VERSION_SUFFIX}" ]; then 104 | version_full="${VERSION_MAJOR}.${VERSION_MINOR}" 105 | else 106 | version_full="${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_SUFFIX}" 107 | fi 108 | echo "version_full=${version_full}" >> $GITHUB_OUTPUT 109 | echo "Full version number of this release is \"${version_full}\"." 110 | 111 | - name: Validate version 112 | id: version_check 113 | if: ${{ github.event_name == 'release' }} 114 | run: | 115 | if [ "${{ steps.version.outputs.version_full }}" != "${{ github.event.release.tag_name }}" ]; then 116 | echo "Version number in Buildroot metadata does not match tag (${{ steps.version.outputs.version_full }} vs ${{ github.event.release.tag_name }})." 117 | exit 1 118 | fi 119 | 120 | - name: Get channel 121 | id: channel 122 | run: | 123 | if [[ "${{ github.event_name }}" == "release" ]]; then 124 | if [[ "${{ github.event.release.prerelease }}" == "true" ]]; then 125 | echo "channel=beta" >> "$GITHUB_OUTPUT" 126 | else 127 | echo "channel=stable" >> "$GITHUB_OUTPUT" 128 | fi 129 | else 130 | echo "channel=dev" >> "$GITHUB_OUTPUT" 131 | fi 132 | 133 | if [[ "${{ inputs.hassio_channel }}" == "default" ]]; then 134 | if [[ "${{ github.event_name }}" == "release" ]]; then 135 | echo "hassio_channel_option=BR2_PACKAGE_HASSIO_CHANNEL_STABLE" >> "$GITHUB_OUTPUT" 136 | else 137 | echo "hassio_channel_option=BR2_PACKAGE_HASSIO_CHANNEL_DEV" >> "$GITHUB_OUTPUT" 138 | fi 139 | else 140 | if [[ "${{ inputs.hassio_channel }}" == "stable" ]]; then 141 | echo "hassio_channel_option=BR2_PACKAGE_HASSIO_CHANNEL_STABLE" >> "$GITHUB_OUTPUT" 142 | elif [[ "${{ inputs.hassio_channel }}" == "beta" ]]; then 143 | echo "hassio_channel_option=BR2_PACKAGE_HASSIO_CHANNEL_BETA" >> "$GITHUB_OUTPUT" 144 | elif [[ "${{ inputs.hassio_channel }}" == "dev" ]]; then 145 | echo "hassio_channel_option=BR2_PACKAGE_HASSIO_CHANNEL_DEV" >> "$GITHUB_OUTPUT" 146 | fi 147 | fi 148 | 149 | - name: Create build matrix 150 | uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 151 | id: generate_matrix 152 | with: 153 | script: | 154 | const boards = require('./.github/workflows/matrix.json') 155 | if ("${{ github.event_name }}" == "release") { 156 | return { "board": boards } 157 | } 158 | 159 | const boardFilter = "${{ github.event.inputs.boards }}" 160 | const runTests = "${{ github.event.inputs.run_tests }}" === "true" 161 | 162 | if (boardFilter == "") { 163 | console.log("Run full build for all boards") 164 | return { "board": boards } 165 | } else { 166 | console.log("Run partial build") 167 | const boardSet = new Set(boardFilter.split(",")) 168 | 169 | // if tests are enabled, we need to ensure the OVA board is included 170 | if (runTests && !boardSet.has("ova")) { 171 | console.log("Adding OVA board for integration tests") 172 | boardSet.add("ova") 173 | } 174 | 175 | const buildBoards = boards.filter(b => boardSet.has(b.id)) 176 | return { "board": buildBoards } 177 | } 178 | 179 | - name: Set up Docker Buildx 180 | uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1 181 | - name: Log in to the GitHub container registry 182 | uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 183 | with: 184 | registry: ghcr.io 185 | username: ${{ github.repository_owner }} 186 | password: ${{ secrets.GITHUB_TOKEN }} 187 | - name: Build and Push 188 | uses: 
docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0 189 | id: build_haos_builder 190 | with: 191 | context: . 192 | file: Dockerfile 193 | tags: ghcr.io/${{ github.repository_owner }}/haos-builder 194 | cache-from: ghcr.io/${{ github.repository_owner }}/haos-builder:cache-${{ steps.version.outputs.version_main }} 195 | cache-to: ghcr.io/${{ github.repository_owner }}/haos-builder:cache-${{ steps.version.outputs.version_main }} 196 | push: true 197 | 198 | - name: Generate self-signed certificate 199 | id: generate_signing_key 200 | env: 201 | RAUC_CERTIFICATE: ${{ secrets.RAUC_CERTIFICATE }} 202 | RAUC_PRIVATE_KEY: ${{ secrets.RAUC_PRIVATE_KEY }} 203 | if: env.RAUC_CERTIFICATE == '' || env.RAUC_PRIVATE_KEY == '' 204 | run: | 205 | echo "::warning:: RAUC certificate or key is missing in the repository secrets. Building with a public self-signed certificate!" 206 | buildroot-external/scripts/generate-signing-key.sh cert.pem key.pem 207 | echo "self_signed_cert=true" >> $GITHUB_OUTPUT 208 | 209 | - name: Create signing key 210 | uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 211 | if: steps.generate_signing_key.outcome == 'success' 212 | with: 213 | name: signing-key 214 | path: | 215 | cert.pem 216 | key.pem 217 | 218 | build: 219 | name: Build for ${{ matrix.board.id }} 220 | permissions: 221 | contents: write # for actions/upload-release-asset to upload release asset 222 | needs: prepare 223 | strategy: 224 | fail-fast: ${{ github.event_name == 'release' }} 225 | matrix: ${{ fromJson(needs.prepare.outputs.matrix) }} 226 | runs-on: ubuntu-22.04 227 | 228 | steps: 229 | - name: Checkout source 230 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 231 | with: 232 | submodules: true 233 | persist-credentials: false 234 | 235 | - name: Setup Python version ${{ env.PYTHON_VERSION }} 236 | if: ${{ github.event_name != 'release' }} 237 | uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 238 | with: 239 | python-version: ${{ env.PYTHON_VERSION }} 240 | 241 | - name: Install AWS CLI 242 | if: ${{ github.event_name != 'release' && needs.prepare.outputs.publish_build == 'true' }} 243 | run: pip install 'awscli<1.37.0' 244 | 245 | - name: Set version suffix 246 | if: ${{ github.event_name != 'release' }} 247 | env: 248 | VERSION_DEV: ${{ needs.prepare.outputs.version_dev }} 249 | run: | 250 | sed -i -E "s/(^VERSION_SUFFIX=\").*(\"$)/\1${VERSION_DEV}\2/" buildroot-external/meta 251 | 252 | - name: 'Add release PKI certs' 253 | if: ${{ needs.prepare.outputs.self_signed_cert != 'true' }} 254 | env: 255 | RAUC_CERTIFICATE: ${{ secrets.RAUC_CERTIFICATE }} 256 | RAUC_PRIVATE_KEY: ${{ secrets.RAUC_PRIVATE_KEY }} 257 | run: | 258 | echo -e "-----BEGIN CERTIFICATE-----\n${RAUC_CERTIFICATE}\n-----END CERTIFICATE-----" > cert.pem 259 | echo -e "-----BEGIN PRIVATE KEY-----\n${RAUC_PRIVATE_KEY}\n-----END PRIVATE KEY-----" > key.pem 260 | 261 | - name: Get self-signed certificate from the prepare job 262 | if: ${{ needs.prepare.outputs.self_signed_cert == 'true' }} 263 | uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 264 | with: 265 | name: signing-key 266 | 267 | - name: Free space on build drive 268 | run: | 269 | # Inspired by https://github.com/easimon/maximize-build-space/blob/v7/action.yml 270 | df -h 271 | sudo rm -rf /usr/local/lib/android/sdk/ndk 272 | sudo rm -rf /opt/hostedtoolcache/CodeQL 273 | sudo mkdir /mnt/cache 274 | sudo mkdir /mnt/output 275 | 
WORKSPACE_OWNER="$(stat -c '%U:%G' "${GITHUB_WORKSPACE}")" 276 | # output directory is symlinked for easier access from workspace 277 | # but for build container it must be mounted as a volume 278 | sudo ln -sf /mnt/output "${GITHUB_WORKSPACE}/output" 279 | sudo chown -R "${WORKSPACE_OWNER}" /mnt/cache 280 | sudo chown -R "${WORKSPACE_OWNER}" /mnt/output 281 | df -h 282 | 283 | - name: "Restore cache: object files" 284 | uses: actions/cache/restore@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 285 | with: 286 | path: /mnt/cache/cc 287 | key: haos-cc-${{ matrix.board.id }} 288 | 289 | - name: Generate build config 290 | uses: "./.github/actions/haos-builder-command" 291 | with: 292 | image: ${{ needs.prepare.outputs.build_container_image }} 293 | command: make ${{ matrix.board.defconfig }}_defconfig 294 | 295 | - name: Override release channel 296 | if: ${{ needs.prepare.outputs.hassio_channel_option != 'BR2_PACKAGE_HASSIO_CHANNEL_STABLE' }} 297 | uses: "./.github/actions/haos-builder-command" 298 | with: 299 | image: ${{ needs.prepare.outputs.build_container_image }} 300 | command: | 301 | bash -c 'echo "${{ needs.prepare.outputs.hassio_channel_option }}=y" >> /build/output/.config && make olddefconfig' 302 | 303 | - name: Build 304 | uses: "./.github/actions/haos-builder-command" 305 | with: 306 | image: ${{ needs.prepare.outputs.build_container_image }} 307 | command: make 308 | 309 | - name: Check Linux config 310 | uses: "./.github/actions/haos-builder-command" 311 | with: 312 | image: ${{ needs.prepare.outputs.build_container_image }} 313 | command: | 314 | make BR2_CHECK_DOTCONFIG_OPTS="--github-format --strip-path-prefix=/build/" linux-check-dotconfig 315 | 316 | - name: Upload artifacts 317 | if: ${{ github.event_name != 'release' && needs.prepare.outputs.publish_build == 'true' }} 318 | working-directory: output/images/ 319 | env: 320 | AWS_ACCESS_KEY_ID: ${{ secrets.R2_OS_ARTIFACTS_ID }} 321 | AWS_SECRET_ACCESS_KEY: ${{ secrets.R2_OS_ARTIFACTS_KEY }} 322 | run: | 323 | aws s3 sync \ 324 | ./ \ 325 | s3://${{ secrets.R2_OS_ARTIFACTS_BUCKET }}/${{ needs.prepare.outputs.version_full }}/ \ 326 | --exclude "*" \ 327 | --include "haos_*" \ 328 | --endpoint-url ${{ secrets.R2_OS_ARTIFACTS_ENDPOINT }} 329 | 330 | - name: Upload release assets 331 | if: ${{ github.event_name == 'release' }} 332 | uses: shogo82148/actions-upload-release-asset@59cbc563d11314e48122193f8fe5cdda62ea6cf9 # v1.9.1 333 | with: 334 | upload_url: ${{ github.event.release.upload_url }} 335 | asset_path: output/images/haos_* 336 | 337 | - name: Print cache stats 338 | run: | 339 | echo "Cache size: $(du -sh /mnt/cache/cc)" 340 | echo "Files total: $(find /mnt/cache/cc -mindepth 1 -type f | wc -l)" 341 | echo "Old files to remove: $(find /mnt/cache/cc -mindepth 1 -type f -not -anewer output/Makefile | wc -l)" 342 | find /mnt/cache/cc -mindepth 1 -type f -not -anewer output/Makefile -delete 343 | echo "Cache size after pruning: $(du -sh /mnt/cache/cc)" 344 | 345 | - name: "Save cache: object files" 346 | if: github.ref == 'refs/heads/dev' 347 | uses: actions/cache/save@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 348 | with: 349 | path: /mnt/cache/cc 350 | key: haos-cc-${{ matrix.board.id }}-${{ github.run_id }} 351 | 352 | - name: Generate build summary 353 | run: | 354 | echo "# ${{ matrix.board.id }} build summary" >> $GITHUB_STEP_SUMMARY 355 | echo "## Built-in OS components" >> $GITHUB_STEP_SUMMARY 356 | echo "Release channel: ${{ inputs.hassio_channel }} (${{ needs.prepare.outputs.hassio_channel_option 
}})" >> $GITHUB_STEP_SUMMARY 357 | echo "| Container | Version |" >> $GITHUB_STEP_SUMMARY 358 | echo "|:-|:-|" >> $GITHUB_STEP_SUMMARY 359 | supervisor_version=$(jq -r ".supervisor" output/build/hassio-*/version.json) 360 | landingpage_version=$(curl -fsSL https://api.github.com/repos/home-assistant/landingpage/releases/latest | jq -r '.tag_name') 361 | echo "| supervisor | [${supervisor_version}](https://github.com/home-assistant/supervisor/releases/tag/${supervisor_version}) |" >> $GITHUB_STEP_SUMMARY 362 | echo "| landingpage | [${landingpage_version}](https://github.com/home-assistant/landingpage/releases/tag/${landingpage_version}) |" >> $GITHUB_STEP_SUMMARY 363 | for plugin in dns audio cli multicast observer; do 364 | version=$(jq -r ".${plugin}" output/build/hassio-*/version.json) 365 | echo "| plugin-${plugin} | [${version}](https://github.com/home-assistant/plugin-${plugin}/releases/tag/${version}) |" >> $GITHUB_STEP_SUMMARY 366 | done 367 | echo "## Artifacts" >> $GITHUB_STEP_SUMMARY 368 | echo "| File | Size (bytes) | Size (formatted) |" >> $GITHUB_STEP_SUMMARY 369 | echo "|:-|:-|:-|" >> $GITHUB_STEP_SUMMARY 370 | for f in output/images/haos_*; do 371 | echo "| $(basename $f) | $(du -b $f | cut -f1) | $(du -bh $f | cut -f1) |" >> $GITHUB_STEP_SUMMARY 372 | done 373 | echo "## Partitions" >> $GITHUB_STEP_SUMMARY 374 | echo "| File | Size (bytes) | Size (formatted) |" >> $GITHUB_STEP_SUMMARY 375 | echo "|:-|:-|:-|" >> $GITHUB_STEP_SUMMARY 376 | for f in boot.vfat kernel.img rootfs.erofs overlay.ext4 data.ext4; do 377 | echo "| ${f} | $(du -b output/images/$f | cut -f1) | $(du -bh output/images/$f | cut -f1) |" >> $GITHUB_STEP_SUMMARY 378 | done 379 | 380 | - name: Upload OS image artifact 381 | uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 382 | if: ${{ github.event_name != 'release' && needs.prepare.outputs.publish_build != 'true' && matrix.board.id != 'ova' }} 383 | with: 384 | name: haos_${{ matrix.board.id }}-${{ needs.prepare.outputs.version_full }}.img.xz 385 | path: | 386 | output/images/haos_${{ matrix.board.id }}-${{ needs.prepare.outputs.version_full }}.img.xz 387 | 388 | - name: Upload RAUC bundle artifact 389 | uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 390 | if: ${{ github.event_name != 'release' && needs.prepare.outputs.publish_build != 'true' }} 391 | with: 392 | name: haos_${{ matrix.board.id }}-${{ needs.prepare.outputs.version_full }}.raucb 393 | path: | 394 | output/images/haos_${{ matrix.board.id }}-${{ needs.prepare.outputs.version_full }}.raucb 395 | 396 | - name: Upload Open Virtualization Format (OVA) artifact 397 | uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 398 | if: ${{ github.event_name != 'release' && needs.prepare.outputs.publish_build != 'true' && matrix.board.id == 'ova' }} 399 | with: 400 | name: haos_${{ matrix.board.id }}-${{ needs.prepare.outputs.version_full }}.ova 401 | path: | 402 | output/images/haos_${{ matrix.board.id }}-${{ needs.prepare.outputs.version_full }}.ova 403 | 404 | - name: Upload QEMU disk image artifact 405 | uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 406 | # Create artifact for ova every time - it's used by the called tests workflow 407 | if: ${{ matrix.board.id == 'ova' || (github.event_name != 'release' && needs.prepare.outputs.publish_build != 'true' && matrix.board.id == 'generic-aarch64') }} 408 | with: 409 | name: haos_${{ matrix.board.id }}-${{ 
needs.prepare.outputs.version_full }}.qcow2.xz 410 | path: | 411 | output/images/haos_${{ matrix.board.id }}-${{ needs.prepare.outputs.version_full }}.qcow2.xz 412 | 413 | - name: Upload VMware Virtual Machine Disk (VMDK) artifact 414 | uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 415 | if: ${{ github.event_name != 'release' && needs.prepare.outputs.publish_build != 'true' && (matrix.board.id == 'generic-aarch64' || matrix.board.id == 'ova') }} 416 | with: 417 | name: haos_${{ matrix.board.id }}-${{ needs.prepare.outputs.version_full }}.vmdk.zip 418 | path: | 419 | output/images/haos_${{ matrix.board.id }}-${{ needs.prepare.outputs.version_full }}.vmdk.zip 420 | 421 | - name: Upload VirtualBox Virtual Disk Image (VDI) artifact 422 | uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 423 | if: ${{ github.event_name != 'release' && needs.prepare.outputs.publish_build != 'true' && matrix.board.id == 'ova' }} 424 | with: 425 | name: haos_${{ matrix.board.id }}-${{ needs.prepare.outputs.version_full }}.vdi.zip 426 | path: | 427 | output/images/haos_${{ matrix.board.id }}-${{ needs.prepare.outputs.version_full }}.vdi.zip 428 | 429 | - name: Upload Virtual Hard Disk v2 (VHDX) artifact 430 | uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 431 | if: ${{ github.event_name != 'release' && needs.prepare.outputs.publish_build != 'true' && matrix.board.id == 'ova' }} 432 | with: 433 | name: haos_${{ matrix.board.id }}-${{ needs.prepare.outputs.version_full }}.vhdx.zip 434 | path: | 435 | output/images/haos_${{ matrix.board.id }}-${{ needs.prepare.outputs.version_full }}.vhdx.zip 436 | 437 | test: 438 | name: Test OS image 439 | if: ${{ github.event_name == 'release' || inputs.run_tests == true }} 440 | needs: [ build, prepare ] 441 | uses: ./.github/workflows/test.yaml 442 | with: 443 | version: ${{ needs.prepare.outputs.version_full }} 444 | 445 | update_index: 446 | name: Update artifacts index 447 | if: ${{ github.event_name != 'release' && needs.prepare.outputs.publish_build == 'true' }} 448 | needs: [ build, prepare ] 449 | uses: home-assistant/operating-system/.github/workflows/artifacts-index.yaml@dev 450 | with: 451 | version: ${{ needs.prepare.outputs.version_full }} 452 | secrets: 453 | R2_OS_ARTIFACTS_ID: ${{ secrets.R2_OS_ARTIFACTS_ID }} 454 | R2_OS_ARTIFACTS_KEY: ${{ secrets.R2_OS_ARTIFACTS_KEY }} 455 | R2_OS_ARTIFACTS_BUCKET: ${{ secrets.R2_OS_ARTIFACTS_BUCKET }} 456 | R2_OS_ARTIFACTS_ENDPOINT: ${{ secrets.R2_OS_ARTIFACTS_ENDPOINT }} 457 | CF_ZONE: ${{ secrets.CF_ZONE }} 458 | CF_PURGE_TOKEN: ${{ secrets.CF_PURGE_TOKEN }} 459 | 460 | bump_version: 461 | name: Bump ${{ needs.prepare.outputs.channel }} channel version 462 | if: ${{ github.repository == 'home-assistant/operating-system' && needs.prepare.outputs.publish_build == 'true' && (needs.prepare.outputs.channel != 'dev' || github.ref == 'refs/heads/dev') }} 463 | environment: ${{ needs.prepare.outputs.channel }} 464 | needs: [ build, prepare ] 465 | runs-on: ubuntu-22.04 466 | 467 | steps: 468 | - name: Checkout source 469 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 470 | with: 471 | persist-credentials: false 472 | 473 | - name: Initialize git 474 | uses: home-assistant/actions/helpers/git-init@master 475 | with: 476 | name: ${{ secrets.GIT_NAME }} 477 | email: ${{ secrets.GIT_EMAIL }} 478 | token: ${{ secrets.GIT_TOKEN }} 479 | 480 | - name: Bump Home Assistant OS ${{ needs.prepare.outputs.channel }} channel 
version 481 | uses: home-assistant/actions/helpers/version-push@master 482 | with: 483 | key: "hassos[]" 484 | key-description: "Home Assistant OS" 485 | version: ${{ needs.prepare.outputs.version_full }} 486 | channel: ${{ needs.prepare.outputs.channel }} 487 | 488 | - name: Bump Home Assistant OS beta channel version on stable release 489 | if: ${{ needs.prepare.outputs.channel == 'stable' }} 490 | uses: home-assistant/actions/helpers/version-push@master 491 | with: 492 | key: "hassos[]" 493 | key-description: "Home Assistant OS" 494 | version: ${{ needs.prepare.outputs.version_full }} 495 | channel: beta 496 | 497 | - name: Bump stable Home Assistant version for RPi Imager 498 | if: ${{ github.event_name == 'release' && needs.prepare.outputs.channel == 'stable' }} 499 | uses: "./.github/actions/bump-rpi-imager-version" 500 | with: 501 | version: ${{ needs.prepare.outputs.version_full }} 502 | release-date: ${{ github.event.release.published_at }} 503 | --------------------------------------------------------------------------------