├── .gitignore
├── rootfs
│   ├── etc
│   │   ├── fix-attrs.d
│   │   │   └── 55-node-red
│   │   ├── cont-init.d
│   │   │   ├── 55-node-red
│   │   │   └── 40-dbus-avahi
│   │   └── services.d
│   │       ├── dbus
│   │       │   └── run
│   │       ├── nodered
│   │       │   └── run
│   │       └── avahi
│   │           └── run
│   └── defaults
│       ├── avahi-daemon.conf
│       └── avahi-http.service
├── .github
│   ├── dependabot.yml
│   └── workflows
│       ├── dependabot-auto-merge.yml
│       ├── prepare-release.yml
│       ├── docker-test.yml
│       └── docker-build.yml
├── package.json
├── .docker
│   └── Dockerfile.alpine
├── LICENSE
├── README.md
└── strategy.js
/.gitignore:
--------------------------------------------------------------------------------
1 | /.idea/
2 | /node-red-homekit-docker.iml
3 | /node_modules
4 |
--------------------------------------------------------------------------------
/rootfs/etc/fix-attrs.d/55-node-red:
--------------------------------------------------------------------------------
1 | /usr/src/node-red/package.json true 1000:1000 0755 0755
--------------------------------------------------------------------------------
/rootfs/etc/cont-init.d/55-node-red:
--------------------------------------------------------------------------------
1 | #!/usr/bin/with-contenv sh
2 |
3 | exec npm config set cache /usr/src/node-red/.npm --global
4 |
--------------------------------------------------------------------------------
/rootfs/etc/services.d/dbus/run:
--------------------------------------------------------------------------------
1 | #!/usr/bin/with-contenv sh
2 |
3 | echo "Starting dbus-daemon"
4 | exec dbus-daemon --system --nofork
--------------------------------------------------------------------------------
/rootfs/etc/services.d/nodered/run:
--------------------------------------------------------------------------------
1 | #!/usr/bin/with-contenv sh
2 |
3 | cd /usr/src/node-red || exit
4 |
5 | echo "Starting node-red"
6 | s6-setuidgid node-red npm start -- --userDir /data
7 |
--------------------------------------------------------------------------------
/rootfs/etc/services.d/avahi/run:
--------------------------------------------------------------------------------
1 | #!/usr/bin/with-contenv sh
2 |
3 | until [ -e /var/run/dbus/system_bus_socket ]; do
4 | sleep 1s
5 | done
6 |
7 | echo "Starting Avahi daemon"
8 | exec avahi-daemon --no-chroot -f /etc/avahi/avahi-daemon.conf
--------------------------------------------------------------------------------
/rootfs/defaults/avahi-daemon.conf:
--------------------------------------------------------------------------------
1 | [server]
2 | #host-name=
3 | use-ipv4=yes
4 | use-ipv6=no
5 | enable-dbus=yes
6 | ratelimit-interval-usec=1000000
7 | ratelimit-burst=1000
8 |
9 | [wide-area]
10 | enable-wide-area=yes
11 |
12 | [rlimits]
13 | rlimit-core=0
14 | rlimit-data=4194304
15 | rlimit-fsize=0
16 | rlimit-nofile=768
17 | rlimit-stack=4194304
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: "npm"
4 | directory: "/"
5 | schedule:
6 | interval: "daily"
7 | ignore:
8 | - dependency-name: "node-red"
9 | update-types: [ "version-update:semver-major" ]
10 | - dependency-name: "node-red-contrib-homekit-bridged"
11 | update-types: [ "version-update:semver-major" ]
--------------------------------------------------------------------------------
/rootfs/defaults/avahi-http.service:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" standalone='no'?>
2 | <!DOCTYPE service-group SYSTEM "avahi-service.dtd">
3 | <service-group>
4 |     <name replace-wildcards="yes">%h</name>
5 |     <service>
6 |         <type>_http._tcp</type>
7 |         <port>1880</port>
8 |         <txt-record>path=/</txt-record>
9 |         <txt-record>version=NODE_RED_VERSION</txt-record>
10 |     </service>
11 | </service-group>
--------------------------------------------------------------------------------
/.github/workflows/dependabot-auto-merge.yml:
--------------------------------------------------------------------------------
1 | name: Dependabot auto-merge
2 |
3 | on:
4 | pull_request_target:
5 |
6 | permissions:
7 | pull-requests: write
8 | contents: write
9 |
10 | jobs:
11 | dependabot:
12 | runs-on: ubuntu-latest
13 | if: ${{ github.actor == 'dependabot[bot]' }}
14 | steps:
15 | - name: Dependabot metadata
16 | id: metadata
17 | uses: dependabot/fetch-metadata@v2
18 | with:
19 | github-token: "${{ secrets.GITHUB_TOKEN }}"
20 | - name: Enable auto-merge for Dependabot PRs
21 | run: gh pr merge --auto --merge "$PR_URL"
22 | env:
23 | PR_URL: ${{github.event.pull_request.html_url}}
24 | GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
25 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "node-red-contrib-homekit-docker",
3 | "version": "4.0.3",
4 | "description": "Node-RED nodes to simulate Apple HomeKit devices.",
5 | "homepage": "https://nrchkb.github.io/",
6 | "license": "Apache-2.0",
7 | "repository": {
8 | "type": "git",
9 | "url": "https://github.com/NRCHKB/node-red-contrib-homekit-docker"
10 | },
11 |   "main": "node_modules/node-red/red.js",
12 | "scripts": {
13 | "start": "node $NODE_OPTIONS node_modules/node-red/red.js $FLOWS"
14 | },
15 | "contributors": [
16 | {
17 | "name": "Raymond Mouthaan"
18 | },
19 | {
20 | "name": "Tadeusz Wyrzykowski",
21 | "email": "shaquu@icloud.com",
22 | "url": "https://github.com/Shaquu"
23 | }
24 | ],
25 | "dependencies": {
26 | "node-red": "4.1.2",
27 | "node-red-contrib-homekit-bridged": "1.7.3"
28 | },
29 | "engines": {
30 | "node": ">=18"
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/rootfs/etc/cont-init.d/40-dbus-avahi:
--------------------------------------------------------------------------------
1 | #!/usr/bin/with-contenv sh
2 |
3 | # make folders
4 | mkdir -p /var/run/dbus
5 | mkdir -p /var/run/avahi-daemon
6 |
7 | # delete existing pid if found
8 | [ -e /var/run/dbus.pid ] && rm -f /var/run/dbus.pid
9 | [ -e /var/run/dbus/pid ] && rm -f /var/run/dbus/pid
10 | [ -e /var/run/avahi-daemon/pid ] && rm -f /var/run/avahi-daemon/pid
11 |
12 | # permissions
13 | chown messagebus:messagebus /var/run/dbus
14 | chown avahi:avahi /var/run/avahi-daemon
15 | dbus-uuidgen --ensure
16 | sleep 1
17 |
18 | # avahi config
19 | cp /defaults/avahi-daemon.conf /etc/avahi/avahi-daemon.conf
20 | rm -rf /etc/avahi/services/*
21 | cp /defaults/avahi-http.service /etc/avahi/services/http.service
22 |
23 | # fix for synology dsm - see #35
24 | if [ ! -z "$DSM_HOSTNAME" ]; then
25 | sed -i "s/.*host-name.*/host-name=${DSM_HOSTNAME}/" /etc/avahi/avahi-daemon.conf
26 | else
27 | sed -i "s/.*host-name.*/#host-name=/" /etc/avahi/avahi-daemon.conf
28 | fi
29 |
30 | # avahi config service http
31 | NODE_RED_VERSION=$(grep -oE "\"node-red\": \"(\w*.\w*.\w*.\w*.\w*.)" package.json | cut -d\" -f4)
32 | sed -i "s/NODE_RED_VERSION/${NODE_RED_VERSION}/" /etc/avahi/services/http.service
33 |
34 | if [ ! -z "$PORT" ]; then
35 | sed -i "s/1880<\/port>/${PORT}<\/port>/" /etc/avahi/services/http.service
36 | fi
--------------------------------------------------------------------------------
/.github/workflows/prepare-release.yml:
--------------------------------------------------------------------------------
1 | name: Prepare release
2 |
3 | permissions:
4 | contents: write
5 |
6 | concurrency:
7 | group: prepare-release-${{ github.ref }}
8 | cancel-in-progress: true
9 |
10 | on:
11 | workflow_dispatch:
12 | schedule:
13 | - cron: "0 0 * * *"
14 |
15 | jobs:
16 | release:
17 | name: Check for new features to be released
18 | runs-on: ubuntu-latest
19 | if: ${{ github.ref == 'refs/heads/master' }}
20 | timeout-minutes: 20
21 | defaults:
22 | run:
23 | shell: bash
24 | steps:
25 | - name: Checkout
26 | uses: actions/checkout@v4
27 | with:
28 | fetch-depth: 0
29 | - name: Changes since last tag
30 | id: diff
31 | run: |
32 | LAST_TAG="$(git describe --tags --abbrev=0)"
33 | CURRENT_TAG="${{ github.ref }}"
34 | CHANGES="$(git diff $LAST_TAG $CURRENT_TAG --stat)"
35 |
36 | ANY_CHANGES='false'
37 | if [[ "$CHANGES" != "" ]]; then
38 | ANY_CHANGES='true'
39 | fi
40 |
41 | echo "last-tag=$LAST_TAG" >> $GITHUB_OUTPUT
42 | echo "any_changes=$ANY_CHANGES" >> $GITHUB_OUTPUT
43 |
44 | echo "from: $LAST_TAG"
45 | echo "to: $CURRENT_TAG"
46 | echo "changes: $ANY_CHANGES"
47 | - name: Setup Node.js
48 | if: ${{ steps.diff.outputs.any_changes == 'true' }}
49 | uses: actions/setup-node@v4
50 | with:
51 | node-version: "20"
52 | - name: Bump
53 | if: ${{ steps.diff.outputs.any_changes == 'true' }}
54 | id: version-bump
55 | uses: phips28/gh-action-bump-version@master
56 | with:
57 | tag-prefix: v
58 | env:
59 | GITHUB_TOKEN: ${{ secrets.NRCHKB_DOCKER }}
60 | - name: Checkout
61 | if: ${{ steps.diff.outputs.any_changes == 'true' }}
62 | uses: actions/checkout@v4
63 | - name: Create release
64 | if: ${{ steps.diff.outputs.any_changes == 'true' }}
65 | uses: softprops/action-gh-release@v2
66 | with:
67 | tag_name: ${{ steps.version-bump.outputs.newTag }}
68 | - name: Dispatch release build
69 | if: ${{ steps.diff.outputs.any_changes == 'true' }}
70 | uses: peter-evans/repository-dispatch@v3
71 | with:
72 | token: ${{ secrets.NRCHKB_DOCKER }}
73 | event-type: docker-build
74 |
--------------------------------------------------------------------------------
/.docker/Dockerfile.alpine:
--------------------------------------------------------------------------------
1 | ARG ARCH
2 | ARG NODE_VERSION
3 | ARG TAG_SUFFIX
4 | ARG NODE_RED_VERSION
5 |
6 | FROM nodered/node-red:${NODE_RED_VERSION}-${NODE_VERSION}${TAG_SUFFIX}
7 |
8 | ARG HOMEKIT_BRIDGED_VERSION
9 |
10 | LABEL org.label-schema.build-date=${BUILD_DATE} \
11 | org.label-schema.docker.dockerfile=".docker/Dockerfile.alpine" \
12 | org.label-schema.license="Apache-2.0" \
13 | org.label-schema.name="Node-RED" \
14 | org.label-schema.version=${BUILD_VERSION} \
15 | org.label-schema.description="Low-code programming for event-driven applications." \
16 | org.label-schema.url="https://nodered.org" \
17 | org.label-schema.vcs-ref=${BUILD_REF} \
18 | org.label-schema.vcs-type="Git" \
19 | org.label-schema.vcs-url="https://github.com/NRCHKB/node-red-contrib-homekit-docker.git" \
20 | org.label-schema.arch=${ARCH} \
21 | authors="Raymond Mouthaan, Tadeusz Wyrzykowski"
22 |
23 | USER root
24 |
25 | RUN apk update
26 |
27 | # root filesystem
28 | COPY rootfs /
29 |
30 | # Install tools
31 | RUN set -ex \
32 | && apk add --no-cache --virtual .run-deps \
33 | avahi-compat-libdns_sd \
34 | avahi-dev \
35 | dbus \
36 | && npm set package-lock=false
37 |
38 | ARG TARGETPLATFORM
39 | ARG FFMPEG_OS
40 |
41 | # vcgencmd
42 | RUN case "$FFMPEG_OS" in \
43 | raspbian) \
44 | set -x && \
45 | apk add --no-cache cmake && \
46 | git clone https://github.com/raspberrypi/utils.git && \
47 | cd utils/vcgencmd && \
48 | cmake . && \
49 | make && \
50 | make install && \
51 | cd ../.. && \
52 | rm -r utils \
53 | ;; \
54 | esac
55 |
56 | # s6 overlay
57 | RUN case "$TARGETPLATFORM" in \
58 | linux\/amd64) S6_ARCH='amd64';; \
59 | linux\/arm/v7) S6_ARCH='armhf';; \
60 | linux\/arm/v6) S6_ARCH='armhf';; \
61 | linux\/arm64) S6_ARCH='aarch64';; \
62 | *) echo "unsupported architecture"; exit 1 ;; \
63 | esac \
64 | && set -x \
65 | && curl -Lfs https://github.com/just-containers/s6-overlay/releases/download/v2.2.0.3/s6-overlay-${S6_ARCH}.tar.gz | tar xzf - -C / --no-same-owner
66 |
67 | # ffmpeg-for-homebridge
68 | RUN case "$TARGETPLATFORM" in \
69 | linux\/amd64) FFMPEG_ARCH='x86_64';; \
70 | linux\/arm/v7) FFMPEG_ARCH='arm32v7';; \
71 | linux\/arm/v6) FFMPEG_ARCH='arm32v7';; \
72 | linux\/arm64) FFMPEG_ARCH='aarch64';; \
73 | *) echo "unsupported architecture"; exit 1 ;; \
74 | esac \
75 | && set -x \
76 | && curl -Lfs https://github.com/homebridge/ffmpeg-for-homebridge/releases/latest/download/ffmpeg-alpine-${FFMPEG_ARCH}.tar.gz | tar xzf - -C / --no-same-owner
77 |
78 | COPY package.json .
79 | RUN npm install --unsafe-perm --no-update-notifier --only=production
80 |
81 | # Add passport openidconnect strategy to allow usage of OIDC for authentication at Node RED editor & dashboard
82 | RUN npm install passport-openidconnect \
83 | && npm install jwt-decode
84 |
85 | # Modify jaredhanson/passport-openidconnect to retrieve profile scope
86 | # Apply modified file /usr/src/node-red/node_modules/passport-openidconnect/lib/strategy.js
87 | # Modified file adds profile in line 244 to get userinfo
88 | COPY strategy.js /usr/src/node-red/node_modules/passport-openidconnect/lib/strategy.js
89 |
90 | RUN apk add --no-cache libcap \
91 | && setcap 'cap_net_bind_service=+ep' `which node`
92 |
93 | ENTRYPOINT [ "/init" ]
94 |
--------------------------------------------------------------------------------
/.github/workflows/docker-test.yml:
--------------------------------------------------------------------------------
1 | name: Docker images - test
2 |
3 | permissions:
4 | contents: read
5 |
6 | concurrency:
7 | group: docker-test-${{ github.ref }}
8 | cancel-in-progress: true
9 |
10 | on:
11 | workflow_dispatch:
12 | pull_request:
13 |
14 | jobs:
15 | testbuild:
16 | timeout-minutes: 60
17 | env:
18 | LATEST_NODE: 22
19 | DEFAULT_IMAGE: nrchkb/node-red-homekit
20 | DEV_IMAGE: nrchkb/node-red-homekit-dev
21 | runs-on: ubuntu-latest
22 |
23 | defaults:
24 | run:
25 | shell: bash
26 |
27 | strategy:
28 | matrix:
29 | node: [ 18, 20, 22 ]
30 | suffix: [ "", "-minimal", "-raspbian" ]
31 |
32 | steps:
33 | - name: Checkout
34 | uses: actions/checkout@v4
35 | - name: Show Env
36 | run: env
37 | - name: Docker Metadata
38 | id: meta
39 | uses: docker/metadata-action@v5
40 | with:
41 | flavor: |
42 | latest=false
43 | suffix=-${{matrix.node}}${{matrix.suffix}}
44 | images: |
45 | ${{ env.DEFAULT_IMAGE }}
46 | tags: |
47 | type=ref,event=branch
48 | type=semver,pattern={{version}}
49 | - name: Setup QEMU
50 | uses: docker/setup-qemu-action@v3
51 | - name: Setup Docker buildx
52 | uses: docker/setup-buildx-action@v3
53 | - name: Get Date
54 | id: date
55 | run: echo "date=$(date +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
56 | - name: Get NRCHKB Version
57 | id: nrchkbVersion
58 | run: |
59 | HOMEKIT_BRIDGED_VERSION=$(grep -oE "\"node-red-contrib-homekit-bridged\": \"(\w*.\w*.\w*.\w*.\w*.)" package.json | cut -d\" -f4)
60 | echo "version=$HOMEKIT_BRIDGED_VERSION" >> $GITHUB_OUTPUT
61 | - name: Set Node-RED Version
62 | id: nodeRedVersion
63 | run: |
64 | NODE_RED_VERSION=$(grep -oE "\"node-red\": \"(\w*.\w*.\w*.\w*.\w*.)" package.json | cut -d\" -f4)
65 | echo "version=$NODE_RED_VERSION" >> $GITHUB_OUTPUT
66 | - name: Set Image Settings
67 | id: imageSettings
68 | run: |
69 | TAGS=""
70 | while IFS= read -r TAG;do
71 | if [ -z "$TAGS" ]; then
72 | TAGS=$TAG
73 | else
74 | TAGS="$TAGS,$TAG"
75 | fi
76 | done <<< "${{ steps.meta.outputs.tags }}"
77 |
78 | CURRENT_TAG="$(echo "$GITHUB_REF" | awk -F '/' '{ print $3}')"
79 |
80 | if [[ ! "$CURRENT_TAG" =~ ^v[0-9\.-]*$ ]]; then
81 | CURRENT_TAG="$(git describe --tags --abbrev=0)"
82 | fi
83 |
84 | echo "current tag $CURRENT_TAG"
85 |
86 | if [[ "$CURRENT_TAG" =~ ^v[0-9\.-]*$ ]]; then
87 | IMAGE=${{ env.DEFAULT_IMAGE }}
88 | PUSH="true"
89 | VERSION=${CURRENT_TAG:1}
90 | if [ "${{ matrix.node }}" == "${{ env.LATEST_NODE }}" ] && [ "${{ matrix.suffix}}" == "" ]; then
91 | TAGS="$TAGS,$IMAGE:$VERSION,$IMAGE:latest"
92 | elif [ "${{ matrix.node }}" == "${{ env.LATEST_NODE }}" ] && [ "${{ matrix.suffix}}" == "-minimal" ]; then
93 | TAGS="$TAGS,$IMAGE:$VERSION,$IMAGE:latest-minimal"
94 | elif [ "${{ matrix.node }}" == "${{ env.LATEST_NODE }}" ] && [ "${{ matrix.suffix}}" == "-raspbian" ]; then
95 | TAGS="$TAGS,$IMAGE:$VERSION,$IMAGE:latest-raspbian"
96 | fi
97 | TAGS="$TAGS,$IMAGE:latest-${{ matrix.node }}"
98 | if [ "${{ matrix.suffix}}" != "" ]; then
99 | TAGS="$TAGS,$IMAGE:latest-${{ matrix.node }}${{ matrix.suffix }}"
100 | fi
101 | else
102 | IMAGE=${{ env.DEV_IMAGE }}
103 | if [[ "$CURRENT_TAG" == *"dev"* || "$CURRENT_TAG" == *"beta"* ]]; then
104 | PUSH="true"
105 | else
106 | PUSH="false"
107 | fi
108 | VERSION=${CURRENT_TAG}
109 | TAGS=$(echo $TAGS | sed 's!${{ env.DEFAULT_IMAGE}}!${{ env.DEV_IMAGE }}!')
110 | if [ "${{ matrix.node }}" == "${{ env.LATEST_NODE }}" ] && [ "${{ matrix.suffix}}" == "" ]; then
111 | TAGS="$TAGS,$IMAGE:$VERSION"
112 | fi
113 | fi
114 |
115 | PLATFORMS=""
116 | SUFFIX=""
117 | if [[ "${{matrix.suffix}}" == "-raspbian" ]]; then
118 | # Upstream Node-RED images no longer provide arm/v6 variants for recent tags
119 | # Test build only for supported Raspberry Pi platform(s)
120 | PLATFORMS="linux/arm/v7"
121 | SUFFIX=""
122 | FFMPEG_OS="raspbian"
123 | else
124 | # Remove arm/v6 as it is not available in upstream manifests for recent tags
125 | PLATFORMS="linux/amd64, linux/arm64, linux/arm/v7"
126 | SUFFIX="${{ matrix.suffix }}"
127 | FFMPEG_OS="alpine"
128 | fi
129 |
130 | echo $TAGS
131 |
132 | echo "tags=$TAGS" >> $GITHUB_OUTPUT
133 | echo "push=$PUSH" >> $GITHUB_OUTPUT
134 | echo "version=$VERSION" >> $GITHUB_OUTPUT
135 | echo "platforms=$PLATFORMS" >> $GITHUB_OUTPUT
136 | echo "suffix=$SUFFIX" >> $GITHUB_OUTPUT
137 | echo "ffmpegOS=$FFMPEG_OS" >> $GITHUB_OUTPUT
138 | - name: Build and push
139 | id: build-push
140 | uses: docker/build-push-action@v6
141 | continue-on-error: true
142 | with:
143 | context: .
144 | platforms: ${{ steps.imageSettings.outputs.platforms }}
145 | push: ${{ steps.imageSettings.outputs.push }}
146 | file: .docker/Dockerfile.alpine
147 | build-args: |
148 | NODE_VERSION=${{ matrix.node }}
149 | BUILD_DATE=${{ steps.date.outputs.date }}
150 | BUILD_VERSION=${{ steps.imageSettings.outputs.version }}
151 | BUILD_REF=${{ env.GITHUB_SHA }}
152 | NODE_RED_VERSION=${{ steps.nodeRedVersion.outputs.version }}
153 | HOMEKIT_BRIDGED_VERSION=${{ steps.nrchkbVersion.outputs.version }}
154 | FFMPEG_OS=${{ steps.imageSettings.outputs.ffmpegOS }}
155 | TAG_SUFFIX=${{ steps.imageSettings.outputs.suffix }}
156 | tags: ${{ steps.imageSettings.outputs.tags }}
157 |
--------------------------------------------------------------------------------
/.github/workflows/docker-build.yml:
--------------------------------------------------------------------------------
1 | name: Docker images - build
2 |
3 | permissions:
4 | contents: read
5 |
6 | concurrency:
7 | group: docker-build-${{ github.ref }}
8 | cancel-in-progress: true
9 |
10 | on:
11 | workflow_dispatch:
12 | release:
13 | types: [ published ]
14 | repository_dispatch:
15 | types: [ docker-build ]
16 |
17 | jobs:
18 | build:
19 | timeout-minutes: 90
20 | env:
21 | LATEST_NODE: 22
22 | DEFAULT_IMAGE: nrchkb/node-red-homekit
23 | DEV_IMAGE: nrchkb/node-red-homekit-dev
24 | runs-on: ubuntu-latest
25 |
26 | defaults:
27 | run:
28 | shell: bash
29 |
30 | strategy:
31 | matrix:
32 | node: [ 18, 20, 22 ]
33 | suffix: [ "", "-minimal", "-raspbian" ]
34 |
35 | steps:
36 | - name: Login to DockerHub
37 | uses: docker/login-action@v3
38 | with:
39 | username: ${{ secrets.DOCKERHUB_USERNAME }}
40 | password: ${{ secrets.DOCKERHUB_TOKEN }}
41 | - name: Checkout
42 | uses: actions/checkout@v4
43 | with:
44 | fetch-depth: 0
45 | - name: Show Env
46 | run: env
47 | - name: Docker Metadata
48 | id: meta
49 | uses: docker/metadata-action@v5
50 | with:
51 | flavor: |
52 | latest=false
53 | suffix=-${{matrix.node}}${{matrix.suffix}}
54 | images: |
55 | ${{ env.DEFAULT_IMAGE }}
56 | tags: |
57 | type=ref,event=branch
58 | type=semver,pattern={{version}}
59 | - name: Setup QEMU
60 | uses: docker/setup-qemu-action@v3
61 | - name: Setup Docker buildx
62 | uses: docker/setup-buildx-action@v3
63 | - name: Get Date
64 | id: date
65 | run: echo "date=$(date +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
66 | - name: Get NRCHKB Version
67 | id: nrchkbVersion
68 | run: |
69 | HOMEKIT_BRIDGED_VERSION=$(grep -oE "\"node-red-contrib-homekit-bridged\": \"(\w*.\w*.\w*.\w*.\w*.)" package.json | cut -d\" -f4)
70 | echo "version=$HOMEKIT_BRIDGED_VERSION" >> $GITHUB_OUTPUT
71 | - name: Set Node-RED Version
72 | id: nodeRedVersion
73 | run: |
74 | NODE_RED_VERSION=$(grep -oE "\"node-red\": \"(\w*.\w*.\w*.\w*.\w*.)" package.json | cut -d\" -f4)
75 | echo "version=$NODE_RED_VERSION" >> $GITHUB_OUTPUT
76 | - name: Set Image Settings
77 | id: imageSettings
78 | run: |
79 | TAGS=""
80 | while IFS= read -r TAG;do
81 | if [ -z "$TAGS" ]; then
82 | TAGS=$TAG
83 | else
84 | TAGS="$TAGS,$TAG"
85 | fi
86 | done <<< "${{ steps.meta.outputs.tags }}"
87 |
88 | CURRENT_TAG="$(echo "$GITHUB_REF" | awk -F '/' '{ print $3}')"
89 |
90 | if [[ ! "$CURRENT_TAG" =~ ^v[0-9\.-]*$ ]]; then
91 | CURRENT_TAG="$(git describe --tags --abbrev=0)"
92 | fi
93 |
94 | echo "current tag $CURRENT_TAG"
95 |
96 | if [[ "$CURRENT_TAG" =~ ^v[0-9\.-]*$ ]]; then
97 | IMAGE=${{ env.DEFAULT_IMAGE }}
98 | PUSH="true"
99 | VERSION=${CURRENT_TAG:1}
100 | if [ "${{ matrix.node }}" == "${{ env.LATEST_NODE }}" ] && [ "${{ matrix.suffix}}" == "" ]; then
101 | TAGS="$TAGS,$IMAGE:$VERSION,$IMAGE:latest"
102 | elif [ "${{ matrix.node }}" == "${{ env.LATEST_NODE }}" ] && [ "${{ matrix.suffix}}" == "-minimal" ]; then
103 | TAGS="$TAGS,$IMAGE:$VERSION,$IMAGE:latest-minimal"
104 | elif [ "${{ matrix.node }}" == "${{ env.LATEST_NODE }}" ] && [ "${{ matrix.suffix}}" == "-raspbian" ]; then
105 | TAGS="$TAGS,$IMAGE:$VERSION,$IMAGE:latest-raspbian"
106 | fi
107 | TAGS="$TAGS,$IMAGE:latest-${{ matrix.node }}"
108 | if [ "${{ matrix.suffix}}" != "" ]; then
109 | TAGS="$TAGS,$IMAGE:latest-${{ matrix.node }}${{ matrix.suffix }}"
110 | fi
111 | else
112 | IMAGE=${{ env.DEV_IMAGE }}
113 | if [[ "$CURRENT_TAG" == *"dev"* || "$CURRENT_TAG" == *"beta"* ]]; then
114 | PUSH="true"
115 | else
116 | PUSH="false"
117 | fi
118 | VERSION=${CURRENT_TAG}
119 | TAGS=$(echo $TAGS | sed 's!${{ env.DEFAULT_IMAGE}}!${{ env.DEV_IMAGE }}!')
120 | if [ "${{ matrix.node }}" == "${{ env.LATEST_NODE }}" ] && [ "${{ matrix.suffix}}" == "" ]; then
121 | TAGS="$TAGS,$IMAGE:$VERSION"
122 | fi
123 | fi
124 |
125 | PLATFORMS=""
126 | SUFFIX=""
127 | if [[ "${{matrix.suffix}}" == "-raspbian" ]]; then
128 | # Upstream Node-RED images no longer provide arm/v6 variants for recent tags
129 | # Build only for supported Raspberry Pi platform(s)
130 | PLATFORMS="linux/arm/v7"
131 | SUFFIX=""
132 | FFMPEG_OS="raspbian"
133 | else
134 | # Remove arm/v6 as it is not available in upstream manifests for recent tags
135 | PLATFORMS="linux/amd64, linux/arm64, linux/arm/v7"
136 | SUFFIX="${{ matrix.suffix }}"
137 | FFMPEG_OS="alpine"
138 | fi
139 |
140 | echo $TAGS
141 |
142 | echo "tags=$TAGS" >> $GITHUB_OUTPUT
143 | echo "push=$PUSH" >> $GITHUB_OUTPUT
144 | echo "version=$VERSION" >> $GITHUB_OUTPUT
145 | echo "platforms=$PLATFORMS" >> $GITHUB_OUTPUT
146 | echo "suffix=$SUFFIX" >> $GITHUB_OUTPUT
147 | echo "ffmpegOS=$FFMPEG_OS" >> $GITHUB_OUTPUT
148 | - name: Build and push
149 | id: build-push
150 | uses: docker/build-push-action@v6
151 | continue-on-error: true
152 | with:
153 | context: .
154 | platforms: ${{ steps.imageSettings.outputs.platforms }}
155 | push: ${{ steps.imageSettings.outputs.push }}
156 | file: .docker/Dockerfile.alpine
157 | build-args: |
158 | NODE_VERSION=${{ matrix.node }}
159 | BUILD_DATE=${{ steps.date.outputs.date }}
160 | BUILD_VERSION=${{ steps.imageSettings.outputs.version }}
161 | BUILD_REF=${{ env.GITHUB_SHA }}
162 | NODE_RED_VERSION=${{ steps.nodeRedVersion.outputs.version }}
163 | HOMEKIT_BRIDGED_VERSION=${{ steps.nrchkbVersion.outputs.version }}
164 | FFMPEG_OS=${{ steps.imageSettings.outputs.ffmpegOS }}
165 | TAG_SUFFIX=${{ steps.imageSettings.outputs.suffix }}
166 | tags: ${{ steps.imageSettings.outputs.tags }}
167 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Node-RED-homekit-docker
2 | [](https://github.com/NRCHKB/node-red-contrib-homekit-docker/actions/workflows/docker-build.yml)
3 | [](https://hub.docker.com/r/nrchkb/node-red-homekit)
4 | [](https://hub.docker.com/r/nrchkb/node-red-homekit)
5 |
6 | Node-RED-homekit-docker is a Node-RED based project with support for homekit. It is based on
7 | the [official Node-RED Docker](https://hub.docker.com/r/nodered/node-red) images with the necessary tools and npm
8 | module [node-red-contrib-homekit-bridged](https://www.npmjs.com/package/node-red-contrib-homekit-bridged) installed to
9 | run homekit within a docker container.
10 |
11 | ## Architecture
12 |
13 | Node-RED-homekit-docker is supported by a manifest list, which means one doesn't need to specify the tag for a specific
14 | architecture. Using the image without any tag or the latest tag will pull the right image for the architecture
15 | required.
16 |
17 | Currently, Node-RED-homekit images are published as a multi-arch manifest supporting the following architectures:
18 |
19 | - `amd64` – Alpine based (most PCs/servers: x64, x86-64, x86_64)
20 | - `arm32v7` – Alpine based (Raspberry Pi 2/3/4)
21 | - `arm64v8` – Alpine based (64-bit ARM boards like Raspberry Pi 3/4 64-bit OS, Pine64, etc.)
22 |
23 | Notes:
24 |
25 | - The dedicated Raspbian variant (`*-raspbian` tags) is built for `arm32v7` only.
26 | - We no longer build or publish `arm32v6` images because upstream Node-RED images no longer provide this architecture
27 | for current tags.
28 |
29 | **Note**: As of the Node-RED 4.0.0 release, we are no longer building docker images for previous versions. At the same
30 | time,
31 | images with NodeJS up to 18 (excluded) are dropped. The next major NRCHKB release will require NodeJS >= 22.
32 |
33 | #### Verify the published architecture
34 |
35 | You can verify the architectures available for a given tag using Docker Buildx:
36 |
37 | ```bash
38 | docker buildx imagetools inspect nrchkb/node-red-homekit:latest
39 | # or a specific tag, e.g. Node/variant tagged image
40 | docker buildx imagetools inspect nrchkb/node-red-homekit:latest-22
41 | ```
42 |
43 | ### Quick Start (for those already running Docker)
44 |
45 | ```bash
46 | docker run -d --net=host -v :/data -e TZ=Europe/Amsterdam -e DEBUG=NRCHKB:* --name=node-red-homekit nrchkb/node-red-homekit
47 | ```
48 |
49 | Let's dissect that command:
50 |
51 | docker run - Run this container.
52 | -d - Run container in background and print container ID.
53 | --net=host - Connect to the host network, which is required to work with homekit.
54 | -v :/data - Persist container data
55 | -e TZ=Europe/Amsterdam - Set timezone, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
56 | -e DEBUG=NRCHKB:* - Print basic debug logs for NRCHKB
57 | --name node-red-homekit - Give this machine a friendly local name.
58 |     nrchkb/node-red-homekit - The image to base the container on.
59 |
60 | ### Raspberry Pi (including installation Docker)
61 |
62 | Following these commands will install Docker, add user `pi` to a Docker group, then set the docker container to always
63 | run.
64 |
65 | We assume you have some basic knowledge of Linux and you are logged in as `pi` user.
66 |
67 | 1) Make sure we are in the home directory of the pi user:
68 |
69 | ```bash
70 | cd ~
71 | ```
72 |
73 | 2) Make sure we have the latest packages available and upgrade to the latest versions (reboot if needed!):
74 |
75 | ```bash
76 | sudo apt update && sudo apt upgrade -y
77 | ```
78 |
79 | 3) Download the docker install script and execute it to install Docker on your system.
80 |
81 | ```bash
82 | curl -fsSL https://get.docker.com -o get-docker.sh && sudo sh get-docker.sh
83 | ```
84 |
85 | 4) As the Docker script explains, add the pi user to the Docker group so that the pi user has the permissions to execute
86 | docker commands:
87 |
88 | ```bash
89 | sudo usermod -aG docker pi
90 | ```
91 |
92 | 5) Reboot the Raspberry PI or just log out and back in:
93 |
94 | ```bash
95 | sudo reboot
96 | ```
97 |
98 | 6) To test if your Docker installation went well:
99 |
100 | ```bash
101 | docker run --rm hello-world
102 | ```
103 |
104 | The above command should say 'Hello from Docker':
105 |
106 | ```
107 | Unable to find image 'hello-world:latest' locally
108 | latest: Pulling from library/hello-world
109 | 0e03bdcc26d7: Pull complete
110 | Digest: sha256:7f0a9f93b4aa3022c3a4c147a449bf11e0941a1fd0bf4a8e6c9408b2600777c5
111 | Status: Downloaded newer image for hello-world:latest
112 |
113 | Hello from Docker!
114 | This message shows that your installation appears to be working correctly.
115 |
116 | To generate this message, Docker took the following steps:
117 | 1. The Docker client contacted the Docker daemon.
118 | 2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
119 | (amd64)
120 | 3. The Docker daemon created a new container from that image which runs the
121 | executable that produces the output you are currently reading.
122 | 4. The Docker daemon streamed that output to the Docker client, which sent it
123 | to your terminal.
124 |
125 | To try something more ambitious, you can run an Ubuntu container with:
126 | $ docker run -it ubuntu bash
127 |
128 | Share images, automate workflows, and more with a free Docker ID:
129 | https://hub.docker.com/
130 |
131 | For more examples and ideas, visit:
132 | https://docs.docker.com/get-started/
133 | ```
134 |
135 | If the above steps went as expected, you are ready to run the `nrchkb/node-red-homekit` image as a container. But before
136 | that we create a directory on the pi host so that all node-red / node-red-homekit files are stored outside the container
137 | on your raspberry pi.
138 |
139 | 7) Make a directory in your pi user's home directory:
140 |
141 | ```bash
142 | mkdir node-red-homekit
143 | ```
144 |
145 | 8) Run the Docker run command to deploy the `nrchkb/node-red-homekit` image as a container and where the container
146 | `/data` directory is bound to the `/home/pi/node-red-homekit` directory on your Raspberry PI.
147 |
148 | ```bash
149 | docker run -d --net=host -v ~/node-red-homekit:/data --restart always -e TZ=Europe/Amsterdam -e DEBUG=NRCHKB:* --name node-red-homekit nrchkb/node-red-homekit
150 | ```
151 |
152 | You don't need to explicitly map ports, since all ports are opened on the host network! This is required for homekit to
153 | work well.
154 |
155 | ### Upgrade to the latest image
156 |
157 | Suppose there is a new `nrchkb/node-red-homekit` image available? How do I make use of this new image?
158 |
159 | 1) Find the id of your current deployed container:
160 |
161 | ```bash
162 | docker container ls
163 | ```
164 |
165 | The above command lists all running containers, and in the first column it displays the id of the container and in the
166 | last column its name.
167 |
168 | 2) Stop the current container:
169 |
170 | ```
171 | docker stop
172 | ```
173 |
174 | 3) Remove the current container:
175 |
176 | ```
177 | docker rm
178 | ```
179 |
180 | 4) Pull the latest `nrchkb/node-red-homekit` image from [Docker Hub](https://hub.docker.com/r/nrchkb/node-red-homekit):
181 |
182 | ```bash
183 | docker pull nrchkb/node-red-homekit
184 | ```
185 |
186 | 5) Deploy the container again:
187 |
188 | ```bash
189 | docker run -d --net=host -v ~/node-red-homekit:/data --restart always -e TZ=Europe/Amsterdam -e DEBUG=NRCHKB:* --name node-red-homekit nrchkb/node-red-homekit
190 | ```
191 |
192 | This runs the container based on the latest `nrchkb/node-red-homekit` image and retains your flows!
193 |
194 | ### Docker Compose (including installation Docker Compose)
195 |
196 | For demo purposes we use a Raspberry Pi with Docker installed (see the Raspberry Pi installation section above).
197 |
198 | 1) Install required packages for Docker Compose
199 |
200 | ```bash
201 | sudo apt update && sudo apt install -y libffi-dev libssl-dev python3 python3-pip
202 | ```
203 |
204 | 2) Install Docker Compose using pip3
205 |
206 | ```
207 | sudo pip3 -v install docker-compose
208 | ```
209 |
210 | 3) Create a Docker compose yml for `nrchkb/node-red-homekit`:
211 |
212 | Let's assume you have a directory `/home/pi/node-red-homekit` with a `data` subdirectory:
213 |
214 | ```bash
215 | node-red-homekit/
216 | `-- data
217 |
218 | 1 directory, 0 files
219 | ```
220 |
221 | Use your favorite editor like nano to create a file named `docker-compose.yml` with the content below and save it in
222 | `/home/pi/node-red-homekit`.
223 |
224 | ```yaml
225 | version: '2'
226 | services:
227 | node-red-homekit:
228 | image: nrchkb/node-red-homekit
229 | restart: always
230 | network_mode: host
231 | environment:
232 | - TZ=Europe/Amsterdam
233 | - DEBUG=NRCHKB:*
234 | volumes:
235 | - /home/pi/node-red-homekit/data:/data
236 | ```
237 |
238 | note 1: `/home/pi/node-red-homekit/data` is the persistence directory where the Docker container stores its files.
239 | note 2: there is no port mapping defined, since the container is attached / uses the host network.
240 |
241 | 4) Deploy the service as defined in `docker-compose.yml`
242 |
243 | From the `/home/pi/node-red-homekit` directory execute the command below to deploy the service and therefore the
244 | container:
245 |
246 | ```bash
247 | docker-compose up -d
248 | ```
249 |
250 | 5) Verify your deployed container:
251 |
252 | ```bash
253 | docker container ls
254 | ```
255 |
256 | 6) To update to the latest image:
257 |
258 | The command below stops the current running container, removes it and removes the image.
259 |
260 | ```bash
261 | docker-compose down --rmi all
262 | ```
263 |
264 | Run the command in step 4 to redeploy the service. It pulls the (latest) image, since it is no longer available locally.
265 |
266 | ### Synology
267 |
268 | Synology users need to add the environment variable DSM_HOSTNAME.
269 |
270 | Click the Environment tab and add a new environment variable named DSM_HOSTNAME. The value of the DSM_HOSTNAME
271 | environment variable should exactly match the server name as shown under Synology DSM Control Panel → Info Center →
272 | Server name, it should contain no spaces or special characters.
273 |
274 | ```bash
275 | docker run -it --net=host -v :/data -e DSM_HOSTNAME= -e TZ=Europe/Amsterdam -e DEBUG=NRCHKB:* --name=homekit nrchkb/node-red-homekit:
276 | ```
277 |
278 | ### Permissions
279 |
280 | Since Node-RED 1.0 the container user is `node-red` and has uid `1000` and gid `1000`, make sure your data directory has
281 | the same uid and gid:
282 |
283 | Verify command:
284 |
285 | ```bash
286 | ls -nal
287 | ```
288 |
289 | Modify command:
290 |
291 | ```bash
292 | chown -R 1000:1000
293 | ```
294 |
295 | ### Debug
296 |
297 | To debug NRCHKB, you have to add environment variable to command running node-red in docker by adding `-e` argument:
298 |
299 | ```-e "DEBUG=NRCHKB*,HAP-NodeJS*"```
300 |
301 | To do that, modify a starting script like below:
302 |
303 | ```bash
304 | docker run -it -e "DEBUG=NRCHKB*,HAP-NodeJS*" --net=host -v :/data -e DSM_HOSTNAME= -e TZ=Europe/Amsterdam -e DEBUG=NRCHKB:* --name=homekit nrchkb/node-red-homekit:
305 | ```
306 |
307 | ### Node-RED Docker official
308 |
309 | For more detailed info refer to the [Node-RED Docker official](https://github.com/node-red/node-red-docker) pages.
310 |
311 | ### NRCHKB Support
312 |
313 | For more info visit our [Website](https://nrchkb.github.io) or [Discord](https://discord.gg/uvYac5u).
314 |
--------------------------------------------------------------------------------
/strategy.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Module dependencies.
3 | */
4 | var passport = require('passport-strategy')
5 | , url = require('url')
6 | , querystring = require('querystring')
7 | , util = require('util')
8 | , utils = require('./utils')
9 | , OAuth2 = require('oauth').OAuth2
10 | , SessionStateStore = require('./state/session')
11 | //, setup = require('./setup')
12 | , InternalOAuthError = require('./errors/internaloautherror')
13 | , AuthorizationError = require('./errors/authorizationerror');
14 |
15 |
16 | /**
17 | * `Strategy` constructor.
18 | *
19 | * The OpenID Connect authentication strategy authenticates requests using
20 | * OpenID Connect, which is an identity layer on top of the OAuth 2.0 protocol.
21 | *
22 | * @param {Object} options
23 | * @param {Function} verify
24 | * @api public
25 | */
function Strategy(options, verify) {
    options = options || {};
    passport.Strategy.call(this);
    this.name = 'openidconnect';
    // Application-supplied verify callback, invoked with an arity-dependent
    // argument list from `authenticate` (see the arity dispatch there).
    this._verify = verify;

    // TODO: What's the recommended field name for OpenID Connect?
    this._identifierField = options.identifierField || 'openid_identifier';
    this._scope = options.scope;
    this._passReqToCallback = options.passReqToCallback;
    this._skipUserProfile = (options.skipUserProfile === undefined) ? false : options.skipUserProfile;

    this._setup = undefined;

    // Session key under which the state store keeps per-request state.
    // Previously this unconditionally called `url.parse(options.authorizationURL)`,
    // which throws when no authorizationURL is configured — exactly the
    // dynamic-discovery case handled below. Fall back to the bare strategy
    // name instead of crashing the constructor.
    if (options.sessionKey) {
        this._key = options.sessionKey;
    } else if (options.authorizationURL) {
        this._key = this.name + ':' + url.parse(options.authorizationURL).hostname;
    } else {
        this._key = this.name;
    }
    this._stateStore = options.store || new SessionStateStore({key: this._key});

    if (options.authorizationURL && options.tokenURL) {
        // This OpenID Connect strategy is configured to work with a specific
        // provider. Override the discovery process with pre-configured endpoints.
        this.configure(require('./setup/manual')(options));
    } else {
        this.configure(require('./setup/dynamic')(options));
    }
}
52 |
53 | /**
54 | * Inherit from `passport.Strategy`.
55 | */
56 | util.inherits(Strategy, passport.Strategy);
57 |
58 |
59 | /**
60 | * Authenticate request by delegating to an OpenID Connect provider.
61 | *
62 | * @param {Object} req
63 | * @param {Object} options
64 | * @api protected
65 | */
Strategy.prototype.authenticate = function (req, options) {
    options = options || {};
    var self = this;

    // Provider redirected back with an error instead of a code.
    if (req.query && req.query.error) {
        if (req.query.error == 'access_denied') {
            return this.fail({message: req.query.error_description});
        } else {
            return this.error(new AuthorizationError(req.query.error_description, req.query.error, req.query.error_uri));
        }
    }

    if (req.query && req.query.code) {
        // Authorization-code leg: verify state, exchange the code for tokens,
        // validate the ID Token, optionally fetch the profile, then verify.

        function loaded(err, ok, state) {
            if (err) {
                return self.error(err);
            }
            if (!ok) {
                return self.fail(state, 403);
            }
            var code = req.query.code;

            var meta = state;
            var callbackURL = meta.callbackURL;

            var oauth2 = self._getOAuth2Client(meta);

            oauth2.getOAuthAccessToken(code, {
                grant_type: 'authorization_code',
                redirect_uri: callbackURL
            }, function (err, accessToken, refreshToken, params) {
                if (err) {
                    return self.error(new InternalOAuthError('failed to obtain access token', err));
                }

                var idToken = params['id_token'];
                if (!idToken) {
                    return self.error(new Error('ID Token not present in token response'));
                }

                var idTokenSegments = idToken.split('.')
                    , jwtClaimsStr
                    , jwtClaims;

                try {
                    // Decode the (unverified) JWT payload segment.
                    // `Buffer.from` replaces the deprecated `new Buffer(...)` constructor.
                    jwtClaimsStr = Buffer.from(idTokenSegments[1], 'base64').toString();
                    jwtClaims = JSON.parse(jwtClaimsStr);
                } catch (ex) {
                    return self.error(ex);
                }

                var missing = ['iss', 'sub', 'aud', 'exp', 'iat'].filter(function (param) {
                    return !jwtClaims[param]
                });
                if (missing.length) return self.error(new Error('id token is missing required parameter(s) - ' + missing.join(', ')));

                // https://openid.net/specs/openid-connect-basic-1_0.html#IDTokenValidation - check 1.
                if (jwtClaims.iss !== meta.issuer) return self.error(new Error('id token not issued by correct OpenID provider - ' +
                    'expected: ' + meta.issuer + ' | from: ' + jwtClaims.iss));

                // https://openid.net/specs/openid-connect-basic-1_0.html#IDTokenValidation - checks 2 and 3.
                if (typeof jwtClaims.aud === 'string') {
                    if (jwtClaims.aud !== meta.clientID) return self.error(new Error('aud parameter does not include this client - is: '
                        + jwtClaims.aud + '| expected: ' + meta.clientID));
                } else if (Array.isArray(jwtClaims.aud)) {
                    if (jwtClaims.aud.indexOf(meta.clientID) === -1) return self.error(new Error('aud parameter does not include this client - is: ' +
                        jwtClaims.aud + ' | expected to include: ' + meta.clientID));
                    // Bug fix: the azp requirement applies when the aud *array* has
                    // multiple entries. This previously read `jwtClaims.length`,
                    // which is always undefined, so the check could never fire.
                    if (jwtClaims.aud.length > 1 && !jwtClaims.azp) return self.error(new Error('azp parameter required with multiple audiences'));
                } else {
                    return self.error(new Error('Invalid aud parameter type'));
                }

                // https://openid.net/specs/openid-connect-basic-1_0.html#IDTokenValidation - check 4.
                if (jwtClaims.azp && jwtClaims.azp !== meta.clientID) return self.error(new Error('this client is not the authorized party - ' +
                    'expected: ' + meta.clientID + ' | is: ' + jwtClaims.azp));

                // Possible TODO: Add accounting for some clock skew.
                // https://openid.net/specs/openid-connect-basic-1_0.html#IDTokenValidation - check 5.
                if (jwtClaims.exp < (Date.now() / 1000)) return self.error(new Error('id token has expired'));

                // Note: https://openid.net/specs/openid-connect-basic-1_0.html#IDTokenValidation - checks 6 and 7 are out of scope of this library.

                // https://openid.net/specs/openid-connect-basic-1_0.html#IDTokenValidation - check 8.
                if (meta.params.max_age && (!jwtClaims.auth_time || ((meta.timestamp - meta.params.max_age) > jwtClaims.auth_time))) {
                    return self.error(new Error('auth_time in id_token not included or too old'));
                }

                if (meta.params.nonce && (!jwtClaims.nonce || jwtClaims.nonce !== meta.params.nonce)) {
                    return self.error(new Error('Invalid nonce in id_token'));
                }

                var iss = jwtClaims.iss;
                var sub = jwtClaims.sub;
                // Prior to OpenID Connect Basic Client Profile 1.0 - draft 22, the
                // "sub" claim was named "user_id". Many providers still issue the
                // claim under the old field, so fallback to that.
                if (!sub) {
                    sub = jwtClaims.user_id;
                }

                self._shouldLoadUserProfile(iss, sub, function (err, load) {
                    if (err) {
                        return self.error(err);
                    }

                    if (load) {
                        var parsed = url.parse(meta.userInfoURL, true);
                        parsed.query['schema'] = 'openid';
                        delete parsed.search;
                        var userInfoURL = url.format(parsed);

                        // NOTE: We are calling node-oauth's internal `_request` function (as
                        // opposed to `get`) in order to send the access token in the
                        // `Authorization` header rather than as a query parameter.
                        //
                        // Additionally, the master branch of node-oauth (as of
                        // 2013-02-16) will include the access token in *both* headers
                        // and query parameters, which is a violation of the spec.
                        // Setting the fifth argument of `_request` to `null` works
                        // around this issue. I've noted this in comments here:
                        // https://github.com/ciaranj/node-oauth/issues/117

                        oauth2._request("GET", userInfoURL, {
                            'Authorization': "Bearer " + accessToken,
                            'Accept': "application/json"
                        }, null, null, function (err, body, res) {
                            if (err) {
                                return self.error(new InternalOAuthError('failed to fetch user profile', err));
                            }

                            var profile = {};

                            try {
                                var json = JSON.parse(body);

                                profile.id = json.sub;
                                // Prior to OpenID Connect Basic Client Profile 1.0 - draft 22, the
                                // "sub" key was named "user_id". Many providers still use the old
                                // key, so fallback to that.
                                if (!profile.id) {
                                    profile.id = json.user_id;
                                }

                                profile.displayName = json.name;
                                profile.name = {
                                    familyName: json.family_name,
                                    givenName: json.given_name,
                                    middleName: json.middle_name
                                };

                                profile._raw = body;
                                profile._json = json;

                                onProfileLoaded(profile);
                            } catch (ex) {
                                return self.error(ex);
                            }
                        });
                    } else {
                        onProfileLoaded();
                    }

                    // Dispatch to the application's verify callback with an
                    // arity-dependent argument list, then finish the attempt.
                    function onProfileLoaded(profile) {
                        function verified(err, user, info) {
                            if (err) {
                                return self.error(err);
                            }
                            if (!user) {
                                return self.fail(info);
                            }

                            info = info || {};
                            if (state) {
                                info.state = state;
                            }
                            self.success(user, info);
                        }

                        if (self._passReqToCallback) {
                            var arity = self._verify.length;
                            if (arity == 9) {
                                self._verify(req, iss, sub, profile, jwtClaims, accessToken, refreshToken, params, verified);
                            } else if (arity == 8) {
                                self._verify(req, iss, sub, profile, accessToken, refreshToken, params, verified);
                            } else if (arity == 7) {
                                self._verify(req, iss, sub, profile, accessToken, refreshToken, verified);
                            } else if (arity == 5) {
                                self._verify(req, iss, sub, profile, verified);
                            } else { // arity == 4
                                self._verify(req, iss, sub, verified);
                            }
                        } else {
                            var arity = self._verify.length;
                            if (arity == 8) {
                                self._verify(iss, sub, profile, jwtClaims, accessToken, refreshToken, params, verified);
                            } else if (arity == 7) {
                                self._verify(iss, sub, profile, accessToken, refreshToken, params, verified);
                            } else if (arity == 6) {
                                self._verify(iss, sub, profile, accessToken, refreshToken, verified);
                            } else if (arity == 4) {
                                self._verify(iss, sub, profile, verified);
                            } else {
                                // arity detection does not work when used together
                                // with Node-RED, so default to the full argument list.
                                self._verify(iss, sub, profile, accessToken, refreshToken, params, verified);
                            }
                        }
                    } // onProfileLoaded
                }); // self._shouldLoadUserProfile
            }); // oauth2.getOAuthAccessToken
        } // loaded

        var state = req.query.state;
        try {
            self._stateStore.verify(req, state, loaded);
        } catch (ex) {
            return self.error(ex);
        }
    } else {
        // The request being authenticated is initiating OpenID Connect
        // authentication. Prior to redirecting to the provider, configuration will
        // be loaded. The configuration is typically either pre-configured or
        // discovered dynamically. When using dynamic discovery, a user supplies
        // their identifier as input.

        var identifier;
        if (req.body && req.body[this._identifierField]) {
            identifier = req.body[this._identifierField];
        } else if (req.query && req.query[this._identifierField]) {
            identifier = req.query[this._identifierField];
        }

        this._setup(identifier, function (err, config) {
            if (err) {
                return self.error(err);
            }

            // Required Parameters
            var meta = config;

            var callbackURL = options.callbackURL || config.callbackURL;
            if (callbackURL) {
                var parsed = url.parse(callbackURL);
                if (!parsed.protocol) {
                    // The callback URL is relative, resolve a fully qualified URL from the
                    // URL of the originating request.
                    callbackURL = url.resolve(utils.originalURL(req), callbackURL);
                }
            }
            meta.callbackURL = callbackURL;

            var params = self.authorizationParams(options);
            params['response_type'] = 'code';
            params['client_id'] = config.clientID;
            if (callbackURL) {
                params.redirect_uri = callbackURL;
            }
            var scope = options.scope || self._scope;
            if (Array.isArray(scope)) {
                scope = scope.join(' ');
            }
            if (scope) {
                params.scope = 'openid ' + scope;
            } else {
                params.scope = 'openid';
            }

            // Optional Parameters
            // NOTE(review): 'ui_locals' looks like a typo of the OIDC
            // 'ui_locales' parameter — confirm before changing, since existing
            // configs may rely on the current key name.
            var simple_optional_params = ['max_age', 'ui_locals', 'id_token_hint', 'login_hint', 'acr_values'];
            simple_optional_params.filter(x => {
                return x in config
            }).map(y => {
                params[y] = config[y]
            });

            if (config.display && ['page', 'popup', 'touch', 'wap'].indexOf(config.display) !== -1) params.display = config.display;
            if (config.prompt && ['none', 'login', 'consent', 'select_account'].indexOf(config.prompt) !== -1) params.prompt = config.prompt;

            if (config.nonce && typeof config.nonce === 'boolean') {
                params.nonce = utils.uid(20);
            }
            if (config.nonce && typeof config.nonce === 'number') {
                params.nonce = utils.uid(config.nonce);
            }
            if (config.nonce && typeof config.nonce === 'string') {
                params.nonce = config.nonce;
            }

            if (params.max_age) meta.timestamp = Math.floor(Date.now() / 1000);

            meta.params = params;
            // `var` added: `param` previously leaked as an implicit global
            // (and would throw a ReferenceError in strict mode).
            for (var param in params) {
                if (meta[param]) delete meta[param]; // Remove redundant information.
            }

            // State Storage/Management

            function stored(err, state) {
                if (err) {
                    return self.error(err);
                }
                if (!state) {
                    return self.error(new Error('Unable to generate required state parameter'));
                }

                params.state = state;
                var location = config.authorizationURL + '?' + querystring.stringify(params);
                self.redirect(location);
            }

            try {
                var arity = self._stateStore.store.length;
                if (arity == 3) {
                    self._stateStore.store(req, meta, stored);
                } else { // arity == 2
                    self._stateStore.store(req, stored);
                }
            } catch (ex) {
                return self.error(ex);
            }
        }); // this._setup
    }
}
393 |
394 | /**
395 | * Register a function used to configure the strategy.
396 | *
397 | * OpenID Connect is an identity layer on top of OAuth 2.0. OAuth 2.0 requires
398 | * knowledge of certain endpoints (authorization, token, etc.) as well as a
399 | * client identifier (and corresponding secret) registered at the authorization
400 | * server.
401 | *
402 | * Configuration functions are responsible for loading this information. This
403 | * is typically done via one of two popular mechanisms:
404 | *
405 | * - The configuration is known ahead of time, and pre-configured via options
406 | * to the strategy.
407 | * - The configuration is dynamically loaded, using optional discovery and
408 | * registration specifications. (Note: Providers are not required to
409 | * implement support for dynamic discovery and registration. As such, there
410 | * is no guarantee that this will result in successfully initiating OpenID
411 | * Connect authentication.)
412 | *
413 | * @param {Function} fn
414 | * @api public
415 | */
Strategy.prototype.configure = function (fn) {
    // Store the setup function used to resolve provider configuration.
    // Parameter renamed from the misleading `identifier` (it is a function,
    // as the JSDoc above states), and the unused `done` parameter dropped.
    this._setup = fn;
}
419 |
420 |
421 | /**
422 | * Return extra parameters to be included in the authorization request.
423 | *
424 | * Some OpenID Connect providers allow additional, non-standard parameters to be
425 | * included when requesting authorization. Since these parameters are not
426 | * standardized by the OpenID Connect specification, OpenID Connect-based
427 |  * authentication strategies can override this function in order to populate
428 | * these parameters as required by the provider.
429 | *
430 | * @param {Object} options
431 | * @return {Object}
432 | * @api protected
433 | */
Strategy.prototype.authorizationParams = function (options) {
    // Base implementation contributes no extra authorization parameters;
    // provider-specific subclasses override this as needed.
    var extras = {};
    return extras;
}
437 |
438 | /**
439 | * Check if should load user profile, contingent upon options.
440 | *
441 | * @param {String} issuer
442 | * @param {String} subject
443 | * @param {Function} done
444 | * @api private
445 | */
Strategy.prototype._shouldLoadUserProfile = function (issuer, subject, done) {
    // `skipUserProfile` may be a boolean, a sync predicate (issuer, subject),
    // or an async predicate (issuer, subject, cb). The callback receives
    // `load = !skip`, i.e. whether the profile should be fetched.
    var skipOption = this._skipUserProfile;

    if (typeof skipOption == 'function' && skipOption.length > 1) {
        // Async predicate form.
        return skipOption(issuer, subject, function (err, skip) {
            if (err) {
                return done(err);
            }
            return done(null, skip ? false : true);
        });
    }

    var skip = (typeof skipOption == 'function') ? skipOption(issuer, subject) : skipOption;
    return done(null, skip ? false : true);
}
466 |
Strategy.prototype._getOAuth2Client = function (config) {
    // Build a node-oauth OAuth2 client from the resolved provider metadata.
    // The base-site argument is intentionally blank: the endpoint URLs in
    // `config` are absolute.
    var clientID = config.clientID;
    var clientSecret = config.clientSecret;
    return new OAuth2(clientID, clientSecret, '', config.authorizationURL, config.tokenURL);
}
471 |
472 | /**
473 | * Expose `Strategy`.
474 | */
475 | module.exports = Strategy;
476 |
--------------------------------------------------------------------------------