├── waterguru-api ├── README.md ├── icon.png ├── logo.png ├── .gitattributes ├── run.sh ├── Dockerfile ├── config.yaml └── waterguru_flask.py ├── README.md ├── sendmail ├── README.md ├── rootfs │ └── etc │ │ ├── cont-init.d │ │ └── sendmail.sh │ │ ├── services.d │ │ └── sendmail │ │ │ ├── run │ │ │ └── finish │ │ └── mail │ │ ├── sendmail.mc │ │ └── sendmail.conf ├── build.json ├── Dockerfile └── config.json ├── hass-ap ├── README.md ├── rootfs │ └── etc │ │ ├── services.d │ │ ├── dhcpd │ │ │ ├── finish │ │ │ └── run │ │ └── hostapd │ │ │ ├── finish │ │ │ └── run │ │ └── cont-init.d │ │ └── wlanstart.sh ├── config.json └── Dockerfile ├── ipfire ├── README.md ├── build.json ├── config.json └── Dockerfile ├── kegbot ├── icon.png ├── logo.png ├── requirements.txt ├── rootfs │ ├── etc │ │ ├── cont-init.d │ │ │ └── kegbot.sh │ │ └── services.d │ │ │ ├── redis │ │ │ ├── run │ │ │ └── finish │ │ │ └── kegbot │ │ │ ├── finish │ │ │ └── run │ └── usr │ │ └── lib │ │ └── bashio │ │ └── kegbot.sh ├── build.json ├── nginx.conf ├── README.md ├── kegbot.conf ├── config.json ├── Dockerfile ├── local_settings.py └── run.sh ├── owntone ├── icon.png ├── logo.png ├── build.yaml ├── .gitattributes ├── rootfs │ └── etc │ │ ├── services.d │ │ └── owntone │ │ │ ├── finish │ │ │ └── run │ │ └── cont-init.d │ │ └── owntone.sh ├── config.yaml ├── README.md └── Dockerfile ├── openwrt ├── README.md ├── build.json.old ├── config.json ├── rootfs │ └── etc │ │ └── cont-init.d │ │ └── wlanstart.sh ├── Dockerfile └── Dockerfile.old ├── deepstream-iot ├── README.md ├── build.json ├── rootfs │ └── etc │ │ ├── cont-init.d │ │ └── deepstream-iot.sh │ │ └── services.d │ │ └── deepstream-iot │ │ ├── run │ │ └── finish ├── config.json └── Dockerfile ├── keras-rest ├── build.json ├── rootfs │ ├── etc │ │ ├── services.d │ │ │ ├── redis │ │ │ │ ├── run │ │ │ │ └── finish │ │ │ ├── gunicorn │ │ │ │ ├── finish │ │ │ │ └── run │ │ │ └── modelserver │ │ │ │ ├── finish │ │ │ │ └── run │ │ ├── cont-init.d │ │ │ └── keras-rest.sh │ │ └── gunicorn │ │ │ └── gunicorn_conf.py │ └── app │ │ ├── modelsaver │ │ └── main.py │ │ ├── main.py │ │ └── modelserver │ │ └── main.py ├── config.json ├── README.md └── Dockerfile ├── repository.json ├── z-way-server ├── README.md ├── build.json ├── rootfs │ └── etc │ │ ├── services.d │ │ └── z-way-server │ │ │ ├── finish │ │ │ └── run │ │ └── cont-init.d │ │ └── z-way-server.sh ├── config.json └── Dockerfile ├── .gitattributes ├── fff-api ├── rootfs │ └── etc │ │ ├── cont-init.d │ │ └── fff-api.sh │ │ └── services.d │ │ └── fff-api │ │ ├── finish │ │ └── run ├── build.json ├── config.json ├── README.md └── Dockerfile ├── rtl4332mqtt ├── rootfs │ └── etc │ │ ├── cont-init.d │ │ └── rtl4332mqtt.sh │ │ └── services.d │ │ └── rtl4332mqtt │ │ ├── finish │ │ └── run ├── build.json ├── config.json ├── LICENSE ├── Dockerfile └── README.md ├── forked-daapd ├── build.json ├── rootfs │ └── etc │ │ ├── services.d │ │ └── forked-daapd │ │ │ ├── finish │ │ │ └── run │ │ └── cont-init.d │ │ └── forked-daapd.sh ├── config.json ├── README.md └── Dockerfile └── LICENSE /waterguru-api/README.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # hassio-addons 2 | repository for my homeassistant addons 3 | -------------------------------------------------------------------------------- /sendmail/README.md: 
-------------------------------------------------------------------------------- 1 | Sendmail Hass.io add-on. DOES NOT WORK. DO NOT USE. 2 | -------------------------------------------------------------------------------- /hass-ap/README.md: -------------------------------------------------------------------------------- 1 | HA-ization of docker-ap. Probably doesn't work yet; YMMV. 2 | -------------------------------------------------------------------------------- /ipfire/README.md: -------------------------------------------------------------------------------- 1 | IPFire in an HA container. Probably doesn't work; do not use. 2 | -------------------------------------------------------------------------------- /kegbot/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/johnpdowling/hassio-addons/HEAD/kegbot/icon.png -------------------------------------------------------------------------------- /kegbot/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/johnpdowling/hassio-addons/HEAD/kegbot/logo.png -------------------------------------------------------------------------------- /owntone/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/johnpdowling/hassio-addons/HEAD/owntone/icon.png -------------------------------------------------------------------------------- /owntone/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/johnpdowling/hassio-addons/HEAD/owntone/logo.png -------------------------------------------------------------------------------- /openwrt/README.md: -------------------------------------------------------------------------------- 1 | An attempt at porting OpenWrt into HA as an add-on. Probably doesn't work; do not use. 2 | -------------------------------------------------------------------------------- /waterguru-api/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/johnpdowling/hassio-addons/HEAD/waterguru-api/icon.png -------------------------------------------------------------------------------- /waterguru-api/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/johnpdowling/hassio-addons/HEAD/waterguru-api/logo.png -------------------------------------------------------------------------------- /deepstream-iot/README.md: -------------------------------------------------------------------------------- 1 | HA-ized build of the deepstream-l4t (iot) container by NVIDIA. 2 | 3 | Experimental; do not use.
4 | -------------------------------------------------------------------------------- /keras-rest/build.json: -------------------------------------------------------------------------------- 1 | { 2 | "squash": false, 3 | "build_from": { 4 | "aarch64": "youdownwithjpd/addon-nvidia-base:latest" 5 | }, 6 | "args": {} 7 | } 8 | -------------------------------------------------------------------------------- /repository.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Hass.IO add-on repository by John Dowling", 3 | "url": "https://github.com/johnpdowling/hassio-addons", 4 | "maintainer": "JPD" 5 | } 6 | -------------------------------------------------------------------------------- /deepstream-iot/build.json: -------------------------------------------------------------------------------- 1 | { 2 | "squash": false, 3 | "build_from": { 4 | "aarch64": "youdownwithjpd/addon-deepstream_iot-base:latest" 5 | }, 6 | "args": {} 7 | } 8 | -------------------------------------------------------------------------------- /z-way-server/README.md: -------------------------------------------------------------------------------- 1 | Hassio add-on for z-way-server, based on my fork of a fork of docker-image-zway 2 | 3 | Works, but only on ARM, and is an old rev of z-way. Config is in %config%/z-way-server. Tested with a UZB1. 4 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Set the default behavior, in case people don't have core.autocrlf set. 2 | * text=auto 3 | 4 | # Declare files that will always have LF line endings on checkout. 5 | *.sh text eol=lf 6 | *.conf text eol=lf 7 | *.yaml text eol=lf 8 | run text eol=lf 9 | finish text eol=lf 10 | -------------------------------------------------------------------------------- /owntone/build.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | build_from: 3 | aarch64: hassioaddons/debian-base-aarch64:latest 4 | amd64: hassioaddons/debian-base-amd64:latest 5 | armhf: hassioaddons/debian-base-armhf:latest 6 | armv7: hassioaddons/debian-base-armv7:latest 7 | i386: hassioaddons/debian-base-i386:latest 8 | -------------------------------------------------------------------------------- /owntone/.gitattributes: -------------------------------------------------------------------------------- 1 | # Set the default behavior, in case people don't have core.autocrlf set. 2 | * text=auto 3 | 4 | # Declare files that will always have LF line endings on checkout. 5 | *.sh text eol=lf 6 | *.conf text eol=lf 7 | *.yaml text eol=lf 8 | run text eol=lf 9 | finish text eol=lf 10 | -------------------------------------------------------------------------------- /waterguru-api/.gitattributes: -------------------------------------------------------------------------------- 1 | # Set the default behavior, in case people don't have core.autocrlf set. 2 | * text=auto 3 | 4 | # Declare files that will always have LF line endings on checkout. 
5 | *.sh text eol=lf 6 | *.conf text eol=lf 7 | *.yaml text eol=lf 8 | run text eol=lf 9 | finish text eol=lf 10 | -------------------------------------------------------------------------------- /kegbot/requirements.txt: -------------------------------------------------------------------------------- 1 | django < 3 2 | pipenv 3 | requests 4 | mysqlclient 5 | isodate 6 | coloredlogs 7 | addict 8 | future 9 | whitenoise 10 | dj-database-url 11 | django-crispy-forms 12 | django-bootstrap-pagination 13 | django-imagekit 14 | pillow 15 | jsonfield 16 | gunicorn 17 | celery 18 | redis 19 | -------------------------------------------------------------------------------- /kegbot/rootfs/etc/cont-init.d/kegbot.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: Kegbot Server 4 | # Configures the Kegbot Server 5 | # ============================================================================== 6 | -------------------------------------------------------------------------------- /z-way-server/build.json: -------------------------------------------------------------------------------- 1 | { 2 | "squash": false, 3 | "build_from": { 4 | "aarch64": "youdownwithjpd/addon-raspbian-base:latest", 5 | "armhf": "youdownwithjpd/addon-raspbian-base:latest", 6 | "armv7": "youdownwithjpd/addon-raspbian-base:latest" 7 | }, 8 | "args": {} 9 | } 10 | -------------------------------------------------------------------------------- /fff-api/rootfs/etc/cont-init.d/fff-api.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: FFF-API Server 4 | # Configures the FFF-API Server 5 | # ============================================================================== 6 | -------------------------------------------------------------------------------- /sendmail/rootfs/etc/cont-init.d/sendmail.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: Sendmail Server 4 | # Configures the Sendmail Server 5 | # ============================================================================== 6 | -------------------------------------------------------------------------------- /deepstream-iot/rootfs/etc/cont-init.d/deepstream-iot.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: DeepStream IoT Server 4 | # Configures the DeepStream IoT Server 5 | # ============================================================================== 6 | -------------------------------------------------------------------------------- /openwrt/build.json.old: -------------------------------------------------------------------------------- 1 | 2 | { 3 | "squash": false, 4 | "build_from": { 5 | "aarch64": "openwrtorg/rootfs:armvirt-64", 6 | "amd64": "openwrtorg/rootfs:x86-64", 7 | "armv7": "openwrtorg/rootfs:armvirt-32", 8 | "i386": "openwrtorg/rootfs:x86-64" 9 | }, 10 | "args": {} 11 | } 12 | --------------------------------------------------------------------------------
/rtl4332mqtt/rootfs/etc/cont-init.d/rtl4332mqtt.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: RTL433-to-MQTT 4 | # Configures the rtl4332mqtt script 5 | # ============================================================================== 6 | -------------------------------------------------------------------------------- /rtl4332mqtt/build.json: -------------------------------------------------------------------------------- 1 | { 2 | "squash": false, 3 | "build_from": { 4 | "aarch64": "hassioaddons/base-aarch64:5.0.1", 5 | "amd64": "hassioaddons/base-amd64:5.0.1", 6 | "armv7": "hassioaddons/base-armv7:5.0.1", 7 | "i386": "hassioaddons/base-i386:5.0.1" 8 | }, 9 | "args": {} 10 | } 11 | -------------------------------------------------------------------------------- /sendmail/build.json: -------------------------------------------------------------------------------- 1 | { 2 | "build_from": { 3 | "aarch64": "hassioaddons/ubuntu-base-aarch64:4.0.0", 4 | "amd64": "hassioaddons/ubuntu-base-amd64:4.0.0", 5 | "armv7": "hassioaddons/ubuntu-base-armv7:4.0.0", 6 | "i386": "hassioaddons/ubuntu-base-i386:4.0.0" 7 | }, 8 | "args": {} 9 | } 10 | -------------------------------------------------------------------------------- /deepstream-iot/rootfs/etc/services.d/deepstream-iot/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | # ============================================================================== 3 | # Community Hass.io Add-ons: DeepStream IoT Server 4 | # Runs the deepstream app 5 | # ============================================================================== 6 | -------------------------------------------------------------------------------- /kegbot/rootfs/etc/services.d/redis/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | # ============================================================================== 3 | # Community Hass.io Add-ons: Kegbot Server 4 | # Runs the Redis daemon 5 | # ============================================================================== 6 | 7 | bashio::log.info "Starting Redis..." 
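# redis-server stays in the foreground here (its stock config defaults to "daemonize no"), which s6 supervision requires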
8 | redis-server 9 | -------------------------------------------------------------------------------- /fff-api/build.json: -------------------------------------------------------------------------------- 1 | { 2 | "squash": false, 3 | "build_from": { 4 | "aarch64": "hassioaddons/base-aarch64:5.0.1", 5 | "amd64": "hassioaddons/base-amd64:5.0.1", 6 | "armhf": "hassioaddons/base-armhf:5.0.1", 7 | "armv7": "hassioaddons/base-armv7:5.0.1", 8 | "i386": "hassioaddons/base-i386:5.0.1" 9 | }, 10 | "args": {} 11 | } 12 | -------------------------------------------------------------------------------- /kegbot/build.json: -------------------------------------------------------------------------------- 1 | { 2 | "squash": false, 3 | "build_from": { 4 | "aarch64": "hassioaddons/base-aarch64:5.0.1", 5 | "amd64": "hassioaddons/base-amd64:5.0.1", 6 | "armhf": "hassioaddons/base-armhf:5.0.1", 7 | "armv7": "hassioaddons/base-armv7:5.0.1", 8 | "i386": "hassioaddons/base-i386:5.0.1" 9 | }, 10 | "args": {} 11 | } 12 | -------------------------------------------------------------------------------- /keras-rest/rootfs/etc/services.d/redis/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | # ============================================================================== 3 | # Community Hass.io Add-ons: Keras REST Server 4 | # Runs the Redis daemon 5 | # ============================================================================== 6 | 7 | bashio::log.info "Starting Redis..." 8 | redis-server 9 | -------------------------------------------------------------------------------- /forked-daapd/build.json: -------------------------------------------------------------------------------- 1 | { 2 | "squash": false, 3 | "build_from": { 4 | "aarch64": "hassioaddons/base-aarch64:5.0.1", 5 | "amd64": "hassioaddons/base-amd64:5.0.1", 6 | "armhf": "hassioaddons/base-armhf:5.0.1", 7 | "armv7": "hassioaddons/base-armv7:5.0.1", 8 | "i386": "hassioaddons/base-i386:5.0.1" 9 | }, 10 | "args": {} 11 | } 12 | -------------------------------------------------------------------------------- /sendmail/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BUILD_FROM=hassioaddons/ubuntu-base:4.0.0 2 | # hadolint ignore=DL3006 3 | FROM ${BUILD_FROM} 4 | 5 | # Set shell 6 | SHELL ["/bin/bash", "-o", "pipefail", "-c"] 7 | 8 | # Install base 9 | # hadolint ignore=DL3003 10 | RUN \ 11 | apt-get update && \ 12 | apt-get install -y --no-install-recommends \ 13 | sendmail 14 | 15 | COPY rootfs / 16 | -------------------------------------------------------------------------------- /ipfire/build.json: -------------------------------------------------------------------------------- 1 | 2 | { 3 | "squash": false, 4 | "build_from": { 5 | "aarch64": "hassioaddons/ubuntu-base-aarch64:latest", 6 | "amd64": "hassioaddons/ubuntu-base-amd64:latest", 7 | "armhf": "hassioaddons/ubuntu-base-armhf:latest", 8 | "armv7": "hassioaddons/ubuntu-base-armv7:latest", 9 | "i386": "hassioaddons/ubuntu-base-i386:latest" 10 | }, 11 | "args": {} 12 | } 13 | -------------------------------------------------------------------------------- /hass-ap/rootfs/etc/services.d/dhcpd/finish: -------------------------------------------------------------------------------- 1 | #!/usr/bin/execlineb -S0 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: HASS-AP 4 | # Take down the S6 supervision tree when dhcpd 
fails 5 | # ============================================================================== 6 | if -n { s6-test $# -ne 0 } 7 | if -n { s6-test ${1} -eq 256 } 8 | 9 | s6-svscanctl -t /var/run/s6/services 10 | -------------------------------------------------------------------------------- /hass-ap/rootfs/etc/services.d/hostapd/finish: -------------------------------------------------------------------------------- 1 | #!/usr/bin/execlineb -S0 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: HASS-AP 4 | # Take down the S6 supervision tree when HostAP fails 5 | # ============================================================================== 6 | if -n { s6-test $# -ne 0 } 7 | if -n { s6-test ${1} -eq 256 } 8 | 9 | s6-svscanctl -t /var/run/s6/services 10 | -------------------------------------------------------------------------------- /sendmail/rootfs/etc/services.d/sendmail/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: Sendmail Server 4 | # Runs the Sendmail MTA 5 | # ============================================================================== 6 | set -e 7 | CONFIG_PATH=/data/options.json 8 | 9 | bashio::log.info "Starting Sendmail MTA Server..." 10 | exec sendmail-mta 11 | -------------------------------------------------------------------------------- /kegbot/rootfs/etc/services.d/kegbot/finish: -------------------------------------------------------------------------------- 1 | #!/usr/bin/execlineb -S0 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: Kegbot Server 4 | # Take down the S6 supervision tree when kegbot fails 5 | # ============================================================================== 6 | if -n { s6-test $# -ne 0 } 7 | if -n { s6-test ${1} -eq 256 } 8 | 9 | s6-svscanctl -t /var/run/s6/services 10 | -------------------------------------------------------------------------------- /kegbot/rootfs/etc/services.d/redis/finish: -------------------------------------------------------------------------------- 1 | #!/usr/bin/execlineb -S0 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: Kegbot Server 4 | # Take down the S6 supervision tree when redis fails 5 | # ============================================================================== 6 | if -n { s6-test $# -ne 0 } 7 | if -n { s6-test ${1} -eq 256 } 8 | 9 | s6-svscanctl -t /var/run/s6/services 10 | -------------------------------------------------------------------------------- /fff-api/rootfs/etc/services.d/fff-api/finish: -------------------------------------------------------------------------------- 1 | #!/usr/bin/execlineb -S0 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: FFF-API Server 4 | # Take down the S6 supervision tree when fff-api fails 5 | # ============================================================================== 6 | if -n { s6-test $# -ne 0 } 7 | if -n { s6-test ${1} -eq 256 } 8 | 9 | s6-svscanctl -t /var/run/s6/services 10 | -------------------------------------------------------------------------------- /keras-rest/rootfs/etc/services.d/redis/finish: -------------------------------------------------------------------------------- 1 | #!/usr/bin/execlineb -S0 2 | #
============================================================================== 3 | # JPD Hass.io Add-ons: Keras REST Server 4 | # Take down the S6 supervision tree when redis fails 5 | # ============================================================================== 6 | if -n { s6-test $# -ne 0 } 7 | if -n { s6-test ${1} -eq 256 } 8 | 9 | s6-svscanctl -t /var/run/s6/services 10 | -------------------------------------------------------------------------------- /owntone/rootfs/etc/services.d/owntone/finish: -------------------------------------------------------------------------------- 1 | #!/command/execlineb -S0 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: owntone server 4 | # Take down the S6 supervision tree when owntone fails 5 | # ============================================================================== 6 | if -n { s6-test $# -ne 0 } 7 | if -n { s6-test ${1} -eq 256 } 8 | 9 | s6-svscanctl -t /var/run/s6/services 10 | -------------------------------------------------------------------------------- /sendmail/rootfs/etc/services.d/sendmail/finish: -------------------------------------------------------------------------------- 1 | #!/usr/bin/execlineb -S0 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: Sendmail Server 4 | # Take down the S6 supervision tree when sendmail fails 5 | # ============================================================================== 6 | if -n { s6-test $# -ne 0 } 7 | if -n { s6-test ${1} -eq 256 } 8 | 9 | s6-svscanctl -t /var/run/s6/services 10 | -------------------------------------------------------------------------------- /keras-rest/rootfs/etc/services.d/gunicorn/finish: -------------------------------------------------------------------------------- 1 | #!/usr/bin/execlineb -S0 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: Keras REST Server 4 | # Take down the S6 supervision tree when gunicorn fails 5 | # ============================================================================== 6 | if -n { s6-test $# -ne 0 } 7 | if -n { s6-test ${1} -eq 256 } 8 | 9 | s6-svscanctl -t /var/run/s6/services 10 | -------------------------------------------------------------------------------- /hass-ap/rootfs/etc/services.d/hostapd/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: HASS-AP 4 | # Runs the HostAP daemon 5 | # ============================================================================== 6 | 7 | s6-svwait -u -t 5000 /var/run/s6/services/dhcpd 8 | 9 | bashio::log.info "Starting HostAP daemon ..."
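# hostapd is started without -B (daemonize), so it stays in the foreground where s6 can supervise it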
10 | /usr/sbin/hostapd /etc/hostapd.conf 11 | -------------------------------------------------------------------------------- /z-way-server/rootfs/etc/services.d/z-way-server/finish: -------------------------------------------------------------------------------- 1 | #!/usr/bin/execlineb -S0 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: Z-Way Server 4 | # Take down the S6 supervision tree when z-way-server fails 5 | # ============================================================================== 6 | if -n { s6-test $# -ne 0 } 7 | if -n { s6-test ${1} -eq 256 } 8 | 9 | s6-svscanctl -t /var/run/s6/services 10 | -------------------------------------------------------------------------------- /forked-daapd/rootfs/etc/services.d/forked-daapd/finish: -------------------------------------------------------------------------------- 1 | #!/usr/bin/execlineb -S0 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: forked-daapd server 4 | # Take down the S6 supervision tree when forked-daapd fails 5 | # ============================================================================== 6 | if -n { s6-test $# -ne 0 } 7 | if -n { s6-test ${1} -eq 256 } 8 | 9 | s6-svscanctl -t /var/run/s6/services 10 | -------------------------------------------------------------------------------- /keras-rest/rootfs/etc/services.d/modelserver/finish: -------------------------------------------------------------------------------- 1 | #!/usr/bin/execlineb -S0 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: Keras REST Server 4 | # Take down the S6 supervision tree when the modelserver fails 5 | # ============================================================================== 6 | if -n { s6-test $# -ne 0 } 7 | if -n { s6-test ${1} -eq 256 } 8 | 9 | s6-svscanctl -t /var/run/s6/services 10 | -------------------------------------------------------------------------------- /hass-ap/rootfs/etc/services.d/dhcpd/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: HASS-AP 4 | # Runs dhcpd 5 | # ============================================================================== 6 | 7 | true ${INTERFACE:=wlan0} 8 | 9 | # start the dhcp server in the foreground, erroring to stderr 10 | bashio::log.info "Starting DHCP server .." 
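# -f runs dhcpd in the foreground for s6; -d additionally sends its log output to stderr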
11 | dhcpd -d -f ${INTERFACE} 12 | -------------------------------------------------------------------------------- /rtl4332mqtt/rootfs/etc/services.d/rtl4332mqtt/finish: -------------------------------------------------------------------------------- 1 | #!/usr/bin/execlineb -S0 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: RTL433-to-MQTT 4 | # Take down the S6 supervision tree when the rtl4332mqtt script fails 5 | # ============================================================================== 6 | if -n { s6-test $# -ne 0 } 7 | if -n { s6-test ${1} -eq 256 } 8 | 9 | s6-svscanctl -t /var/run/s6/services 10 | -------------------------------------------------------------------------------- /deepstream-iot/rootfs/etc/services.d/deepstream-iot/finish: -------------------------------------------------------------------------------- 1 | #!/usr/bin/execlineb -S0 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: DeepStream IoT Server 4 | # Take down the S6 supervision tree when deepstream-iot fails 5 | # ============================================================================== 6 | if -n { s6-test $# -ne 0 } 7 | if -n { s6-test ${1} -eq 256 } 8 | 9 | s6-svscanctl -t /var/run/s6/services 10 | -------------------------------------------------------------------------------- /waterguru-api/run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | set -e 3 | 4 | bashio::log.info "Creating WG configuration..." 5 | 6 | # Create main config (note: these sed substitutions break if a value contains "/") 7 | WG_USER=$(bashio::config 'email') 8 | WG_PASS=$(bashio::config 'password') 9 | 10 | sed -i "s/WG_USER/${WG_USER}/" /waterguru_flask.py 11 | sed -i "s/WG_PASS/${WG_PASS}/" /waterguru_flask.py 12 | sed -i "s/WG_PORT/${WG_PORT}/" /waterguru_flask.py 13 | 14 | bashio::log.info "Starting WG API server..." 15 | python3 /waterguru_flask.py 16 | -------------------------------------------------------------------------------- /owntone/rootfs/etc/services.d/owntone/run: -------------------------------------------------------------------------------- 1 | #!/command/with-contenv bashio 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: owntone server 4 | # Runs the owntone Server 5 | # ============================================================================== 6 | # shellcheck disable=SC1091 7 | 8 | # Run owntone 9 | bashio::log.info 'Starting the OwnTone Server...' 10 | owntone -f -c /config/owntone/owntone.conf -P /var/run/owntone.pid 11 | -------------------------------------------------------------------------------- /keras-rest/rootfs/etc/services.d/modelserver/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | # ============================================================================== 3 | # Community Hass.io Add-ons: Keras REST Server 4 | # Runs the modelserver daemon 5 | # ============================================================================== 6 | 7 | # Wait for gunicorn to become available 8 | s6-svwait -u -t 5000 /var/run/s6/services/gunicorn 9 | 10 | bashio::log.info "Starting the Model Server..."
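# main.py presumably polls the Redis queue for images and runs the Keras model on them (see the add-on README)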
11 | python3 /app/modelserver/main.py 12 | -------------------------------------------------------------------------------- /z-way-server/rootfs/etc/services.d/z-way-server/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: Z-Way Server 4 | # Runs the Z-Way Server 5 | # ============================================================================== 6 | # shellcheck disable=SC1091 7 | 8 | # Run z-way-server 9 | bashio::log.info 'Starting the Z-Way Server...' 10 | (cd /opt/z-way-server && ./z-way-server -c /config/z-way-server/config/config.xml -L /dev/stdout) 11 | -------------------------------------------------------------------------------- /forked-daapd/rootfs/etc/services.d/forked-daapd/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: forked-daapd server 4 | # Runs the forked-daapd Server 5 | # ============================================================================== 6 | # shellcheck disable=SC1091 7 | 8 | bashio::log.info 'Starting the forked-daapd Server...' 9 | # Run forked-daapd 10 | forked-daapd -f -c /config/forked-daapd/forked-daapd.conf -P /var/run/forked-daapd.pid 11 | -------------------------------------------------------------------------------- /waterguru-api/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BUILD_FROM 2 | FROM $BUILD_FROM 3 | 4 | ENV WG_PORT=53255 5 | 6 | # Note: VOLUME cannot declare a host bind mount; mount /etc/localtime read-only at runtime instead 7 | 8 | # Install requirements for add-on 9 | RUN \ 10 | apk add --no-cache \ 11 | python3 py3-pip 12 | RUN pip install --no-cache-dir \ 13 | flask boto3 requests-aws4auth warrant-ext 14 | #RUN \ 15 | # pip install \ 16 | # requests_aws4auth boto3 flask warrant 17 | 18 | # Copy data for add-on 19 | COPY run.sh / 20 | COPY waterguru_flask.py / 21 | RUN chmod a+x /run.sh 22 | RUN chmod a+x /waterguru_flask.py 23 | 24 | CMD [ "/run.sh" ] 25 | -------------------------------------------------------------------------------- /sendmail/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "sendmail", 3 | "version": "0.0.1", 4 | "slug": "sendmail", 5 | "description": "Sendmail!", 6 | "url": "https://github.com/johnpdowling/hassio-addons/sendmail", 7 | "startup": "before", 8 | "arch": [ 9 | "aarch64", 10 | "amd64", 11 | "armv7", 12 | "i386" 13 | ], 14 | "boot": "auto", 15 | "host_network": true, 16 | "map": [ 17 | "config:rw" 18 | ], 19 | "ports": { 20 | "25/tcp": 25 21 | }, 22 | "ports_description": { 23 | "25/tcp": "outgoing mail port" 24 | }, 25 | "options": { 26 | }, 27 | "schema": { 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /forked-daapd/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "forked-daapd", 3 | "version": "0.0.2", 4 | "slug": "forked-daapd", 5 | "description": "Forked-daapd media server instance", 6 | "url": "https://github.com/johnpdowling/hassio-addons/forked-daapd", 7 | "startup": "services", 8 | "host_network": true, 9 | "host_dbus": true, 10 | "arch": [ 11 | "aarch64", 12 | "amd64", 13 | "armhf", 14 | "armv7", 15 | "i386" 16 | ], 17 | "boot": "auto", 18 |
"hassio_api": true, 19 | "hassio_role": "default", 20 | "map": [ 21 | "config:rw" 22 | ], 23 | "options": { 24 | }, 25 | "schema": { 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /keras-rest/rootfs/etc/cont-init.d/keras-rest.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: Keras REST Server 4 | # Runs the Keras model saver 5 | # ============================================================================== 6 | set -e 7 | CONFIG_PATH=/data/options.json 8 | 9 | #pull required option(s) 10 | export MODEL_NAME=$(jq --raw-output '.model_name' $CONFIG_PATH) 11 | if [ -z "${MODEL_NAME}" ]; then 12 | export MODEL_NAME="imagenet" 13 | fi 14 | bashio::log.debug "Setting up Keras model..." 15 | python3 /app/modelsaver/main.py 16 | -------------------------------------------------------------------------------- /keras-rest/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Keras REST Server", 3 | "version": "0.0.1", 4 | "slug": "keras-rest-server", 5 | "description": "Keras REST server instance", 6 | "url": "https://github.com/johnpdowling/hassio-addons/keras-rest", 7 | "startup": "services", 8 | "ingress": false, 9 | "arch": [ 10 | "aarch64" 11 | ], 12 | "boot": "auto", 13 | "ports": { 14 | "80/tcp": 5000 15 | }, 16 | "ports_description": { 17 | "80/tcp": "REST interface" 18 | }, 19 | "map": [ 20 | "config:rw" 21 | ], 22 | "options": { 23 | "model_name": "imagenet" 24 | }, 25 | "schema": { 26 | "model_name": "str" 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /fff-api/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "FFF-API", 3 | "version": "0.0.3", 4 | "slug": "fff-api", 5 | "description": "Monitor your Flashforge Finder via API", 6 | "url": "https://github.com/johnpdowling/hassio-addons/fff-api", 7 | "startup": "before", 8 | "arch": [ 9 | "aarch64", 10 | "amd64", 11 | "armhf", 12 | "armv7", 13 | "i386" 14 | ], 15 | "boot": "auto", 16 | "map": [ ], 17 | "ports": { 18 | "5000/tcp": 5000 19 | }, 20 | "ports_description": { 21 | "5000/tcp": "API interface" 22 | }, 23 | "options": { 24 | "local_only": false 25 | }, 26 | "schema": { 27 | "local_only": "bool" 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /waterguru-api/config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: WaterGuru API 3 | version: 0.0.1 4 | slug: waterguru-api 5 | description: API Bridge to WaterGuru SENSE cloud services 6 | arch: 7 | - aarch64 8 | - amd64 9 | - armhf 10 | - armv7 11 | - i386 12 | url: https://github.com/johnpdowling/hassio-addons/tree/master/waterguru-api 13 | startup: services 14 | boot: auto 15 | init: false 16 | ports: 17 | 53255/tcp: 53255 18 | ports_description: 19 | 53255/tcp: API access port 20 | options: 21 | email: "user@example.com" 22 | password: "P@$$w0rd" 23 | schema: 24 | log_level: list(trace|debug|info|notice|warning|error|fatal)? 
25 | email: str 26 | password: str 27 | -------------------------------------------------------------------------------- /owntone/config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: OwnTone 3 | version: 0.0.2 4 | slug: owntone 5 | description: OwnTone media server instance 6 | url: https://github.com/johnpdowling/hassio-addons/tree/master/owntone 7 | startup: services 8 | host_network: true 9 | host_dbus: true 10 | arch: 11 | - aarch64 12 | - amd64 13 | - armhf 14 | - armv7 15 | - i386 16 | boot: auto 17 | hassio_api: true 18 | hassio_role: default 19 | map: 20 | - config:rw 21 | options: 22 | packages: [] 23 | init_commands: [] 24 | schema: 25 | log_level: list(trace|debug|info|notice|warning|error|fatal)? 26 | config_path: str? 27 | packages: 28 | - str 29 | init_commands: 30 | - str 31 | -------------------------------------------------------------------------------- /z-way-server/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Z-Way Server", 3 | "version": "0.0.1", 4 | "slug": "z-way-server", 5 | "description": "Z-Way ZWave server instance", 6 | "url": "https://github.com/johnpdowling/hassio-addons/z-way-server", 7 | "startup": "before", 8 | "auto_uart": true, 9 | "ingress": false, 10 | "arch": [ 11 | "aarch64", 12 | "armhf", 13 | "armv7" 14 | ], 15 | "boot": "auto", 16 | "ports": { 17 | "8083/tcp": 8083 18 | }, 19 | "ports_description": { 20 | "8083/tcp": "Web interface" 21 | }, 22 | "map": [ 23 | "config:rw", 24 | "ssl" 25 | ], 26 | "options": { 27 | }, 28 | "schema": { 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /fff-api/README.md: -------------------------------------------------------------------------------- 1 | # Flashforge Finder API hass.io add-on 2 | A hass.io add-on for exposing data from Flashforge Finder(s) as API endpoints for consumption as HA REST sensors. This project is more or less a wrapper around 01F0's work (+ my & Dan McInerney's) here: https://github.com/johnpdowling/flashforge-finder-api. 3 | 4 | ## Usage 5 | 1) Install the addon and configure the port that the API will be served on. Default is 5000. 6 | 7 | 2) Start the addon. 
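Information about your printer will be available at http://{homeassistant_ip}:{fff-api_port}/{printer_ip}/{info|head-location|temp|progress|status|set-temp|set-light} 8 | 9 | As a quick smoke test, query an endpoint with curl (the host and printer addresses here are hypothetical placeholders; substitute your own): `curl http://homeassistant.local:5000/192.168.1.50/status` 10 |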
11 | 3) Configure RESTful sensors & switches in HA as necessary, e.g. RESTful sensors that poll the endpoints above 12 | -------------------------------------------------------------------------------- /fff-api/rootfs/etc/services.d/fff-api/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | # ============================================================================== 3 | # Community Hass.io Add-ons: FFF-API Server 4 | # Runs the fff-api flask server 5 | # ============================================================================== 6 | set -e 7 | CONFIG_PATH=/data/options.json 8 | 9 | # configs 10 | LOCAL_ONLY=$(jq --raw-output '.local_only' $CONFIG_PATH) 11 | WAIT_TIME=10 12 | 13 | HOST="--host=0.0.0.0" 14 | if [ "$LOCAL_ONLY" == true ] 15 | then 16 | HOST="" 17 | fi 18 | 19 | sleep "$WAIT_TIME" 20 | 21 | # Run fff-api 22 | (cd /fff-api/api && FLASK_APP=webapi.py flask run $HOST --port=5000) 23 | -------------------------------------------------------------------------------- /deepstream-iot/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "DeepStream IoT Server", 3 | "version": "0.0.1", 4 | "slug": "deepstream-iot-server", 5 | "description": "DeepStream IoT server instance", 6 | "url": "https://github.com/johnpdowling/hassio-addons/deepstream-iot", 7 | "startup": "services", 8 | "ingress": false, 9 | "arch": [ 10 | "aarch64" 11 | ], 12 | "boot": "auto", 13 | "ports": { 14 | "80/tcp": 8080, 15 | "8554/tcp": 8554 16 | }, 17 | "ports_description": { 18 | "80/tcp": "Web interface", 19 | "8554/tcp": "RTSP interface" 20 | }, 21 | "map": [ 22 | "config:rw" 23 | ], 24 | "options": { 25 | "model_name": "test5" 26 | }, 27 | "schema": { 28 | "model_name": "str" 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /ipfire/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "IPFire", 3 | "version": "0.0.1", 4 | "slug": "ipfire", 5 | "description": "IPFire instance", 6 | "url": "https://github.com/johnpdowling/hassio-addons/ipfire", 7 | "startup": "before", 8 | "full_access": "true", 9 | "docker_api": "true", 10 | "privileged": [ "NET_ADMIN" ], 11 | "ingress": false, 12 | "host_network": false, 13 | "host_dbus": false, 14 | "ports": { 15 | }, 16 | "ports_description": { 17 | }, 18 | "arch": [ 19 | "aarch64", 20 | "amd64", 21 | "armv7", 22 | "i386" 23 | ], 24 | "boot": "auto", 25 | "hassio_api": true, 26 | "hassio_role": "default", 27 | "map": [ 28 | "config:rw" 29 | ], 30 | "devices": [ ], 31 | "options": { 32 | }, 33 | "schema": { 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /keras-rest/README.md: -------------------------------------------------------------------------------- 1 | Hassio add-on for a REST server endpoint for a Keras prediction model. Only NVIDIA Jetson Nano platforms are supported. 2 | Based on [Adrian Rosebrock's](https://www.pyimagesearch.com/2018/02/05/deep-learning-production-keras-redis-flask-apache/) and [Shane Soh's](https://medium.com/analytics-vidhya/deploy-machine-learning-models-with-keras-fastapi-redis-and-docker-4940df614ece) blog posts.
Models frozen and converted as detailed in [Chengwei's](https://www.dlology.com/blog/how-to-run-keras-model-on-jetson-nano/) post and [Stack Overflow](https://stackoverflow.com/questions/44274701/make-predictions-using-a-tensorflow-graph-from-a-keras-model). 3 | Intended to be used with a component like https://github.com/robmarkcole/HASS-rest-image-process 4 | 5 | DOES NOT WORK, DO NOT USE 6 | -------------------------------------------------------------------------------- /openwrt/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "OpenWRT", 3 | "version": "0.0.1", 4 | "slug": "openwrt", 5 | "description": "OpenWRT instance", 6 | "url": "https://github.com/johnpdowling/hassio-addons/openwrt", 7 | "startup": "before", 8 | "full_access": "true", 9 | "docker_api": "true", 10 | "privileged": [ "NET_ADMIN" ], 11 | "ingress": false, 12 | "host_network": false, 13 | "host_dbus": false, 14 | "ports": { 15 | }, 16 | "ports_description": { 17 | }, 18 | "arch": [ 19 | "aarch64", 20 | "amd64", 21 | "armv7", 22 | "i386" 23 | ], 24 | "boot": "auto", 25 | "hassio_api": true, 26 | "hassio_role": "default", 27 | "map": [ 28 | "config:rw" 29 | ], 30 | "devices": [ ], 31 | "options": { 32 | }, 33 | "schema": { 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /kegbot/nginx.conf: -------------------------------------------------------------------------------- 1 | user root; 2 | worker_processes 1; 3 | 4 | error_log /var/log/nginx/error.log warn; 5 | pid /var/run/nginx.pid; 6 | 7 | 8 | events { 9 | worker_connections 1024; 10 | } 11 | 12 | 13 | http { 14 | include /etc/nginx/mime.types; 15 | default_type application/octet-stream; 16 | 17 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 18 | '$status $body_bytes_sent "$http_referer" ' 19 | '"$http_user_agent" "$http_x_forwarded_for"'; 20 | 21 | access_log /var/log/nginx/access.log main; 22 | 23 | sendfile on; 24 | #tcp_nopush on; 25 | 26 | keepalive_timeout 65; 27 | 28 | #gzip on; 29 | 30 | include /etc/nginx/conf.d/*.conf; 31 | } 32 | -------------------------------------------------------------------------------- /kegbot/README.md: -------------------------------------------------------------------------------- 1 | # Currently broken, do not use! 2 | 3 | 4 | # Kegbot hass.io add-on 5 | This add-on provides an installation of the [Kegbot](https://kegbot.org/) server and is based on their [docker project](https://github.com/Kegbot/kegbot-docker). 6 | 7 | ## Usage 8 | Before beginning, set up a kegbot user on a MySQL/MariaDB system but do not create a database as that will be handled during setup. If you're using email, set up your server and/or gather your information. 9 | 1) Install add-on 10 | 2) In the config window, db information is required and email information (email_from, email_host, email_port, email_user, email_password, email_use_ssl, email_use_tls) is optional. 
11 | 3) If you need to add extra kegbot or django settings, the settings file is located at %config%/kegbot/local_settings.py (right now there's a bug and DEBUG = False has to be hard-coded here) 12 | 4) Media is located in %config%/kegbot/media/ 13 | -------------------------------------------------------------------------------- /owntone/README.md: -------------------------------------------------------------------------------- 1 | # OwnTone hass.io add-on 2 | This add-on provides an installation of the [OwnTone](https://github.com/owntone/owntone-server) server and is based on the [docker project](https://github.com/sretalla/docker-forked-daapd) by [/u/sretalla](https://github.com/sretalla) and [/u/kevineye](https://github.com/kevineye). 3 | 4 | ## Usage 5 | 1) Install add-on and start it 6 | 2) For your setup, you may need to further edit owntone.conf to suit your needs. All files are located in %config%/owntone/ and are created after first start 7 | 8 | ## Notes 9 | 1) The OwnTone compiled here was only given the web interface. Chromecast, Spotify, etc. are disabled in this add-on. I wanted an instance to work with my [custom integration](https://github.com/johnpdowling/custom_components/tree/master/owntone) and AirPlay devices 10 | 2) The named pipe %config%/owntone/music/HomeAssistantAnnounce is created to facilitate announcements via the component 11 | -------------------------------------------------------------------------------- /owntone/rootfs/etc/cont-init.d/owntone.sh: -------------------------------------------------------------------------------- 1 | #!/command/with-contenv bashio 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: owntone server 4 | # Runs the owntone Server 5 | # ============================================================================== 6 | # shellcheck disable=SC1091 7 | 8 | # Start with init 9 | if ! bashio::fs.directory_exists '/config/owntone/cache'; then 10 | bashio::log.info 'Creating cache folder...' 11 | mkdir -p /config/owntone/cache 12 | fi 13 | 14 | if ! bashio::fs.file_exists '/config/owntone/owntone.conf'; then 15 | bashio::log.info 'Copying default conf file...' 16 | cp /usr/local/etc/owntone.conf /config/owntone/owntone.conf 17 | fi 18 | 19 | if ! bashio::fs.directory_exists '/config/owntone/music'; then 20 | bashio::log.info 'Creating music folder...' 21 | mkdir -p /config/owntone/music 22 | bashio::log.info 'Creating HA fifo file...' 23 | mkfifo -m 666 /config/owntone/music/HomeAssistantAnnounce 24 | fi 25 | -------------------------------------------------------------------------------- /forked-daapd/rootfs/etc/cont-init.d/forked-daapd.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: forked-daapd server 4 | # Configures the forked-daapd Server 5 | # ============================================================================== 6 | 7 | if ! bashio::fs.directory_exists '/config/forked-daapd/cache'; then 8 | bashio::log.debug 'Creating cache folder...' 9 | mkdir -p /config/forked-daapd/cache 10 | fi 11 | 12 | if ! bashio::fs.file_exists '/config/forked-daapd/forked-daapd.conf'; then 13 | bashio::log.debug 'Copying default conf file...' 14 | cp /usr/local/etc/forked-daapd.conf /config/forked-daapd/forked-daapd.conf 15 | fi 16 | 17 | if ! 
bashio::fs.directory_exists '/config/forked-daapd/music'; then 18 | bashio::log.debug 'Creating music folder...' 19 | mkdir -p /config/forked-daapd/music 20 | bashio::log.debug 'Creating HA fifo file...' 21 | mkfifo -m 666 /config/forked-daapd/music/HomeAssistantAnnounce 22 | fi 23 | -------------------------------------------------------------------------------- /forked-daapd/README.md: -------------------------------------------------------------------------------- 1 | # Add-on archived and won't be further developed here. Please use OwnTone. 2 | 3 | # forked-daapd hass.io add-on 4 | This add-on provides an installation of the [forked-daapd](https://github.com/ejurgensen/forked-daapd) server and is based on the [docker project](https://github.com/sretalla/docker-forked-daapd) by [/u/sretalla](https://github.com/sretalla) and [/u/kevineye](https://github.com/kevineye). 5 | 6 | ## Usage 7 | 1) Install add-on and start it 8 | 2) For your setup, you may need to further edit forked-daapd.conf to suit your needs. All files are located in %config%/forked-daapd/ and are created after first start 9 | 10 | ## Notes 11 | 1) The forked-daapd compiled here was only given the configure flag --enable-itunes. Chromecast, Spotify, etc. are disabled in this add-on. I wanted an instance to work with my [custom component](https://github.com/johnpdowling/custom_components/tree/master/forked-daapd) and AirPlay devices 12 | 2) The named pipe %config%/forked-daapd/music/HomeAssistantAnnounce is created to facilitate announcements via the component 13 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 John Dowling 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /rtl4332mqtt/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "RTL_433 to MQTT Bridge", 3 | "version": "0.2.1", 4 | "slug": "rtl4332mqtt", 5 | "description": "433MHz RF to MQTT Bridge based on RTL_SDR/RTL_433 for RTL2832U based DVB-T USB tuners", 6 | "url": "https://github.com/james-fry/hassio-addons/tree/master/rtl4332mqtt", 7 | "startup": "before", 8 | "boot": "auto", 9 | "map": ["config:rw", "ssl"], 10 | "devices": ["/dev/bus/usb:/dev/bus/usb:rwm"], 11 | "host_network": "False", 12 | "arch": [ 13 | "aarch64", 14 | "amd64", 15 | "armv7", 16 | "i386" 17 | ], 18 | "options": 19 | { 20 | "mqtt_host": "hassio.local", 21 | "mqtt_user": "", 22 | "mqtt_password": "", 23 | "mqtt_topic": "homeassistant/sensor/currentcost", 24 | "protocol": [ 100, 101 ], 25 | "frequency": 433920000, 26 | "gain": 60, 27 | "frequency_offset": 0 28 | }, 29 | "schema": 30 | { 31 | "mqtt_host": "str", 32 | "mqtt_user": "str", 33 | "mqtt_password": "str", 34 | "mqtt_topic": "str", 35 | "protocol": [ "int" ], 36 | "frequency": "int", 37 | "gain": "int", 38 | "frequency_offset": "int" 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /rtl4332mqtt/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Chris Kacerguis 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /hass-ap/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "HASS AP", 3 | "version": "0.0.1", 4 | "slug": "hass-ap", 5 | "description": "Home Assistant Access Point", 6 | "url": "https://github.com/johnpdowling/hassio-addons/hass-ap", 7 | "startup": "before", 8 | "full_access": "true", 9 | "docker_api": "true", 10 | "privileged": [ "NET_ADMIN" ], 11 | "arch": [ 12 | "i386", 13 | "amd64", 14 | "aarch64", 15 | "armhf", 16 | "armv7" 17 | ], 18 | "boot": "auto", 19 | "map": [ 20 | "config:rw" 21 | ], 22 | "options": { 23 | "ssid": "hass-ap", 24 | "wpa_passphrase": "passw0rd", 25 | "interface": "wlan0", 26 | "outgoing": "eth0", 27 | "channel": "11", 28 | "subnet": "192.168.254.0", 29 | "ap_address": "192.168.254.1", 30 | "dhcp_min": 100, 31 | "dhcp_max": 199, 32 | "docker_only": false 33 | }, 34 | "schema": { 35 | "ssid": "str", 36 | "wpa_passphrase": "str", 37 | "interface": "str", 38 | "outgoing": "str", 39 | "channel": "str", 40 | "subnet": "str", 41 | "ap_address": "str", 42 | "dhcp_min": "int", 43 | "dhcp_max": "int", 44 | "docker_only": "bool" 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /keras-rest/rootfs/etc/gunicorn/gunicorn_conf.py: -------------------------------------------------------------------------------- 1 | import json 2 | import multiprocessing 3 | import os 4 | 5 | workers_per_core_str = os.getenv("WORKERS_PER_CORE", "1") 6 | web_concurrency_str = os.getenv("WEB_CONCURRENCY", None) 7 | host = os.getenv("HOST", "0.0.0.0") 8 | port = os.getenv("PORT", "80") 9 | bind_env = os.getenv("BIND", None) 10 | use_loglevel = os.getenv("LOG_LEVEL", "info") 11 | if bind_env: 12 | use_bind = bind_env 13 | else: 14 | use_bind = f"{host}:{port}" 15 | 16 | cores = multiprocessing.cpu_count() 17 | workers_per_core = float(workers_per_core_str) 18 | default_web_concurrency = workers_per_core * cores 19 | if web_concurrency_str: 20 | web_concurrency = int(web_concurrency_str) 21 | assert web_concurrency > 0 22 | else: 23 | web_concurrency = max(int(default_web_concurrency), 2) 24 | 25 | # Gunicorn config variables 26 | loglevel = use_loglevel 27 | workers = web_concurrency 28 | bind = use_bind 29 | keepalive = 120 30 | errorlog = "-" 31 | 32 | # For debugging and testing 33 | log_data = { 34 | "loglevel": loglevel, 35 | "workers": workers, 36 | "bind": bind, 37 | # Additional, non-gunicorn variables 38 | "workers_per_core": workers_per_core, 39 | "host": host, 40 | "port": port, 41 | } 42 | print(json.dumps(log_data)) 43 | -------------------------------------------------------------------------------- /deepstream-iot/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BUILD_FROM=youdownwithjpd/addon-deepstream_iot-base:latest 2 | # hadolint ignore=DL3006 3 | FROM ${BUILD_FROM} 4 | 5 | # Set shell 6 | SHELL ["/bin/bash", "-o", "pipefail", "-c"] 7 | 8 | # Copy root filesystem 9 | COPY rootfs / 10 | 11 | # Build arguments 12 | ARG BUILD_ARCH 13 | ARG BUILD_DATE 14 | ARG BUILD_REF 15 | ARG BUILD_VERSION 16 | 17 | # Labels 18 | LABEL \ 19 | io.hass.name="NVIDIA DeepStream IoT Server" \ 20 | io.hass.description="NVIDIA DeepStream IoT Server" \ 21 | io.hass.arch="${BUILD_ARCH}" \ 22 | io.hass.type="addon" \ 23 | io.hass.version=${BUILD_VERSION} \ 24 | maintainer="John Dowling " \ 25 | org.label-schema.description="JPD Hass.io Add-ons: ${BUILD_ARCH} DeepStream IoT Server"
\ 25 | org.label-schema.build-date=${BUILD_DATE} \ 26 | org.label-schema.name="Addon DeepStream IoT Server for ${BUILD_ARCH}" \ 27 | org.label-schema.schema-version="1.0" \ 28 | org.label-schema.url="https://addons.community" \ 29 | org.label-schema.usage="https://github.com/johnpdowling/hassio-addons/deepstream-iot/blob/master/README.md" \ 30 | org.label-schema.vcs-ref=${REF} \ 31 | org.label-schema.vcs-url="https://github.com/johnpdowling/hassio-addons/deepstream-iot" \ 32 | org.label-schema.vendor="John Dowling" 33 | ############################################## 34 | -------------------------------------------------------------------------------- /hass-ap/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BUILD_FROM 2 | FROM $BUILD_FROM 3 | 4 | ENV LANG C.UTF-8 5 | 6 | COPY rootfs / 7 | 8 | RUN apk update && \ 9 | apk add --no-cache \ 10 | bash \ 11 | hostapd \ 12 | iptables \ 13 | dhcp \ 14 | docker \ 15 | iproute2 \ 16 | iw 17 | 18 | RUN echo "" > /var/lib/dhcp/dhcpd.leases 19 | 20 | RUN modprobe ip_tables 21 | RUN echo 'ip_tables' >> /etc/modules 22 | 23 | # Build arguments 24 | ARG BUILD_ARCH 25 | ARG BUILD_DATE 26 | ARG BUILD_REF 27 | ARG BUILD_VERSION 28 | 29 | # Labels 30 | LABEL \ 31 | io.hass.name="Home Assistant Access Point" \ 32 | io.hass.description="This image is used to create a wireless access point." \ 33 | io.hass.arch="${BUILD_ARCH}" \ 34 | io.hass.type="addon" \ 35 | io.hass.version=${BUILD_VERSION} \ 36 | maintainer="John Dowling " \ 37 | org.label-schema.description="Home Assistant Access Point instance" \ 38 | org.label-schema.build-date=${BUILD_DATE} \ 39 | org.label-schema.name="Home Assistant Access Point" \ 40 | org.label-schema.schema-version="1.0" \ 41 | org.label-schema.url="" \ 42 | org.label-schema.usage="https://github.com/johnpdowling/hassio-addons/tree/master/hass-ap/README.md" \ 43 | org.label-schema.vcs-ref=${BUILD_REF} \ 44 | org.label-schema.vcs-url="https://github.com/johnpdowling/hassio-addons/hass-ap" \ 45 | org.label-schema.vendor="JPD Hass.io Add-ons" 46 | -------------------------------------------------------------------------------- /keras-rest/rootfs/etc/services.d/gunicorn/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | # ============================================================================== 3 | # Community Hass.io Add-ons: Keras REST Server 4 | # Starts the uvicorn/gunicorn service 5 | # ============================================================================== 6 | set -e 7 | 8 | # Wait for redis to become available 9 | s6-svwait -u -t 5000 /var/run/s6/services/redis 10 | 11 | if [ -f /app/app/main.py ]; then 12 | DEFAULT_MODULE_NAME=app.main 13 | elif [ -f /app/main.py ]; then 14 | DEFAULT_MODULE_NAME=main 15 | fi 16 | MODULE_NAME=${MODULE_NAME:-$DEFAULT_MODULE_NAME} 17 | VARIABLE_NAME=${VARIABLE_NAME:-app} 18 | export APP_MODULE=${APP_MODULE:-"$MODULE_NAME:$VARIABLE_NAME"} 19 | 20 | if [ -f /etc/gunicorn/gunicorn_conf.py ]; then 21 | DEFAULT_GUNICORN_CONF=/etc/gunicorn/gunicorn_conf.py 22 | elif [ -f /app/gunicorn_conf.py ]; then 23 | DEFAULT_GUNICORN_CONF=/app/gunicorn_conf.py 24 | elif [ -f /app/app/gunicorn_conf.py ]; then 25 | DEFAULT_GUNICORN_CONF=/app/app/gunicorn_conf.py 26 | else 27 | DEFAULT_GUNICORN_CONF=/gunicorn_conf.py 28 | fi 29 | export GUNICORN_CONF=${GUNICORN_CONF:-$DEFAULT_GUNICORN_CONF} 30 | 31 | #removed pre-start.sh stuff, we have cont-init.d 32 | 33 | # Start Gunicorn 34 | 
bashio::log.info 'Starting the Uvicorn Gunicorn workers...' 35 | exec gunicorn --chdir=/app -k uvicorn.workers.UvicornWorker -c "$GUNICORN_CONF" "$APP_MODULE" 36 | -------------------------------------------------------------------------------- /openwrt/rootfs/etc/cont-init.d/wlanstart.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: OpenWRT 4 | # Configures wlan0 in guest mode so we can use it in services 5 | # ============================================================================== 6 | 7 | # privileged set in config.json, but check anyway 8 | if [ ! -w "/sys" ] ; then 9 | bashio::log.error "[Error] Not running in privileged mode." 10 | exit 1 11 | fi 12 | #CONFIG_PATH=/data/options.json 13 | #MQTT_HOST="$(jq --raw-output '.mqtt_host' $CONFIG_PATH)" 14 | 15 | # Default values 16 | true ${INTERFACE:=wlan0} 17 | 18 | # Attach interface to container in guest mode 19 | bashio::log.info "Fetching interface data for container" 20 | 21 | CONTAINER_ID=$(cat /proc/self/cgroup | grep -o -e "/docker/.*" | head -n 1| sed "s/\/docker\/\(.*\)/\\1/") 22 | CONTAINER_PID=$(docker inspect -f '{{.State.Pid}}' ${CONTAINER_ID}) 23 | CONTAINER_IMAGE=$(docker inspect -f '{{.Config.Image}}' ${CONTAINER_ID}) 24 | 25 | bashio::log.info "Attaching interface to container" 26 | docker run -t --privileged --net=host --pid=host --rm --entrypoint /bin/sh ${CONTAINER_IMAGE} -c " 27 | PHY=\$(echo phy\$(iw dev ${INTERFACE} info | grep wiphy | tr ' ' '\n' | tail -n 1)) 28 | iw phy \$PHY set netns ${CONTAINER_PID} 29 | " 30 | bashio::log.info "Setting up interface" 31 | ip link set ${INTERFACE} name wlan0 32 | 33 | INTERFACE=wlan0 34 | 35 | # unblock wlan 36 | rfkill unblock wlan 37 | 38 | # Setup interface and bring it up 39 | ip link set ${INTERFACE} up 40 | ip addr flush dev ${INTERFACE} 41 | -------------------------------------------------------------------------------- /fff-api/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BUILD_FROM=hassioaddons/base:5.0.1 2 | # hadolint ignore=DL3006 3 | FROM ${BUILD_FROM} 4 | 5 | # Creates an image for a hass.io add-on that contains enough software to bother your Flashforge Finder and publish its info. 
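# (Added note, an assumption rather than upstream documentation: the Flask service answers simple HTTP GET routes keyed on the printer's IP; see the upstream flashforge-finder-api README for the exact endpoints it exposes.)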
6 | # This hass.io addon is based on 01F0's project here: https://github.com/01F0/flashforge-finder-api 7 | 8 | #install environment 9 | RUN apk add --no-cache --virtual build-deps alpine-sdk cmake git \ 10 | python python-dev py-pip build-base 11 | RUN pip install flask flask-cors --upgrade 12 | RUN git clone https://github.com/johnpdowling/flashforge-finder-api.git /fff-api 13 | 14 | # Copy services 15 | COPY rootfs / 16 | 17 | # Build arguments 18 | ARG BUILD_ARCH 19 | ARG BUILD_DATE 20 | ARG BUILD_REF 21 | ARG BUILD_VERSION 22 | 23 | # Labels 24 | LABEL \ 25 | io.hass.name="FFF-API Server" \ 26 | io.hass.description="A Flask API server that will act as a conduit to your Flashforge Finder" \ 27 | io.hass.arch="${BUILD_ARCH}" \ 28 | io.hass.type="addon" \ 29 | io.hass.version=${BUILD_VERSION} \ 30 | maintainer="John Dowling " \ 31 | org.label-schema.description="The fff-api server program" \ 32 | org.label-schema.build-date=${BUILD_DATE} \ 33 | org.label-schema.name="FFF-API Server" \ 34 | org.label-schema.schema-version="1.0" \ 35 | org.label-schema.url="" \ 36 | org.label-schema.usage="https://github.com/johnpdowling/hassio-addons/tree/master/fff-api/README.md" \ 37 | org.label-schema.vcs-ref=${BUILD_REF} \ 38 | org.label-schema.vcs-url="https://github.com/johnpdowling/hassio-addons/fff-api" \ 39 | org.label-schema.vendor="JPD Hass.io Add-ons" 40 | -------------------------------------------------------------------------------- /kegbot/kegbot.conf: -------------------------------------------------------------------------------- 1 | # Kegbot nginx.conf file 2 | 3 | upstream kegbot { 4 | server localhost:8000; 5 | keepalive 16; 6 | } 7 | 8 | server { 9 | listen 80; 10 | tcp_nopush on; 11 | tcp_nodelay on; 12 | 13 | gzip on; 14 | gzip_disable "msie6"; 15 | gzip_types text/plain text/css application/x-javascript text/xml application/xml application/xml+rss text/javascript; 16 | gzip_vary on; 17 | 18 | keepalive_timeout 0; 19 | client_max_body_size 10m; 20 | 21 | location / { 22 | proxy_redirect off; 23 | # proxy_set_header Host $host:$server_port; 24 | proxy_set_header Host $host; 25 | proxy_set_header X-Real-IP $remote_addr; 26 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 27 | proxy_set_header X-Forwarded-Protocol $scheme; 28 | proxy_pass http://kegbot; 29 | } 30 | 31 | location /media/ { 32 | alias /config/kegbot/media/; 33 | access_log off; 34 | log_not_found off; 35 | expires 7d; 36 | add_header pragma public; 37 | add_header cache-control "public"; 38 | } 39 | 40 | location /static/ { 41 | alias /kegbot-data/static/; 42 | access_log off; 43 | log_not_found off; 44 | expires 7d; 45 | add_header pragma public; 46 | add_header cache-control "public"; 47 | } 48 | 49 | location /robots.txt { 50 | root /kegbot-data/static/; 51 | access_log off; 52 | log_not_found off; 53 | } 54 | 55 | location /favicon.ico { 56 | root /kegbot-data/static/; 57 | access_log off; 58 | log_not_found off; 59 | } 60 | 61 | } 62 | -------------------------------------------------------------------------------- /kegbot/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Kegbot", 3 | "version": "0.0.2", 4 | "slug": "kegbot", 5 | "description": "Kegbot Server, the internet beer kegerator monitoring system.", 6 | "url": "https://github.com/johnpdowling/hassio-addons/kegbot", 7 | "ingress": false, 8 | "panel_icon": "mdi:keg", 9 | "startup": "application", 10 | "homeassistant": "0.92.0b2", 11 | "arch": [ 12 | "aarch64", 13 | "amd64", 14 | 
"armhf", 15 | "armv7", 16 | "i386" 17 | ], 18 | "ports": { 19 | "8000/tcp": 2080 20 | }, 21 | "ports_description": { 22 | "8000/tcp": "Web interface" 23 | }, 24 | "boot": "auto", 25 | "hassio_api": false, 26 | "hassio_role": "manager", 27 | "homeassistant_api": false, 28 | "host_network": false, 29 | "auth_api": false, 30 | "auto_uart": false, 31 | "gpio": false, 32 | "privileged": [ 33 | ], 34 | "devices": [ 35 | ], 36 | "apparmor": true, 37 | "map": [ 38 | "config:rw", 39 | "ssl" 40 | ], 41 | "options": { 42 | "debug": true, 43 | "ssl": false, 44 | "certfile": "fullchain.pem", 45 | "keyfile": "privkey.pem", 46 | "db_name": "kegbot", 47 | "db_host": "core-mariadb", 48 | "db_port": "3306", 49 | "db_user": "kegbot", 50 | "db_pass": "" 51 | }, 52 | "schema": { 53 | "log_level": "match(^(trace|debug|info|notice|warning|error|fatal)$)?", 54 | "debug": "bool", 55 | "ssl": "bool", 56 | "certfile": "str", 57 | "db_name": "str", 58 | "db_host": "str", 59 | "db_port": "int", 60 | "db_user": "str", 61 | "db_pass": "str", 62 | "email_from": "str?", 63 | "email_host": "str?", 64 | "email_port": "int?", 65 | "email_user": "str?", 66 | "email_pass": "str?", 67 | "email_ssl": "bool?", 68 | "email_tls": "bool?", 69 | "i_like_to_be_pwned": "bool?", 70 | "leave_front_door_open": "bool?" 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /ipfire/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BUILD_FROM 2 | # hadolint ignore=DL3006 3 | FROM ${BUILD_FROM} 4 | 5 | # Set shell 6 | SHELL ["/bin/bash", "-o", "pipefail", "-c"] 7 | 8 | # install build reqs 9 | RUN apt-get update 10 | RUN apt-get upgrade -y 11 | RUN apt-get install -y \ 12 | linux-headers-generic 13 | RUN apt-get install -y \ 14 | build-essential \ 15 | git-core \ 16 | g++ \ 17 | coreutils \ 18 | patch \ 19 | byacc \ 20 | make \ 21 | python-urlgrabber \ 22 | autoconf \ 23 | automake \ 24 | nano \ 25 | wget \ 26 | pv \ 27 | dialog \ 28 | bzip2 29 | 30 | RUN git clone git://git.ipfire.org/ipfire-2.x.git && \ 31 | cd ipfire-2.x && \ 32 | git checkout -b next -t origin/next && \ 33 | ./make.sh downloadsrc && \ 34 | ./make.sh gettoolchain && \ 35 | ./make.sh build 36 | 37 | # Build arugments 38 | ARG BUILD_ARCH 39 | ARG BUILD_DATE 40 | ARG BUILD_REF 41 | ARG BUILD_VERSION 42 | 43 | # Labels 44 | LABEL \ 45 | io.hass.name="IPFire addon for ${BUILD_ARCH}" \ 46 | io.hass.description="JPD Hass.io Add-ons: ${BUILD_ARCH} IPFire image" \ 47 | io.hass.arch="${BUILD_ARCH}" \ 48 | io.hass.type="addon" \ 49 | io.hass.version=${BUILD_VERSION} \ 50 | maintainer="John Dowling " \ 51 | org.label-schema.description="JPD Hass.io Add-ons: ${BUILD_ARCH} IPFire" \ 52 | org.label-schema.build-date=${BUILD_DATE} \ 53 | org.label-schema.name="Addon IPFire for ${BUILD_ARCH}" \ 54 | org.label-schema.schema-version="1.0" \ 55 | org.label-schema.url="https://addons.community" \ 56 | org.label-schema.usage="https://github.com/johnpdowling/hassio-addons/ipfire/blob/master/README.md" \ 57 | org.label-schema.vcs-ref=${REF} \ 58 | org.label-schema.vcs-url="https://github.com/johnpdowling/hassio-addons/ipfire" \ 59 | org.label-schema.vendor="John Dowling" 60 | ############################################## 61 | -------------------------------------------------------------------------------- /z-way-server/rootfs/etc/cont-init.d/z-way-server.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | # 
============================================================================== 3 | # JPD Hass.io Add-ons: Z-Way Server 4 | # Configures the Z-Way Server 5 | # ============================================================================== 6 | 7 | bashio::log.debug 'Creating config folder (if necessary)...' 8 | mkdir -p /config/z-way-server/config 9 | bashio::log.debug 'Creating storage folder (if necessary)...' 10 | mkdir -p /config/z-way-server/storage 11 | bashio::log.debug 'Creating user modules folder (if necessary)...' 12 | mkdir -p /config/z-way-server/userModules 13 | bashio::log.debug 'Creating zddx folder (if necessary)...' 14 | mkdir -p /config/z-way-server/ZDDX 15 | 16 | bashio::log.debug 'Copying default config files (if necessary)...' 17 | [ -z "$(ls -A /config/z-way-server/config)" ] && cp -r /opt/z-way-server/config/. /config/z-way-server/config/ 18 | bashio::log.debug 'Linking config files...' 19 | rm -rf /opt/z-way-server/config && ln -s /config/z-way-server/config /opt/z-way-server/config 20 | 21 | bashio::log.debug 'Copying default storage files (if necessary)...' 22 | [ -z "$(ls -A /config/z-way-server/storage)" ] && cp -r /opt/z-way-server/automation/storage/. /config/z-way-server/storage/ 23 | bashio::log.debug 'Linking storage files...' 24 | rm -rf /opt/z-way-server/automation/storage && ln -s /config/z-way-server/storage /opt/z-way-server/automation/storage 25 | 26 | bashio::log.debug 'Copying default user modules files (if necessary)...' 27 | [ -z "$(ls -A /config/z-way-server/userModules)" ] && cp -r /opt/z-way-server/automation/userModules/. /config/z-way-server/userModules/ 28 | bashio::log.debug 'Linking user modules files...' 29 | rm -rf /opt/z-way-server/automation/userModules && ln -s /config/z-way-server/userModules /opt/z-way-server/automation/userModules 30 | 31 | bashio::log.debug 'Copying default zddx files (if necessary)...' 32 | [ -z "$(ls -A /config/z-way-server/ZDDX)" ] && cp -r /opt/z-way-server/ZDDX/. /config/z-way-server/ZDDX/ 33 | bashio::log.debug 'Linking zddx files...' 34 | rm -rf /opt/z-way-server/ZDDX && ln -s /config/z-way-server/ZDDX /opt/z-way-server/ZDDX 35 | -------------------------------------------------------------------------------- /kegbot/rootfs/etc/services.d/kegbot/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: Kegbot Server 4 | # Runs the Kegbot daemon 5 | # ============================================================================== 6 | set -e 7 | CONFIG_PATH=/data/options.json 8 | 9 | # Wait for other services to become available 10 | #s6-svwait -u -t 5000 /var/run/s6/services/nginx 11 | s6-svwait -u -t 5000 /var/run/s6/services/redis 12 | 13 | die() { 14 | bashio::log.error "$@" 15 | exit 1 16 | } 17 | 18 | do_mysql() { 19 | mysql -B -u "${KEGBOT_DB_USER}" --password="${KEGBOT_DB_PASS}" -h "${KEGBOT_DB_HOST}" -P ${KEGBOT_DB_PORT} "${@}" 20 | return $? 21 | } 22 | 23 | no_tables() { 24 | local return_str=$(mysql -B -u "${KEGBOT_DB_USER}" --password="${KEGBOT_DB_PASS}" -h "${KEGBOT_DB_HOST}" -P ${KEGBOT_DB_PORT} "${KEGBOT_DB_NAME}" -e "show tables") 25 | bashio::log.info "Table_len: ${#return_str}" 26 | return ${#return_str} 27 | } 28 | 29 | wait_for_mysql() { 30 | nc -z $KEGBOT_DB_HOST $KEGBOT_DB_PORT || sleep 30 31 | if !
do_mysql "${KEGBOT_DB_NAME}" -e "show tables"; then 32 | #no db 33 | do_mysql -e "create database ${KEGBOT_DB_NAME};" 34 | fi 35 | if no_tables; then 36 | #db empty 37 | /usr/bin/pipenv run kegbot migrate --noinput -v 0 38 | do_mysql "${KEGBOT_DB_NAME}" -e "show tables" 39 | fi 40 | } 41 | 42 | # Perform first-launch setup. 43 | maybe_setup_kegbot() { 44 | if [ ! -d "${KEGBOT_SETTINGS_DIR}" ]; then 45 | mkdir -p ${KEGBOT_SETTINGS_DIR}/media 46 | mv /kegbot-data/local_settings.py ${KEGBOT_SETTINGS_DIR} 47 | fi 48 | 49 | #kegbot collectstatic --noinput -v 0 50 | #do_mysql -e "create database ${KEGBOT_DB_NAME};" || die "Could not create database." 51 | true 52 | } 53 | 54 | bashio::log.info "Checking environment..." 55 | cd /app 56 | env 57 | bashio::log.info "Checking Kegbot..." 58 | maybe_setup_kegbot 59 | bashio::log.info "Waiting for Redis..." 60 | redis-cli -h "${KEGBOT_REDIS_HOST}" -p ${KEGBOT_REDIS_PORT} ping 61 | bashio::log.info "Waiting for MySQL..." 62 | wait_for_mysql 63 | bashio::log.info "Starting Kegbot..." 64 | echo `date` >> /kegbot-data/runlog 65 | /usr/bin/pipenv run gunicorn pykeg.web.wsgi:application -w 3 -b 0.0.0.0:8000 66 | #kegbot run_all --logs_dir=/kegbot-data --gunicorn_options="-w 3 -b 127.0.0.1:8000" 67 | -------------------------------------------------------------------------------- /keras-rest/rootfs/app/modelsaver/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | Keras model to TensorRT graph script that downloads keras models and coverts them to tensor graphs 3 | Adapted from https://www.dlology.com/blog/how-to-run-keras-model-on-jetson-nano/ 4 | """ 5 | import base64 6 | import json 7 | import os 8 | import sys 9 | import time 10 | import tensorflow as tf 11 | import tensorflow.contrib.tensorrt as trt 12 | 13 | from tensorflow.python.framework import graph_io 14 | from tensorflow.keras.models import load_model 15 | from keras.applications import ResNet50 16 | 17 | model_folder = '/config/keras-rest/models' 18 | model_name = os.environ['MODEL_NAME'] 19 | 20 | def freeze_graph(graph, session, output, save_pb_dir='/config/keras-rest/models', save_pb_name='frozen_model.pb', save_pb_as_text=False): 21 | with graph.as_default(): 22 | graphdef_inf = tf.graph_util.remove_training_nodes(graph.as_graph_def()) 23 | graphdef_frozen = tf.graph_util.convert_variables_to_constants(session, graphdef_inf, output) 24 | graph_io.write_graph(graphdef_frozen, save_pb_dir, save_pb_name, as_text=save_pb_as_text) 25 | return graphdef_frozen 26 | 27 | def save_model(): 28 | #get the model, save the model 29 | model = ResNet50(weights=model_name) 30 | model.save(model_folder + '/' + model_name + '.h5') 31 | # Clear any previous session. 32 | tf.keras.backend.clear_session() 33 | 34 | # This line must be executed before loading Keras model. 
35 | tf.keras.backend.set_learning_phase(0) 36 | model = load_model(model_folder + '/' + model_name + '.h5') 37 | session = tf.keras.backend.get_session() 38 | input_names = [t.op.name for t in model.inputs] 39 | output_names = [t.op.name for t in model.outputs] 40 | # freeze the model to a graph 41 | frozen_graph = freeze_graph(session.graph, session, [out.op.name for out in model.outputs], save_pb_dir=model_folder) 42 | trt_graph = trt.create_inference_graph( 43 | input_graph_def=frozen_graph, 44 | outputs=output_names, 45 | max_batch_size=1, 46 | max_workspace_size_bytes=1 << 25, 47 | precision_mode='FP16', 48 | minimum_segment_size=50 49 | ) 50 | graph_io.write_graph(trt_graph, model_folder + '/', 51 | model_name + '.pb', as_text=False) 52 | 53 | if __name__ == "__main__": 54 | os.makedirs(model_folder, exist_ok=True) 55 | save_model() 56 | -------------------------------------------------------------------------------- /openwrt/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BUILD_FROM 2 | FROM $BUILD_FROM 3 | 4 | ARG BUILD_ARCH 5 | 6 | # Environment variables 7 | ENV \ 8 | DEBIAN_FRONTEND="noninteractive" \ 9 | HOME="/root" \ 10 | LANG="C.UTF-8" \ 11 | PS1="$(whoami)@$(hostname):$(pwd)$ " \ 12 | S6_BEHAVIOUR_IF_STAGE2_FAILS=2 \ 13 | S6_CMD_WAIT_FOR_SERVICES=1 \ 14 | TERM="xterm-256color" 15 | 16 | # Copy root filesystem 17 | COPY rootfs / 18 | 19 | # Set shell 20 | SHELL ["/bin/ash", "-o", "pipefail", "-c"] 21 | 22 | #RUN mkdir /var/lock 23 | 24 | # Install system 25 | RUN apk update && \ 26 | apk add --no-cache \ 27 | docker \ 28 | iw \ 29 | rsync \ 30 | dnsmasq \ 31 | dropbear \ 32 | openntpd \ 33 | rng-tools 34 | 35 | RUN if [ "${BUILD_ARCH}" = "i386" ]; then curl -o /tmp/openwrt.tar.gz http://downloads.openwrt.org/releases/19.07.1/targets/x86/generic/openwrt-19.07.1-x86-generic-generic-rootfs.tar.gz; fi 36 | RUN if [ "${BUILD_ARCH}" = "amd64" ]; then curl -o /tmp/openwrt.tar.gz http://downloads.openwrt.org/releases/19.07.1/targets/x86/64/openwrt-19.07.1-x86-64-generic-rootfs.tar.gz; fi 37 | RUN if [ "${BUILD_ARCH}" = "armv7" ]; then curl -o /tmp/openwrt.tar.gz http://downloads.openwrt.org/releases/19.07.1/targets/armvirt/32/openwrt-19.07.1-armvirt-32-default-rootfs.tar.gz; fi 38 | RUN if [ "${BUILD_ARCH}" = "aarch64" ]; then curl -o /tmp/openwrt.tar.gz http://downloads.openwrt.org/releases/19.07.1/targets/armvirt/64/openwrt-19.07.1-armvirt-64-default-rootfs.tar.gz; fi 39 | RUN mkdir /tmp/openwrt && tar -zxvf /tmp/openwrt.tar.gz -C /tmp/openwrt && \ 40 | rm /tmp/openwrt/etc/hosts && \ 41 | rm /tmp/openwrt/etc/resolv.conf && \ 42 | rm -rf /tmp/openwrt/var && \ 43 | rm -rf /tmp/openwrt/init 44 | # && \ 45 | 46 | # rm -rf /tmp/openwrt/etc/init.d 47 | # rm -rf /tmp/openwrt/etc 48 | 49 | #RUN cp -rn /tmp/openwrt/* / 50 | RUN rsync --ignore-existing --recursive --quiet /tmp/openwrt/ / 51 | 52 | # Build arguments 53 | ARG BUILD_DATE 54 | ARG BUILD_REF 55 | ARG BUILD_VERSION 56 | 57 | # Labels 58 | LABEL \ 59 | io.hass.name="Addon OpenWRT for ${BUILD_ARCH}" \ 60 | io.hass.description="JPD Hass.io Add-ons: ${BUILD_ARCH} OpenWRT image" \ 61 | io.hass.arch="${BUILD_ARCH}" \ 62 | io.hass.type="base" \ 63 | io.hass.version=${BUILD_VERSION} \ 64 | maintainer="John Dowling " \ 65 | org.label-schema.description="JPD Hass.io Add-ons: ${BUILD_ARCH} OpenWRT image" \ 66 | org.label-schema.build-date=${BUILD_DATE} \ 67 | org.label-schema.name="Addon OpenWRT for ${BUILD_ARCH}" \ 68 | org.label-schema.schema-version="1.0" \ 69 |
org.label-schema.url="https://addons.community" \ 70 | org.label-schema.usage="https://github.com/johnpdowling/hassio-addons/tree/master/openwrt/README.md" \ 71 | org.label-schema.vcs-ref=${REF} \ 72 | org.label-schema.vcs-url="https://github.com/johnpdowling/hassio-addons/openwrt" \ 73 | org.label-schema.vendor="John Dowling" 74 | -------------------------------------------------------------------------------- /keras-rest/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BUILD_FROM=youdownwithjpd/addon-nvidia-base:latest 2 | # hadolint ignore=DL3006 3 | FROM ${BUILD_FROM} 4 | 5 | # Set shell 6 | SHELL ["/bin/bash", "-o", "pipefail", "-c"] 7 | 8 | # Initialize constants used to control image spatial dimensions and data type 9 | ENV REDIS_HOST=localhost 10 | ENV REDIS_PORT=6379 11 | ENV IMAGE_QUEUE=image_queue 12 | ENV IMAGE_WIDTH=224 13 | ENV IMAGE_HEIGHT=224 14 | ENV IMAGE_CHANS=3 15 | ENV IMAGE_DTYPE=float32 16 | ENV BATCH_SIZE=32 17 | # Time in ms between each poll by model server against Redis 18 | ENV SERVER_SLEEP=0.25 19 | # Time in ms between each poll by web server against Redis 20 | ENV CLIENT_SLEEP=0.25 21 | # Num tries by web server to retrieve results from Redis before giving up 22 | ENV CLIENT_MAX_TRIES=100 23 | # TensorFlow vars 24 | ENV TF_FORCE_GPU_ALLOW_GROWTH=true 25 | 26 | # Install server, nvidia tensorflow-gpu reqs 27 | RUN \ 28 | apt-get update && \ 29 | apt-get install -y --fix-missing --no-install-recommends \ 30 | redis \ 31 | libhdf5-serial-dev \ 32 | hdf5-tools \ 33 | libhdf5-dev \ 34 | zlib1g-dev \ 35 | zip \ 36 | git \ 37 | libjpeg8-dev && \ 38 | # Install Shane's webserver reqs w/ nvidia flavors 39 | pip3 install --pre --no-cache-dir --extra-index-url https://developer.download.nvidia.com/compute/redist/jp/v42 \ 40 | tensorflow-gpu && \ 41 | pip3 install --pre --no-cache-dir \ 42 | uvicorn \ 43 | gunicorn \ 44 | fastapi \ 45 | redis \ 46 | pillow \ 47 | python-multipart \ 48 | email-validator && \ 49 | # Install keras 50 | apt-get install -y --fix-missing --no-install-recommends \ 51 | python3-keras && \ 52 | rm -fr \ 53 | /var/{cache,log}/* \ 54 | /var/lib/apt/lists/* 55 | 56 | # Copy root filesystem 57 | COPY rootfs / 58 | 59 | # Build arugments 60 | ARG BUILD_DATE 61 | ARG BUILD_REF 62 | ARG BUILD_VERSION 63 | 64 | # Labels 65 | LABEL \ 66 | io.hass.name="Keras REST Server" \ 67 | io.hass.description="RESTful server for Keras engine" \ 68 | io.hass.arch="${BUILD_ARCH}" \ 69 | io.hass.type="addon" \ 70 | io.hass.version=${BUILD_VERSION} \ 71 | maintainer="John Dowling " \ 72 | org.label-schema.description="JPD Hass.io Add-ons: ${BUILD_ARCH} Keras REST Server" \ 73 | org.label-schema.build-date=${BUILD_DATE} \ 74 | org.label-schema.name="Addon Keras REST Server for ${BUILD_ARCH}" \ 75 | org.label-schema.schema-version="1.0" \ 76 | org.label-schema.url="https://addons.community" \ 77 | org.label-schema.usage="https://github.com/johnpdowling/hassio-addons/keras-rest/blob/master/README.md" \ 78 | org.label-schema.vcs-ref=${REF} \ 79 | org.label-schema.vcs-url="https://github.com/johnpdowling/hassio-addons/keras-rest" \ 80 | org.label-schema.vendor="John Dowling" 81 | ############################################## 82 | -------------------------------------------------------------------------------- /rtl4332mqtt/Dockerfile: -------------------------------------------------------------------------------- 1 | # Docker file to create an image for a hass.io add-on that contains enough software to listen to events via 
RTL_SDR/RTL_433 and then publish them to a MQTT broker. 2 | # The script resides in a volume and can be modified to meet your needs. 3 | # This hass.io addon is based James Fry's add-on of on Chris Kacerguis' project here: https://github.com/chriskacerguis/honeywell2mqtt, 4 | # which is in turn based on Marco Verleun's rtl2mqtt image here: https://github.com/roflmao/rtl2mqtt 5 | 6 | # IMPORTANT: The container needs privileged access to /dev/bus/usb on the host. 7 | 8 | ARG BUILD_FROM=hassioaddons/base:5.0.1 9 | # hadolint ignore=DL3006 10 | FROM ${BUILD_FROM} 11 | 12 | ENV LANG C.UTF-8 13 | 14 | ENV LD_LIBRARY_PATH=/lib:/libexec:/usr/lib:/usr/local/lib:/usr/local/lib64 15 | 16 | LABEL Description="" 17 | 18 | # 19 | # First install our system 20 | # 21 | COPY rootfs / 22 | 23 | # 24 | # Then install software packages needed to compile rtl_433 and to publish MQTT events 25 | # 26 | RUN apk add --no-cache --virtual build-deps alpine-sdk cmake git libusb-dev && \ 27 | mkdir /tmp/src && \ 28 | cd /tmp/src && \ 29 | git clone git://git.osmocom.org/rtl-sdr.git && \ 30 | mkdir /tmp/src/rtl-sdr/build && \ 31 | cd /tmp/src/rtl-sdr/build && \ 32 | cmake ../ -DINSTALL_UDEV_RULES=ON -DDETACH_KERNEL_DRIVER=ON -DCMAKE_INSTALL_PREFIX:PATH=/usr/local && \ 33 | make && \ 34 | make install && \ 35 | chmod +s /usr/local/bin/rtl_* && \ 36 | cd /tmp/src/ && \ 37 | git clone https://github.com/merbanan/rtl_433 && \ 38 | cd rtl_433/ && \ 39 | mkdir build && \ 40 | cd build && \ 41 | cmake ../ && \ 42 | make && \ 43 | make install && \ 44 | apk del build-deps && \ 45 | rm -r /tmp/src && \ 46 | apk add --no-cache libusb mosquitto-clients jq 47 | 48 | # 49 | # Define an environment variable 50 | # 51 | # Use this variable when creating a container to specify the MQTT broker host. 
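# (Illustrative example, not part of the original build: when running the image by hand, the defaults below can be overridden at the command line, e.g.
#   docker run --privileged -v /dev/bus/usb:/dev/bus/usb -e MQTT_HOST=192.168.1.10 -e MQTT_USER=rtl -e MQTT_PASS=secret <image>
# The host/user/pass/topic variables below are the knobs the bundled script reads.)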
52 | ENV MQTT_HOST="hassio.local" 53 | ENV MQTT_USER="guest" 54 | ENV MQTT_PASS="guest" 55 | ENV MQTT_TOPIC="homeassistant/sensor/rtl433" 56 | 57 | # Build arguments 58 | ARG BUILD_ARCH 59 | ARG BUILD_DATE 60 | ARG BUILD_REF 61 | ARG BUILD_VERSION 62 | 63 | # Labels 64 | LABEL \ 65 | io.hass.name="RTL433 to MQTT Bridge" \ 66 | io.hass.description="This image is used to start a script that will monitor for RF events on 433MHz (configurable) and send the data to an MQTT server" \ 67 | io.hass.arch="${BUILD_ARCH}" \ 68 | io.hass.type="addon" \ 69 | io.hass.version=${BUILD_VERSION} \ 70 | maintainer="John Dowling " \ 71 | org.label-schema.description="RTL433 to MQTT Bridge script" \ 72 | org.label-schema.build-date=${BUILD_DATE} \ 73 | org.label-schema.name="RTL433 to MQTT Bridge" \ 74 | org.label-schema.schema-version="1.0" \ 75 | org.label-schema.url="" \ 76 | org.label-schema.usage="https://github.com/johnpdowling/hassio-addons/tree/master/rtl4332mqtt/README.md" \ 77 | org.label-schema.vcs-ref=${BUILD_REF} \ 78 | org.label-schema.vcs-url="https://github.com/johnpdowling/hassio-addons/rtl4332mqtt" \ 79 | org.label-schema.vendor="JPD Hass.io Add-ons" 80 | -------------------------------------------------------------------------------- /owntone/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BUILD_FROM=hassioaddons/debian-base 2 | # hadolint ignore=DL3006 3 | FROM ${BUILD_FROM} 4 | 5 | # Set shell 6 | SHELL ["/bin/bash", "-o", "pipefail", "-c"] 7 | 8 | VOLUME /etc/localtime:/etc/localtime:ro 9 | 10 | 11 | 12 | # Copy root filesystem 13 | COPY rootfs / 14 | 15 | #from owntone-server instructions 16 | # hadolint ignore=DL3003 17 | RUN apt-get update 18 | RUN mkdir -p /usr/share/man/man1 && \ 19 | apt-get install -y --fix-missing --no-install-recommends \ 20 | build-essential git autotools-dev autoconf automake libtool gettext gawk \ 21 | libavcodec-dev libavformat-dev libavfilter-dev libswscale-dev libavutil-dev \ 22 | libasound2-dev libmxml-dev libgcrypt20-dev libavahi-client-dev zlib1g-dev \ 23 | libevent-dev libplist-dev libsodium-dev libjson-c-dev libwebsockets-dev \ 24 | libcurl4-openssl-dev libprotobuf-c-dev \ 25 | libconfuse-dev libunistring-dev libsqlite3-dev \ 26 | gperf libantlr3c-dev antlr3 \ 27 | avahi-daemon avahi-discover avahi-utils libnss-mdns haproxy 28 | 29 | WORKDIR /tmp 30 | RUN git clone https://github.com/owntone/owntone-server.git 31 | WORKDIR /tmp/owntone-server 32 | RUN autoreconf -fi 33 | RUN ./configure --with-libwebsockets 34 | RUN make 35 | RUN make install 36 | 37 | WORKDIR /usr/local/etc 38 | RUN sed -i -e 's/\(uid.*=.*\)/uid = "root"/g' owntone.conf 39 | RUN sed -i s#"ipv6 = yes"#"ipv6 = no"#g owntone.conf 40 | RUN sed -i s#/srv/music#/config/owntone/music#g owntone.conf 41 | RUN sed -i s#/usr/local/var/cache/owntone/songs3.db#/config/owntone/cache/songs3.db#g owntone.conf 42 | RUN sed -i s#/usr/local/var/cache/owntone/cache.db#/config/owntone/cache/cache.db#g owntone.conf 43 | RUN sed -i s#/usr/local/var/log/owntone.log#/dev/stdout#g owntone.conf 44 | RUN sed -i "/websocket_port\ =/ s/# *//" owntone.conf 45 | RUN sed -i "/trusted_networks\ =/ s/# *//" owntone.conf 46 | RUN sed -i "/pipe_autostart\ =/ s/# *//" owntone.conf 47 | RUN sed -i "/db_path\ =/ s/# *//" owntone.conf 48 | RUN sed -i "/cache_path\ =/ s/# *//" owntone.conf 49 | 50 | 51 | # Build arguments 52 | ARG BUILD_ARCH 53 | ARG 
BUILD_DATE 54 | ARG BUILD_REF 55 | ARG BUILD_VERSION 56 | 57 | # Labels 58 | LABEL \ 59 | io.hass.name="OwnTone Server" \ 60 | io.hass.description="The OwnTone server program" \ 61 | io.hass.arch="${BUILD_ARCH}" \ 62 | io.hass.type="addon" \ 63 | io.hass.version=${BUILD_VERSION} \ 64 | maintainer="John Dowling " \ 65 | org.label-schema.description="The OwnTone server program" \ 66 | org.label-schema.build-date=${BUILD_DATE} \ 67 | org.label-schema.name="OwnTone Server" \ 68 | org.label-schema.schema-version="1.0" \ 69 | org.label-schema.url="" \ 70 | org.label-schema.usage="https://github.com/johnpdowling/hassio-addons/tree/master/owntone/README.md" \ 71 | org.label-schema.vcs-ref=${BUILD_REF} \ 72 | org.label-schema.vcs-url="https://github.com/johnpdowling/hassio-addons/owntone" \ 73 | org.label-schema.vendor="JPD Hass.io Add-ons" 74 | -------------------------------------------------------------------------------- /rtl4332mqtt/README.md: -------------------------------------------------------------------------------- 1 | ### Note 2 | This is a fork to allow multiple protocols. See original project here: https://github.com/james-fry/hassio-addons/tree/master/rtl4332mqtt 3 | 4 | # RTL433 to MQTT Bridge hass.io addon 5 | A hass.io addon for a software defined radio tuned to listen for 433MHz RF transmissions and republish the data via MQTT 6 | 7 | This hass.io addon is based on Chris Kacerguis' project here: https://github.com/chriskacerguis/honeywell2mqtt, 8 | which is in turn based on Marco Verleun's rtl2mqtt image here: https://github.com/roflmao/rtl2mqtt 9 | 10 | ## Usage 11 | 12 | 1) Install the addon. 13 | 14 | 2) Use addon configuration to configure: 15 | - mqtt_host 16 | - mqtt_user 17 | - mqtt_password 18 | - mqtt_topic 19 | - protocol (see https://github.com/merbanan/rtl_433 for more details inc protocol IDs) 20 | 21 | 3) Copy rtl2mqtt.sh to your hass.io config dir in a subdir called rtl4332mqtt. 22 | i.e. 23 | .../config/rtl4332mqtt/rtl2mqtt.sh 24 | This allows you to edit the start script if you need to make any changes 25 | 26 | NOTE that some people have reported issues using samba to copy the script. For some reason it does not get copied to the container on start up of the addon. If you see this issue, please scp the script to your hassio config folder, or ssh in and edit the file locally with vi/nano. 27 | 28 | Also there is now an example rtl2mqtt script with MQTT sensor autodiscovery. This is very hard coded to a single sensor currentcost, but might be an interesting example for others to repurpose... 29 | 30 | 31 | 4) Start the addon 32 | 33 | 34 | ## MQTT Data 35 | 36 | Data to the MQTT server will depend on the protocol. 
37 | Chris tested Honeywell devices and the JSON is as follows: 38 | 39 | ```json 40 | { 41 | "time" : "2017-08-17 13:18:58", 42 | "model" : "Honeywell Door/Window Sensor", 43 | "id" : 547651, 44 | "channel" : 8, 45 | "event" : 4, 46 | "state" : "closed", 47 | "heartbeat" : "yes" 48 | } 49 | ``` 50 | 51 | I have tested CurrentCost devices and the JSON is as follows: 52 | 53 | ```json 54 | { 55 | "time" : "2017-10-16 20:53:09", 56 | "model" : "CurrentCost TX", 57 | "dev_id" : 3063, 58 | "power0" : 617, 59 | "power1" : 0, 60 | "power2" : 0 61 | } 62 | ``` 63 | 64 | ## Hardware 65 | 66 | This has been tested and used with the following hardware (you can get it on Amazon) 67 | 68 | Chris: 69 | - Honeywell Ademco 5818MNL Recessed Door Transmitter 70 | - 5800MINI Wireless Door/Window Contact by Honeywell 71 | - NooElec NESDR Nano 2+ Tiny Black RTL-SDR USB 72 | 73 | Me: 74 | - CurrentCost TX: http://www.ebay.co.uk/itm/Current-Cost-Envi-R-Energy-Monitor-Smart-Electric-Meter-/152084708754 75 | - Super cheap RTL dongle: http://www.ebay.co.uk/itm/Mini-USB-DVB-T-RTL-SDR-Realtek-RTL2832U-R820T-Stick-Receiver-Dongle-MCX-Input-PK/222637370515 76 | 77 | 78 | ## Troubleshooting 79 | 80 | If you see this error: 81 | 82 | > Kernel driver is active, or device is claimed by second instance of librtlsdr. 83 | > In the first case, please either detach or blacklist the kernel module 84 | > (dvb_usb_rtl28xxu), or enable automatic detaching at compile time. 85 | 86 | Then run the following command on the host 87 | 88 | ```bash 89 | sudo rmmod dvb_usb_rtl28xxu rtl2832 90 | ``` 91 | -------------------------------------------------------------------------------- /kegbot/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BUILD_FROM=hassioaddons/python-base:latest 2 | # hadolint ignore=DL3006 3 | FROM ${BUILD_FROM} 4 | 5 | ENV SHELL=/bin/sh \ 6 | PIP_NO_CACHE_DIR=1 \ 7 | KEGBOT_DATA_DIR=/kegbot-data \ 8 | KEGBOT_IN_DOCKER=True \ 9 | KEGBOT_ENV=debug 10 | #ENV PATH="/app/bin:${PATH}" 11 | #ENV PYTHONPATH="/app:/app/bin:${PYTHONPATH}" 12 | #ENV PYTHON3PATH="/app:/app/bin:${PYTHON3PATH}" 13 | # other sets 14 | ENV KEGBOT_REDIS_HOST=localhost 15 | ENV KEGBOT_REDIS_PORT=6379 16 | ENV KEGBOT_SETTINGS_DIR=/config/kegbot/ 17 | ENV KEGBOT_MEDIA_ROOT=/config/kegbot/media/ 18 | # export KEGBOT_DATA_DIR=/config/kegbot/kegbot-data/ 19 | ENV KEGBOT_VERSION=master 20 | ENV LANG C.UTF-8 21 | 22 | RUN apk update && \ 23 | apk add --no-cache build-base python3-dev \ 24 | bash nginx redis curl gcc musl-dev mysql-client sed \ 25 | zlib-dev libjpeg-turbo libjpeg-turbo-dev libjpeg openjpeg \ 26 | 'libcrypto1.1>1.1.1d-r2' 'libssl1.1>1.1.1d-r2' libffi-dev \ 27 | mariadb-dev mariadb-connector-c-dev postgresql-dev 28 | 29 | COPY requirements.txt /tmp/ 30 | RUN ln -s /usr/bin/python3 /usr/bin/python && \ 31 | pip3 install -U -r /tmp/requirements.txt 32 | 33 | COPY rootfs / 34 | RUN echo "# shellcheck source=lib/kegbot.sh" >> /usr/lib/bashio/bashio.sh && \ 35 | echo "source \"\${__BASHIO_LIB_DIR}/kegbot.sh\"" >> /usr/lib/bashio/bashio.sh 36 | 37 | ADD kegbot.conf /etc/nginx/conf.d/default.conf 38 | ADD nginx.conf /etc/nginx/nginx.conf 39 | 40 | #kb 41 | 42 | RUN curl -k -L https://github.com/Kegbot/kegbot-server/archive/${KEGBOT_VERSION}.tar.gz -o /tmp/kegbot.tar.gz \ 43 | && tar -xf /tmp/kegbot.tar.gz -C /tmp/ \ 44 | && rm /tmp/kegbot-server-${KEGBOT_VERSION}/Pipfile.lock \ 45 | && sed -i s#"protobuf = \"\*\""#"protobuf = \"3.12.2\""#g /tmp/kegbot-server-${KEGBOT_VERSION}/Pipfile \ 46 | && mkdir /app
\ 47 | && cp -r /tmp/kegbot-server-${KEGBOT_VERSION}/* /app/ \ 48 | && rm -rf /tmp/kegbot* 49 | 50 | WORKDIR /app 51 | RUN pipenv install --deploy 52 | RUN pipenv run python setup.py develop 53 | RUN pipenv run kegbot collectstatic -v 0 --noinput 54 | #RUN python setup.py develop 55 | 56 | RUN mkdir -p /kegbot-data/ 57 | COPY local_settings.py /kegbot-data/ 58 | 59 | #VOLUME ["/kegbot-data"] 60 | 61 | #EXPOSE 8000 62 | #CMD ["/usr/local/bin/pipenv", "run", "gunicorn", "pykeg.web.wsgi:application", "-b", "0.0.0.0:8000"] 63 | 64 | # Build arguments 65 | ARG BUILD_ARCH 66 | ARG BUILD_DATE 67 | ARG BUILD_REF 68 | ARG BUILD_VERSION 69 | 70 | # Labels 71 | LABEL \ 72 | io.hass.name="Kegbot Server" \ 73 | io.hass.description="The kegbot server program" \ 74 | io.hass.arch="${BUILD_ARCH}" \ 75 | io.hass.type="addon" \ 76 | io.hass.version=${BUILD_VERSION} \ 77 | maintainer="John Dowling " \ 78 | org.label-schema.description="The kegbot server program" \ 79 | org.label-schema.build-date=${BUILD_DATE} \ 80 | org.label-schema.name="Kegbot Server" \ 81 | org.label-schema.schema-version="1.0" \ 82 | org.label-schema.url="" \ 83 | org.label-schema.usage="https://github.com/johnpdowling/hassio-addons/tree/master/kegbot/README.md" \ 84 | org.label-schema.vcs-ref=${BUILD_REF} \ 85 | org.label-schema.vcs-url="https://github.com/johnpdowling/hassio-addons/kegbot" \ 86 | org.label-schema.vendor="JPD Hass.io Add-ons" 87 | -------------------------------------------------------------------------------- /keras-rest/rootfs/app/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | Web server script that exposes REST endpoint and pushes images to Redis for classification by model server. Polls 3 | Redis for response from model server. 4 | Adapted from https://www.pyimagesearch.com/2018/02/05/deep-learning-production-keras-redis-flask-apache/ 5 | """ 6 | import base64 7 | import io 8 | import json 9 | import os 10 | import time 11 | import uuid 12 | 13 | from keras.preprocessing.image import img_to_array 14 | from keras.applications import imagenet_utils 15 | import numpy as np 16 | from PIL import Image 17 | import redis 18 | 19 | from fastapi import FastAPI, File, HTTPException 20 | from starlette.requests import Request 21 | 22 | app = FastAPI() 23 | db = redis.StrictRedis(host=os.environ.get("REDIS_HOST")) 24 | 25 | CLIENT_MAX_TRIES = int(os.environ.get("CLIENT_MAX_TRIES")) 26 | 27 | 28 | def prepare_image(image, target): 29 | # If the image mode is not RGB, convert it 30 | if image.mode != "RGB": 31 | image = image.convert("RGB") 32 | 33 | # Resize the input image and preprocess it 34 | image = image.resize(target) 35 | image = img_to_array(image) 36 | image = np.expand_dims(image, axis=0) 37 | image = imagenet_utils.preprocess_input(image) 38 | 39 | # Return the processed image 40 | return image 41 | 42 | 43 | @app.get("/") 44 | def index(): 45 | return "Hello World!" 
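# (Illustrative usage sketch, not part of the original file: the /predict
# endpoint below expects a multipart form field named "img_file", so a
# client call could look like
#   curl -X POST -F "img_file=@dog.jpg" http://<addon-host>:<port>/predict
# and returns {"success": true, "predictions": [...]} once the model
# server has processed the queued image.)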
46 | 47 | 48 | @app.post("/predict") 49 | def predict(request: Request, img_file: bytes=File(...)): 50 | data = {"success": False} 51 | 52 | if request.method == "POST": 53 | image = Image.open(io.BytesIO(img_file)) 54 | image = prepare_image(image, 55 | (int(os.environ.get("IMAGE_WIDTH")), 56 | int(os.environ.get("IMAGE_HEIGHT"))) 57 | ) 58 | 59 | # Ensure our NumPy array is C-contiguous as well, otherwise we won't be able to serialize it 60 | image = image.copy(order="C") 61 | 62 | # Generate an ID for the classification then add the classification ID + image to the queue 63 | k = str(uuid.uuid4()) 64 | image = base64.b64encode(image).decode("utf-8") 65 | d = {"id": k, "image": image} 66 | db.rpush(os.environ.get("IMAGE_QUEUE"), json.dumps(d)) 67 | 68 | # Keep looping for CLIENT_MAX_TRIES times 69 | num_tries = 0 70 | while num_tries < CLIENT_MAX_TRIES: 71 | num_tries += 1 72 | 73 | # Attempt to grab the output predictions 74 | output = db.get(k) 75 | 76 | # Check to see if our model has classified the input image 77 | if output is not None: 78 | # Add the output predictions to our data dictionary so we can return it to the client 79 | output = output.decode("utf-8") 80 | data["predictions"] = json.loads(output) 81 | 82 | # Delete the result from the database and break from the polling loop 83 | db.delete(k) 84 | break 85 | 86 | # Sleep for a small amount to give the model a chance to classify the input image 87 | time.sleep(float(os.environ.get("CLIENT_SLEEP"))) 88 | 89 | # Indicate that the request was a success 90 | data["success"] = True 91 | else: 92 | raise HTTPException(status_code=400, detail="Request failed after {} tries".format(CLIENT_MAX_TRIES)) 93 | 94 | # Return the data dictionary as a JSON response 95 | return data 96 | -------------------------------------------------------------------------------- /z-way-server/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BUILD_FROM=youdownwithjpd/addon-raspbian-base:latest 2 | # hadolint ignore=DL3006 3 | FROM ${BUILD_FROM} 4 | 5 | # Set shell 6 | SHELL ["/bin/bash", "-o", "pipefail", "-c"] 7 | 8 | # Build z-way 9 | 10 | ENV TZ=America/Los_Angeles 11 | 12 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone 13 | 14 | # Install additional packages that help to add Z-Wave.Me repository 15 | RUN apt-get update && \ 16 | apt-get install -y --no-install-recommends \ 17 | dirmngr \ 18 | apt-transport-https \ 19 | gnupg \ 20 | wget && \ 21 | # Add Z-Wave.Me repository 22 | apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 0x7E148E3C && \ 23 | echo "deb https://repo.z-wave.me/z-way/raspbian stretch main" > /etc/apt/sources.list.d/z-wave-me.list && \ 24 | # Add mosquitto repository 25 | wget http://repo.mosquitto.org/debian/mosquitto-repo.gpg.key && \ 26 | apt-key add mosquitto-repo.gpg.key && \ 27 | wget http://repo.mosquitto.org/debian/mosquitto-stretch.list -P /etc/apt/sources.list.d/ && \ 28 | apt-get update && \ 29 | # upgrade and install everything zway needs itself in one go 30 | apt-get install --reinstall -y --no-install-recommends \ 31 | mosquitto \ 32 | mosquitto-clients \ 33 | z-way-full \ 34 | z-way-server \ 35 | zbw \ 36 | webif 37 | 38 | # wget \ 39 | # sharutils \ 40 | # gawk \ 41 | # libc-ares2 \ 42 | # libavahi-compat-libdnssd-dev \ 43 | # libarchive-dev \ 44 | # libcurl3 45 | 46 | #WORKDIR /opt 47 | #RUN curl -O https://storage.z-wave.me/z-way-server/z-way-server-RaspberryPiXTools-v2.3.8.tgz && \ 48 | # tar -zxvf 
z-way-server-RaspberryPiXTools-v2.3.8.tgz && \ 49 | # rm z-way-server-RaspberryPiXTools-v2.3.8.tgz && \ 50 | # mkdir -p /etc/z-way/ && \ 51 | # echo "v2.3.8" > /etc/z-way/VERSION && echo "razberry" > /etc/z-way/box_type 52 | 53 | #ENV LD_LIBRARY_PATH $LD_LIBRARY_PATH:/opt/z-way-server/libs 54 | # follow "package config replace" path 55 | RUN TMP_DIR=`mktemp -d` && \ 56 | pushd $TMP_DIR && \ 57 | apt-get download z-way-server && \ 58 | dpkg-deb --fsys-tarfile ./z-way-server*.deb | tar -C / -x ./opt/z-way-server/config/Defaults.xml ./opt/z-way-server/config.xml ./etc/z-way/box_type ./etc/logrotate.d/z-way-server ./etc/init.d/z-way-server && \ 59 | apt-get download webif && \ 60 | dpkg-deb --fsys-tarfile ./webif*.deb | tar -C / -x ./etc/webif.conf ./etc/mongoose/mongoose.conf && \ 61 | # no config files in zbw package 62 | # apt-get download zbw 63 | # dpkg-deb --fsys-tarfile ./zbw*.deb | sudo tar -C / -x ... 64 | popd && \ 65 | rm -R $TMP_DIR 66 | 67 | # Copy root filesystem 68 | COPY rootfs / 69 | 70 | WORKDIR /opt/z-way-server 71 | 72 | # Build arugments 73 | ARG BUILD_DATE 74 | ARG BUILD_REF 75 | ARG BUILD_VERSION 76 | 77 | # Labels 78 | LABEL \ 79 | io.hass.name="Addon Raspi base for ${BUILD_ARCH}" \ 80 | io.hass.description="JPD Hass.io Add-ons: ${BUILD_ARCH} z-way-server image" \ 81 | io.hass.arch="${BUILD_ARCH}" \ 82 | io.hass.type="addon" \ 83 | io.hass.version=${BUILD_VERSION} \ 84 | maintainer="John Dowling " \ 85 | org.label-schema.description="JPD Hass.io Add-ons: ${BUILD_ARCH} Z-Way Server" \ 86 | org.label-schema.build-date=${BUILD_DATE} \ 87 | org.label-schema.name="Addon Z-Way-Server for ${BUILD_ARCH}" \ 88 | org.label-schema.schema-version="1.0" \ 89 | org.label-schema.url="https://addons.community" \ 90 | org.label-schema.usage="https://github.com/johnpdowling/hassio-addons/z-way-server/blob/master/README.md" \ 91 | org.label-schema.vcs-ref=${REF} \ 92 | org.label-schema.vcs-url="https://github.com/johnpdowling/hassio-addons/z-way-server" \ 93 | org.label-schema.vendor="John Dowling" 94 | ############################################## 95 | -------------------------------------------------------------------------------- /kegbot/rootfs/usr/lib/bashio/kegbot.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: Kegbot Server 4 | # Set up kegbot environment 5 | # ============================================================================== 6 | set -e 7 | CONFIG_PATH=/data/options.json 8 | 9 | die() { 10 | bashio::log.info "Error: $@" 11 | exit 1 12 | } 13 | 14 | setup_env() { 15 | #pull required options 16 | export KEGBOT_DB_NAME=$(jq --raw-output '.db_name' $CONFIG_PATH) 17 | export KEGBOT_DB_HOST=$(jq --raw-output '.db_host' $CONFIG_PATH) 18 | export KEGBOT_DB_PORT=$(jq --raw-output '.db_port' $CONFIG_PATH) 19 | export KEGBOT_DB_USER=$(jq --raw-output '.db_user' $CONFIG_PATH) 20 | export KEGBOT_DB_PASS=$(jq --raw-output '.db_pass' $CONFIG_PATH) 21 | export KEGBOT_DEBUG=$(jq --raw-output '.debug' $CONFIG_PATH) 22 | #pull optional options 23 | export KEGBOT_EMAIL_FROM=$(jq --raw-output '.email_from' $CONFIG_PATH) 24 | export KEGBOT_EMAIL_HOST=$(jq --raw-output '.email_host' $CONFIG_PATH) 25 | export KEGBOT_EMAIL_PORT=$(jq --raw-output '.email_port' $CONFIG_PATH) 26 | export KEGBOT_EMAIL_USER=$(jq --raw-output '.email_user' $CONFIG_PATH) 27 | export KEGBOT_EMAIL_PASSWORD=$(jq --raw-output '.email_password' 
$CONFIG_PATH) 28 | export KEGBOT_EMAIL_USE_SSL=$(jq --raw-output '.email_use_ssl' $CONFIG_PATH) 29 | export KEGBOT_EMAIL_USE_TLS=$(jq --raw-output '.email_use_tls' $CONFIG_PATH) 30 | 31 | # Set defaults to required if missing 32 | if [ -z "${KEGBOT_DB_NAME}" ]; then 33 | export KEGBOT_DB_NAME="kegbot" 34 | fi 35 | if [ -z "${KEGBOT_DB_USER}" ]; then 36 | export KEGBOT_DB_USER="root" 37 | fi 38 | if [ -z "${KEGBOT_DB_PASS}" ]; then 39 | export KEGBOT_DB_PASS="" 40 | fi 41 | if [ -z "${KEGBOT_DB_PORT}" ]; then 42 | export KEGBOT_DB_PORT=3306 43 | fi 44 | 45 | # Remove optionals if missing 46 | if [[ -z "${KEGBOT_EMAIL_FROM}" || "${KEGBOT_EMAIL_FROM}" == "null" ]]; then 47 | export -n KEGBOT_EMAIL_FROM 48 | fi 49 | if [[ -z "${KEGBOT_EMAIL_HOST}" || "${KEGBOT_EMAIL_HOST}" == "null" ]]; then 50 | export -n KEGBOT_EMAIL_HOST 51 | fi 52 | if [[ ! -z "${KEGBOT_EMAIL_PORT}" && "${KEGBOT_EMAIL_PORT}" == "null" ]]; then 53 | export -n KEGBOT_EMAIL_PORT 54 | fi 55 | if [[ ! -z "${KEGBOT_EMAIL_USER}" && "${KEGBOT_EMAIL_USER}" == "null" ]]; then 56 | export -n KEGBOT_EMAIL_USER 57 | fi 58 | if [[ ! -z "${KEGBOT_EMAIL_PASSWORD}" && "${KEGBOT_EMAIL_PASSWORD}" == "null" ]]; then 59 | export -n KEGBOT_EMAIL_PASSWORD 60 | fi 61 | if [[ ! -z "${KEGBOT_EMAIL_USE_SSL}" && "${KEGBOT_EMAIL_USE_SSL}" == "null" ]]; then 62 | export -n KEGBOT_EMAIL_USE_SSL 63 | fi 64 | if [[ ! -z "${KEGBOT_EMAIL_USE_TLS}" && "${KEGBOT_EMAIL_USE_TLS}" == "null" ]]; then 65 | export -n KEGBOT_EMAIL_USE_TLS 66 | fi 67 | 68 | export KEGBOT_DATABASE_URL="mysql://${KEGBOT_DB_USER}:${KEGBOT_DB_PASS}@${KEGBOT_DB_HOST}:${KEGBOT_DB_PORT}/${KEGBOT_DB_NAME}" 69 | 70 | # other sets 71 | if [ -z "${KEGBOT_REDIS_PORT}" ]; then 72 | export KEGBOT_REDIS_HOST=localhost 73 | export KEGBOT_REDIS_PORT=6379 74 | fi 75 | 76 | if [ -z "${KEGBOT_SETTINGS_DIR}" ]; then 77 | export KEGBOT_SETTINGS_DIR=/config/kegbot/ 78 | fi 79 | 80 | if [ -z "${KEGBOT_MEDIA_ROOT}" ]; then 81 | export KEGBOT_MEDIA_ROOT=/config/kegbot/media/ 82 | fi 83 | 84 | # if [ -z "${KEGBOT_DATA_DIR}" ]; then 85 | # export KEGBOT_DATA_DIR=/config/kegbot/kegbot-data/ 86 | # fi 87 | 88 | # Verify mandatory variables. 89 | if [ -z "${KEGBOT_DB_HOST}" ]; then 90 | die "Must set KEGBOT_DB_HOST or MYSQL_PORT_3306_TCP_{ADDR,PORT}" 91 | fi 92 | if [ -z "${KEGBOT_REDIS_HOST}" ]; then 93 | die "Must set KEGBOT_REDIS_HOST or REDIS_PORT_6379_TCP_{ADDR,PORT}" 94 | fi 95 | 96 | export C_FORCE_ROOT=True ## needed by celery 97 | 98 | # env 99 | } 100 | 101 | #bashio::log.info "Setting up environment..." 
102 | setup_env 103 | -------------------------------------------------------------------------------- /openwrt/Dockerfile.old: -------------------------------------------------------------------------------- 1 | ARG BUILD_FROM 2 | FROM $BUILD_FROM 3 | 4 | ARG BUILD_ARCH 5 | 6 | # Environment variables 7 | ENV \ 8 | DEBIAN_FRONTEND="noninteractive" \ 9 | HOME="/root" \ 10 | LANG="C.UTF-8" \ 11 | PS1="$(whoami)@$(hostname):$(pwd)$ " \ 12 | S6_BEHAVIOUR_IF_STAGE2_FAILS=2 \ 13 | S6_CMD_WAIT_FOR_SERVICES=1 \ 14 | TERM="xterm-256color" 15 | 16 | # Copy root filesystem 17 | COPY rootfs / 18 | 19 | # Set shell 20 | SHELL ["/bin/ash", "-o", "pipefail", "-c"] 21 | 22 | RUN mkdir /var/lock 23 | 24 | # Install HA system 25 | RUN \ 26 | opkg update \ 27 | && opkg install \ 28 | ca-bundle \ 29 | curl \ 30 | jq 31 | # tzdata 32 | 33 | RUN if [ "${BUILD_ARCH}" = "i386" ]; then curl -o /tmp/openwrt.tar.gz http://downloads.openwrt.org/releases/19.07.1/targets/x86/generic/openwrt-19.07.1-x86-generic-generic-rootfs.tar.gz; fi 34 | RUN if [ "${BUILD_ARCH}" = "amd64" ]; then curl -o /tmp/openwrt.tar.gz http://downloads.openwrt.org/releases/19.07.1/targets/x86/64/openwrt-19.07.1-x86-64-generic-rootfs.tar.gz; fi 35 | RUN if [ "${BUILD_ARCH}" = "arm7" ]; then curl -o /tmp/openwrt.tar.gz http://downloads.openwrt.org/releases/19.07.1/targets/armvirt/32/openwrt-19.07.1-armvirt-32-default-rootfs.tar.gz; fi 36 | RUN if [ "${BUILD_ARCH}" = "aarch64" ]; then curl -o /tmp/openwrt.tar.gz http://downloads.openwrt.org/releases/19.07.1/targets/armvirt/64/openwrt-19.07.1-armvirt-64-default-rootfs.tar.gz; fi 37 | RUN mkdir /tmp/openwrt && tar -zxvf /tmp/openwrt.tar.gz -C /tmp/openwrt && rm /tmp/openwrt/etc/hosts && rm /tmp/openwrt/etc/resolv.conf 38 | #RUN cp -fr /tmp/openwrt/* / 39 | 40 | RUN if [ "${BUILD_ARCH}" = "i386" ]; then curl -o /bin/yq https://github.com/mikefarah/yq/releases/download/2.4.0/yq_linux_386; fi 41 | RUN if [ "${BUILD_ARCH}" = "amd64" ]; then curl -o /bin/yq https://github.com/mikefarah/yq/releases/download/2.4.0/yq_linux_amd64; fi 42 | RUN if [ "${BUILD_ARCH}" = "arm7" ]; then curl -o /bin/yq https://github.com/mikefarah/yq/releases/download/2.4.0/yq_linux_arm; fi 43 | RUN if [ "${BUILD_ARCH}" = "aarch64" ]; then curl -o /bin/yq https://github.com/mikefarah/yq/releases/download/2.4.0/yq_linux_arm64; fi 44 | 45 | RUN S6_ARCH="${BUILD_ARCH}" \ 46 | && if [ "${BUILD_ARCH}" = "i386" ]; then S6_ARCH="x86"; fi \ 47 | && if [ "${BUILD_ARCH}" = "armv7" ]; then S6_ARCH="arm"; fi \ 48 | \ 49 | # && curl -L -s "https://github.com/just-containers/s6-overlay/releases/download/v1.22.1.0/s6-overlay-${S6_ARCH}.tar.gz" \ 50 | # | tar zxvf - -C / \ 51 | # \ 52 | # && mkdir -p /etc/fix-attrs.d \ 53 | # && mkdir -p /etc/services.d \ 54 | # \ 55 | && curl -J -L -o /tmp/bashio.tar.gz "https://github.com/hassio-addons/bashio/archive/v0.4.1.tar.gz" \ 56 | && mkdir /tmp/bashio \ 57 | && tar -zxvf /tmp/bashio.tar.gz -C /tmp/bashio \ 58 | \ 59 | && mv /tmp/bashio/bashio-0.4.1/lib /usr/lib/bashio \ 60 | && ln -s /usr/lib/bashio/bashio /usr/bin/bashio \ 61 | \ 62 | && rm -fr /tmp/bashio* 63 | # /var/{cache,log}/* \ 64 | # /var/lib/apt/lists/* 65 | 66 | # Build arugments 67 | ARG BUILD_DATE 68 | ARG BUILD_REF 69 | ARG BUILD_VERSION 70 | 71 | # Labels 72 | LABEL \ 73 | io.hass.name="Addon OpenWRT for ${BUILD_ARCH}" \ 74 | io.hass.description="JPD Hass.io Add-ons: ${BUILD_ARCH} OpenWRT image" \ 75 | io.hass.arch="${BUILD_ARCH}" \ 76 | io.hass.type="base" \ 77 | io.hass.version=${BUILD_VERSION} \ 78 | maintainer="John Dowling " \ 79 | 
org.label-schema.description="JPD Hass.io Add-ons: ${BUILD_ARCH} OpenWRT image" \ 80 | org.label-schema.build-date=${BUILD_DATE} \ 81 | org.label-schema.name="Addon OpenWRT for ${BUILD_ARCH}" \ 82 | org.label-schema.schema-version="1.0" \ 83 | org.label-schema.url="https://addons.community" \ 84 | org.label-schema.usage="https://github.com/johnpdowling/hassio-addons/tree/master/openwrt/README.md" \ 85 | org.label-schema.vcs-ref=${REF} \ 86 | org.label-schema.vcs-url="https://github.com/johnpdowling/hassio-addons/openwrt" \ 87 | org.label-schema.vendor="John Dowling" 88 | -------------------------------------------------------------------------------- /waterguru-api/waterguru_flask.py: -------------------------------------------------------------------------------- 1 | #!/usr/local/bin/python 2 | # 3 | # 8/2021 - https://github.com/bdwilson/waterguru-api 4 | # 5 | # Please do not abuse the WaterGuru API - this should not be run more than 6 | # once or twice a day. It is not intended to be run more often as it does 7 | # not properly implement a token refresh option (hint hint, please add) 8 | # 9 | # sudo apt-get install python3 python3-pip 10 | # sudo pip3 install flask requests_aws4auth boto3 warrant.aws_srp warrant 11 | # 12 | # Set your email, password and port to run this service on. 13 | # 14 | # Usage: /api/wg returns dashboard info for all WaterGuru devices. 15 | # 16 | # There is little error checking and no security here. 17 | # 18 | from flask import Flask, render_template, flash, request, jsonify, Response 19 | import os 20 | import logging 21 | from warrant import Cognito 22 | import boto3 23 | from warrant.aws_srp import AWSSRP 24 | import requests 25 | import logging 26 | from requests_aws4auth import AWS4Auth 27 | 28 | # App config. 29 | DEBUG = False 30 | app = Flask(__name__) 31 | app.config.from_object(__name__) 32 | app.config['SECRET_KEY'] = '32624076087108375603827608353' 33 | 34 | config = { 35 | "port": "WG_PORT", # port for your service to run on 36 | "user": "WG_USER", 37 | "pass": "WG_PASS" 38 | } 39 | 40 | def doWg(): 41 | region_name = "us-west-2" 42 | pool_id = "us-west-2_icsnuWQWw" 43 | identity_pool_id = "us-west-2:691e3287-5776-40f2-a502-759de65a8f1c" 44 | client_id = "7pk5du7fitqb419oabb3r92lni" 45 | idp_pool = "cognito-idp.us-west-2.amazonaws.com/" + pool_id 46 | 47 | boto3.setup_default_session(region_name = region_name) 48 | client = boto3.client('cognito-idp', region_name=region_name) 49 | # REFRESH_TOKEN_AUTH flow doesn't exist yet in warrant lib https://github.com/capless/warrant/issues/33 50 | # would love it if someone could figure out proper refresh. 
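# (Hedged sketch, an untested assumption rather than part of the original
# script: boto3's cognito-idp client exposes a refresh flow that could renew
# tokens without re-running SRP, roughly
#   client.initiate_auth(ClientId=client_id, AuthFlow='REFRESH_TOKEN_AUTH',
#                        AuthParameters={'REFRESH_TOKEN': refresh_token})
# shown here only to illustrate the missing piece mentioned above.)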
51 | aws = AWSSRP(username=config['user'], password=config['pass'], pool_id=pool_id, client_id=client_id, client=client) 52 | tokens = aws.authenticate_user() 53 | 54 | id_token = tokens['AuthenticationResult']['IdToken'] 55 | refresh_token = tokens['AuthenticationResult']['RefreshToken'] 56 | access_token = tokens['AuthenticationResult']['AccessToken'] 57 | token_type = tokens['AuthenticationResult']['TokenType'] 58 | 59 | u=Cognito(pool_id,client_id,id_token=id_token,refresh_token=refresh_token,access_token=access_token) 60 | user = u.get_user() 61 | userId = user._metadata['username'] 62 | 63 | boto3.setup_default_session(region_name = region_name) 64 | identity_client = boto3.client('cognito-identity', region_name=region_name) 65 | identity_response = identity_client.get_id(IdentityPoolId=identity_pool_id) 66 | identity_id = identity_response['IdentityId'] 67 | 68 | credentials_response = identity_client.get_credentials_for_identity(IdentityId=identity_id,Logins={idp_pool:id_token}) 69 | credentials = credentials_response['Credentials'] 70 | access_key_id = credentials['AccessKeyId'] 71 | secret_key = credentials['SecretKey'] 72 | service = 'lambda' 73 | session_token = credentials['SessionToken'] 74 | expiration = credentials['Expiration'] 75 | 76 | method = 'POST' 77 | headers = {'User-Agent': 'aws-sdk-iOS/2.24.3 iOS/14.7.1 en_US invoker', 'Content-Type': 'application/x-amz-json-1.0'} 78 | body = {"userId":userId, "clientType":"WEB_APP", "clientVersion":"0.2.3"} 79 | service = 'lambda' 80 | url = 'https://lambda.us-west-2.amazonaws.com/2015-03-31/functions/prod-getDashboardView/invocations' 81 | region = 'us-west-2' 82 | 83 | auth = AWS4Auth(access_key_id, secret_key, region, service, session_token=session_token) 84 | response = requests.request(method, url, auth=auth, json=body, headers=headers) 85 | return(response.text) 86 | 87 | @app.route("/", methods=['GET']) 88 | def info(): 89 | return("Try: /api/wg") 90 | 91 | @app.route("/api/wg", methods=['GET']) 92 | def api(): 93 | val = doWg() 94 | if val: 95 | return Response(val, mimetype='application/json') 96 | 97 | if __name__ == "__main__": 98 | app.run(host='0.0.0.0', port=config['port'], debug=False) 99 | 100 | -------------------------------------------------------------------------------- /sendmail/rootfs/etc/mail/sendmail.mc: -------------------------------------------------------------------------------- 1 | divert(-1)dnl 2 | #----------------------------------------------------------------------------- 3 | # $Sendmail: debproto.mc,v 8.15.2 2018-01-13 23:43:05 cowboy Exp $ 4 | # 5 | # Copyright (c) 1998-2010 Richard Nelson. All Rights Reserved. 6 | # 7 | # cf/debian/sendmail.mc. Generated from sendmail.mc.in by configure. 8 | # 9 | # sendmail.mc prototype config file for building Sendmail 8.15.2 10 | # 11 | # Note: the .in file supports 8.7.6 - 9.0.0, but the generated 12 | # file is customized to the version noted above. 13 | # 14 | # This file is used to configure Sendmail for use with Debian systems. 15 | # 16 | # If you modify this file, you will have to regenerate /etc/mail/sendmail.cf 17 | # by running this file through the m4 preprocessor via one of the following: 18 | # * make (or make -C /etc/mail) 19 | # * sendmailconfig 20 | # * m4 /etc/mail/sendmail.mc > /etc/mail/sendmail.cf 21 | # The first two options are preferred as they will also update other files 22 | # that depend upon the contents of this file. 
23 | #
24 | # The best documentation for this .mc file is:
25 | # /usr/share/doc/sendmail-doc/cf.README.gz
26 | #
27 | #-----------------------------------------------------------------------------
28 | divert(0)dnl
29 | #
30 | # Copyright (c) 1998-2005 Richard Nelson. All Rights Reserved.
31 | #
32 | # This file is used to configure Sendmail for use with Debian systems.
33 | #
34 | define(`_USE_ETC_MAIL_')dnl
35 | include(`/usr/share/sendmail/cf/m4/cf.m4')dnl
36 | include(`/etc/mail/tls/starttls.m4')dnl
37 | VERSIONID(`$Id: sendmail.mc, v 8.15.2-10 2018-01-13 23:43:05 cowboy Exp $')
38 | OSTYPE(`debian')dnl
39 | DOMAIN(`debian-mta')dnl
40 | dnl # Items controlled by /etc/mail/sendmail.conf - DO NOT TOUCH HERE
41 | undefine(`confHOST_STATUS_DIRECTORY')dnl #DAEMON_HOSTSTATS=
42 | dnl # Items controlled by /etc/mail/sendmail.conf - DO NOT TOUCH HERE
43 | dnl #
44 | dnl # General defines
45 | dnl #
46 | dnl # SAFE_FILE_ENV: [undefined] If set, sendmail will do a chroot()
47 | dnl # into this directory before writing files.
48 | dnl # If *all* your user accounts are under /home then use that
49 | dnl # instead - it will prevent any writes outside of /home !
50 | dnl # define(`confSAFE_FILE_ENV', `')dnl
51 | dnl #
52 | dnl # Daemon options - restrict to servicing LOCALHOST ONLY !!!
53 | dnl # Remove `, Addr=' clauses to receive from any interface
54 | dnl # If you want to support IPv6, switch the commented/uncommented lines
55 | dnl #
56 | FEATURE(`no_default_msa')dnl
57 | dnl DAEMON_OPTIONS(`Family=inet6, Name=MTA-v6, Port=smtp, Addr=::1')dnl
58 | DAEMON_OPTIONS(`Family=inet, Name=MTA-v4, Port=smtp, Addr=127.0.0.1')dnl
59 | dnl DAEMON_OPTIONS(`Family=inet6, Name=MSP-v6, Port=submission, M=Ea, Addr=::1')dnl
60 | DAEMON_OPTIONS(`Family=inet, Name=MSP-v4, Port=submission, M=Ea, Addr=127.0.0.1')dnl
61 | dnl #
62 | dnl # Be somewhat anal in what we allow
63 | define(`confPRIVACY_FLAGS',dnl
64 | `needmailhelo,needexpnhelo,needvrfyhelo,restrictqrun,restrictexpand,nobodyreturn,authwarnings')dnl
65 | dnl #
66 | dnl # Define connection throttling and window length
67 | define(`confCONNECTION_RATE_THROTTLE', `15')dnl
68 | define(`confCONNECTION_RATE_WINDOW_SIZE',`10m')dnl
69 | dnl #
70 | dnl # Features
71 | dnl #
72 | dnl # use /etc/mail/local-host-names
73 | FEATURE(`use_cw_file')dnl
74 | dnl #
75 | dnl # The access db is the basis for most of sendmail's checking
76 | FEATURE(`access_db', , `skip')dnl
77 | dnl #
78 | dnl # The greet_pause feature stops some automail bots - but check the
79 | dnl # provided access db for details on excluding localhosts...
80 | FEATURE(`greet_pause', `1000')dnl 1 second
81 | dnl #
82 | dnl # Delay_checks allows sender<->recipient checking
83 | FEATURE(`delay_checks', `friend', `n')dnl
84 | dnl #
85 | dnl # If we get too many bad recipients, slow things down...
86 | define(`confBAD_RCPT_THROTTLE',`3')dnl
87 | dnl #
88 | dnl # Stop connections that overflow our concurrent and time connection rates
89 | FEATURE(`conncontrol', `nodelay', `terminate')dnl
90 | FEATURE(`ratecontrol', `nodelay', `terminate')dnl
91 | dnl #
92 | dnl # If you're on a dialup link, you should enable this - so sendmail
93 | dnl # will not bring up the link (it will queue mail for later)
94 | dnl define(`confCON_EXPENSIVE',`True')dnl
95 | dnl #
96 | dnl # Dialup/LAN connection overrides
97 | dnl #
98 | include(`/etc/mail/m4/dialup.m4')dnl
99 | include(`/etc/mail/m4/provider.m4')dnl
100 | dnl #
101 | dnl # Default Mailer setup
102 | MAILER_DEFINITIONS
103 | MAILER(`local')dnl
104 | MAILER(`smtp')dnl
105 |
-------------------------------------------------------------------------------- /kegbot/local_settings.py: --------------------------------------------------------------------------------
1 | # kegbot-docker local settings.
2 |
3 | # Inspired by: https://github.com/blalor/docker-kegbot-server
4 | # Safe to edit by hand. See http://kegbot.org/docs/server/ for more info.
5 |
6 | import os
7 |
8 | DEBUG = bool(os.environ.get("KEGBOT_DEBUG", ""))
9 | TEMPLATE_DEBUG = DEBUG
10 |
11 | KEGBOT_ROOT = os.environ.get("KEGBOT_ROOT", "/kegbot-data")
12 | # NOTE: only the second LOGGING definition below takes effect; this first file-based config is overridden before use.
13 | LOGGING = {
14 |     "version": 1,
15 |     "disable_existing_loggers": False,
16 |     "handlers": {
17 |         "file": {
18 |             "level": "DEBUG",
19 |             "class": "logging.FileHandler",
20 |             "filename": os.path.join(KEGBOT_ROOT, "kegbot.log"),
21 |         },
22 |     },
23 |     "loggers": {
24 |         "root": {
25 |             "handlers": ["file"],
26 |             "level": "DEBUG",
27 |             "propagate": True,
28 |         },
29 |     },
30 | }
31 |
32 | LOGGING = {
33 |     'version': 1,
34 |     'disable_existing_loggers': True,
35 |     'filters': {
36 |         'require_debug_true': {
37 |             '()': 'django.utils.log.RequireDebugTrue',
38 |         },
39 |     },
40 |     'handlers': {
41 |         'console': {
42 |             'level': 'DEBUG',
43 |             'filters': ['require_debug_true'],
44 |             'class': 'logging.StreamHandler',
45 |             'formatter': 'verbose',
46 |         },
47 |         'null': {
48 |             'class': 'django.utils.log.NullHandler',
49 |         },
50 |         'redis': {
51 |             'level': 'INFO',
52 |             'class': 'django.utils.log.NullHandler',
53 |         },
54 |     },
55 |     'formatters': {
56 |         'verbose': {
57 |             'format': '%(asctime)s %(levelname)-8s (%(name)s) %(message)s'
58 |         },
59 |         'simple': {
60 |             'format': '%(levelname)s %(message)s'
61 |         },
62 |     },
63 |     'loggers': {
64 |         'raven': {
65 |             'level': 'DEBUG',
66 |             'handlers': ['console'],
67 |             'propagate': False,
68 |         },
69 |         'pykeg': {
70 |             'level': 'INFO',
71 |             'handlers': ['console', 'redis'],
72 |             'propagate': False,
73 |         },
74 |         '': {
75 |             'level': 'INFO',
76 |             'handlers': ['console'],
77 |             'formatter': 'verbose',
78 |         },
79 |     },
80 | }
81 |
82 | ### database settings
83 |
84 | _dbhost = os.environ["KEGBOT_DB_HOST"]
85 | _dbport = os.environ.get("KEGBOT_DB_PORT", 3306)
86 | _dbname = os.environ.get("KEGBOT_DB_NAME", "kegbot")
87 | _dbuser = os.environ.get("KEGBOT_DB_USER", "root")
88 | _dbpass = os.environ.get("KEGBOT_DB_PASS", None)
89 |
90 | DATABASES = {
91 |     "default": {
92 |         "ENGINE": "django.db.backends.mysql",
93 |         "OPTIONS": {
94 |             "init_command": "SET default_storage_engine=INNODB"
95 |         },
96 |
97 |         "NAME": _dbname,
98 |         "HOST": _dbhost,
99 |         "PORT": _dbport,
100 |         "USER": _dbuser,
101 |         "PASSWORD": _dbpass,
102 |     }
103 | }
104 |
105 | del _dbname, _dbhost, _dbport, _dbuser, _dbpass
106 |
107 | _redishost = os.environ["KEGBOT_REDIS_HOST"]
108 | _redisport = os.environ.get("KEGBOT_REDIS_PORT", 6379)
109 |
110 | #### redis settings
111 |
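# Celery's broker and result backend share Redis DB 0, while the Django page
# cache uses DB 1 of the same instance, so flushing the cache does not drop
# queued tasks. KEGBOT_REDIS_HOST/KEGBOT_REDIS_PORT default to localhost:6379
# (run.sh exports those defaults when the variables are unset).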
112 | BROKER_URL = "redis://{}:{}/0".format(_redishost, _redisport)
113 | CELERY_RESULT_BACKEND = BROKER_URL
114 | CACHES = {
115 |     'default': {
116 |         'BACKEND': 'redis_cache.cache.RedisCache',
117 |         'LOCATION': '{}:{}:1'.format(_redishost, _redisport),
118 |         'OPTIONS': {
119 |             'CLIENT_CLASS': 'redis_cache.client.DefaultClient',
120 |         }
121 |     }
122 | }
123 |
124 | del _redishost, _redisport
125 |
126 | MEDIA_ROOT = os.environ.get("KEGBOT_MEDIA_ROOT", os.path.join(KEGBOT_ROOT, "media"))
127 | STATIC_ROOT = os.path.join(KEGBOT_ROOT, "static")
128 | MEDIA_URL = "/media/"
129 | STATIC_URL = "/static/"
130 |
131 | SECRET_KEY = os.environ.get("KEGBOT_SECRET_KEY", "change-me") # TODO: warning.
132 |
133 | if "KEGBOT_EMAIL_HOST" in os.environ:
134 |     # Tell Kegbot to use the SMTP e-mail backend.
135 |     EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
136 |
137 |     # "From" address for e-mails.
138 |     EMAIL_FROM_ADDRESS = os.environ["KEGBOT_EMAIL_FROM"]
139 |
140 |     EMAIL_HOST = os.environ["KEGBOT_EMAIL_HOST"]
141 |     EMAIL_PORT = int(os.environ.get("KEGBOT_EMAIL_PORT", 25))
142 |
143 |     # Credentials for SMTP server.
144 |     if "KEGBOT_EMAIL_USER" in os.environ:
145 |         EMAIL_HOST_USER = os.environ["KEGBOT_EMAIL_USER"]
146 |         EMAIL_HOST_PASSWORD = os.environ["KEGBOT_EMAIL_PASSWORD"]
147 |
148 |     EMAIL_USE_SSL = bool(os.environ.get("KEGBOT_EMAIL_USE_SSL", ""))
149 |     EMAIL_USE_TLS = bool(os.environ.get("KEGBOT_EMAIL_USE_TLS", ""))
-------------------------------------------------------------------------------- /forked-daapd/Dockerfile: --------------------------------------------------------------------------------
1 | ARG BUILD_FROM=hassioaddons/base:8.0.0
2 | # hadolint ignore=DL3006
3 | FROM ${BUILD_FROM}
4 |
5 | # Set shell
6 | SHELL ["/bin/bash", "-o", "pipefail", "-c"]
7 |
8 | #adapted from sretalla's/kevineye's dockerfiles
9 | # hadolint ignore=DL3003
10 | RUN apk update
11 | RUN apk add --no-cache \
12 |     libcrypto1.1=1.1.1i-r0 \
13 |     libssl1.1=1.1.1i-r0
14 | RUN apk add --no-cache \
15 |     musl \
16 |     musl-dev
17 |
18 | RUN apk add --no-cache --virtual deps1 \
19 |     alsa-lib-dev \
20 |     autoconf \
21 |     automake \
22 |     cmake \
23 |     avahi-dev \
24 |     bash \
25 |     bsd-compat-headers \
26 |     build-base \
27 |     confuse-dev \
28 |     curl \
29 |     curl-dev \
30 |     ffmpeg-dev \
31 |     file \
32 |     git \
33 |     gnutls-dev \
34 |     gperf \
35 |     json-c-dev \
36 |     libevent-dev \
37 |     libgcrypt-dev \
38 |     libplist-dev \
39 |     libsodium-dev \
40 |     libtool \
41 |     libunistring-dev \
42 |     openjdk7-jre-base \
43 |     protobuf-c-dev \
44 |     sqlite-dev
45 |
46 | RUN apk add --no-cache --virtual=deps2 --repository http://nl.alpinelinux.org/alpine/edge/testing --repository http://nl.alpinelinux.org/alpine/edge/community \
47 |     libantlr3c-dev \
48 |     mxml \
49 |     mxml-dev
50 |
51 | RUN apk add --no-cache \
52 |     avahi \
53 |     confuse \
54 |     dbus \
55 |     ffmpeg \
56 |     json-c \
57 |     libcurl \
58 |     libevent \
59 |     libgcrypt \
60 |     libplist \
61 |     libsodium \
62 |     libunistring \
63 |     libuv \
64 |     libuv-dev \
65 |     protobuf-c \
66 |     sqlite \
67 |     sqlite-libs \
68 |     openssl
69 |
70 | RUN apk add --no-cache --repository http://nl.alpinelinux.org/alpine/edge/testing \
71 |     libantlr3c \
72 |     mxml
73 |
74 | RUN curl -L -o /tmp/antlr-3.4-complete.jar http://www.antlr3.org/download/antlr-3.4-complete.jar && \
75 |     echo '#!/bin/bash' > /usr/local/bin/antlr3 && \
76 |     echo 'exec java -cp /tmp/antlr-3.4-complete.jar org.antlr.Tool "$@"' >> /usr/local/bin/antlr3 && \
77 |     chmod 775 /usr/local/bin/antlr3
78 |
79 | RUN cd /tmp && \
80 |     git clone --branch "v3.2.2" --depth=1 https://github.com/warmcat/libwebsockets.git && \
81 |     cd /tmp/libwebsockets && \
82 |     cmake ./ \
83 |         -DCMAKE_BUILD_TYPE=MinSizeRel \
84 |         -DCMAKE_INSTALL_PREFIX=/usr \
85 |         -DCMAKE_VERBOSE_MAKEFILE=TRUE \
86 |         -DLWS_IPV6=ON \
87 |         -DLWS_STATIC_PIC=ON \
88 |         -DLWS_UNIX_SOCK=OFF \
89 |         -DLWS_WITH_LIBUV=ON \
90 |         -DLWS_WITH_SHARED=ON \
91 |         -DLWS_WITHOUT_TESTAPPS=ON && \
92 |     make && \
93 |     make install
94 |
95 | RUN cd /tmp && \
96 |     git clone https://github.com/ejurgensen/forked-daapd.git && \
97 |     cd /tmp/forked-daapd && \
98 |     autoreconf -fi && \
99 |     ./configure \
100 |         --enable-itunes \
101 |         --with-libwebsockets && \
102 |     make && \
103 |     make install
104 |
105 | RUN apk del --purge deps1 deps2 && \
106 |     rm -rf /usr/local/bin/antlr3 /tmp/*
107 |
108 | RUN cd /usr/local/etc \
109 |     && sed -i -e 's/\(uid.*=.*\)/uid = "root"/g' forked-daapd.conf \
110 |     && sed -i s#"ipv6 = yes"#"ipv6 = no"#g forked-daapd.conf \
111 |     && sed -i s#/srv/music#/config/forked-daapd/music#g forked-daapd.conf \
112 |     && sed -i s#/usr/local/var/cache/forked-daapd/songs3.db#/config/forked-daapd/cache/songs3.db#g forked-daapd.conf \
113 |     && sed -i s#/usr/local/var/cache/forked-daapd/cache.db#/config/forked-daapd/cache/cache.db#g forked-daapd.conf \
114 |     && sed -i s#/usr/local/var/log/forked-daapd.log#/dev/stdout#g forked-daapd.conf \
115 |     && sed -i "/websocket_port\ =/ s/# *//" forked-daapd.conf \
116 |     && sed -i "/trusted_networks\ =/ s/# *//" forked-daapd.conf \
117 |     && sed -i "/pipe_autostart\ =/ s/# *//" forked-daapd.conf \
118 |     && sed -i "/db_path\ =/ s/# *//" forked-daapd.conf \
119 |     && sed -i "/cache_path\ =/ s/# *//" forked-daapd.conf
120 |
121 | # NOTE: /etc/localtime should be bind-mounted read-only at container run time; a VOLUME instruction cannot declare a read-only host mount.
122 |
123 | # Copy root filesystem
124 | COPY rootfs /
125 |
126 | # Build arguments
127 | ARG BUILD_ARCH
128 | ARG BUILD_DATE
129 | ARG BUILD_REF
130 | ARG BUILD_VERSION
131 |
132 | # Labels
133 | LABEL \
134 |     io.hass.name="Forked-daapd Server" \
135 |     io.hass.description="The forked-daapd server program" \
136 |     io.hass.arch="${BUILD_ARCH}" \
137 |     io.hass.type="addon" \
138 |     io.hass.version=${BUILD_VERSION} \
139 |     maintainer="John Dowling " \
140 |     org.label-schema.description="The forked-daapd server program" \
141 |     org.label-schema.build-date=${BUILD_DATE} \
142 |     org.label-schema.name="Forked-daapd Server" \
143 |     org.label-schema.schema-version="1.0" \
144 |     org.label-schema.url="" \
145 |     org.label-schema.usage="https://github.com/johnpdowling/hassio-addons/tree/master/forked-daapd/README.md" \
146 |     org.label-schema.vcs-ref=${BUILD_REF} \
147 |     org.label-schema.vcs-url="https://github.com/johnpdowling/hassio-addons/forked-daapd" \
148 |     org.label-schema.vendor="JPD Hass.io Add-ons"
149 |
-------------------------------------------------------------------------------- /kegbot/run.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash -ex
2 | set -e
3 | CONFIG_PATH=/data/options.json
4 | # Run script for kegbot appserver.
5 |
6 | die() {
7 |     echo "Error: $@"
8 |     exit 1
9 | }
10 |
11 | do_mysql() {
12 |     mysql -B -u "${KEGBOT_DB_USER}" --password="${KEGBOT_DB_PASS}" -h "${KEGBOT_DB_HOST}" -P ${KEGBOT_DB_PORT} "${@}"
13 |     return $?
14 | } 15 | 16 | ### Main routines 17 | 18 | setup_env() { 19 | #pull required options 20 | export KEGBOT_DB_NAME=$(jq --raw-output '.db_name' $CONFIG_PATH) 21 | export KEGBOT_DB_HOST=$(jq --raw-output '.db_host' $CONFIG_PATH) 22 | export KEGBOT_DB_PORT=$(jq --raw-output '.db_port' $CONFIG_PATH) 23 | export KEGBOT_DB_USER=$(jq --raw-output '.db_user' $CONFIG_PATH) 24 | export KEGBOT_DB_PASS=$(jq --raw-output '.db_pass' $CONFIG_PATH) 25 | export KEGBOT_DEBUG=$(jq --raw-output '.debug' $CONFIG_PATH) 26 | #pull optional options 27 | export KEGBOT_EMAIL_FROM=$(jq --raw-output '.email_from' $CONFIG_PATH) 28 | export KEGBOT_EMAIL_HOST=$(jq --raw-output '.email_host' $CONFIG_PATH) 29 | export KEGBOT_EMAIL_PORT=$(jq --raw-output '.email_port' $CONFIG_PATH) 30 | export KEGBOT_EMAIL_USER=$(jq --raw-output '.email_user' $CONFIG_PATH) 31 | export KEGBOT_EMAIL_PASSWORD=$(jq --raw-output '.email_password' $CONFIG_PATH) 32 | export KEGBOT_EMAIL_USE_SSL=$(jq --raw-output '.email_use_ssl' $CONFIG_PATH) 33 | export KEGBOT_EMAIL_USE_TLS=$(jq --raw-output '.email_use_tls' $CONFIG_PATH) 34 | 35 | # Set defaults to required if missing 36 | if [ -z "${KEGBOT_DB_NAME}" ]; then 37 | export KEGBOT_DB_NAME="kegbot" 38 | fi 39 | if [ -z "${KEGBOT_DB_USER}" ]; then 40 | export KEGBOT_DB_USER="root" 41 | fi 42 | if [ -z "${KEGBOT_DB_PASS}" ]; then 43 | export KEGBOT_DB_PASS="" 44 | fi 45 | if [ -z "${KEGBOT_DB_PORT}" ]; then 46 | export KEGBOT_DB_PORT=3306 47 | fi 48 | 49 | # Remove optionals if missing 50 | if [[ -z "${KEGBOT_EMAIL_FROM}" || "${KEGBOT_EMAIL_FROM}" == "null" ]]; then 51 | export -n KEGBOT_EMAIL_FROM 52 | fi 53 | if [[ -z "${KEGBOT_EMAIL_HOST}" || "${KEGBOT_EMAIL_HOST}" == "null" ]]; then 54 | export -n KEGBOT_EMAIL_HOST 55 | fi 56 | if [[ ! -z "${KEGBOT_EMAIL_PORT}" && "${KEGBOT_EMAIL_PORT}" == "null" ]]; then 57 | export -n KEGBOT_EMAIL_PORT 58 | fi 59 | if [[ ! -z "${KEGBOT_EMAIL_USER}" && "${KEGBOT_EMAIL_USER}" == "null" ]]; then 60 | export -n KEGBOT_EMAIL_USER 61 | fi 62 | if [[ ! -z "${KEGBOT_EMAIL_PASSWORD}" && "${KEGBOT_EMAIL_PASSWORD}" == "null" ]]; then 63 | export -n KEGBOT_EMAIL_PASSWORD 64 | fi 65 | if [[ ! -z "${KEGBOT_EMAIL_USE_SSL}" && "${KEGBOT_EMAIL_USE_SSL}" == "null" ]]; then 66 | export -n KEGBOT_EMAIL_USE_SSL 67 | fi 68 | if [[ ! -z "${KEGBOT_EMAIL_USE_TLS}" && "${KEGBOT_EMAIL_USE_TLS}" == "null" ]]; then 69 | export -n KEGBOT_EMAIL_USE_TLS 70 | fi 71 | 72 | # other sets 73 | if [ -z "${KEGBOT_REDIS_PORT}" ]; then 74 | export KEGBOT_REDIS_HOST=localhost 75 | export KEGBOT_REDIS_PORT=6379 76 | fi 77 | 78 | if [ -z "${KEGBOT_SETTINGS_DIR}" ]; then 79 | export KEGBOT_SETTINGS_DIR=/config/kegbot/ 80 | fi 81 | 82 | if [ -z "${KEGBOT_MEDIA_ROOT}" ]; then 83 | export KEGBOT_MEDIA_ROOT=/config/kegbot/media/ 84 | fi 85 | 86 | # if [ -z "${KEGBOT_DATA_DIR}" ]; then 87 | # export KEGBOT_DATA_DIR=/config/kegbot/kegbot-data/ 88 | # fi 89 | 90 | # Verify mandatory variables. 91 | if [ -z "${KEGBOT_DB_HOST}" ]; then 92 | die "Must set KEGBOT_DB_HOST or MYSQL_PORT_3306_TCP_{ADDR,PORT}" 93 | fi 94 | if [ -z "${KEGBOT_REDIS_HOST}" ]; then 95 | die "Must set KEGBOT_REDIS_HOST or REDIS_PORT_6379_TCP_{ADDR,PORT}" 96 | fi 97 | 98 | export C_FORCE_ROOT=True ## needed by celery 99 | 100 | env 101 | } 102 | 103 | wait_for_mysql() { 104 | nc -z $KEGBOT_DB_HOST $KEGBOT_DB_PORT || sleep 30 105 | if ! 
do_mysql "${KEGBOT_DB_NAME}" -e "show tables"; then
106 |         #no db
107 |         do_mysql -e "create database ${KEGBOT_DB_NAME};"
108 |     fi
109 |     if [ -z "$(do_mysql "${KEGBOT_DB_NAME}" -e "show tables")" ]; then
110 |         #db empty
111 |         kegbot migrate --noinput -v 0
112 |         do_mysql "${KEGBOT_DB_NAME}" -e "show tables"
113 |     else
114 |         echo "Tables already present; skipping migration"
115 |     fi
116 | }
117 |
118 | kick_off_nginx() {
119 |     nginx -g 'daemon on;'
120 | }
121 |
122 | kick_off_redis() {
123 |     redis-server --daemonize yes
124 | }
125 |
126 | wait_for_redis() {
127 |     redis-cli -h "${KEGBOT_REDIS_HOST}" -p ${KEGBOT_REDIS_PORT} ping
128 | }
129 |
130 | # Perform first-launch setup.
131 | maybe_setup_kegbot() {
132 |     if [ ! -d "${KEGBOT_SETTINGS_DIR}" ]; then
133 |         mkdir -p ${KEGBOT_SETTINGS_DIR}/media
134 |         mv /kegbot-data/local_settings.py ${KEGBOT_SETTINGS_DIR}
135 |     fi
136 |
137 |     kegbot collectstatic --noinput -v 0
138 |     #do_mysql -e "create database ${KEGBOT_DB_NAME};" || die "Could not create database."
139 |     true
140 | }
141 |
142 | run_daemons() {
143 |     kegbot run_all --logs_dir=/kegbot-data --gunicorn_options="-w 3 -b 127.0.0.1:8000"
144 | }
145 |
146 | setup() {
147 |     python /app/setup.py install
148 | }
149 |
150 | run_all() {
151 |     setup_env
152 |     kick_off_nginx
153 |     kick_off_redis
154 |
155 |     wait_for_mysql
156 |     wait_for_redis
157 |
158 |     maybe_setup_kegbot
159 |     ls -ld /kegbot-data
160 |     ls -l /kegbot-data
161 |     echo `date` >> /kegbot-data/runlog
162 |     run_daemons
163 | }
164 |
165 | run_all
166 |
-------------------------------------------------------------------------------- /keras-rest/rootfs/app/modelserver/main.py: --------------------------------------------------------------------------------
1 | """
2 | Model server script that polls Redis for images to classify
3 |
4 | Adapted from https://www.pyimagesearch.com/2018/02/05/deep-learning-production-keras-redis-flask-apache/
5 | """
6 | import base64
7 | import json
8 | import os
9 | import sys
10 | import time
11 |
12 | import tensorflow as tf
13 | from keras.applications import imagenet_utils
14 | import numpy as np
15 | import redis
16 |
17 | models_folder = "/config/keras-rest/models"
18 |
19 | # Connect to Redis server
20 | db = redis.StrictRedis(host=os.environ.get("REDIS_HOST"))
21 |
22 | def load_graph(frozen_graph_filename):
23 |     # We load the protobuf file from the disk and parse it to retrieve the
24 |     # unserialized graph_def
25 |     with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
26 |         graph_def = tf.GraphDef()
27 |         graph_def.ParseFromString(f.read())
28 |
29 |     # Then, we can again use a convenient built-in function to import the graph_def into the
30 |     # current default Graph
31 |     with tf.Graph().as_default() as graph:
32 |         tf.import_graph_def(
33 |             graph_def,
34 |             input_map=None,
35 |             return_elements=None,
36 |             name="prefix",
37 |             op_dict=None,
38 |             producer_op_list=None
39 |         )
40 |
41 |     input_name = graph.get_operations()[0].name+':0'
42 |     output_name = graph.get_operations()[-1].name+':0'
43 |
44 |     return graph, input_name, output_name
45 |
46 | def model_predict(model_path, input_data):
47 |     # load tf graph
48 |     tf_model,tf_input,tf_output = load_graph(model_path)
49 |
50 |     # Create tensors for model input and output
51 |     x = tf_model.get_tensor_by_name(tf_input)
52 |     y = tf_model.get_tensor_by_name(tf_output)
53 |
54 |     # Number of model outputs
55 |     num_outputs = y.shape.as_list()[0]
56 |     predictions = np.zeros((input_data.shape[0],num_outputs))
57 |     for i in range(input_data.shape[0]):
58 |         with tf.Session(graph=tf_model)
as sess: 59 | y_out = sess.run(y, feed_dict={x: input_data[i:i+1]}) 60 | predictions[i] = y_out 61 | 62 | return predictions 63 | 64 | def base64_decode_image(a, dtype, shape): 65 | # If this is Python 3, we need the extra step of encoding the 66 | # serialized NumPy string as a byte object 67 | if sys.version_info.major == 3: 68 | a = bytes(a, encoding="utf-8") 69 | 70 | # Convert the string to a NumPy array using the supplied data 71 | # type and target shape 72 | a = np.frombuffer(base64.decodestring(a), dtype=dtype) 73 | a = a.reshape(shape) 74 | 75 | # Return the decoded image 76 | return a 77 | 78 | 79 | def classify_process(): 80 | model_name = os.environ.get("MODEL_NAME") 81 | model_path = models_folder + '/' + model_name + '.pb' 82 | # Continually poll for new images to classify 83 | while True: 84 | # Pop off multiple images from Redis queue atomically 85 | with db.pipeline() as pipe: 86 | pipe.lrange(os.environ.get("IMAGE_QUEUE"), 0, int(os.environ.get("BATCH_SIZE")) - 1) 87 | pipe.ltrim(os.environ.get("IMAGE_QUEUE"), int(os.environ.get("BATCH_SIZE")), -1) 88 | queue, _ = pipe.execute() 89 | 90 | imageIDs = [] 91 | batch = None 92 | for q in queue: 93 | # Deserialize the object and obtain the input image 94 | q = json.loads(q.decode("utf-8")) 95 | image = base64_decode_image(q["image"], 96 | os.environ.get("IMAGE_DTYPE"), 97 | (1, int(os.environ.get("IMAGE_HEIGHT")), 98 | int(os.environ.get("IMAGE_WIDTH")), 99 | int(os.environ.get("IMAGE_CHANS"))) 100 | ) 101 | 102 | # Check to see if the batch list is None 103 | if batch is None: 104 | batch = image 105 | 106 | # Otherwise, stack the data 107 | else: 108 | batch = np.vstack([batch, image]) 109 | 110 | # Update the list of image IDs 111 | imageIDs.append(q["id"]) 112 | 113 | # Check to see if we need to process the batch 114 | if len(imageIDs) > 0: 115 | # Classify the batch 116 | print("* Batch size: {}".format(batch.shape)) 117 | # preds = model.predict(batch) 118 | preds = model_predict(model_path, batch) 119 | results = imagenet_utils.decode_predictions(preds) 120 | 121 | # Loop over the image IDs and their corresponding set of results from our model 122 | for (imageID, resultSet) in zip(imageIDs, results): 123 | # Initialize the list of output predictions 124 | output = [] 125 | 126 | # Loop over the results and add them to the list of output predictions 127 | for (imagenetID, label, prob) in resultSet: 128 | r = {"label": label, "probability": float(prob)} 129 | output.append(r) 130 | 131 | # Store the output predictions in the database, using image ID as the key so we can fetch the results 132 | db.set(imageID, json.dumps(output)) 133 | 134 | # Sleep for a small amount 135 | time.sleep(float(os.environ.get("SERVER_SLEEP"))) 136 | 137 | if __name__ == "__main__": 138 | classify_process() 139 | -------------------------------------------------------------------------------- /hass-ap/rootfs/etc/cont-init.d/wlanstart.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bashio 2 | # ============================================================================== 3 | # JPD Hass.io Add-ons: HASS-AP 4 | # Configures all the scripts 5 | # ============================================================================== 6 | 7 | # helper functions 8 | ip2int() 9 | { 10 | local a b c d 11 | { IFS=. 
read a b c d; } <<< $1 12 | echo $(((((((a << 8) | b) << 8) | c) << 8) | d)) 13 | } 14 | 15 | int2ip() 16 | { 17 | local ui32=$1; shift 18 | local ip="" 19 | local n 20 | for n in 1 2 3 4; do 21 | ip=$((ui32 & 0xff))${ip:+.}$ip 22 | ui32=$((ui32 >> 8)) 23 | done 24 | echo $ip 25 | } 26 | 27 | netmask() 28 | # Example: netmask 24 => 255.255.255.0 29 | { 30 | local mask=$((0xffffffff << (32 - $1))); shift 31 | int2ip $mask 32 | } 33 | 34 | 35 | broadcast() 36 | # Example: broadcast 192.0.2.0 24 => 192.0.2.255 37 | { 38 | local addr=$(ip2int $1); shift 39 | local mask=$((0xffffffff << (32 - $1))); shift 40 | int2ip $((addr | ~mask)) 41 | } 42 | 43 | network() 44 | # Example: network 192.0.2.42 24 => 192.0.2.0 45 | { 46 | local addr=$(ip2int $1); shift 47 | local mask=$((0xffffffff << (32 - $1))); shift 48 | int2ip $((addr & mask)) 49 | } 50 | 51 | # privileged set in config.json, but check anyway 52 | if [ ! -w "/sys" ] ; then 53 | bashio::log.error "[Error] Not running in privileged mode." 54 | exit 1 55 | fi 56 | CONFIG_PATH=/data/options.json 57 | #MQTT_HOST="$(jq --raw-output '.mqtt_host' $CONFIG_PATH)" 58 | 59 | # Default values 60 | #true ${OUTGOINGS:=eth0} 61 | OUTGOINGS="$(jq --raw-output '.outgoing' $CONFIG_PATH)" 62 | #true ${INTERFACE:=wlan0} 63 | INTERFACE="$(jq --raw-output '.interface' $CONFIG_PATH)" 64 | #true ${SUBNET:=192.168.254.0} 65 | SUBNET="$(jq --raw-output '.subnet' $CONFIG_PATH)" 66 | #true ${AP_ADDR:=192.168.254.1} 67 | AP_ADDR="$(jq --raw-output '.ap_address' $CONFIG_PATH)" 68 | #true ${SSID:=dockerap} 69 | SSID="$(jq --raw-output '.ssid' $CONFIG_PATH)" 70 | #true ${CHANNEL:=11} 71 | CHANNEL="$(jq --raw-output '.channel' $CONFIG_PATH)" 72 | #true ${WPA_PASSPHRASE:=passw0rd} 73 | WPA_PASSPHRASE="$(jq --raw-output '.wpa_passphrase' $CONFIG_PATH)" 74 | true ${HW_MODE:=g} 75 | #HW_MODE="$(jq --raw-output '.hw_mode' $CONFIG_PATH)" 76 | true ${DRIVER:=nl80211} 77 | #DRIVER="$(jq --raw-output '.driver' $CONFIG_PATH)" 78 | true ${HT_CAPAB:=[HT40-][SHORT-GI-20][SHORT-GI-40]} 79 | true ${MODE:=guest} 80 | DOCKER_ONLY="$(jq --raw-output '.docker_only' $CONFIG_PATH)" 81 | 82 | DHCP_MIN="$(jq --raw-output '.dhcp_min' $CONFIG_PATH)" 83 | DHCP_MAX="$(jq --raw-output '.dhcp_max' $CONFIG_PATH)" 84 | 85 | # Attach interface to container in guest mode 86 | if [ "$MODE" == "guest" ]; then 87 | bashio::log.info "Fetching interface data for container" 88 | 89 | CONTAINER_ID=$(cat /proc/self/cgroup | grep -o -e "/docker/.*" | head -n 1| sed "s/\/docker\/\(.*\)/\\1/") 90 | CONTAINER_PID=$(docker inspect -f '{{.State.Pid}}' ${CONTAINER_ID}) 91 | CONTAINER_IMAGE=$(docker inspect -f '{{.Config.Image}}' ${CONTAINER_ID}) 92 | 93 | bashio::log.info "Attaching interface to container" 94 | 95 | docker run -t --privileged --net=host --pid=host --rm --entrypoint /bin/sh ${CONTAINER_IMAGE} -c " 96 | PHY=\$(echo phy\$(iw dev ${INTERFACE} info | grep wiphy | tr ' ' '\n' | tail -n 1)) 97 | iw phy \$PHY set netns ${CONTAINER_PID} 98 | " 99 | bashio::log.info "Setting up interface" 100 | ip link set ${INTERFACE} name wlan0 101 | 102 | INTERFACE=wlan0 103 | fi 104 | 105 | if [ ! -f "/etc/hostapd.conf" ] ; then 106 | cat > "/etc/hostapd.conf" < /proc/sys/net/ipv4/$i 145 | fi 146 | done 147 | 148 | cat /proc/sys/net/ipv4/ip_dynaddr 149 | cat /proc/sys/net/ipv4/ip_forward 150 | 151 | if [ "${OUTGOINGS}" ] ; then 152 | ints="$(sed 's/,\+/ /g' <<<"${OUTGOINGS}")" 153 | for int in ${ints} 154 | do 155 | echo "Setting iptables for outgoing traffics on ${int}..." 
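        # NAT the AP clients out through ${int}: MASQUERADE rewrites their
        # source address on egress, the conntrack rule lets reply traffic back
        # in, and the last FORWARD rule passes anything arriving on the
        # wireless interface toward ${int}.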
156 |         iptables -t nat -A POSTROUTING -o ${int} -j MASQUERADE
157 |         iptables -A FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
158 |         iptables -A FORWARD -i ${INTERFACE} -o ${int} -j ACCEPT
159 |         #set outbound to subnet-only
160 |         if [ "${DOCKER_ONLY}" = true ]; then
161 |             ip_mask=$(ip -o -f inet addr show "${int}" | awk '/scope global/ {print $4}')
162 |             ip_=$(echo $ip_mask | cut -d'/' -f1)
163 |             _mask=$(echo $ip_mask | cut -d'/' -f2)
164 |             network_prefix=$(network $ip_ $_mask)
165 |             int_subnet=$(echo $ip_mask | sed "s/$ip_/$network_prefix/g")
166 |             iptables -A OUTPUT -d 127.0.0.0/24 -j ACCEPT
167 |             iptables -A OUTPUT -d ${int_subnet} -j ACCEPT
168 |             iptables -A OUTPUT -j DROP
169 |         fi
170 |     done
171 | else
172 |     echo "Setting iptables for outgoing traffic on all interfaces..."
173 |     iptables -t nat -A POSTROUTING -j MASQUERADE
174 |     iptables -A FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
175 |     iptables -A FORWARD -i ${INTERFACE} -j ACCEPT
176 | fi
177 | echo "Configuring DHCP server..."
178 |
179 | if [ ! -f "/config/hass-ap/dhcp-reservations.conf" ] ; then
180 |     mkdir -p /config/hass-ap
181 |     echo "" > /config/hass-ap/dhcp-reservations.conf
182 | fi
183 |
184 | cat > "/etc/dhcp/dhcpd.conf" < 10 | # Modification for hass.io add-on: James Fry
11 | # Modification for multiple protocols, hass.io services: John Dowling
12 |
13 | # Below are rtl_433 options and the supported device protocols as of 25/10/2017
14 | # **NOTE that the protocol number is NOT persistent and seems to change**
15 | # Hence always verify protocol numbers in logs when starting the add-on
16 | # The key arguments required are:
17 | # -F json --> this sets JSON formatted output for easier MQTT
18 | # -R --> this tells rtl_433 which protocol(s) to scan for
19 |
20 | # Usage: = Tuner options =
21 | # [-d ] (default: 0)
22 | # [-g ] (default: 0 for auto)
23 | # [-f ] [-f...] Receive frequency(s) (default: 433920000 Hz)
24 | # [-H ] Hop interval for polling of multiple frequencies (default: 600 seconds)
25 | # [-p ] Set sample rate (default: 250000 Hz)
27 | # [-S] Force sync output (default: async)
28 | # = Demodulator options =
29 | # [-R ] Enable only the specified device decoding protocol (can be used multiple times)
30 | # [-G] Enable all device protocols, including those disabled by default
31 | # [-l ] Change detection level used to determine pulses [0-16384] (0 = auto) (default: 0)
32 | # [-z ] Override short value in data decoder
33 | # [-x ] Override long value in data decoder
34 | # [-n ] Specify number of samples to take (each sample is 2 bytes: 1 each of I & Q)
35 | # = Analyze/Debug options =
36 | # [-a] Analyze mode. Print a textual description of the signal. Disables decoding
37 | # [-A] Pulse Analyzer. Enable pulse analysis and decode attempt
38 | # [-I] Include only: 0 = all (default), 1 = unknown devices, 2 = known devices
39 | # [-D] Print debug info on event (repeat for more info)
40 | # [-q] Quiet mode, suppress non-data messages
41 | # [-W] Overwrite mode, disable checks to prevent files from being overwritten
42 | # [-y ] Verify decoding of demodulated test data (e.g. "{25}fb2dd58") with enabled devices
43 | # = File I/O options =
44 | # [-t] Test signal auto save. Use it together with analyze mode (-a -t). Creates one file per signal
45 | # Note: Saves raw I/Q samples (uint8 pcm, 2 channel).
#       Preferred mode for generating test files
46 | # [-r ] Read data from input file instead of a receiver
47 | # [-m ] Data file mode for input / output file (default: 0)
48 | #     0 = Raw I/Q samples (uint8, 2 channel)
49 | #     1 = AM demodulated samples (int16 pcm, 1 channel)
50 | #     2 = FM demodulated samples (int16) (experimental)
51 | #     3 = Raw I/Q samples (cf32, 2 channel)
52 | #     Note: If output file is specified, input will always be I/Q
53 | # [-F] kv|json|csv Produce decoded output in given format. Not yet supported by all drivers.
54 | #     append output to file with : (e.g. -F csv:log.csv), defaults to stdout.
55 | # [-C] native|si|customary Convert units in decoded output.
56 | # [-T] specify number of seconds to run
57 | # [-U] Print timestamps in UTC (this may also be accomplished by invocation with TZ environment variable set).
58 | # [] Save data stream to output file (a '-' dumps samples to stdout)
59 | #
60 | # Supported device protocols:
61 | # [01]* Silvercrest Remote Control
62 | # [02] Rubicson Temperature Sensor
63 | # [03] Prologue Temperature Sensor
64 | # [04] Waveman Switch Transmitter
65 | # [05]* Steffen Switch Transmitter
66 | # [06]* ELV EM 1000
67 | # [07]* ELV WS 2000
68 | # [08] LaCrosse TX Temperature / Humidity Sensor
69 | # [09]* Template decoder
70 | # [10]* Acurite 896 Rain Gauge
71 | # [11] Acurite 609TXC Temperature and Humidity Sensor
72 | # [12] Oregon Scientific Weather Sensor
73 | # [13] Mebus 433
74 | # [14]* Intertechno 433
75 | # [15] KlikAanKlikUit Wireless Switch
76 | # [16] AlectoV1 Weather Sensor (Alecto WS3500 WS4500 Ventus W155/W044 Oregon)
77 | # [17] Cardin S466-TX2
78 | # [18] Fine Offset Electronics, WH2 Temperature/Humidity Sensor
79 | # [19] Nexus Temperature & Humidity Sensor
80 | # [20] Ambient Weather Temperature Sensor
81 | # [21] Calibeur RF-104 Sensor
82 | # [22]* X10 RF
83 | # [23]* DSC Security Contact
84 | # [24]* Brennenstuhl RCS 2044
85 | # [25] GT-WT-02 Sensor
86 | # [26] Danfoss CFR Thermostat
87 | # [27]* Energy Count 3000 (868.3 MHz)
88 | # [28]* Valeo Car Key
89 | # [29] Chuango Security Technology
90 | # [30] Generic Remote SC226x EV1527
91 | # [31] TFA-Twin-Plus-30.3049 and Ea2 BL999
92 | # [32] Fine Offset Electronics WH1080/WH3080 Weather Station
93 | # [33] WT450
94 | # [34] LaCrosse WS-2310 Weather Station
95 | # [35] Esperanza EWS
96 | # [36] Efergy e2 classic
97 | # [37]* Inovalley kw9015b, TFA Dostmann 30.3161 (Rain and temperature sensor)
98 | # [38] Generic temperature sensor 1
99 | # [39] WG-PB12V1
100 | # [40]* Acurite 592TXR Temp/Humidity, 5n1 Weather Station, 6045 Lightning
101 | # [41]* Acurite 986 Refrigerator / Freezer Thermometer
102 | # [42] HIDEKI TS04 Temperature, Humidity, Wind and Rain Sensor
103 | # [43] Watchman Sonic / Apollo Ultrasonic / Beckett Rocket oil tank monitor
104 | # [44] CurrentCost Current Sensor
105 | # [45] emonTx OpenEnergyMonitor
106 | # [46] HT680 Remote control
107 | # [47] S3318P Temperature & Humidity Sensor
108 | # [48] Akhan 100F14 remote keyless entry
109 | # [49] Quhwa
110 | # [50] OSv1 Temperature Sensor
111 | # [51] Proove
112 | # [52] Bresser Thermo-/Hygro-Sensor 3CH
113 | # [53] Springfield Temperature and Soil Moisture
114 | # [54] Oregon Scientific SL109H Remote Thermal Hygro Sensor
115 | # [55] Acurite 606TX Temperature Sensor
116 | # [56] TFA pool temperature sensor
117 | # [57] Kedsum Temperature & Humidity Sensor
118 | # [58] blyss DC5-UK-WH (433.92 MHz)
119 | # [59] Steelmate TPMS
120 | # [60] Schrader TPMS
121 | # [61]* LightwaveRF
122 | # [62] Elro DB286A Doorbell
123 | # [63] Efergy Optical 124 | # [64] Honda Car Key 125 | # [65]* Template decoder 126 | # [66] Fine Offset Electronics, XC0400 127 | # [67] Radiohead ASK 128 | # [68] Kerui PIR Sensor 129 | # [69] Fine Offset WH1050 Weather Station 130 | # [70] Honeywell Door/Window Sensor 131 | # [71] Maverick ET-732/733 BBQ Sensor 132 | # [72]* RF-tech 133 | # [73] LaCrosse TX141TH-Bv2 sensor 134 | # [74] Acurite 00275rm,00276rm Temp/Humidity with optional probe 135 | # [75] LaCrosse TX35DTH-IT Temperature sensor 136 | # [76] LaCrosse TX29IT Temperature sensor 137 | # [77] Vaillant calorMatic 340f Central Heating Control 138 | # [78] Fine Offset Electronics, WH25 Temperature/Humidity/Pressure Sensor 139 | # [79] Fine Offset Electronics, WH0530 Temperature/Rain Sensor 140 | # [80] IBIS beacon 141 | # [81] Oil Ultrasonic STANDARD FSK 142 | # [82] Citroen TPMS 143 | # [83] Oil Ultrasonic STANDARD ASK 144 | # [84] Thermopro TP11 Thermometer 145 | # [85] Solight TE44 146 | # [86] Wireless Smoke and Heat Detector GS 558 147 | # [87] Generic wireless motion sensor 148 | # [88] Toyota TPMS 149 | # [89] Ford TPMS 150 | # [90] Renault TPMS 151 | # [91]* inFactory 152 | # [92] FT-004-B Temperature Sensor 153 | # [93] Ford Car Key 154 | # [94] Philips outdoor temperature sensor 155 | # [95] Schrader TPMS EG53MA4 156 | # [96] Nexa 157 | # [97] Thermopro TP12 Thermometer 158 | # [98] GE Color Effects 159 | # [99] X10 Security 160 | # [100] Interlogix GE UTC Security Devices 161 | # [101]* Dish remote 6.3 162 | # * Disabled by default, use -R n or -G 163 | 164 | export LANG=C 165 | PATH="/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin" 166 | 167 | CONFIG_PATH=/data/options.json 168 | MQTT_HOST="$(jq --raw-output '.mqtt_host' $CONFIG_PATH)" 169 | MQTT_USER="$(jq --raw-output '.mqtt_user' $CONFIG_PATH)" 170 | MQTT_PASS="$(jq --raw-output '.mqtt_password' $CONFIG_PATH)" 171 | MQTT_TOPIC="$(jq --raw-output '.mqtt_topic' $CONFIG_PATH)" 172 | PROTOCOL="$(jq --raw-output '.protocol' $CONFIG_PATH | tr -d '[,]')" 173 | FREQUENCY="$(jq --raw-output '.frequency' $CONFIG_PATH)" 174 | GAIN="$(jq --raw-output '.gain' $CONFIG_PATH)" 175 | OFFSET="$(jq --raw-output '.frequency_offset' $CONFIG_PATH)" 176 | PROTOCOL_STR="" 177 | for proto in $PROTOCOL 178 | do 179 | PROTOCOL_STR="$PROTOCOL_STR -R $proto" 180 | done 181 | 182 | # Start the listener and enter an endless loop 183 | bashio::log.info "Starting RTL_433 with parameters:" 184 | bashio::log.info "MQTT Host =" $MQTT_HOST 185 | bashio::log.info "MQTT User =" $MQTT_USER 186 | bashio::log.info "MQTT Password =" $MQTT_PASS 187 | bashio::log.info "MQTT Topic =" $MQTT_TOPIC 188 | bashio::log.info "RTL_433 Protocol =" $PROTOCOL 189 | bashio::log.info "RTL_433 Protocol String =" $PROTOCOL_STR 190 | bashio::log.info "RTL_433 Frequency =" $FREQUENCY 191 | bashio::log.info "RTL_433 Gain =" $GAIN 192 | bashio::log.info "RTL_433 Frequency Offset =" $OFFSET 193 | 194 | 195 | 196 | #set -x ## uncomment for MQTT logging... 
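# Pipe rtl_433's JSON event stream into mosquitto_pub, publishing one retained
# message per decoded event. The topic starts at $MQTT_TOPIC and is extended
# to $MQTT_TOPIC/model/id when the event JSON carries those fields (spaces in
# either value are squeezed to underscores first).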
197 |
198 | /usr/local/bin/rtl_433 -F json $PROTOCOL_STR -f $FREQUENCY -g $GAIN -p $OFFSET | while read -r line
199 | do
200 |     DEVICE="$(echo $line | jq --raw-output '.model' | tr -s ' ' '_')" # replace ' ' with '_'
201 |     DEVICEID="$(echo $line | jq --raw-output '.id' | tr -s ' ' '_')"
202 |
203 |     MQTT_PATH=$MQTT_TOPIC
204 |
205 |     if [ ${#DEVICE} -gt 0 ]; then
206 |         MQTT_PATH=$MQTT_PATH/"$DEVICE"
207 |     fi
208 |     if [ ${#DEVICEID} -gt 0 ]; then
209 |         MQTT_PATH=$MQTT_PATH/"$DEVICEID"
210 |     fi
211 |
212 |     # Create file with touch /tmp/rtl_433.log if logging is needed
213 |     [ -w /tmp/rtl_433.log ] && echo "$line" >> /tmp/rtl_433.log
214 |     /usr/bin/mosquitto_pub -h $MQTT_HOST -u $MQTT_USER -P $MQTT_PASS -i RTL_433 -r -m "$line" -t "$MQTT_PATH"
215 | done
216 |
-------------------------------------------------------------------------------- /sendmail/rootfs/etc/mail/sendmail.conf: --------------------------------------------------------------------------------
1 | #------------------------------------------------------------------------------
2 | #
3 | # /etc/mail/sendmail.conf
4 | #
5 | # Copyright (c) 2001-2010 Richard Nelson. All Rights Reserved.
6 | # Version: 8.15.2
7 | # Time-stamp: <2018-01-13 23:43:05 cowboy>
8 | #
9 | # Parameter file for sendmail (sourced by /usr/share/sendmail/sendmail)
10 | # Make all changes herein, instead of altering /etc/init.d/sendmail.
11 | #
12 | # After making changes here, you'll need to run /usr/sbin/sendmailconfig
13 | # or /usr/share/sendmail/update_conf to have the changes take effect -
14 | # If you change DAEMON_MODE, QUEUE_MODE, or QUEUE_INTERVAL, you'll also
15 | # need to run /etc/init.d/sendmail restart.
16 | #
17 | # Changes made herein will be kept across upgrades - except for comments!
18 | # Some comment lines have special significance ...
19 | #
20 | # **** **** **** **** DO NOT EDIT THE COMMENTS **** **** **** ****
21 | #
22 | # Supported parameters (and defaults) are listed herein.
23 | #
24 | # Notes:
25 | # * This setup allows sendmail to run in several modes:
26 | # - listener and queue runner..DAEMON_MODE="daemon".QUEUE_MODE="daemon"
27 | # - listener only..............DAEMON_MODE="daemon".QUEUE_MODE="none"
28 | # - queue runner only..........DAEMON_MODE="none"...QUEUE_MODE="daemon"
29 | # - *NOTHING* ?!?..............DAEMON_MODE="none"...QUEUE_MODE="none"
30 | #
31 | # * You can also run the listener from inetd:
32 | # - listener and queue runner..DAEMON_MODE="inetd"..QUEUE_MODE="daemon"
33 | # - listener only..............DAEMON_MODE="inetd"..QUEUE_MODE="none"
34 | #
35 | # * You can also run the queue runner from cron:
36 | # - listener and queue runner..DAEMON_MODE="....."..QUEUE_MODE="cron"
37 | # - queue runner only..........DAEMON_MODE="none"...QUEUE_MODE="cron"
38 | #
39 | # * _PARMS entries herein are shown in precedence order; any later _PARMS
40 | # field will, if applicable, override any previous _PARMS fields.
41 | #
42 | # * Values *MUST* be surrounded with double quotes ("), single quotes
43 | # will *NOT* work !
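# (e.g. DAEMON_MODE="Daemon"; is accepted; DAEMON_MODE='Daemon'; is not)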
44 | #
45 | #------------------------------------------------------------------------------
46 | # SMTP Listener Configuration
47 | #
48 | # DAEMON_NETMODE="Static"; Keyword SMTP network mode
49 | # static: Do not monitor any network interfaces for changes
50 | # dynamic: Monitor one or more interfaces for changes
51 | #
52 | DAEMON_NETMODE="Static";
53 | #
54 | # DAEMON_NETIF="eth0"; string SMTP interface(s)
55 | # This parameter defines the network interface(s) that the daemon
56 | # will monitor for status changes (via ppp, dhcp, ifup/down hooks).
57 | #
58 | # NOTES:
59 | # 1) Only list more than one interface if they are only used for fallback;
60 | # otherwise the daemon will wind up ping-ponging between interfaces.
61 | # 2) Do not use 'lo' unless your daemon only listens on the localhost.
62 | #
63 | DAEMON_NETIF="eth0";
64 | #
65 | # DAEMON_MODE="Daemon"; Keyword SMTP listener
66 | # daemon: Run as standalone daemon
67 | # inetd: Run from inet supervisor (forks for each mail)
68 | # none: No listener (ie, nullclient/smarthost)
69 | #
70 | # NOTE: If you choose "none", mail will build up in the MSP queues
71 | # and you will not receive any mail from external sites.
72 | #
73 | DAEMON_MODE="Daemon";
74 | #
75 | # DAEMON_PARMS=""; String Listener parms
76 | # Any parameters here will be ignored when run from cron.
77 | # Note that {QUEUE,MISC,CRON}_PARMS, if applicable, will override
78 | # anything declared herein.
79 | #
80 | DAEMON_PARMS="";
81 | #
82 | # DAEMON_HOSTSTATS="No"; Boolean Listener stats
83 | # This parameter determines whether or not host stats are collected
84 | # and available for the `hoststat` command to display. There will
85 | # be a (minor) performance hit, as files will be created/updated for each
86 | # sendmail delivery attempt. The files are fixed in size, and small,
87 | # but there can be many of them.
88 | #
89 | DAEMON_HOSTSTATS="No";
90 | #
91 | # DAEMON_MAILSTATS="No"; Boolean Listener stats
92 | # This parameter determines whether or not mailer stats are collected
93 | # and available for the `mailstats` command to display. There will
94 | # be a (minor) performance hit, as this file will be updated for each
95 | # item coming into, or out of, sendmail. The file is fixed in size,
96 | # and small, so there's no need to rotate it.
97 | #
98 | DAEMON_MAILSTATS="No";
99 | #
100 | #------------------------------------------------------------------------------
101 | # SMTP MTA Queue Runner Configuration
102 | #
103 | # QUEUE_MODE="${DAEMON_MODE}"; Keyword SMTP queue runner
104 | # daemon: Run as standalone daemon
105 | # cron: Run from crontab
106 | # none: No queue runner (ie, nullclient/smarthost)
107 | #
108 | QUEUE_MODE="${DAEMON_MODE}";
109 | #
110 | # QUEUE_INTERVAL="10m"; Timespec (p?digits+w|d|h|m|s)
111 | # Interval at which to run the MTA queues. What interval should you use?
112 | # The amount of time that is acceptable before retrying delivery on
113 | # mail that couldn't be delivered in one run, or how long an item can
114 | # sit in the queue before having the first delivery attempt done.
115 | #
116 | # NOTE: To use persistent queue-runners use this form: p120m
117 | #
118 | # NOTE: If you leave this field blank, you get *NO* queue runners !!!
119 | #
120 | QUEUE_INTERVAL="10m";
121 | #
122 | # QUEUE_PARMS=""; String queue parameters
123 | # Any parameters here are also used when run from cron.
124 | # Note that MISC_PARMS and CRON_PARMS, if applicable, will override
125 | # anything declared herein.
126 | #
127 | QUEUE_PARMS="";
128 | #
129 | #------------------------------------------------------------------------------
130 | # SMTP - MSP Queue Runner Configuration
131 | #
132 | # MSP_MODE="Cron"; Keyword MSP queue runner mode
133 | # daemon: Run as standalone daemon
134 | # cron: Run from crontab
135 | # none: No queue runner (ie, nullclient/smarthost)
136 | #
137 | # NOTE: If QUEUE_MODE="cron" & MSP_MODE="none", the MSP queue will
138 | # be run as part of the MTA queue running process.
139 | #
140 | MSP_MODE="Cron";
141 | #
142 | # MSP_INTERVAL="20m"; Timespec (digits+w|d|h|m|s)
143 | # Interval at which to run the MSP queues. What interval should you use?
144 | # The amount of time that is acceptable before retrying delivery on
145 | # mail that couldn't be accepted by the MTA, and was therefore left
146 | # in the message submission queue. The MTA shouldn't be down that often,
147 | # so this can be larger than QUEUE_INTERVAL.
148 | #
149 | # NOTE: If you leave this field blank, the MSP queue will *NOT* be run !!!
150 | #
151 | MSP_INTERVAL="20m";
152 | #
153 | # MSP_PARMS=""; String queue parameters
154 | # Any parameters here are also used when run from cron.
155 | # Note that MISC_PARMS and CRON_PARMS, if applicable, will override
156 | # anything declared herein.
157 | #
158 | MSP_PARMS="";
159 | #
160 | # MSP_MAILSTATS="${DAEMON_MAILSTATS}"; Boolean Listener stats
161 | # This parameter determines whether or not mailer stats are collected
162 | # and available for the `mailstats` command to display. There will
163 | # be a (minor) performance hit, as this file will be updated for each
164 | # item coming into, or out of, sendmail. The file is fixed in size,
165 | # and small, so there's no need to rotate it.
166 | #
167 | MSP_MAILSTATS="${DAEMON_MAILSTATS}";
168 | #
169 | #------------------------------------------------------------------------------
170 | # Miscellaneous Configuration
171 | #
172 | # MISC_PARMS=""; String miscellaneous parameters
173 | # Miscellaneous parameters - applied to any sendmail invocation.
174 | # Any parameters here are also used when run from cron.
175 | # Applied after {DAEMON,QUEUE}_PARMS, and can therefore override them
176 | # if need be (in which case, why did you use them?)
177 | # Note that CRON_PARMS, if applicable, will override anything
178 | # declared herein.
179 | #
180 | # Here is where you'd set up any debugging or special parms that you
181 | # want shared betwixt the possibly separate listener/queue-runner
182 | # processes.
183 | #
184 | MISC_PARMS="";
185 | #
186 | #------------------------------------------------------------------------------
187 | # Cron Job Configuration
188 | #
189 | # CRON_MAILTO="root"; String cronjob output
190 | # Recipient of *rare* cronjob output. Some cronjobs will be running
191 | # under user `mail`, so any problems encountered would probably be missed,
192 | # so define a user who actually (hopefully) checks email now and again.
193 | #
194 | CRON_MAILTO="root";
195 | #
196 | # CRON_PARMS=""; String cron-specific parameters
197 | # Cron parameters - applied *only* when sendmail queue running is done
198 | # via a cronjob. Applied after QUEUE_PARMS and MISC_PARMS, and can
199 | # therefore override them if need be.
200 | #
201 | CRON_PARMS="";
202 | #
203 | #------------------------------------------------------------------------------
204 | # Other stuff
205 | # LOG_CMDS="No"; Binary command logging flag
206 | # Will cause syslog entries for many of the sendmail related commands
207 | # like runq, mailq, etc - you'll also see cron jobs (if enabled).
208 | #
209 | LOG_CMDS="No";
210 | #
211 | # HANDS_OFF="No"; Binary Do *NOT* touch the configuration
212 | # Set this *ONLY* if you are going to be fully responsible for the entire
213 | # setup of sendmail - the directories, permissions, databases, etc. With
214 | # this variable set to "Yes", nothing will be done for you during updates.
215 | #
216 | # In other words, "The blood be upon your hands" if you set this...
217 | # My ability to help with problems will be greatly reduced !
218 | #
219 | # "Well, a pet peeve of mine is people who directly edit the
220 | # .cf file instead of using the m4 configuration files.
221 | # Don't do it! [laughs] I treat the .cf file as a binary
222 | # file - you should too."
223 | # -- Eric Allman 1999/10/18
224 | # http://www.dotcomeon.com/allman_sendmail_qa.html
225 | #
226 | HANDS_OFF="No";
227 | #
228 | #------------------------------------------------------------------------------
229 | # Queue Aging Configuration
230 | #
231 | # Why would you want to age your queues? On every queue-run interval,
232 | # sendmail will try *every* file in the queue... If a site is down
233 | # for a while, considerable time can be wasted each interval in retrying
234 | # it. The scheme supported allows aging by time, and can move the older
235 | # files to another (less frequently run) queue, thereby reducing overall
236 | # system impact - and providing better mail throughput.
237 | #
238 | # Note that this support is completely separate from QUEUE_MODE=cron;
239 | # you can age queues even if you're running QUEUE_MODE=daemon.
240 | #
241 | # There are four parts to the queue aging support, and these parts
242 | # may be repeated, to operate on multiple queues.
243 | #
244 | # 1. Interval at which to age the queues (in minutes).
245 | # What interval should you use? Roughly twice the normal queue
246 | # interval, so that messages are tried twice in each successively
247 | # slower queue.
248 | #
249 | # NOTE: some values just won't work, due to crontab peculiarities -
250 | # a value of 90 minutes will actually be run at every x:30 !
251 | # Please check /etc/cron.d/sendmail to make sure it is doing what
252 | # you thought it should !
253 | #
254 | # 2. Criteria (optional and defaults to interval). This is the
255 | # specification of which files to move. It defaults to moving
256 | # files whose age in the queues exceeds the interval.
257 | # This field, if specified, can be very complex - supporting
258 | # aging by just about anything! See qtool(8) for details.
259 | #
260 | # 3. To queue. This is the queue to which files will be moved.
261 | # It may be fully qualified, or relative to /var/spool/mqueue.
262 | #
263 | # 4. From queue. This is the queue from which files will be moved.
264 | # It may be fully qualified, or relative to /var/spool/mqueue.
265 | #
266 | # Samples:
267 | # AGE_DATA="[['25m', '', 'hourly', 'main']]";
268 | # Every 25 minutes, move any file older than 25 minutes from
269 | # /var/spool/mqueue/main to /var/spool/mqueue/hourly
270 | #
271 | # AGE_DATA="[['25m', '', 'hourly', 'main'],\
272 | # ['120m', '', 'daily', 'hourly']]";
273 | # Same as the above, but also move files from the hourly queue
274 | # to the daily queue after 120 minutes in the hourly queue.
275 | #
276 | # AGE_DATA="[['25m',\
277 | # '-e \'$msg{message}[0] == /Deferred: 452 4.2.2 Over quota/\'',\
278 | # 'overquota', 'main']]";
279 | # Every 25 minutes, move all files deferred because of quota
280 | # violations from /var/spool/mqueue/main to
281 | # /var/spool/mqueue/overquota, where they can be processed on
282 | # a different interval, or by some other means.
283 | #
284 | # If the above samples suggest Perl arrays, well, they are...
285 | #
286 | # AGE_DATA=""; Perl array Queue aging data
287 | #
288 | AGE_DATA="";
289 | #
290 | #------------------------------------------------------------------------------
291 | # Dependent variables (set according to other variables)
292 | #
293 | #
294 | #------------------------------------------------------------------------------
295 | # Hidden variables (the blood be upon your hands)
296 | #
297 | DAEMON_RUNASUSER="No";
298 | #
299 | #------------------------------------------------------------------------------
300 | # Deprecated variables (kept for reference)
301 | #
302 | DAEMON_STATS="${DAEMON_MAILSTATS}";
303 | MSP_STATS="${MSP_MAILSTATS}";
304 | #
305 | #------------------------------------------------------------------------------
306 | # Unknown variables (kept for reference)
307 | #
308 | #------------------------------------------------------------------------------
309 | #
310 |
311 |
--------------------------------------------------------------------------------