├── appdaemon ├── conf │ ├── __init__.py │ ├── model │ │ ├── names │ │ ├── model.meta │ │ └── model.cfg │ ├── apps │ │ ├── lib │ │ │ ├── __init__.py │ │ │ ├── meta.py │ │ │ ├── geometry.py │ │ │ ├── detection_model.py │ │ │ └── onnx.py │ │ ├── apps.yaml │ │ ├── config.ini │ │ └── print_detect.py │ └── appdaemon.yaml └── Dockerfile ├── homeassistant ├── config │ ├── scenes.yaml │ ├── scripts.yaml │ ├── automations.yaml │ └── configuration.yaml └── Dockerfile ├── .env.template ├── .gitignore ├── LICENSE.md ├── docker-compose.yml ├── docs └── CONFIG_FILE.md └── README.md /appdaemon/conf/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /appdaemon/conf/model/names: -------------------------------------------------------------------------------- 1 | failure -------------------------------------------------------------------------------- /homeassistant/config/scenes.yaml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /homeassistant/config/scripts.yaml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /appdaemon/conf/apps/lib/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /homeassistant/config/automations.yaml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.env.template: -------------------------------------------------------------------------------- 1 | TUNNEL_TOKEN=your_tunnel_token -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | secrets.yaml 3 | *.onnx 4 | .DS_STORE 5 | __pycache__ -------------------------------------------------------------------------------- /appdaemon/conf/model/model.meta: -------------------------------------------------------------------------------- 1 | classes= 1 2 | names = /app/model/names -------------------------------------------------------------------------------- /appdaemon/conf/apps/apps.yaml: -------------------------------------------------------------------------------- 1 | print_detect: 2 | module: print_detect 3 | class: PrintDetect -------------------------------------------------------------------------------- /homeassistant/Dockerfile: -------------------------------------------------------------------------------- 1 | # Use the base image for Home Assistant 2 | FROM ghcr.io/home-assistant/home-assistant:stable 3 | 4 | # Copy the configuration files into the image 5 | COPY ./config /config 6 | 7 | # Ensure the entire config directory is readable and writable 8 | RUN chmod -R 777 /config 9 | -------------------------------------------------------------------------------- /appdaemon/conf/appdaemon.yaml: -------------------------------------------------------------------------------- 1 | appdaemon: 2 | time_zone: !secret TIME_ZONE 3 | latitude: !secret LATITUDE 4 | longitude: !secret LONGITUDE 5 | elevation: !secret ELEVATION 6 | plugins: 7 | HASS: 8 | type: hass 9 | ha_url: !secret HASS_HOSTNAME 10 | token: 
!secret HASS_TOKEN -------------------------------------------------------------------------------- /homeassistant/config/configuration.yaml: -------------------------------------------------------------------------------- 1 | # Loads default set of integrations. Do not remove. 2 | default_config: 3 | 4 | # Load frontend themes from the themes folder 5 | frontend: 6 | themes: !include_dir_merge_named themes 7 | 8 | automation: !include automations.yaml 9 | script: !include scripts.yaml 10 | scene: !include scenes.yaml 11 | 12 | http: 13 | use_x_forwarded_for: true 14 | trusted_proxies: 15 | - 172.25.0.2 16 | 17 | homeassistant: 18 | media_dirs: 19 | local: /media -------------------------------------------------------------------------------- /appdaemon/Dockerfile: -------------------------------------------------------------------------------- 1 | # Use Python 3.11 with Slim base image 2 | FROM python:3.11-slim 3 | 4 | # Set working directory 5 | WORKDIR /usr/src/app 6 | 7 | # Copy the application code into the image 8 | COPY ./conf /conf 9 | 10 | # Install libgl1 for OpenCV and weget for downloading the model 11 | RUN apt-get update && apt-get install -y libgl1 libgl1-mesa-glx libglib2.0-0 wget 12 | 13 | # Download the machine learning model 14 | RUN wget https://tsd-pub-static.s3.amazonaws.com/ml-models/model-weights-5a6b1be1fa.onnx -O /conf/model/model-weights-5a6b1be1fa.onnx 15 | 16 | # Upgrade pip 17 | RUN pip install --upgrade pip 18 | 19 | # Install the required Python packages 20 | RUN pip3 install numpy==1.26.4 opencv-python==4.10.0.84 appdaemon==4.4.2 onnxruntime==1.19.2 pyyaml 21 | 22 | # Start the AppDaemon service 23 | CMD ["appdaemon", "-c", "/conf"] -------------------------------------------------------------------------------- /appdaemon/conf/apps/config.ini: -------------------------------------------------------------------------------- 1 | [DEFAULT] 2 | RunModelInterval = 5 # In Seconds 3 | TerminationTime = 120 # Time in seconds to automatically stop the print 4 | BinaryIsPrintingSensor = binary_sensor.octoprint_printing 5 | PrintingOnState = on 6 | PrinterCamera = camera.octoprint_camera 7 | PrinterStopButton = button.octoprint_stop_job 8 | Threshold = 0.25 9 | NMS = 0.4 10 | 11 | [printer.entities] 12 | BinaryIsPrintingSensor = binary_sensor.octoprint_printing 13 | PrintingOnState = on 14 | PrinterCamera = camera.octoprint_camera 15 | PrinterStopButton = button.octoprint_stop_job 16 | 17 | [program.timings] 18 | RunModelInterval = 5 19 | TerminationTime = 120 20 | 21 | [model.detection] 22 | Threshold = 0.25 23 | NMS = 0.4 24 | 25 | [notifications.config] 26 | NotifyOnWarmup = True 27 | 28 | [notifications.entities] 29 | ExtruderTempSensor = sensor.octoprint_actual_tool0_temp 30 | ExtruderTargetTempSensor = sensor.octoprint_target_tool0_temp 31 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 oliverbravery 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice 
and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /appdaemon/conf/apps/lib/meta.py: -------------------------------------------------------------------------------- 1 | ''' 2 | This file is adapted from the opico-server project (formally known as Spaghetti Detective). 3 | Link: https://github.com/TheSpaghettiDetective/obico-server/tree/release 4 | ''' 5 | 6 | from typing import List 7 | from dataclasses import dataclass, field 8 | import os 9 | import re 10 | 11 | @dataclass 12 | class Meta: 13 | names: List[str] = field(default_factory=list) 14 | 15 | def __init__(self, meta_path: str): 16 | names = None 17 | with open(meta_path) as f: 18 | meta_contents = f.read() 19 | match = re.search("names *= *(.*)$", meta_contents, re.IGNORECASE | re.MULTILINE) 20 | if match: 21 | names_path = match.group(1) 22 | try: 23 | if os.path.exists(names_path): 24 | with open(names_path) as namesFH: 25 | names_list = namesFH.read().strip().split("\n") 26 | names = [x.strip() for x in names_list] 27 | except TypeError: 28 | pass 29 | if names is None: 30 | names = ['failure'] 31 | 32 | self.names = names -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | homeassistant: 3 | container_name: homeassistant 4 | build: ./homeassistant 5 | volumes: 6 | - /run/dbus:/run/dbus:ro 7 | - homeassistant:/homeassistant 8 | restart: unless-stopped 9 | networks: 10 | homeassistant_network: 11 | ipv4_address: 172.25.0.3 12 | ports: 13 | - 8123:8123 14 | 15 | cloudflared: 16 | container_name: cloudflared 17 | image: cloudflare/cloudflared:latest 18 | restart: unless-stopped 19 | command: tunnel run 20 | environment: 21 | - TUNNEL_TOKEN=${TUNNEL_TOKEN} 22 | networks: 23 | homeassistant_network: 24 | ipv4_address: 172.25.0.2 25 | 26 | octoprint: 27 | container_name: octoprint 28 | image: octoprint/octoprint 29 | restart: unless-stopped 30 | ports: 31 | - 80:80 32 | volumes: 33 | - octoprint:/octoprint 34 | networks: 35 | homeassistant_network: 36 | ipv4_address: 172.25.0.4 37 | environment: 38 | - ENABLE_MJPG_STREAMER=true 39 | devices: 40 | - /dev/ttyACM0:/dev/ttyACM0 41 | - /dev/video0:/dev/video0 42 | 43 | appdaemon: 44 | container_name: appdaemon 45 | build: ./appdaemon 46 | env_file: .env 47 | restart: unless-stopped 48 | networks: 49 | homeassistant_network: 50 | ipv4_address: 172.25.0.6 51 | 52 | volumes: 53 | octoprint: 54 | homeassistant: 55 | 56 | networks: 57 | homeassistant_network: 58 | driver: bridge 59 | ipam: 60 | driver: default 61 | config: 62 | - subnet: 172.25.0.0/29 63 | gateway: 172.25.0.1 -------------------------------------------------------------------------------- /appdaemon/conf/apps/lib/geometry.py: -------------------------------------------------------------------------------- 1 | ''' 2 | This file is adapted from the 
obico-server project (formerly known as Spaghetti Detective). 3 | Link: https://github.com/TheSpaghettiDetective/obico-server/tree/release 4 | ''' 5 | 6 | from dataclasses import dataclass 7 | from typing import List, Tuple 8 | 9 | @dataclass 10 | class Box: 11 | """Detection rect""" 12 | xc: float 13 | yc: float 14 | w: float 15 | h: float 16 | 17 | @classmethod 18 | def from_tuple(cls, box: Tuple[float, float, float, float]) -> 'Box': 19 | return Box(xc=float(box[0]), yc=float(box[1]), w=float(box[2]), h=float(box[3])) 20 | 21 | def left(self) -> float: 22 | return self.xc - self.w * 0.5 23 | 24 | def right(self) -> float: 25 | return self.xc + self.w * 0.5 26 | 27 | def top(self) -> float: 28 | return self.yc - self.h * 0.5 29 | 30 | def bottom(self) -> float: 31 | return self.yc + self.h * 0.5 32 | 33 | def calc_iou(self, other: 'Box') -> float: 34 | """Calculates an overlap ratio (intersection over the enclosing box) which can be used to compare boxes""" 35 | al = self.left() 36 | ar = self.right() 37 | at = self.top() 38 | ab = self.bottom() 39 | 40 | bl = other.left() 41 | br = other.right() 42 | bt = other.top() 43 | bb = other.bottom() 44 | 45 | i_l = max(al, bl) 46 | i_r = min(ar, br) 47 | i_t = max(at, bt) 48 | i_b = min(ab, bb) 49 | 50 | o_l = min(al, bl) 51 | o_r = max(ar, br) 52 | o_t = min(at, bt) 53 | o_b = max(ab, bb) 54 | 55 | i_w = i_r - i_l 56 | i_h = i_b - i_t 57 | o_w = o_r - o_l 58 | o_h = o_b - o_t 59 | 60 | o_a = o_w * o_h 61 | if o_a <= 0.0: 62 | return 0.0 63 | return i_w * i_h / o_a 64 | 65 | 66 | @dataclass 67 | class Detection: 68 | """Detection result""" 69 | name: str 70 | confidence: float 71 | box: Box 72 | 73 | @classmethod 74 | def from_tuple_list(cls, detections: List[Tuple[str, float, Tuple[float, float, float, float]]]) -> List['Detection']: 75 | return [Detection.from_tuple(d) for d in detections] 76 | 77 | @classmethod 78 | def from_tuple(cls, detection: Tuple[str, float, Tuple[float, float, float, float]]) -> 'Detection': 79 | box = Box.from_tuple(detection[2]) 80 | return Detection(detection[0], float(detection[1]), box) -------------------------------------------------------------------------------- /appdaemon/conf/apps/lib/detection_model.py: -------------------------------------------------------------------------------- 1 | ''' 2 | This file is adapted from the obico-server project (formerly known as Spaghetti Detective). 3 | Link: https://github.com/TheSpaghettiDetective/obico-server/tree/release 4 | ''' 5 | 6 | #!python3 7 | 8 | # pylint: disable=R, W0401, W0614, W0703 9 | from lib.meta import Meta 10 | from os import path 11 | from lib.onnx import OnnxNet 12 | 13 | alt_names = None 14 | onnx_ready = True 15 | 16 | def load_net(config_path, meta_path, weights_path=None): 17 | def try_loading_net(net_config_priority): 18 | for net_config in net_config_priority: 19 | weights = net_config['weights_path'] 20 | use_gpu = net_config['use_gpu'] 21 | net_main = None 22 | try: 23 | print(f'----- Trying to load weights: {weights} - use_gpu = {use_gpu} -----') 24 | if weights.endswith(".onnx"): 25 | if not onnx_ready: 26 | raise Exception('Not loading ONNX net due to previous import failure. Check earlier log for errors.') 27 | net_main = OnnxNet(weights, meta_path, use_gpu) 28 | else: 29 | raise Exception(f'Cannot recognize net from weights file suffix: {weights}') 30 | print('Succeeded!') 31 | return net_main 32 | except Exception as e: 33 | print(f'Failed! 
- {e}') 34 | 35 | raise Exception(f'Failed to load any net after trying: {net_config_priority}') 36 | 37 | global alt_names # pylint: disable=W0603 38 | 39 | model_dir = path.join(path.dirname(path.realpath(__file__)), '..', 'model') 40 | net_config_priority = [ 41 | dict(weights_path=path.join(model_dir, 'model-weights.onnx'), use_gpu=True), 42 | dict(weights_path=path.join(model_dir, 'model-weights.onnx'), use_gpu=False), 43 | ] 44 | if weights_path is not None: 45 | net_config_priority = [ dict(weights_path=weights_path, use_gpu=True), dict(weights_path=weights_path, use_gpu=False) ] 46 | 47 | net_main = try_loading_net(net_config_priority) 48 | 49 | if alt_names is None: 50 | # In Python 3, the metafile default access craps out on Windows (but not Linux) 51 | # Read the names file and create a list to feed to detect 52 | try: 53 | meta = Meta(meta_path) 54 | alt_names = meta.names 55 | except Exception: 56 | pass 57 | 58 | return net_main 59 | 60 | def detect(net, image, thresh=.5, hier_thresh=.5, nms=.45, debug=False): 61 | return net.detect(net.meta, image, alt_names, thresh, hier_thresh, nms, debug) 62 | -------------------------------------------------------------------------------- /appdaemon/conf/model/model.cfg: -------------------------------------------------------------------------------- 1 | [net] 2 | # Testing 3 | batch=64 4 | subdivisions=8 5 | # Training 6 | # batch=64 7 | # subdivisions=8 8 | height=416 9 | width=416 10 | channels=3 11 | momentum=0.9 12 | decay=0.0005 13 | angle=0 14 | saturation = 1.5 15 | exposure = 1.5 16 | hue=.1 17 | 18 | learning_rate=0.001 19 | burn_in=1000 20 | max_batches = 50000 21 | policy=steps 22 | steps=40000,60000 23 | scales=.1,.1 24 | 25 | [convolutional] 26 | batch_normalize=1 27 | filters=32 28 | size=3 29 | stride=1 30 | pad=1 31 | activation=leaky 32 | 33 | [maxpool] 34 | size=2 35 | stride=2 36 | 37 | [convolutional] 38 | batch_normalize=1 39 | filters=64 40 | size=3 41 | stride=1 42 | pad=1 43 | activation=leaky 44 | 45 | [maxpool] 46 | size=2 47 | stride=2 48 | 49 | [convolutional] 50 | batch_normalize=1 51 | filters=128 52 | size=3 53 | stride=1 54 | pad=1 55 | activation=leaky 56 | 57 | [convolutional] 58 | batch_normalize=1 59 | filters=64 60 | size=1 61 | stride=1 62 | pad=1 63 | activation=leaky 64 | 65 | [convolutional] 66 | batch_normalize=1 67 | filters=128 68 | size=3 69 | stride=1 70 | pad=1 71 | activation=leaky 72 | 73 | [maxpool] 74 | size=2 75 | stride=2 76 | 77 | [convolutional] 78 | batch_normalize=1 79 | filters=256 80 | size=3 81 | stride=1 82 | pad=1 83 | activation=leaky 84 | 85 | [convolutional] 86 | batch_normalize=1 87 | filters=128 88 | size=1 89 | stride=1 90 | pad=1 91 | activation=leaky 92 | 93 | [convolutional] 94 | batch_normalize=1 95 | filters=256 96 | size=3 97 | stride=1 98 | pad=1 99 | activation=leaky 100 | 101 | [maxpool] 102 | size=2 103 | stride=2 104 | 105 | [convolutional] 106 | batch_normalize=1 107 | filters=512 108 | size=3 109 | stride=1 110 | pad=1 111 | activation=leaky 112 | 113 | [convolutional] 114 | batch_normalize=1 115 | filters=256 116 | size=1 117 | stride=1 118 | pad=1 119 | activation=leaky 120 | 121 | [convolutional] 122 | batch_normalize=1 123 | filters=512 124 | size=3 125 | stride=1 126 | pad=1 127 | activation=leaky 128 | 129 | [convolutional] 130 | batch_normalize=1 131 | filters=256 132 | size=1 133 | stride=1 134 | pad=1 135 | activation=leaky 136 | 137 | [convolutional] 138 | batch_normalize=1 139 | filters=512 140 | size=3 141 | stride=1 142 | pad=1 143 | 
activation=leaky 144 | 145 | [maxpool] 146 | size=2 147 | stride=2 148 | 149 | [convolutional] 150 | batch_normalize=1 151 | filters=1024 152 | size=3 153 | stride=1 154 | pad=1 155 | activation=leaky 156 | 157 | [convolutional] 158 | batch_normalize=1 159 | filters=512 160 | size=1 161 | stride=1 162 | pad=1 163 | activation=leaky 164 | 165 | [convolutional] 166 | batch_normalize=1 167 | filters=1024 168 | size=3 169 | stride=1 170 | pad=1 171 | activation=leaky 172 | 173 | [convolutional] 174 | batch_normalize=1 175 | filters=512 176 | size=1 177 | stride=1 178 | pad=1 179 | activation=leaky 180 | 181 | [convolutional] 182 | batch_normalize=1 183 | filters=1024 184 | size=3 185 | stride=1 186 | pad=1 187 | activation=leaky 188 | 189 | 190 | ####### 191 | 192 | [convolutional] 193 | batch_normalize=1 194 | size=3 195 | stride=1 196 | pad=1 197 | filters=1024 198 | activation=leaky 199 | 200 | [convolutional] 201 | batch_normalize=1 202 | size=3 203 | stride=1 204 | pad=1 205 | filters=1024 206 | activation=leaky 207 | 208 | [route] 209 | layers=-9 210 | 211 | [convolutional] 212 | batch_normalize=1 213 | size=1 214 | stride=1 215 | pad=1 216 | filters=64 217 | activation=leaky 218 | 219 | [reorg3d] 220 | stride=2 221 | 222 | [route] 223 | layers=-1,-4 224 | 225 | [convolutional] 226 | batch_normalize=1 227 | size=3 228 | stride=1 229 | pad=1 230 | filters=1024 231 | activation=leaky 232 | 233 | [convolutional] 234 | size=1 235 | stride=1 236 | pad=1 237 | filters=30 238 | activation=linear 239 | 240 | 241 | [region] 242 | anchors = 1.3221, 1.73145, 3.19275, 4.00944, 5.05587, 8.09892, 9.47112, 4.84053, 11.2364, 10.0071 243 | bias_match=1 244 | classes=1 245 | coords=4 246 | num=5 247 | softmax=1 248 | jitter=.3 249 | rescore=1 250 | 251 | object_scale=5 252 | noobject_scale=1 253 | class_scale=1 254 | coord_scale=1 255 | 256 | absolute=1 257 | thresh = .6 258 | random=1 -------------------------------------------------------------------------------- /docs/CONFIG_FILE.md: -------------------------------------------------------------------------------- 1 | # Sentinel Configuration File Documentation 2 | The configuration file for Sentinel is located at [`appdaemon/conf/apps/config.ini`](/appdaemon/conf/apps/config.ini). This file is used to configure the Sentinel AppDaemon app to monitor the status of your 3D printer and send notifications when a failure is detected. The configuration file is divided into sections, each with its own set of configuration variables. The following sections describe each of the configuration variables and how to set them. 3 | 4 | ## [DEFAULT] Section 5 | The `[DEFAULT]` section contains all the default configuration variables for the Sentinel app. These variables are used to configure the behavior of the app and can be overridden in the other sections of the configuration file. It is not recommended to change these variables unless you know what you are doing as they are used to set the default behavior of the app. 6 | 7 | ## [printer.entities] Section 8 | The `[printer.entities]` section contains the configuration variables for the entities that represent the 3D printer in Home Assistant. These variables are used to configure the entities that the app will monitor to detect failures. The following variables are available in this section: 9 | - **BinaryIsPrintingSensor**: The entity ID of the binary sensor that indicates whether the printer is currently printing. This sensor should be `on` when the printer is printing and `off` when it is not. 
If your sensor reports a different state while printing, set it in the `PrintingOnState` variable. Defaults to OctoPrint's `binary_sensor.octoprint_printing`. 10 | - **PrintingOnState**: The state of the `BinaryIsPrintingSensor` when the printer is printing. This variable is optional and defaults to `on`. 11 | - **PrinterCamera**: The entity ID of the camera that shows the printer. This camera will be used to take a snapshot when a failure is detected. This variable is optional and defaults to the OctoPrint camera `camera.octoprint_camera`. 12 | - **PrinterStopButton**: The entity ID of the button that stops the printer. This button will be used to stop the printer when a failure is detected. This variable defaults to the OctoPrint button `button.octoprint_stop_job`. 13 | 14 | ## [program.timings] Section 15 | The `[program.timings]` section contains the configuration variables for the timings of the monitoring program. These variables are used to configure how often the app checks the status of the printer and how long it should wait before automatically stopping the printer. The following variables are available in this section: 16 | - **RunModelInterval**: The interval in seconds at which the app checks the status of the printer. This is also how often the detection model runs while a print is in progress. This variable defaults to `5` seconds. 17 | - **TerminationTime**: The time in seconds that the app waits before automatically stopping the printer when a failure is detected. This variable defaults to `120` seconds (2 minutes). 18 | 19 | ## [model.detection] Section 20 | The `[model.detection]` section contains the configuration variables for the machine learning model used to detect failures. These variables are used to configure the model inference process and can be used to fine-tune the model's performance. The following variables are available in this section: 21 | - **Threshold**: The confidence threshold for the model's predictions. If the model reports a failure with a confidence greater than this value, a failure is detected. This variable defaults to `0.25`. 22 | - **NMS**: The Non-Maximum Suppression (NMS) threshold for the model's predictions. Used to filter out duplicate predictions. This variable defaults to `0.4`. 23 | 24 | ## [notifications.config] Section 25 | The `[notifications.config]` section contains the configuration variables for the notifications sent by the app. The following variables are available in this section: 26 | - **NotifyOnWarmup**: Whether to send a notification when the extruder has almost finished warming up (so any excess filament can be removed before the print starts). This variable defaults to `True`. 27 | 28 | ## [notifications.entities] Section 29 | The `[notifications.entities]` section contains the configuration variables for the entities used in the notifications. These variables are used to configure the entities that the app will use to send notifications. The following variables are available in this section: 30 | - **ExtruderTempSensor**: The entity ID of the sensor that measures the extruder temperature. Only used if `NotifyOnWarmup` is `True`. This variable defaults to the OctoPrint sensor `sensor.octoprint_actual_tool0_temp`. 31 | - **ExtruderTargetTempSensor**: The entity ID of the sensor that measures the target extruder temperature. Only used if `NotifyOnWarmup` is `True`. This variable defaults to the OctoPrint sensor `sensor.octoprint_target_tool0_temp`. 
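For reference, the short sketch below (not part of the app) shows how these values resolve: `configparser` falls back to the `[DEFAULT]` section for any key a section does not override, which is the behaviour Sentinel relies on when reading `config.ini`. The Klipper-style camera entity is a hypothetical example of an override, not something shipped with this repository.

```python
from configparser import ConfigParser

# Minimal sketch of config.ini resolution: a section only needs to list the keys it
# overrides; everything else is inherited from [DEFAULT].
config = ConfigParser()
config.read_string("""
[DEFAULT]
RunModelInterval = 5
PrinterCamera = camera.octoprint_camera

[printer.entities]
PrinterCamera = camera.klipper_printer_cam

[program.timings]
""")

print(config["printer.entities"]["PrinterCamera"])          # camera.klipper_printer_cam (overridden)
print(int(config["program.timings"]["RunModelInterval"]))   # 5 (inherited from [DEFAULT])
```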
32 | -------------------------------------------------------------------------------- /appdaemon/conf/apps/lib/onnx.py: -------------------------------------------------------------------------------- 1 | ''' 2 | This file is adapted from the opico-server project (formally known as Spaghetti Detective). 3 | Link: https://github.com/TheSpaghettiDetective/obico-server/tree/release 4 | 5 | The code is used to run the machine learning model onnx file and detect print issues in an image. 6 | ''' 7 | 8 | from typing import List, Tuple 9 | import onnxruntime 10 | import numpy as np 11 | import cv2 12 | 13 | from lib.meta import Meta 14 | 15 | class OnnxNet: 16 | session: onnxruntime.InferenceSession 17 | meta: Meta 18 | 19 | def __init__(self, onnx_path: str, meta_path: str, use_gpu: bool): 20 | providers = ['CPUExecutionProvider'] 21 | self.session = onnxruntime.InferenceSession(onnx_path, providers=providers) 22 | self.meta = Meta(meta_path) 23 | 24 | def detect(self, meta, image, alt_names, thresh=.5, hier_thresh=.5, nms=.45, debug=False) -> List[Tuple[str, float, Tuple[float, float, float, float]]]: 25 | input_h = self.session.get_inputs()[0].shape[2] 26 | input_w = self.session.get_inputs()[0].shape[3] 27 | width = image.shape[1] 28 | height = image.shape[0] 29 | 30 | # Input 31 | resized = cv2.resize(image, (input_w, input_h), interpolation=cv2.INTER_LINEAR) 32 | img_in = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB) 33 | img_in = np.transpose(img_in, (2, 0, 1)).astype(np.float32) 34 | img_in = np.expand_dims(img_in, axis=0) 35 | img_in /= 255.0 36 | 37 | input_name = self.session.get_inputs()[0].name 38 | outputs = self.session.run(None, {input_name: img_in}) 39 | 40 | detections = post_processing(outputs, width, height, thresh, nms, meta.names) 41 | return detections[0] 42 | 43 | 44 | def nms_cpu(boxes, confs, nms_thresh=0.5, min_mode=False): 45 | # print(boxes.shape) 46 | x1 = boxes[:, 0] 47 | y1 = boxes[:, 1] 48 | x2 = boxes[:, 2] 49 | y2 = boxes[:, 3] 50 | 51 | areas = (x2 - x1) * (y2 - y1) 52 | order = confs.argsort()[::-1] 53 | 54 | keep = [] 55 | while order.size > 0: 56 | idx_self = order[0] 57 | idx_other = order[1:] 58 | 59 | keep.append(idx_self) 60 | 61 | xx1 = np.maximum(x1[idx_self], x1[idx_other]) 62 | yy1 = np.maximum(y1[idx_self], y1[idx_other]) 63 | xx2 = np.minimum(x2[idx_self], x2[idx_other]) 64 | yy2 = np.minimum(y2[idx_self], y2[idx_other]) 65 | 66 | w = np.maximum(0.0, xx2 - xx1) 67 | h = np.maximum(0.0, yy2 - yy1) 68 | inter = w * h 69 | 70 | if min_mode: 71 | over = inter / np.minimum(areas[order[0]], areas[order[1:]]) 72 | else: 73 | over = inter / (areas[order[0]] + areas[order[1:]] - inter) 74 | 75 | inds = np.where(over <= nms_thresh)[0] 76 | order = order[inds + 1] 77 | 78 | return np.array(keep) 79 | 80 | def post_processing(output, width, height, conf_thresh, nms_thresh, names): 81 | box_array = output[0] 82 | confs = output[1] 83 | 84 | if type(box_array).__name__ != 'ndarray': 85 | box_array = box_array.cpu().detach().numpy() 86 | confs = confs.cpu().detach().numpy() 87 | 88 | num_classes = confs.shape[2] 89 | 90 | # [batch, num, 4] 91 | box_array = box_array[:, :, 0] 92 | 93 | # [batch, num, num_classes] --> [batch, num] 94 | max_conf = np.max(confs, axis=2) 95 | max_id = np.argmax(confs, axis=2) 96 | 97 | box_x1x1x2y2_to_xcycwh_scaled = lambda b: \ 98 | ( 99 | float(0.5 * width * (b[0] + b[2])), 100 | float(0.5 * height * (b[1] + b[3])), 101 | float(width * (b[2] - b[0])), 102 | float(width * (b[3] - b[1])) 103 | ) 104 | dets_batch = [] 105 | for i in 
range(box_array.shape[0]): 106 | 107 | argwhere = max_conf[i] > conf_thresh 108 | l_box_array = box_array[i, argwhere, :] 109 | l_max_conf = max_conf[i, argwhere] 110 | l_max_id = max_id[i, argwhere] 111 | 112 | bboxes = [] 113 | # nms for each class 114 | for j in range(num_classes): 115 | 116 | cls_argwhere = l_max_id == j 117 | ll_box_array = l_box_array[cls_argwhere, :] 118 | ll_max_conf = l_max_conf[cls_argwhere] 119 | ll_max_id = l_max_id[cls_argwhere] 120 | 121 | keep = nms_cpu(ll_box_array, ll_max_conf, nms_thresh) 122 | 123 | if (keep.size > 0): 124 | ll_box_array = ll_box_array[keep, :] 125 | ll_max_conf = ll_max_conf[keep] 126 | ll_max_id = ll_max_id[keep] 127 | 128 | for k in range(ll_box_array.shape[0]): 129 | bboxes.append([ll_box_array[k, 0], ll_box_array[k, 1], ll_box_array[k, 2], ll_box_array[k, 3], ll_max_conf[k], ll_max_conf[k], ll_max_id[k]]) 130 | 131 | detections = [(names[b[6]], float(b[4]), box_x1x1x2y2_to_xcycwh_scaled((b[0], b[1], b[2], b[3]))) for b in bboxes] 132 | dets_batch.append(detections) 133 | 134 | 135 | return dets_batch 136 | 137 | 138 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 3D-Print-Sentinel 2 | **3D-Print-Sentinel** is a Docker-based solution that integrates **Home Assistant**, **OctoPrint**, and a custom **AppDaemon** app to automatically monitor 3D prints locally using machine learning. This project also enables secure remote access to Home Assistant via Cloudflared, allowing you to keep track of your prints from anywhere without exposing your local network. This solution can run entirely on a **Raspberry Pi**, making it accessible and easy to set up with minimal hardware requirements. 3 | 4 | The machine learning and notification system can be set up as a **standalone service** for those who already have a Home Assistant setup. It can be customised via the [config file](/appdaemon/conf/apps/config.ini) to use any entities (such as [Klipper](https://www.klipper3d.org) Home Assistant entities) instead of OctoPrint entities if preferred. See the [standalone setup instructions](#standalone-docker-failure-detection--notification-setup) for more information. 5 | 6 | ## Features 7 | - **Local 3D Print Monitoring**: Uses on-device machine learning to detect potential print errors in real time using the [Obico (Spaghetti Detective)](https://github.com/TheSpaghettiDetective/obico-server/tree/release) model. 8 | - **Automatic Print Pausing**: If an error is detected, an actionable notification is sent to your phone. If not dismissed within 2 minutes, the print will be automatically stopped. 9 | - **OctoPrint Integration**: Seamlessly connects with OctoPrint to monitor print status and health. 10 | - **Home Assistant**: Provides a centralized dashboard to manage 3D printer monitoring and smart home devices. 11 | - **Notification System**: Sends alerts with image snapshots to your phone through the Home Assistant app when a print error is detected. 12 | - **Secure Remote Access**: Access your Home Assistant instance remotely through a secure Cloudflared tunnel without exposing local ports. 13 | - **Dockerized**: Easy to deploy and manage with Docker Compose. 14 | - **Raspberry Pi Compatible**: Designed to run on a Raspberry Pi with minimal hardware requirements (including the machine learning). 
15 | 16 | ## Setup Guide 17 | - [Full Docker Setup](#full-docker-setup-homeassistant-octoprint--appdaemon) 18 | - [Standalone Docker Setup](#standalone-docker-failure-detection--notification-setup) 19 | ### Full Docker Setup (HomeAssistant, Octoprint & Appdaemon) 20 | This setup works on any machine with Docker and Docker Compose installed. If you are using a Raspberry Pi, follow the specific setup guide below. 21 | 22 | 1. Clone this repository: 23 | ```bash 24 | git clone https://github.com/oliverbravery/3D-Print-Sentinel.git 25 | cd 3D-Print-Sentinel 26 | ``` 27 | 2. Create a `.env` file with your Cloudflared tunnel token (see the [Cloudflared Tunnel Setup section](#cloudflared-tunnel-setup) below for more information): 28 | ```bash 29 | TUNNEL_TOKEN= 30 | ``` 31 | 3. Start the homeassistant container with Docker Compose: 32 | ```bash 33 | docker compose up --build -d homeassistant 34 | ``` 35 | 4. Access Home Assistant at `http://localhost:8123` and complete the setup wizard. 36 | 5. From the Home Assistant UI, obtain a long-lived access token for the AppDaemon integration: go to your profile, then `Long-Lived Access Tokens`, and create a new token. 37 | 6. Create a `secrets.yaml` file in the `appdaemon/conf` directory following this template, where `HASS_TOKEN` is the long-lived access token obtained in the previous step: 38 | ``` 39 | HASS_TOKEN: 40 | HASS_HOSTNAME: 41 | LATITUDE: 42 | LONGITUDE: 43 | ELEVATION: 44 | TIME_ZONE: 45 | ``` 46 | 7. Start the remaining containers: 47 | ```bash 48 | docker compose up --build -d 49 | ``` 50 | > **Note**: For some unknown reason, to connect to Home Assistant using your Cloudflare domain, you will need to use the app. The web interface will not work, giving the error 'error while loading page ...'. 51 | 52 | ### Standalone Docker Failure Detection & Notification Setup 53 | The following steps show how to install and set up just the failure detection and notification (AppDaemon) service and connect it to an existing Home Assistant instance. 54 | 55 | 1. Clone this repository: 56 | ```bash 57 | git clone https://github.com/oliverbravery/3D-Print-Sentinel.git 58 | cd 3D-Print-Sentinel 59 | ``` 60 | 2. From the Home Assistant UI, obtain a long-lived access token for the AppDaemon integration: go to your profile, then `Long-Lived Access Tokens`, and create a new token. 61 | 3. In your Home Assistant `configuration.yaml` file, include these lines to ensure the camera snapshots are accessible: 62 | ```yaml 63 | homeassistant: 64 | media_dirs: 65 | local: /media 66 | ``` 67 | 4. Create a `secrets.yaml` file in the `appdaemon/conf` directory following this template, where `HASS_TOKEN` is the long-lived access token obtained in the previous step: 68 | ``` 69 | HASS_TOKEN: 70 | HASS_HOSTNAME: 71 | LATITUDE: 72 | LONGITUDE: 73 | ELEVATION: 74 | TIME_ZONE: 75 | ``` 76 | 5. Edit the configuration file at [`appdaemon/conf/apps/config.ini`](/appdaemon/conf/apps/config.ini) with your desired configuration variables. The default configuration of the file is for an OctoPrint setup. More information on the configuration file can be found [here](/docs/CONFIG_FILE.md). 77 | 6. Run the container using the following command: 78 | ```bash 79 | docker compose up --build appdaemon -d 80 | ``` 81 | 82 | ### Additional Raspberry Pi Steps 83 | In addition to the general Docker setup, the Raspberry Pi requires a few additional steps to optimize performance and enable the machine learning model. 
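If you want to confirm that the machine learning model actually runs on your Pi before wiring everything together, one quick (hypothetical, not part of the repository) check is to open the ONNX weights with `onnxruntime` inside the `appdaemon` container, e.g. via `docker exec -it appdaemon python3`:

```python
# Hypothetical check: confirm onnxruntime can load the model downloaded by the
# appdaemon Dockerfile using the CPU execution provider (the one used on a Pi).
import onnxruntime

session = onnxruntime.InferenceSession(
    "/conf/model/model-weights-5a6b1be1fa.onnx",
    providers=["CPUExecutionProvider"],
)
print([inp.shape for inp in session.get_inputs()])  # expect a single NCHW image input
```

If the session loads and prints an input shape, CPU inference is available and the detection app should be able to run the model.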
84 | 85 | It is highly recommended to use a Raspberry Pi 5 with at least 4GB of RAM for optimal performance. An [M.2 SSD is also recommended](https://www.raspberrypi.com/documentation/accessories/m2-hat-plus.html) as Home Assistant performs a lot of read and write operations. 86 | 87 | - **Restart the octoprint container when the printer turns on**: OctoPrint does not automatically detect and reconnect to a printer if it loses connection or is turned off. To solve this, add a udev rule so the octoprint (and appdaemon) containers are restarted when the Pi detects that a printer has been connected: 88 | ```bash 89 | sudo nano /etc/udev/rules.d/99-usb-serial.rules # Open the file in the nano text editor 90 | SUBSYSTEM=="tty", KERNEL=="ttyACM0", ACTION=="add", RUN+="/usr/bin/docker restart octoprint appdaemon" # Add this line to the file, save and exit 91 | sudo udevadm control --reload-rules && sudo udevadm trigger # Reload the udev rules 92 | ``` 93 | - **Installing Docker on the Pi**: The following commands install Docker on a Raspberry Pi (you can follow any other guide if you prefer): 94 | ```bash 95 | curl -fsSL https://get.docker.com -o get-docker.sh # Download the Docker installation script 96 | sudo sh get-docker.sh # Run the Docker installation script 97 | sudo usermod -aG docker ${USER} # Add the current user to the Docker group 98 | sudo su - ${USER} # Refresh the user group 99 | docker version # Check the Docker version to verify the installation 100 | sudo apt install docker-compose -y # Install Docker Compose 101 | sudo systemctl enable docker # Enable Docker to start on boot 102 | ``` 103 | 104 | ### Cloudflared Tunnel Setup 105 | We can access Home Assistant remotely through a secure Cloudflared tunnel. This requires a Cloudflare domain and a Cloudflared tunnel token. If you do not require this functionality, remove the `cloudflared` service from the `docker-compose.yml` file. 106 | 107 | It is assumed that you already have a domain set up on Cloudflare. 108 | 1. Visit the [Cloudflare Zero Trust dashboard](https://one.dash.cloudflare.com). 109 | 2. Go to `Networks` -> `Tunnels` and create a new tunnel. 110 | 3. Choose `Cloudflared` as the tunnel type and copy the tunnel token starting with 'eyJh...' (it appears in the suggested install commands, e.g. the brew command). This token should be added to the `.env` file as `TUNNEL_TOKEN`. 111 | 4. To connect the tunnel to your domain, from the tunnels page, click the three dots on the right of the tunnel and select `Configure`. Go to the `Public Hostname` section and click `Add hostname`. 112 | 5. Choose your own subdomain and domain, then in the `Service` dropdown, set the type to `HTTP` and the URL to `homeassistant:8123`. Click `Save`. 113 | The tunnel should now be connected to your domain. 114 | 115 | You can access Home Assistant remotely at `https://<subdomain>.<domain>` through any of the Home Assistant apps! Note: for some reason you have to use the app, as you can't access it through a browser using the Cloudflare domain. 116 | 117 | ## Usage 118 | - **Home Assistant**: Access the Home Assistant dashboard at `http://localhost:8123` to monitor your 3D prints and manage your smart home devices. If you set up the Cloudflared tunnel, you can access Home Assistant remotely at `https://<subdomain>.<domain>`. 119 | - **OctoPrint**: Access the OctoPrint dashboard at `http://localhost:80` to manage your 3D printer and start prints. 120 | - **Print Notifications**: Receive actionable notifications on your phone when a print error is detected. This system will automatically start whenever a print starts. 
You can dismiss the notification to continue the print or let it automatically stop after 2 minutes. You must have the Home Assistant app installed and set up to receive notifications on your phone. If you set up the Cloudflared tunnel, use the remote URL to access the app (e.g. `https://<subdomain>.<domain>`). 121 | -------------------------------------------------------------------------------- /appdaemon/conf/apps/print_detect.py: -------------------------------------------------------------------------------- 1 | import adbase as ad 2 | from lib.detection_model import * 3 | import cv2 4 | from configparser import ConfigParser 5 | import requests 6 | import yaml 7 | import os 8 | import numpy as np 9 | 10 | class PrintDetect(ad.ADBase): 11 | ''' 12 | This class is used to detect issues with a 3D print job using the machine learning model. 13 | It takes a snapshot of the print job every x seconds (5 by default) and runs the detection model on the image. 14 | If an issue is detected, a notification is sent to the user with the option to stop the print job. 15 | When an error is detected, the print job will be stopped in x minutes (2 by default) if not dismissed via the notification. 16 | ''' 17 | 18 | def initialize(self): 19 | self.cancel_handle = None # handle for the scheduled auto-stop timer 20 | self.adapi = self.get_ad_api() # get the AppDaemon API 21 | 22 | # paths to the model files 23 | self.model_cfg = "/conf/model/model.cfg" 24 | self.model_meta = "/conf/model/model.meta" 25 | self.model_weights = "/conf/model/model-weights-5a6b1be1fa.onnx" 26 | 27 | self.warmup_complete = False # flag to check if the printer has warmed up 28 | 29 | # load all configuration file variables 30 | self.load_config() 31 | self.load_secret_values() 32 | 33 | self.printer_status = self.adapi.get_entity(self.printer_status_entity) # get the printer status 34 | self.print_camera = self.adapi.get_entity(self.printer_camera_entity) # get the camera 35 | self.stop_print_button = self.adapi.get_entity(self.printer_stop_button_entity) # get the stop print button 36 | self.extruder_temp_sensor = self.adapi.get_entity(self.extruder_temp_sensor_entity) # get the extruder temperature sensor 37 | self.extruder_target_temp_sensor = self.adapi.get_entity(self.extruder_target_temp_sensor_entity) # get the extruder target temperature sensor 38 | self.net_main_1 = load_net(self.model_cfg, self.model_meta, self.model_weights) # load the ml model 39 | 40 | if self.notification_on_warp_up and (self.extruder_temp_sensor is None or self.extruder_target_temp_sensor is None): 41 | raise RuntimeError("Invalid Config File. ExtruderTempSensor and ExtruderTargetTempSensor must be defined if NotifyOnWarmup is True.") 42 | 43 | self.adapi.run_every(self.run_every_c, "now", self.detection_interval) # run the detection every x seconds 44 | self.adapi.listen_event(self.handle_action, "mobile_app_notification_action") # listen for mobile app notification actions (e.g. stop print or dismiss) 45 | 46 | @staticmethod 47 | def get_config_value(config: ConfigParser, group: str, id: str, type: type) -> any: 48 | """ 49 | Get a value from the config file or the default. 50 | 51 | Args: 52 | config (ConfigParser): The configuration file parser 53 | group (str): The group the value belongs to 54 | id (str): The id of the value to retrieve 55 | type (type): The expected type of the value to be retrieved. 56 | 57 | Raises: 58 | RuntimeError: Raised if the retrieved value cannot be converted to the expected type. 59 | 60 | Returns: 61 | any: The value converted to the requested type. 
62 | """ 63 | value = config[group][id] or config['DEFAULT'][id] 64 | try: 65 | value = type(value) 66 | return value 67 | except ValueError: 68 | raise RuntimeError(f"Invalid Config File. {group} {id} must be of type {type}.") 69 | 70 | def load_secret_values(self) -> None: 71 | """ 72 | Load the secret values from the secrets.yaml file needed for requesting the camera snapshot. 73 | """ 74 | secrets_path = os.path.join(os.path.dirname(__file__), '..', 'secrets.yaml') 75 | with open(secrets_path, 'r') as file: 76 | secrets = yaml.safe_load(file) 77 | self.hass_token = secrets.get('HASS_TOKEN') 78 | self.hass_hostname = secrets.get('HASS_HOSTNAME') 79 | 80 | def load_config(self): 81 | """ 82 | Loads the variables from the config file. 83 | """ 84 | config = ConfigParser() 85 | config.read(os.path.join(os.path.dirname(__file__), 'config.ini')) 86 | self.printer_status_entity: str = PrintDetect.get_config_value(config=config, group='printer.entities', 87 | id='BinaryIsPrintingSensor', type=str) 88 | self.printer_printing_state: str = PrintDetect.get_config_value(config=config, group='printer.entities', 89 | id='PrintingOnState', type=str) 90 | self.printer_camera_entity: str = PrintDetect.get_config_value(config=config, group='printer.entities', 91 | id='PrinterCamera', type=str) 92 | self.printer_stop_button_entity: str = PrintDetect.get_config_value(config=config, group='printer.entities', 93 | id='PrinterStopButton', type=str) 94 | self.detection_interval: int = PrintDetect.get_config_value(config=config, group='program.timings', 95 | id='RunModelInterval', type=int) 96 | self.print_termination_time: int = PrintDetect.get_config_value(config=config, group='program.timings', 97 | id='TerminationTime', type=int) 98 | self.detection_threshold: float = PrintDetect.get_config_value(config=config, group='model.detection', 99 | id='Threshold', type=float) 100 | self.detection_nms: float = PrintDetect.get_config_value(config=config, group='model.detection', 101 | id='NMS', type=float) 102 | self.extruder_temp_sensor_entity: str = PrintDetect.get_config_value(config=config, group='notifications.entities', 103 | id='ExtruderTempSensor', type=str) 104 | self.extruder_target_temp_sensor_entity: str = PrintDetect.get_config_value(config=config, group='notifications.entities', 105 | id='ExtruderTargetTempSensor', type=str) 106 | self.notification_on_warp_up: bool = True if PrintDetect.get_config_value(config=config, group='notifications.config', 107 | id='NotifyOnWarmup', type=str) == 'True' else False 108 | 109 | def get_camera_snapshot(self): 110 | """ 111 | Get the camera snapshot and decode it into an image. 112 | 113 | Returns: 114 | The decoded image. 115 | """ 116 | url = f"{self.hass_hostname}/media/local/snapshot.jpg" 117 | headers = { 118 | 'Authorization': f'Bearer {self.hass_token}' 119 | } 120 | response = requests.request("GET", url, headers=headers, data={}, stream=True) 121 | if response.status_code != 200: 122 | self.adapi.log(f"Error getting camera snapshot: {response.status_code}") 123 | return None 124 | arr = np.asarray(bytearray(response.raw.read()), dtype=np.uint8) 125 | cv2_img = cv2.imdecode(arr, -1) 126 | return cv2_img 127 | 128 | def perform_detection(self) -> int: 129 | """ 130 | Take a snapshot of the print job and run the detection model on the image. 131 | 132 | Returns: 133 | int: The number of issues detected. 0 if a snapshot could not be taken. 
134 | """ 135 | self.print_camera.call_service("snapshot", filename="/media/snapshot.jpg") 136 | custom_image_bgr = self.get_camera_snapshot() 137 | if custom_image_bgr is None: 138 | self.adapi.log("Failed to get camera snapshot, skipping detection for this cycle.") 139 | return 0 140 | detections = detect(self.net_main_1, custom_image_bgr, thresh=self.detection_threshold, nms=self.detection_nms) 141 | detection_count = len(detections) 142 | self.adapi.log(f"Detected {detection_count} issues") 143 | return detection_count 144 | 145 | def send_detection_notification_and_countdown(self): 146 | """ 147 | Send a notification to the user that an issue has been detected and start the countdown to stop the print job. 148 | """ 149 | self.adapi.call_service("notify/notify", message=f"An issue with your 3D print has been detected. The print will be stopped in {self.print_termination_time} seconds if not dismissed.", 150 | title="3D Print Issue Detected", 151 | data={ 152 | "image": "/media/local/snapshot.jpg", 153 | "actions": [ 154 | { 155 | "action": "STOP_PRINT_JOB", 156 | "title": "Stop Print" 157 | }, 158 | { 159 | "action": "DISMISS_NOTIFICATION", 160 | "title": "Dismiss" 161 | } 162 | ], 163 | "push": { 164 | "interruption-level": "critical" 165 | }}) 166 | self.cancel_handle = self.adapi.run_in(self.cancel_print_callback, self.print_termination_time) 167 | 168 | def notify_on_warmup(self): 169 | """ 170 | Notify the user when the printer is almost warmed up 171 | """ 172 | if float(self.extruder_temp_sensor.state) > (0.9 * float(self.extruder_target_temp_sensor.state)) and float(self.extruder_temp_sensor.state) < (0.96 * float(self.extruder_target_temp_sensor.state)) and self.warmup_complete == False: 173 | self.warmup_complete = True 174 | self.adapi.call_service("notify/notify", 175 | message="The 3D printer has almost warmed up. Remove any excess filament before your print starts.", 176 | title="3D Printer Warming Up", 177 | data={ 178 | "image": "/media/local/snapshot.jpg" 179 | }) 180 | if float(self.extruder_temp_sensor.state) > (0.96 * float(self.extruder_target_temp_sensor.state)): 181 | self.warmup_complete = False 182 | 183 | def extra_notifications_router(self): 184 | """ 185 | Check if extra notifications are needed. 186 | """ 187 | if self.notification_on_warp_up: 188 | self.notify_on_warmup() 189 | 190 | def run_every_c(self, cb_args): 191 | ''' 192 | This function is called every x seconds to take a snapshot of the print job and run the detection model. 193 | It will send a notification if an issue is detected. 194 | ''' 195 | # check if the printer is on and a notification has not already been sent 196 | if self.printer_status.is_state(self.printer_printing_state) and self.cancel_handle == None: 197 | # call the extra notifications router to check if any extra notifications are needed 198 | self.extra_notifications_router() 199 | # if the printer is on, take a snapshot and run the detection model 200 | detection_count = self.perform_detection() 201 | # if an issue is detected, send a notification 202 | if detection_count > 1: 203 | self.send_detection_notification_and_countdown() 204 | 205 | def handle_action(self, event_name, data, kwargs): 206 | ''' 207 | This is a routing function called when a mobile app notification action is received. 208 | It will run the appropriate function based on the action received. 
209 | ''' 210 | self.adapi.log(f"Received action: {data}") 211 | if data["action"] == "STOP_PRINT_JOB": 212 | self.stop_print_job() 213 | elif data["action"] == "DISMISS_NOTIFICATION": 214 | self.dismiss_print_cancel() 215 | 216 | def cancel_print_callback(self, cb_args): 217 | ''' 218 | A callback function for when the timer to stop the print job is called. 219 | ''' 220 | self.stop_print_job() 221 | 222 | def stop_print_job(self): 223 | ''' 224 | This function is called to stop the print job. 225 | It will send a notification to the user and call the stop print button. 226 | ''' 227 | self.dismiss_print_cancel() 228 | self.stop_print_button.call_service("press") 229 | self.adapi.call_service("notify/notify", message="The 3D print has been stopped due to an issue.", title="3D Print Stopped") 230 | 231 | def dismiss_print_cancel(self): 232 | ''' 233 | This function is called to dismiss the print issue notification. 234 | It will cancel the timer to stop the print job and send a notification to the user that the issue has been dismissed. 235 | ''' 236 | if self.cancel_handle is not None: 237 | self.adapi.cancel_timer(self.cancel_handle) 238 | self.cancel_handle = None 239 | self.adapi.call_service("notify/notify", message="The 3D print issue has been dismissed.", title="3D Print Issue Dismissed") --------------------------------------------------------------------------------
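For development, a hypothetical standalone smoke test (not part of the repository) can exercise the detection pipeline above outside AppDaemon. It assumes the container layout used by the appdaemon Dockerfile (`/conf/model/...`), should be saved in and run from `/conf/apps` so the `lib` package is importable, and any saved camera frame will do as the test image.

```python
# Hypothetical smoke test: load the ONNX detector once and run it on a saved frame.
import cv2
from lib.detection_model import load_net, detect

net = load_net("/conf/model/model.cfg", "/conf/model/model.meta",
               weights_path="/conf/model/model-weights-5a6b1be1fa.onnx")
image = cv2.imread("/media/snapshot.jpg")  # any test image works here
assert image is not None, "test image not found"
detections = detect(net, image, thresh=0.25, nms=0.4)  # defaults from config.ini
for name, confidence, (xc, yc, w, h) in detections:
    print(f"{name}: {confidence:.2f} at centre ({xc:.0f}, {yc:.0f}), size {w:.0f}x{h:.0f}")
print(f"{len(detections)} potential issue(s) detected")
```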