├── db
│   ├── .gitkeep
│   └── migrations
│       ├── 1.py
│       └── 2_add_start_time_to_events.py
├── logs
│   └── .gitkeep
├── src
│   ├── __init__.py
│   ├── mattermost_handler.py
│   ├── frigate_api.py
│   ├── database.py
│   └── google_drive.py
├── credentials
│   └── .gitkeep
├── requirements.txt
├── setup.py
├── Dockerfile
├── docker-compose.yml
├── env_example
├── README.md
├── .gitignore
└── main.py

/db/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/logs/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/credentials/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | paho-mqtt
2 | google-auth-oauthlib
3 | google-auth-httplib2
4 | google-api-python-client
5 | python-dotenv
6 | requests
7 | pytz
8 | apscheduler
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 |
3 |
4 | def install_packages():
5 |     subprocess.check_call(["pip", "install", "-r", "requirements.txt"])
6 |
7 |
8 | if __name__ == "__main__":
9 |     install_packages()
10 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.8
2 |
3 | RUN apt-get update && \
4 |     apt-get install -y nano sqlite3
5 |
6 | WORKDIR /app
7 |
8 | COPY requirements.txt .
9 | RUN pip install --no-cache-dir -r requirements.txt
10 |
11 | COPY . .
12 |
13 | CMD ["python", "main.py"]
14 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 |   frigate-gdrive-instant-uploader:
3 |     build: .
4 |     container_name: frigate-gdrive-instant-uploader
5 |     restart: unless-stopped
6 |     volumes:
7 |       - ./credentials:/app/credentials
8 |       - ./db:/app/db
9 |       - ./logs:/app/logs
10 |     environment:
11 |       - TZ=Europe/Istanbul
12 |     command: python main.py
13 |
--------------------------------------------------------------------------------
/src/mattermost_handler.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 |
4 | import requests
5 |
6 | MATTERMOST_PREFIX = os.getenv('MATTERMOST_PREFIX', '')
7 |
8 |
9 | class MattermostHandler(logging.Handler):
10 |     def __init__(self, webhook_url):
11 |         super().__init__(level=logging.ERROR)
12 |         self.webhook_url = webhook_url
13 |
14 |     def emit(self, record):
15 |         try:
16 |             log_entry = self.format(record)
17 |             prefixed_log_entry = f"{MATTERMOST_PREFIX} {log_entry}"
18 |             payload = {"text": prefixed_log_entry}
19 |             response = requests.post(self.webhook_url, json=payload)
20 |             response.raise_for_status()
21 |         except Exception:
22 |             self.handleError(record)  # don't use logging.error() here: the record would re-enter this handler and could recurse
23 |
--------------------------------------------------------------------------------
/db/migrations/1.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import sqlite3
3 |
4 | from src.database import DB_PATH
5 |
6 | logging.debug(f"DB_PATH: {DB_PATH}")
7 |
8 |
9 | def apply_migration():
10 |     conn = sqlite3.connect(DB_PATH)
11 |     try:
12 |         cursor = conn.cursor()
13 |         cursor.execute("SELECT name FROM migrations WHERE name='1.py'")
14 |         result = cursor.fetchone()
15 |
16 |         if result:
17 |             logging.debug("Migration 1.py already applied.")
18 |             return
19 |
20 |         cursor.execute('ALTER TABLE events ADD COLUMN retry BOOLEAN DEFAULT 1')
21 |         conn.commit()
22 |         logging.debug("Migration 1.py applied successfully.")
23 |
24 |     except Exception as e:
25 |         logging.error(f"Unexpected error: {e}", exc_info=True)
26 |     finally:
27 |         conn.close()
28 |
29 |
30 | # Run the migration. run_migrations() executes this file with exec(), so don't rely on __name__ == "__main__".
31 | apply_migration()
32 |
--------------------------------------------------------------------------------
/db/migrations/2_add_start_time_to_events.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import sqlite3
3 |
4 | from src.database import DB_PATH
5 |
6 | def apply_migration_2():
7 |     conn = None
8 |     try:
9 |         conn = sqlite3.connect(DB_PATH)
10 |         cursor = conn.cursor()
11 |         logging.info('Running migration 2_add_start_time_to_events.py...')
12 |         cursor.execute('ALTER TABLE events ADD COLUMN start_time REAL')
13 |         conn.commit()
14 |         logging.info('Migration 2_add_start_time_to_events.py finished successfully.')
15 |     except sqlite3.OperationalError as e:
16 |         if 'duplicate column name' in str(e):
17 |             logging.warning('Column start_time already exists in events table. Skipping.')
18 |         else:
19 |             logging.error(f"Error applying migration 2: {e}")
20 |             raise e
21 |     except Exception as e:
22 |         logging.error(f"An unexpected error occurred during migration 2: {e}")
23 |         raise e
24 |     finally:
25 |         if conn:
26 |             conn.close()
27 |
28 | # Run the migration
29 | apply_migration_2()
30 |
--------------------------------------------------------------------------------
/env_example:
--------------------------------------------------------------------------------
1 | # Set the timezone for the application. This should match the TZ variable in docker-compose.yml.
2 | # For a list of valid timezones: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
3 | TZ=Europe/Istanbul
4 |
5 | # DEBUG, INFO, WARNING, ERROR or CRITICAL
6 | LOGGING_LEVEL=DEBUG
7 |
8 | # Optional: Automatically delete video files from Google Drive older than this many days.
9 | # Set to 0 to disable automatic deletion.
10 | GDRIVE_RETENTION_DAYS=0
11 |
12 | # MQTT
13 | MQTT_BROKER_ADDRESS=192.168.0.59
14 | MQTT_TOPIC=frigate/events
15 | MQTT_USER=username
16 | MQTT_PORT=1883
17 | MQTT_PASSWORD=secret
18 |
19 |
20 | # Google Drive
21 | SERVICE_ACCOUNT_FILE=credentials/service_account.json
22 | GOOGLE_ACCOUNT_TO_IMPERSONATE=myexamplemail@gmail.com
23 |
24 | # Upload directory in Google Drive. It will be created automatically under the root directory.
25 | # Directory structure in Google Drive will be: <UPLOAD_DIR>/<year>/<month>/<day>/<date>__<camera>__<event_id>.mp4
26 | UPLOAD_DIR=frigate
27 |
28 |
29 | # Frigate URL with protocol and port
30 | FRIGATE_URL=http://192.168.0.100:5000
31 |
32 |
33 | # Mattermost
34 | MATTERMOST_WEBHOOK_URL=
35 | MATTERMOST_PREFIX="[my message prefix]"
--------------------------------------------------------------------------------
/src/frigate_api.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import requests
3 | from requests.exceptions import ChunkedEncodingError, ConnectionError
4 | from time import sleep
5 |
6 |
7 | def generate_video_url(frigate_url, event_id):
8 |     return f"{frigate_url}/api/events/{event_id}/clip.mp4"
9 |
10 |
11 | def fetch_all_events(frigate_url, after=None, batch_size=100, retries=2, timeout=30):
12 |     all_events = []
13 |     before = None
14 |
15 |     while True:
16 |         params = {'limit': batch_size, 'has_clip': 1}
17 |         if before:
18 |             params['before'] = before
19 |         elif after:
20 |             params['after'] = after
21 |
22 |         for attempt in range(retries):
23 |             try:
24 |                 response = requests.get(f'{frigate_url}/api/events', params=params, timeout=timeout)
25 |                 response.raise_for_status()  # Raise an HTTPError for bad responses
26 |                 break  # If the request was successful, exit the retry loop
27 |             except (ChunkedEncodingError, ConnectionError) as e:
28 |                 logging.error(f"Attempt {attempt + 1} failed with error: {e}")
29 |                 if attempt < retries - 1:
30 |                     sleep(2)  # Wait a bit before retrying
31 |                 else:
32 |                     logging.error(f"All retries failed for fetching events: {e}")
33 |                     return None
34 |
35 |         if response.status_code == 200:
36 |             events = response.json()
37 |             if not events:
38 |                 break  # No more events to fetch
39 |             all_events.extend(events)
40 |             before = events[-1]['start_time']
41 |             after = None  # Clear after the first successful fetch
42 |             logging.debug(f"Fetched {len(events)} events, next 'before' set to {before}")
43 |         else:
44 |             logging.error(f"Failed to fetch events: {response.status_code} {response.text}")
45 |             break
46 |
47 |     return all_events
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Frigate to Google Drive Instant Uploader with MQTT
2 | This is a simple script that uploads event clips from Frigate to Google Drive instantly using MQTT (without cron jobs).
3 | It uses a SQLite database to keep track of uploaded events and only uploads new ones. In case of an error,
4 | the script retries the upload.
5 |
6 | Because I'm using Mattermost as chat software,
7 | I've added a webhook to send a message to a Mattermost channel when an error occurs.
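The error notification is just an incoming-webhook POST. The sketch below shows roughly what `src/mattermost_handler.py` sends when an ERROR record is logged; the webhook URL and the sample log line are made-up placeholders, and the prefix is the `MATTERMOST_PREFIX` value from `.env`:

```python
import requests

# Placeholders: use your own incoming-webhook URL and MATTERMOST_PREFIX value.
webhook_url = "https://mattermost.example.com/hooks/xxxxxxxxxxxxxxxx"
prefix = "[my message prefix]"

# MattermostHandler.emit() formats the log record, prepends the prefix and posts it as plain text.
formatted_record = "2024-01-01 12:00:00 - ERROR - Failed to upload video 1700000000.123456-abcdef."
payload = {"text": f"{prefix} {formatted_record}"}

response = requests.post(webhook_url, json=payload)
response.raise_for_status()  # any non-2xx response counts as a failed notification
```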
8 | 9 | You'll need a MQTT broker like Apache Mosquitto or similar. This script watches for new events from Frigate 10 | and uploads them to Google Drive within seconds. 11 | 12 | In my case I use Apache Mosquitto as MQTT broker and Frigate as NVR software. Frigate, Mosquitto 13 | and this script are running on the same Proxmox server in LXC containers. 14 | 15 | # Requirements 16 | - python 3.8 17 | - MQTT broker 18 | - Frigate with configured MQTT 19 | - Google Service Account with access to Google Drive 20 | 21 | # Example Frigate configuration 22 | ```yaml 23 | 24 | mqtt: 25 | host: 192.168.0.55 26 | user: username 27 | password: secret 28 | port: 1883 29 | topic_prefix: frigate 30 | client_id: frigate 31 | 32 | # rest of your config.yml 33 | ```` 34 | 35 | Check if your MQTT broker is working by subscribing to the topic `frigate/events` with a MQTT client like MQTT Explorer 36 | or mosquitto_sub. If so, you should see events from Frigate and can use this script. 37 | 38 | # Usage without Docker 39 | 1. clone this repository 40 | 2. rename `env_example` to `.env` and change values to your needs 41 | 3. run `python setup.py` in project root directory to install all required packages 42 | 4. create a project in google cloud console and enable drive api 43 | 5. create a service account and give it access to your Google Drive 44 | 6. activate domain-wide-delegation for the service account and add the necessary scope "https://www.googleapis.com/auth/drive" to prevent "Quota Exceeded" errors if you upload more than 15 GB per day. 45 | 7. download the service account json file from Google and copy its content to `credentials/service_account.json` 46 | 8. run `python main.py` in project root directory 47 | 48 | # Usage with Docker 49 | 1. clone this repository 50 | 2. rename `env_example` to `.env` and change values to your needs 51 | 3. create a project in google cloud console and enable drive api 52 | 4. create a service account and give it access to your Google Drive 53 | 5. download the service account json file from Google and copy its content to `credentials/service_account.json` 54 | 6. activate domain-wide-delegation for the service account and add the necessary scope "https://www.googleapis.com/auth/drive" to prevent "Quota Exceeded" errors if you upload more than 15 GB per day. 55 | 7. run `docker compose up -d` in project root directory 56 | 8. check logs with `docker logs frigate-gdrive-instant-uploader` or see `/logs/app.log` -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
160 | #.idea/ 161 | /.idea 162 | 163 | /credentials/* 164 | !/credentials/.gitkeep 165 | /db/events.db 166 | !/db/.gitkeep 167 | !/db/migrations/* 168 | /logs/* 169 | !/logs/.gitkeep -------------------------------------------------------------------------------- /src/database.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sqlite3 3 | import logging 4 | from dotenv import load_dotenv 5 | 6 | DB_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'db/events.db') 7 | load_dotenv() 8 | EVENT_RETENTION_DAYS = int(os.getenv('EVENT_RETENTION_DAYS', 40)) 9 | 10 | 11 | def init_db(db_path=DB_PATH): 12 | logging.info(f"Initializing database at {db_path}") 13 | conn = sqlite3.connect(db_path) 14 | try: 15 | cursor = conn.cursor() 16 | # Create the events table if it does not exist 17 | cursor.execute(''' 18 | CREATE TABLE IF NOT EXISTS events ( 19 | event_id TEXT PRIMARY KEY, 20 | start_time REAL, 21 | uploaded BOOLEAN DEFAULT 0, 22 | created TIMESTAMP DEFAULT CURRENT_TIMESTAMP, 23 | last_updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP, 24 | tries INTEGER DEFAULT 0, 25 | retry BOOLEAN DEFAULT 1 26 | ) 27 | ''') 28 | 29 | # Create the migrations table if it does not exist 30 | cursor.execute(''' 31 | CREATE TABLE IF NOT EXISTS migrations ( 32 | id INTEGER PRIMARY KEY AUTOINCREMENT, 33 | name TEXT UNIQUE NOT NULL, 34 | applied_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP 35 | ) 36 | ''') 37 | 38 | # Trigger für last_updated hinzufügen 39 | cursor.execute(''' 40 | CREATE TRIGGER IF NOT EXISTS update_last_updated 41 | AFTER UPDATE ON events 42 | FOR EACH ROW 43 | BEGIN 44 | UPDATE events 45 | SET last_updated = CURRENT_TIMESTAMP 46 | WHERE event_id = OLD.event_id; 47 | END; 48 | ''') 49 | 50 | conn.commit() 51 | except Exception as e: 52 | logging.error(f"Error initializing database: {e}") 53 | conn.rollback() 54 | finally: 55 | conn.close() 56 | 57 | 58 | def run_migrations(migrations_folder='db/migrations'): 59 | conn = sqlite3.connect(DB_PATH) 60 | 61 | try: 62 | cursor = conn.cursor() 63 | 64 | cursor.execute('SELECT name FROM migrations') 65 | applied_migrations = set(row[0] for row in cursor.fetchall()) 66 | 67 | for filename in sorted(os.listdir(migrations_folder)): 68 | if filename.endswith('.py') and filename not in applied_migrations: 69 | migration_path = os.path.join(migrations_folder, filename) 70 | logging.info(f"Running migration: {migration_path}") 71 | try: 72 | with open(migration_path) as file: 73 | logging.debug(f"Executing migration {filename}") 74 | exec(file.read(), globals()) 75 | cursor.execute('INSERT INTO migrations (name) VALUES (?)', (filename,)) 76 | conn.commit() 77 | logging.info(f"Migration {filename} applied successfully.") 78 | except Exception as e: 79 | logging.error(f"Error applying migration {filename}: {e}") 80 | conn.rollback() 81 | except Exception as e: 82 | logging.error(f"Error running migrations: {e}") 83 | finally: 84 | conn.close() 85 | 86 | 87 | def is_event_exists(event_id, db_path=DB_PATH): 88 | conn = sqlite3.connect(db_path) 89 | try: 90 | cursor = conn.cursor() 91 | cursor.execute('SELECT * FROM events WHERE event_id = ?', (event_id,)) 92 | result = cursor.fetchone() 93 | return result is not None 94 | except Exception as e: 95 | logging.error(f"Error checking event existence: {e}") 96 | return False 97 | finally: 98 | conn.close() 99 | 100 | 101 | def insert_event(event_id, start_time, db_path=DB_PATH): 102 | """ 103 | Inserts an event into the database. 
104 | :param event_id: 105 | :param db_path: 106 | :return: 107 | """ 108 | conn = sqlite3.connect(db_path) 109 | try: 110 | cursor = conn.cursor() 111 | cursor.execute('INSERT INTO events (event_id, start_time) VALUES (?, ?)', (event_id, start_time)) 112 | conn.commit() 113 | except Exception as e: 114 | logging.error(f"Error inserting event: {e}") 115 | finally: 116 | conn.close() 117 | 118 | 119 | def update_event(event_id, uploaded, retry=None, db_path=DB_PATH): 120 | """ 121 | Updates an event in the database. 122 | :param event_id: 123 | :param uploaded: 124 | :param retry: 125 | :param db_path: 126 | :return: 127 | """ 128 | conn = sqlite3.connect(db_path) 129 | cursor = conn.cursor() 130 | if retry is not None: 131 | cursor.execute('UPDATE events SET uploaded = ?, retry = ?, tries = tries + 1 WHERE event_id = ?', 132 | (uploaded, retry, event_id)) 133 | else: 134 | cursor.execute('UPDATE events SET uploaded = ?, tries = tries + 1 WHERE event_id = ?', (uploaded, event_id)) 135 | conn.commit() 136 | conn.close() 137 | 138 | 139 | def select_retry(event_id, db_path=DB_PATH): 140 | """ 141 | Selects the retry status of an event. 142 | :param event_id: 143 | :param db_path: 144 | :return: 145 | """ 146 | conn = sqlite3.connect(db_path) 147 | cursor = conn.cursor() 148 | cursor.execute('SELECT retry FROM events WHERE event_id = ?', (event_id,)) 149 | result = cursor.fetchone() 150 | conn.close() 151 | return result[0] if result else None 152 | 153 | 154 | def update_event_retry(event_id, retry, db_path=DB_PATH): 155 | """ 156 | Updates the retry status of an event in the database. 157 | :param event_id: 158 | :param retry: 159 | :param db_path: 160 | :return: 161 | """ 162 | conn = sqlite3.connect(db_path) 163 | try: 164 | cursor = conn.cursor() 165 | cursor.execute('UPDATE events SET retry = ? WHERE event_id = ?', (retry, event_id)) 166 | conn.commit() 167 | except Exception as e: 168 | logging.error(f"Error updating event retry status: {e}") 169 | finally: 170 | conn.close() 171 | 172 | 173 | def select_tries(event_id, db_path=DB_PATH): 174 | """ 175 | Selects the number of tries for an event. 176 | :param event_id: 177 | :param db_path: 178 | :return: 179 | """ 180 | conn = sqlite3.connect(db_path) 181 | try: 182 | cursor = conn.cursor() 183 | cursor.execute('SELECT tries FROM events WHERE event_id = ?', (event_id,)) 184 | result = cursor.fetchone() 185 | return result[0] if result else None 186 | except Exception as e: 187 | logging.error(f"Error selecting tries: {e}") 188 | return None 189 | finally: 190 | conn.close() 191 | 192 | 193 | def select_event_uploaded(event_id, db_path=DB_PATH): 194 | """ 195 | Selects the uploaded status of an event. 196 | :param event_id: 197 | :param db_path: 198 | :return: 199 | """ 200 | conn = sqlite3.connect(db_path) 201 | try: 202 | cursor = conn.cursor() 203 | cursor.execute('SELECT uploaded FROM events WHERE event_id = ?', (event_id,)) 204 | result = cursor.fetchone() 205 | if result: 206 | uploaded_status = result[0] 207 | return uploaded_status 208 | else: 209 | logging.debug(f"Event ID {event_id} not found in database.") 210 | return None 211 | except Exception as e: 212 | logging.error(f"Error selecting event uploaded status: {e}") 213 | return None 214 | finally: 215 | conn.close() 216 | 217 | 218 | def select_not_uploaded_yet(db_path=DB_PATH): 219 | """ 220 | Selects events that are not uploaded yet and where created at least 5 minutes ago. 
221 | :param db_path: 222 | :return: 223 | """ 224 | conn = sqlite3.connect(db_path) 225 | try: 226 | cursor = conn.cursor() 227 | cursor.execute( 228 | 'SELECT event_id FROM events WHERE uploaded = 0 and created <= datetime("now", "-5 minutes") and tries <= 5') 229 | result = cursor.fetchall() 230 | return [row[0] for row in result] 231 | except Exception as e: 232 | logging.error(f"Error selecting not uploaded yet events: {e}") 233 | return [] 234 | finally: 235 | conn.close() 236 | 237 | 238 | def select_not_uploaded_yet_hard(db_path=DB_PATH): 239 | """ 240 | Selects events that are not uploaded yet and have more than 5 tries. Use this e.g. for notifying the user. 241 | :param db_path: 242 | :return: 243 | """ 244 | conn = sqlite3.connect(db_path) 245 | try: 246 | cursor = conn.cursor() 247 | cursor.execute( 248 | 'SELECT event_id FROM events WHERE uploaded = 0 and created <= datetime("now", "-5 minutes") and tries >= 5') 249 | result = cursor.fetchall() 250 | return [row[0] for row in result] 251 | except Exception as e: 252 | logging.error(f"Error selecting not uploaded yet hard events: {e}") 253 | return [] 254 | finally: 255 | conn.close() 256 | 257 | 258 | def get_latest_event_start_time(db_path=DB_PATH): 259 | """ 260 | Retrieves the start_time of the most recent event from the database. 261 | """ 262 | conn = sqlite3.connect(db_path) 263 | try: 264 | cursor = conn.cursor() 265 | cursor.execute('SELECT MAX(start_time) FROM events') 266 | result = cursor.fetchone() 267 | return result[0] if result and result[0] is not None else 0 268 | except Exception as e: 269 | logging.error(f"Error getting latest event start time: {e}") 270 | return 0 271 | finally: 272 | conn.close() 273 | 274 | 275 | def cleanup_old_events(db_path=DB_PATH): 276 | """ 277 | Deletes uploaded events that are older than the configured retention period. 278 | :param db_path: 279 | :return: 280 | """ 281 | conn = sqlite3.connect(db_path) 282 | try: 283 | cursor = conn.cursor() 284 | cursor.execute( 285 | 'DELETE FROM events WHERE created <= datetime("now", ? 
|| " days") and uploaded = 1', 286 | (f"-{EVENT_RETENTION_DAYS}",) 287 | ) 288 | conn.commit() 289 | except Exception as e: 290 | logging.error(f"Error cleaning up old events: {e}") 291 | finally: 292 | conn.close() 293 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | import sys 5 | import threading 6 | import time 7 | from logging.handlers import RotatingFileHandler 8 | import socket 9 | 10 | # Erstelle das Log-Verzeichnis, falls es nicht existiert 11 | os.makedirs('logs', exist_ok=True) 12 | 13 | # Konfiguriere das Logging zuerst 14 | LOGGING_LEVEL = os.getenv('LOGGING_LEVEL', 'INFO').upper() 15 | 16 | # Mögliche Log-Level mit Standardwerten 17 | LOG_LEVELS = { 18 | 'DEBUG': logging.DEBUG, 19 | 'INFO': logging.INFO, 20 | 'WARNING': logging.WARNING, 21 | 'ERROR': logging.ERROR, 22 | 'CRITICAL': logging.CRITICAL 23 | } 24 | 25 | # Wähle das Log-Level aus der Umgebungsvariable oder verwende INFO als Standard 26 | NUMERIC_LEVEL = LOG_LEVELS.get(LOGGING_LEVEL, logging.INFO) 27 | print(f"Aktuelles Log-Level: {LOGGING_LEVEL} (numerisch: {NUMERIC_LEVEL})") 28 | 29 | # Root-Logger konfigurieren 30 | root_logger = logging.getLogger() 31 | root_logger.setLevel(NUMERIC_LEVEL) # Wichtig: Dies setzt das minimale Level für den Root-Logger 32 | 33 | # Bestehende Handler entfernen 34 | for handler in root_logger.handlers[:]: 35 | root_logger.removeHandler(handler) 36 | handler.close() 37 | 38 | # Konsole-Handler 39 | console_handler = logging.StreamHandler() 40 | console_handler.setLevel(NUMERIC_LEVEL) # Level für die Konsole 41 | console_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 42 | console_handler.setFormatter(console_formatter) 43 | 44 | # Datei-Handler 45 | log_file = 'logs/app.log' 46 | file_handler = RotatingFileHandler( 47 | log_file, 48 | maxBytes=5 * 1024 * 1024, # 5 MB 49 | backupCount=5, 50 | encoding='utf-8' 51 | ) 52 | file_handler.setLevel(NUMERIC_LEVEL) # Level für die Datei 53 | file_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 54 | file_handler.setFormatter(file_formatter) 55 | 56 | # Handler hinzufügen 57 | root_logger.addHandler(console_handler) 58 | root_logger.addHandler(file_handler) 59 | 60 | # Deaktiviere die Propagation zu anderen Loggern, um doppelte Logs zu vermeiden 61 | root_logger.propagate = False 62 | 63 | # Logger für dieses Modul 64 | logger = logging.getLogger(__name__) 65 | logger.info(f"Logging initialisiert mit Level {LOGGING_LEVEL}") 66 | 67 | # Jetzt die restlichen Imports durchführen, nachdem das Logging eingerichtet ist 68 | from dotenv import load_dotenv 69 | import paho.mqtt.client as mqtt 70 | from apscheduler.schedulers.background import BackgroundScheduler 71 | 72 | from src import database, google_drive 73 | from src.frigate_api import fetch_all_events 74 | from src.google_drive import cleanup_old_files_on_drive, service 75 | from src.mattermost_handler import MattermostHandler 76 | 77 | # Lade Umgebungsvariablen 78 | try: 79 | load_dotenv() 80 | logger.info("Umgebungsvariablen geladen") 81 | except Exception as e: 82 | logger.error(f"Fehler beim Laden der .env Datei: {e}") 83 | 84 | # Konfiguration aus Umgebungsvariablen laden 85 | FRIGATE_URL = os.getenv('FRIGATE_URL') 86 | MQTT_BROKER_ADDRESS = os.getenv('MQTT_BROKER_ADDRESS') 87 | MQTT_PORT = int(os.getenv('MQTT_PORT', '1883')) 88 | MQTT_TOPIC 
= os.getenv('MQTT_TOPIC') 89 | MQTT_USER = os.getenv('MQTT_USER') 90 | MQTT_PASSWORD = os.getenv('MQTT_PASSWORD') 91 | MATTERMOST_WEBHOOK_URL = os.getenv('MATTERMOST_WEBHOOK_URL') 92 | 93 | # Mattermost-Handler hinzufügen, falls konfiguriert 94 | if MATTERMOST_WEBHOOK_URL: 95 | try: 96 | mattermost_handler = MattermostHandler(MATTERMOST_WEBHOOK_URL) 97 | mattermost_handler.setLevel(logging.ERROR) 98 | mattermost_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') 99 | mattermost_handler.setFormatter(mattermost_formatter) 100 | root_logger.addHandler(mattermost_handler) 101 | logger.info("Mattermost-Benachrichtigungen aktiviert") 102 | except Exception as e: 103 | logger.error(f"Fehler beim Initialisieren des Mattermost-Handlers: {e}") 104 | else: 105 | logger.warning("MATTERMOST_WEBHOOK_URL nicht gesetzt. Mattermost-Benachrichtigungen sind deaktiviert.") 106 | 107 | 108 | def on_connect(client, userdata, flags, reason_code, properties): 109 | logging.info(f"MQTT connected with result code {reason_code}") 110 | client.subscribe(MQTT_TOPIC) 111 | 112 | 113 | def on_message(client, userdata, msg): 114 | logging.debug(f"MQTT message received `{msg.payload.decode()}` from topic `{msg.topic}`") 115 | event = json.loads(msg.payload) 116 | event_type = event.get('type', None) 117 | end_time = event.get('after', {}).get('end_time', None) 118 | has_clip = event.get('after', {}).get('has_clip', False) 119 | 120 | if event_type == 'end' and end_time is not None and has_clip is True: 121 | event_data = event['after'] 122 | handle_single_event(event_data) 123 | else: 124 | logging.debug(f"Received a MQTT message but event type, end_time or has_clip doesn't interest us. Wait for " 125 | f"the full message. Skipping...") 126 | 127 | 128 | def handle_single_event(event_data): 129 | """ 130 | Handles a single event. Uploads the video to Google Drive if available and updates the database. 131 | :param event_data: 132 | :return: 133 | """ 134 | event_id = event_data['id'] 135 | end_time = event_data['end_time'] 136 | has_clip = event_data['has_clip'] 137 | 138 | start_time = event_data['start_time'] 139 | 140 | if not database.is_event_exists(event_id): 141 | database.insert_event(event_id, start_time) 142 | 143 | if end_time is not None and has_clip is True and internet() is True: 144 | if database.select_retry(event_id) == 0: 145 | logging.debug(f"Event {event_id} is marked as non-retriable. Skipping upload.") 146 | else: 147 | uploaded_status = database.select_event_uploaded(event_id) 148 | if uploaded_status == 0 or uploaded_status is None: 149 | # Wait a few seconds to give Frigate time to finish writing the file to disk 150 | logging.debug("Waiting 5 seconds for Frigate to finalize the clip...") 151 | time.sleep(5) 152 | logging.debug(f"Uploading video {event_id} to Google Drive...") 153 | success = google_drive.upload_to_google_drive(event_data, FRIGATE_URL) 154 | if success: 155 | logging.info(f"Video {event_id} successfully uploaded.") 156 | database.update_event(event_id, 1) 157 | else: 158 | database.update_event(event_id, 0) 159 | # to prevent annoying logs / notifications... Notify only after 3 tries 160 | if database.select_tries(event_id) >= 3: 161 | logging.error(f"Failed to upload video {event_id}.") 162 | else: 163 | logging.debug(f"Event {event_id} already uploaded. 
Skipping...") 164 | 165 | 166 | def handle_all_events(): 167 | latest_start_time = database.get_latest_event_start_time() 168 | logging.debug(f"Fetching all events from Frigate since {latest_start_time}...") 169 | all_events = fetch_all_events(FRIGATE_URL, after=latest_start_time, batch_size=100) 170 | 171 | if all_events is None: 172 | # This indicates a connection error after retries 173 | logging.error("Failed to fetch events from Frigate after multiple retries.") 174 | elif not all_events: 175 | # This is the normal case where there are no new events 176 | logging.debug("No new events to process.") 177 | else: 178 | # Process the fetched events 179 | logging.debug(f"Received {len(all_events)} events") 180 | i = 1 181 | for event in all_events: 182 | logging.debug(f"Handling event #{i}: {event['id']} in handle_all_events") 183 | handle_single_event(event) 184 | i = i + 1 185 | 186 | 187 | # MQTT Reconnect settings 188 | FIRST_RECONNECT_DELAY = 1 189 | RECONNECT_RATE = 2 190 | MAX_RECONNECT_COUNT = 12 191 | MAX_RECONNECT_DELAY = 60 192 | 193 | 194 | def on_disconnect(client, userdata, rc): 195 | logging.info("MQTT disconnected with result code: %s", rc) 196 | reconnect_count, reconnect_delay = 0, FIRST_RECONNECT_DELAY 197 | while reconnect_count < MAX_RECONNECT_COUNT: 198 | logging.info("Reconnecting in %d seconds...", reconnect_delay) 199 | time.sleep(reconnect_delay) 200 | 201 | try: 202 | client.reconnect() 203 | logging.info("Reconnected successfully!") 204 | return 205 | except Exception as err: 206 | logging.error("%s. Reconnect failed. Retrying...", err) 207 | 208 | reconnect_delay *= RECONNECT_RATE 209 | reconnect_delay = min(reconnect_delay, MAX_RECONNECT_DELAY) 210 | reconnect_count += 1 211 | logging.info("Reconnect failed after %s attempts. Exiting...", reconnect_count) 212 | 213 | 214 | def init_db_and_run_migrations(): 215 | database.init_db() 216 | database.run_migrations() 217 | 218 | 219 | def mqtt_handler(): 220 | client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION2) 221 | client.username_pw_set(MQTT_USER, MQTT_PASSWORD) 222 | client.on_connect = on_connect 223 | client.on_message = on_message 224 | client.on_disconnect = on_disconnect 225 | client.connect(MQTT_BROKER_ADDRESS, MQTT_PORT, 180) 226 | client.loop_forever() 227 | 228 | 229 | def run_every_x_minutes(): 230 | logging.debug("Handling all events and cleaning up old events...") 231 | handle_all_events() 232 | database.cleanup_old_events() 233 | 234 | 235 | def run_every_6_hours(): 236 | logging.debug("Handling failed events...") 237 | failed_events = database.select_not_uploaded_yet_hard() 238 | if failed_events: 239 | logging.error( 240 | f"{len(failed_events)} failed events: {failed_events} ... Please check the logs for more information.") 241 | else: 242 | logging.debug("No failed events found.") 243 | 244 | 245 | def internet(host="8.8.8.8", port=53, timeout=3): 246 | """ 247 | Host: 8.8.8.8 (google-public-dns-a.google.com) 248 | OpenPort: 53/tcp 249 | Service: domain (DNS/TCP) 250 | """ 251 | try: 252 | socket.setdefaulttimeout(timeout) 253 | socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port)) 254 | return True 255 | except socket.error as ex: 256 | print(ex) 257 | return False 258 | 259 | 260 | def main(): 261 | """ 262 | Main function to initialize services and process events. 
263 | """ 264 | logging.debug("Initializing database...") 265 | init_db_and_run_migrations() 266 | 267 | mqtt_thread = threading.Thread(target=mqtt_handler) 268 | mqtt_thread.daemon = True 269 | mqtt_thread.start() 270 | 271 | scheduler = BackgroundScheduler() 272 | scheduler.add_job(run_every_x_minutes, 'interval', minutes=10) 273 | scheduler.add_job(run_every_6_hours, 'interval', hours=6) 274 | scheduler.add_job(lambda: cleanup_old_files_on_drive(service), 'interval', days=1) 275 | scheduler.start() 276 | 277 | try: 278 | while True: 279 | time.sleep(1) 280 | except (KeyboardInterrupt, SystemExit): 281 | scheduler.shutdown() 282 | 283 | 284 | if __name__ == "__main__": 285 | main() 286 | -------------------------------------------------------------------------------- /src/google_drive.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import ssl 4 | import socket 5 | import tempfile 6 | import threading 7 | import time 8 | import random 9 | import requests 10 | from dotenv import load_dotenv 11 | from googleapiclient.errors import HttpError 12 | from googleapiclient.http import MediaIoBaseUpload, MediaIoBaseDownload 13 | from datetime import datetime, timedelta 14 | import pytz 15 | from google.oauth2 import service_account 16 | from googleapiclient.discovery import build 17 | from requests.adapters import HTTPAdapter 18 | from urllib3.util.retry import Retry 19 | 20 | from src import database 21 | from src.frigate_api import generate_video_url 22 | 23 | load_dotenv() 24 | GDRIVE_RETENTION_DAYS = int(os.getenv('GDRIVE_RETENTION_DAYS', 0)) 25 | 26 | UPLOAD_DIR = os.getenv('UPLOAD_DIR') 27 | # Prioritize standard 'TZ' env var, but fall back to 'TIMEZONE' for backward compatibility. 
28 | TIMEZONE = os.getenv('TZ', os.getenv('TIMEZONE', 'Europe/Istanbul')) 29 | SERVICE_ACCOUNT_FILE = os.getenv('SERVICE_ACCOUNT_FILE') 30 | GOOGLE_ACCOUNT_TO_IMPERSONATE = os.getenv('GOOGLE_ACCOUNT_TO_IMPERSONATE') 31 | 32 | # Configure retry strategy for Google Drive API 33 | MAX_RETRIES = 5 34 | INITIAL_RETRY_DELAY = 1 # seconds 35 | MAX_RETRY_DELAY = 60 # seconds 36 | UPLOAD_CHUNK_SIZE = 1024 * 1024 * 10 # 10MB chunks for resumable uploads 37 | DOWNLOAD_TIMEOUT = 300 # 5 minutes for video download 38 | 39 | SCOPES = ['https://www.googleapis.com/auth/drive'] 40 | 41 | def get_google_service(): 42 | """Initialize and return a Google Drive service.""" 43 | try: 44 | # Check if service account file exists 45 | if not os.path.isfile(SERVICE_ACCOUNT_FILE): 46 | logging.error(f"Service account file not found at: {SERVICE_ACCOUNT_FILE}") 47 | logging.error(f"Current working directory: {os.getcwd()}") 48 | if os.path.exists(os.path.dirname(SERVICE_ACCOUNT_FILE)): 49 | logging.error(f"Directory contents: {os.listdir(os.path.dirname(SERVICE_ACCOUNT_FILE))}") 50 | raise FileNotFoundError(f"Service account file not found at: {SERVICE_ACCOUNT_FILE}") 51 | 52 | # Initialize credentials 53 | if GOOGLE_ACCOUNT_TO_IMPERSONATE: 54 | credentials = service_account.Credentials.from_service_account_file( 55 | SERVICE_ACCOUNT_FILE, scopes=SCOPES, subject=GOOGLE_ACCOUNT_TO_IMPERSONATE) 56 | logging.info(f"Using service account with impersonation: {GOOGLE_ACCOUNT_TO_IMPERSONATE}") 57 | else: 58 | credentials = service_account.Credentials.from_service_account_file( 59 | SERVICE_ACCOUNT_FILE, scopes=SCOPES) 60 | logging.info("Using service account without impersonation") 61 | 62 | # Build and return the service 63 | return build('drive', 'v3', credentials=credentials, cache_discovery=False) 64 | 65 | except Exception as e: 66 | error_msg = f"Error initializing Google Drive service: {str(e)}" 67 | logging.error(error_msg) 68 | raise RuntimeError(error_msg) from e 69 | 70 | # Initialize the service 71 | service = get_google_service() 72 | 73 | # Cache for folder IDs to avoid repeated lookups and improve resilience 74 | _folder_id_cache = {} 75 | 76 | # Lock to prevent race conditions when creating folders 77 | folder_creation_lock = threading.Lock() 78 | 79 | 80 | def generate_filename(camera_name, start_time, event_id): 81 | utc_time = datetime.fromtimestamp(start_time, pytz.utc) 82 | local_time = utc_time.astimezone(pytz.timezone(TIMEZONE)) 83 | return f"{local_time.strftime('%Y-%m-%d-%H-%M-%S')}__{camera_name}__{event_id}.mp4" 84 | 85 | 86 | def find_or_create_folder(name, parent_id=None): 87 | """ 88 | Finds a folder by name and parent_id, creating it if it doesn't exist. 89 | Uses a cache to avoid repeated API calls and improve resilience against network errors. 90 | """ 91 | cache_key = (parent_id, name) 92 | if cache_key in _folder_id_cache: 93 | logging.debug(f"Found folder '{name}' in cache with ID: {_folder_id_cache[cache_key]}") 94 | return _folder_id_cache[cache_key] 95 | 96 | # Use a lock to prevent race conditions where multiple threads try to create the same folder. 
97 | with folder_creation_lock: 98 | # Double-check the cache inside the lock in case another thread populated it while waiting 99 | if cache_key in _folder_id_cache: 100 | logging.debug(f"Found folder '{name}' in cache (after lock) with ID: {_folder_id_cache[cache_key]}") 101 | return _folder_id_cache[cache_key] 102 | 103 | try: 104 | query = f"name='{name}' and mimeType='application/vnd.google-apps.folder' and trashed=false" 105 | if parent_id: 106 | query += f" and '{parent_id}' in parents" 107 | 108 | results = service.files().list(q=query, spaces='drive', fields='files(id, name)').execute() 109 | folders = results.get('files', []) 110 | 111 | if not folders: 112 | folder_metadata = { 113 | 'name': name, 114 | 'mimeType': 'application/vnd.google-apps.folder', 115 | 'parents': [parent_id] if parent_id else [] 116 | } 117 | folder = service.files().create(body=folder_metadata, fields='id').execute() 118 | folder_id = folder.get('id') 119 | logging.debug(f"Created folder '{name}' with ID: {folder_id}") 120 | _folder_id_cache[cache_key] = folder_id 121 | return folder_id 122 | else: 123 | folder_id = folders[0]['id'] 124 | logging.debug(f"Found existing folder '{name}' with ID: {folder_id}") 125 | _folder_id_cache[cache_key] = folder_id 126 | return folder_id 127 | 128 | except (HttpError, socket.timeout) as error: 129 | logging.error(f"An error occurred while finding or creating folder '{name}': {error}") 130 | return None 131 | 132 | 133 | def get_folder_id(drive_service, folder_name, parent_id): 134 | try: 135 | query = f"name='{folder_name}' and mimeType='application/vnd.google-apps.folder' and trashed=false" 136 | if parent_id: 137 | query += f" and '{parent_id}' in parents" 138 | 139 | results = drive_service.files().list(q=query, spaces='drive', fields='files(id, name)').execute() 140 | folders = results.get('files', []) 141 | 142 | if not folders: 143 | return None 144 | else: 145 | return folders[0]['id'] 146 | 147 | except HttpError as error: 148 | logging.error(f"An error occurred while finding folder '{folder_name}': {error}") 149 | return None 150 | 151 | 152 | def cleanup_old_files_on_drive(drive_service): 153 | """ 154 | Deletes files older than GDRIVE_RETENTION_DAYS from Google Drive and cleans up empty parent folders. 155 | """ 156 | if GDRIVE_RETENTION_DAYS == 0: 157 | logging.info("GDRIVE_RETENTION_DAYS is set to 0, skipping cleanup.") 158 | return 159 | 160 | logging.info(f"Starting cleanup of files older than {GDRIVE_RETENTION_DAYS} days on Google Drive...") 161 | 162 | try: 163 | # Calculate the cutoff date 164 | cutoff_date = datetime.now() - timedelta(days=GDRIVE_RETENTION_DAYS) 165 | cutoff_iso = cutoff_date.isoformat() + 'Z' 166 | 167 | # Find the root upload folder first 168 | upload_dir_name = os.getenv('UPLOAD_DIR', 'Frigate') 169 | folder_id = get_folder_id(drive_service, upload_dir_name, 'root') 170 | if not folder_id: 171 | logging.warning(f"Root upload folder '{upload_dir_name}' not found. Cannot perform cleanup.") 172 | return 173 | 174 | # Find and delete old files recursively. The 'trashed=false' is crucial. 
175 | query = f"mimeType='video/mp4' and trashed=false and createdTime < '{cutoff_iso}'" 176 | page_token = None 177 | while True: 178 | response = drive_service.files().list(q=query, 179 | spaces='drive', 180 | fields='nextPageToken, files(id, name, parents)', 181 | pageToken=page_token).execute() 182 | for file in response.get('files', []): 183 | file_id = file.get('id') 184 | file_name = file.get('name') 185 | parent_folders = file.get('parents') 186 | logging.info(f"Deleting old file: {file_name} (ID: {file_id})") 187 | drive_service.files().delete(fileId=file_id).execute() 188 | 189 | # Cleanup empty parent folders 190 | if parent_folders: 191 | cleanup_empty_parent_folders(drive_service, parent_folders[0]) 192 | 193 | page_token = response.get('nextPageToken', None) 194 | if page_token is None: 195 | break 196 | 197 | logging.info("Google Drive cleanup finished.") 198 | 199 | except HttpError as error: 200 | logging.error(f'An error occurred during Google Drive cleanup: {error}') 201 | except Exception as e: 202 | logging.error(f'An unexpected error occurred during Google Drive cleanup: {e}') 203 | 204 | 205 | def cleanup_empty_parent_folders(drive_service, folder_id): 206 | """ 207 | Recursively deletes a folder and its parents if they become empty. 208 | """ 209 | try: 210 | # Check if the folder is empty 211 | q = f"'{folder_id}' in parents" 212 | response = drive_service.files().list(q=q, spaces='drive', fields='files(id)').execute() 213 | if not response.get('files', []): 214 | # Get folder details to find its parent 215 | folder_details = drive_service.files().get(fileId=folder_id, fields='name, parents').execute() 216 | folder_name = folder_details.get('name') 217 | parent_folders = folder_details.get('parents') 218 | 219 | logging.info(f"Deleting empty folder: {folder_name} (ID: {folder_id})") 220 | drive_service.files().delete(fileId=folder_id).execute() 221 | 222 | # Recursively check the parent folder 223 | if parent_folders: 224 | cleanup_empty_parent_folders(drive_service, parent_folders[0]) 225 | except HttpError as error: 226 | # It's possible another process deleted it, so we can ignore 'not found' errors 227 | if error.resp.status == 404: 228 | logging.warning(f"Folder with ID {folder_id} not found, likely already deleted.") 229 | else: 230 | logging.error(f'An error occurred while cleaning up empty folder {folder_id}: {error}') 231 | 232 | 233 | def exponential_backoff(retries): 234 | """Calculate exponential backoff with jitter.""" 235 | if retries == 0: 236 | return 0 237 | jitter = random.uniform(0, 1) 238 | return min(INITIAL_RETRY_DELAY * (2 ** (retries - 1)) + jitter, MAX_RETRY_DELAY) 239 | 240 | def download_video_with_retry(video_url, max_retries=3): 241 | """Download video with retry logic and proper timeout handling.""" 242 | retry_count = 0 243 | last_error = None 244 | 245 | while retry_count <= max_retries: 246 | try: 247 | with requests.Session() as session: 248 | # Configure retry strategy for the download 249 | retry_strategy = Retry( 250 | total=3, 251 | backoff_factor=1, 252 | status_forcelist=[500, 502, 503, 504], 253 | allowed_methods=["GET"] 254 | ) 255 | adapter = HTTPAdapter(max_retries=retry_strategy) 256 | session.mount("https://", adapter) 257 | session.mount("http://", adapter) 258 | 259 | with session.get(video_url, stream=True, timeout=DOWNLOAD_TIMEOUT) as response: 260 | response.raise_for_status() 261 | 262 | with tempfile.TemporaryFile() as fh: 263 | for chunk in response.iter_content(chunk_size=8192): 264 | if chunk: # filter out 
keep-alive new chunks 265 | fh.write(chunk) 266 | fh.seek(0) 267 | return fh.read() 268 | 269 | except (requests.RequestException, ssl.SSLError, socket.timeout) as e: 270 | last_error = e 271 | retry_count += 1 272 | if retry_count <= max_retries: 273 | wait_time = exponential_backoff(retry_count) 274 | logging.warning(f"Attempt {retry_count}/{max_retries} failed. Retrying in {wait_time:.2f}s. Error: {e}") 275 | time.sleep(wait_time) 276 | 277 | logging.error(f"Failed to download video after {max_retries} attempts. Last error: {last_error}") 278 | return None 279 | 280 | def upload_to_google_drive(event, frigate_url): 281 | """Upload a video to Google Drive with retry logic and proper error handling.""" 282 | import io # Moved here to prevent early initialization issues 283 | 284 | camera_name = event['camera'] 285 | start_time = event['start_time'] 286 | event_id = event['id'] 287 | filename = generate_filename(camera_name, start_time, event_id) 288 | year, month, day = filename.split("__")[0].split("-")[:3] 289 | video_url = generate_video_url(frigate_url, event_id) 290 | 291 | for attempt in range(MAX_RETRIES + 1): 292 | try: 293 | # 1. Ensure folder structure exists 294 | frigate_folder_id = find_or_create_folder(UPLOAD_DIR) 295 | if not frigate_folder_id: 296 | raise Exception(f"Failed to find or create folder: {UPLOAD_DIR}") 297 | 298 | year_folder_id = find_or_create_folder(year, frigate_folder_id) 299 | if not year_folder_id: 300 | raise Exception(f"Failed to find or create folder: {year}") 301 | 302 | month_folder_id = find_or_create_folder(month, year_folder_id) 303 | if not month_folder_id: 304 | raise Exception(f"Failed to find or create folder: {month}") 305 | 306 | day_folder_id = find_or_create_folder(day, month_folder_id) 307 | if not day_folder_id: 308 | raise Exception(f"Failed to find or create folder: {day}") 309 | 310 | # 2. Download video with retry logic 311 | video_data = download_video_with_retry(video_url) 312 | if video_data is None: 313 | raise Exception(f"Failed to download video from {video_url}") 314 | 315 | # 3. Upload to Google Drive with resumable upload 316 | media = MediaIoBaseUpload( 317 | io.BytesIO(video_data), 318 | mimetype='video/mp4', 319 | resumable=True, 320 | chunksize=UPLOAD_CHUNK_SIZE 321 | ) 322 | 323 | file_metadata = { 324 | 'name': filename, 325 | 'parents': [day_folder_id] 326 | } 327 | 328 | request = service.files().create( 329 | body=file_metadata, 330 | media_body=media, 331 | fields='id', 332 | supportsAllDrives=True 333 | ) 334 | 335 | response = None 336 | while response is None: 337 | status, response = request.next_chunk() 338 | if status: 339 | logging.debug(f"Upload progress: {int(status.progress() * 100)}%") 340 | 341 | if 'id' in response: 342 | logging.info(f"Video {filename} successfully uploaded to Google Drive with ID: {response['id']}.") 343 | return True 344 | else: 345 | raise Exception("No file ID returned from Google Drive") 346 | 347 | except HttpError as error: 348 | if attempt < MAX_RETRIES and error.resp.status in [500, 502, 503, 504, 429]: 349 | wait_time = exponential_backoff(attempt + 1) 350 | logging.warning(f"Attempt {attempt + 1}/{MAX_RETRIES} failed with status {error.resp.status}. " 351 | f"Retrying in {wait_time:.2f}s. 
Error: {error}") 352 | time.sleep(wait_time) 353 | continue 354 | logging.error(f"HTTP error uploading to Google Drive: {error}") 355 | return False 356 | 357 | except (requests.RequestException, ssl.SSLError, socket.timeout, socket.error) as e: 358 | if attempt < MAX_RETRIES: 359 | wait_time = exponential_backoff(attempt + 1) 360 | logging.warning(f"Attempt {attempt + 1}/{MAX_RETRIES} failed. Retrying in {wait_time:.2f}s. Error: {e}") 361 | time.sleep(wait_time) 362 | continue 363 | logging.error(f"Error in upload process: {e}", exc_info=True) 364 | return False 365 | 366 | except Exception as e: 367 | logging.error(f"Unexpected error: {e}", exc_info=True) 368 | return False 369 | 370 | logging.error(f"Failed to upload after {MAX_RETRIES + 1} attempts") 371 | return False 372 | 373 | 374 | --------------------------------------------------------------------------------
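A quick usage note on the naming scheme above: `generate_filename()` converts the Frigate `start_time` (a UTC epoch timestamp) to local time, and the upload code files the clip under `<UPLOAD_DIR>/<year>/<month>/<day>/`. The self-contained sketch below repeats the same formula instead of importing `src.google_drive` (that module builds the Drive service on import and needs credentials); the camera name and event ID are made-up examples, while `TIMEZONE` and `UPLOAD_DIR` are taken from `env_example`:

```python
from datetime import datetime

import pytz

TIMEZONE = 'Europe/Istanbul'  # TZ from env_example
UPLOAD_DIR = 'frigate'        # UPLOAD_DIR from env_example


def generate_filename(camera_name, start_time, event_id):
    # Same formula as src/google_drive.py: UTC epoch -> local time -> timestamped filename.
    utc_time = datetime.fromtimestamp(start_time, pytz.utc)
    local_time = utc_time.astimezone(pytz.timezone(TIMEZONE))
    return f"{local_time.strftime('%Y-%m-%d-%H-%M-%S')}__{camera_name}__{event_id}.mp4"


filename = generate_filename('front_door', 1700000000.0, '1700000000.123456-abcdef')
year, month, day = filename.split("__")[0].split("-")[:3]

print(filename)
# 2023-11-15-01-13-20__front_door__1700000000.123456-abcdef.mp4
print(f"{UPLOAD_DIR}/{year}/{month}/{day}/{filename}")
# frigate/2023/11/15/2023-11-15-01-13-20__front_door__1700000000.123456-abcdef.mp4
```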