├── envoy_logger
│   ├── __init__.py
│   ├── __main__.py
│   ├── cfg.py
│   ├── envoy.py
│   ├── enphaseenergy.py
│   ├── model.py
│   └── sampling_loop.py
├── docs
│   ├── dashboard-live.png
│   ├── dashboard-daily-totals.png
│   ├── examples
│   │   ├── dockerfile
│   │   ├── envoy-logger.yaml
│   │   └── cfg.yaml
│   ├── flux_queries
│   │   ├── net.flux
│   │   ├── line-voltage.flux
│   │   ├── production.flux
│   │   ├── consumption.flux
│   │   ├── phase_angle.flux
│   │   ├── per-panel-daily-totals.flux
│   │   ├── per-panel-production.flux
│   │   ├── daily-totals.flux
│   │   └── line-voltage-minmax.flux
│   ├── system-service.md
│   └── Setup-Instructions.md
├── .gitignore
├── launcher.sh
├── setup.py
└── README.md

/envoy_logger/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/docs/dashboard-live.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amykyta3/envoy-logger/HEAD/docs/dashboard-live.png
--------------------------------------------------------------------------------
/docs/dashboard-daily-totals.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amykyta3/envoy-logger/HEAD/docs/dashboard-daily-totals.png
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | **/__pycache__
2 | **/.vscode
3 | **/*.o
4 | **/*.so
5 | **/.venv
6 | **/.coverage
7 | **/.coverage.*
8 | **/*.rpt
9 | **/htmlcov
10 | 
11 | build/
12 | dist/
13 | *.egg-info/
14 | .eggs/
--------------------------------------------------------------------------------
/launcher.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -e
3 | 
4 | echo "Installing"
5 | python3 -m pip install -U setuptools
6 | python3 -m pip install --force-reinstall git+https://github.com/amykyta3/envoy-logger
7 | 
8 | echo "Starting logger"
9 | python3 -m envoy_logger "$ENVOY_LOGGER_CFG_PATH"
--------------------------------------------------------------------------------
/docs/examples/dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:bullseye-slim
2 | 
3 | ADD https://raw.githubusercontent.com/amykyta3/envoy-logger/main/launcher.sh /
4 | RUN chmod +x /launcher.sh
5 | 
6 | # Install dependencies
7 | RUN apt update
8 | RUN apt -y install python3 python3-pip git
9 | 
10 | ENV ENVOY_LOGGER_CFG_PATH=/etc/envoy_logger/cfg.yaml
11 | 
12 | # entrypoint
13 | CMD /launcher.sh
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import setuptools
2 | 
3 | setuptools.setup(
4 |     name="envoy-logger",
5 |     version="1.1.0",
6 |     packages=setuptools.find_packages(exclude=["test"]),
7 |     include_package_data=True,
8 |     python_requires='>=3.6',
9 |     install_requires=[
10 |         "requests",
11 |         "appdirs",
12 |         "influxdb-client",
13 |         "PyYAML",
14 |     ],
15 | )
--------------------------------------------------------------------------------
/docs/examples/envoy-logger.yaml:
--------------------------------------------------------------------------------
1 | # Docker compose file
2 | 
3 | version: "3"
4 | 
5 | services:
6 |   envoy-logger:
7 |     image: envoy-logger-image:latest
8 |     environment:
9 |       - ENVOY_LOGGER_CFG_PATH=/etc/envoy_logger/cfg.yaml
10 |     volumes:
11 | - /hd-lowrel0/envoy_logger:/etc/envoy_logger:ro 12 | - /etc/localtime:/etc/localtime:ro 13 | - /etc/timezone:/etc/timezone:ro 14 | -------------------------------------------------------------------------------- /docs/flux_queries/net.flux: -------------------------------------------------------------------------------- 1 | from(bucket: "high_rate") 2 | |> range(start: v.timeRangeStart, stop: v.timeRangeStop) 3 | |> filter(fn: (r) => r["source"] == "power-meter") 4 | |> filter(fn: (r) => r["measurement-type"] == "net") 5 | |> filter(fn: (r) => r["_field"] == "P") 6 | |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) 7 | |> group(columns: ["_time"], mode:"by") 8 | |> sum(column: "_value") 9 | |> group() 10 | |> yield() 11 | -------------------------------------------------------------------------------- /docs/flux_queries/line-voltage.flux: -------------------------------------------------------------------------------- 1 | // Per-phase line voltage plots 2 | from(bucket: "high_rate") 3 | |> range(start: v.timeRangeStart, stop: v.timeRangeStop) 4 | |> filter(fn: (r) => r["source"] == "power-meter") 5 | |> filter(fn: (r) => r["measurement-type"] == "net") 6 | |> filter(fn: (r) => r["_field"] == "V_rms") 7 | |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) 8 | |> keep(columns: ["_value", "_time", "line-idx"]) 9 | |> yield(name: "mean") 10 | -------------------------------------------------------------------------------- /docs/flux_queries/production.flux: -------------------------------------------------------------------------------- 1 | // Plot total production (sum of both line0 and line1) 2 | from(bucket: "high_rate") 3 | |> range(start: v.timeRangeStart, stop: v.timeRangeStop) 4 | |> filter(fn: (r) => r["source"] == "power-meter") 5 | |> filter(fn: (r) => r["measurement-type"] == "production") 6 | |> filter(fn: (r) => r["_field"] == "P") 7 | |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) 8 | |> group(columns: ["_time"], mode:"by") 9 | |> sum(column: "_value") 10 | |> group() 11 | |> yield() 12 | -------------------------------------------------------------------------------- /docs/flux_queries/consumption.flux: -------------------------------------------------------------------------------- 1 | // Plot consumption of both phases (line0 and line1) 2 | from(bucket: "high_rate") 3 | |> range(start: v.timeRangeStart, stop: v.timeRangeStop) 4 | |> filter(fn: (r) => r["source"] == "power-meter") 5 | |> filter(fn: (r) => r["measurement-type"] == "consumption") 6 | |> filter(fn: (r) => r["_field"] == "P") 7 | |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) 8 | |> drop(columns: ["_start", "_stop", "_field", "_measurement", "measurement-type", "source"]) 9 | |> yield(name: "mean") 10 | -------------------------------------------------------------------------------- /docs/flux_queries/phase_angle.flux: -------------------------------------------------------------------------------- 1 | // Calculate power phase angle for each line 2 | import "math" 3 | from(bucket: "high_rate") 4 | |> range(start: v.timeRangeStart, stop: v.timeRangeStop) 5 | |> filter(fn: (r) => r["source"] == "power-meter") 6 | |> filter(fn: (r) => r["measurement-type"] == "consumption") 7 | |> filter(fn: (r) => r["_field"] == "P" or r["_field"] == "Q") 8 | |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) 9 | |> pivot(rowKey: ["_time"], columnKey: ["_field"], valueColumn: "_value") 10 | |> map(fn: (r) => ({r 
with _value: math.atan(x: r.Q / r.P) * 180.0 / 3.14159 })) 11 | |> keep(columns: ["_value", "_time", "line-idx"]) 12 | |> yield(name: "mean") 13 | -------------------------------------------------------------------------------- /docs/flux_queries/per-panel-daily-totals.flux: -------------------------------------------------------------------------------- 1 | zero_pad = (v) => { 2 | result = if int(v) < 10 then 3 | "0${v}" 4 | else 5 | "${v}" 6 | 7 | return result 8 | } 9 | 10 | from(bucket: "low_rate") 11 | |> range(start: v.timeRangeStart, stop: v.timeRangeStop) 12 | |> filter(fn: (r) => r["source"] == "power-meter") 13 | |> filter(fn: (r) => r["measurement-type"] == "inverter") 14 | |> filter(fn: (r) => r["_field"] == "Wh") 15 | // make sure the "row" and "col" tags exist 16 | |> filter(fn: (r) => exists r.row and exists r.col) 17 | // Panels are tagged with "row" and "col". Rather than labeling them by their 18 | // serial number, use row/column to label them as "R##-C##" 19 | |> map(fn: (r) => ({r with pos: "R" + zero_pad(v: r.row) + "-C" + zero_pad(v: r.col)})) 20 | |> keep(columns: ["_time", "_value", "pos"]) 21 | |> group(columns: ["pos"]) 22 | |> yield(name: "total") 23 | -------------------------------------------------------------------------------- /docs/flux_queries/per-panel-production.flux: -------------------------------------------------------------------------------- 1 | zero_pad = (v) => { 2 | result = if int(v) < 10 then 3 | "0${v}" 4 | else 5 | "${v}" 6 | 7 | return result 8 | } 9 | 10 | from(bucket: "high_rate") 11 | |> range(start: v.timeRangeStart, stop: v.timeRangeStop) 12 | |> filter(fn: (r) => r["source"] == "power-meter") 13 | |> filter(fn: (r) => r["measurement-type"] == "inverter") 14 | |> filter(fn: (r) => r["_field"] == "P") 15 | // make sure the "row" and "col" tags exist 16 | |> filter(fn: (r) => exists r.row and exists r.col) 17 | |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) 18 | // Panels are tagged with "row" and "col". Rather than labeling them by their 19 | // serial number, use row/column to label them as "R##-C##" 20 | |> map(fn: (r) => ({r with pos: "R" + zero_pad(v: r.row) + "-C" + zero_pad(v: r.col)})) 21 | |> keep(columns: ["_value", "_time", "pos"]) 22 | |> group(columns: ["pos"]) 23 | |> yield(name: "mean") 24 | -------------------------------------------------------------------------------- /envoy_logger/__main__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import argparse 3 | import time 4 | 5 | from requests.exceptions import RequestException 6 | 7 | from . 
import enphaseenergy
8 | from .sampling_loop import SamplingLoop
9 | from .cfg import load_cfg
10 | 
11 | logging.basicConfig(
12 |     level=logging.INFO,
13 |     format="%(asctime)s %(levelname)s [%(name)s]: %(message)s"
14 | )
15 | 
16 | parser = argparse.ArgumentParser()
17 | parser.add_argument("cfg_path")
18 | args = parser.parse_args()
19 | 
20 | cfg = load_cfg(args.cfg_path)
21 | 
22 | while True:
23 |     # Loop forever so that if an exception occurs, the logger restarts
24 |     try:
25 |         envoy_token = enphaseenergy.get_token(
26 |             cfg.enphase_email,
27 |             cfg.enphase_password,
28 |             cfg.envoy_serial
29 |         )
30 | 
31 |         S = SamplingLoop(envoy_token, cfg)
32 | 
33 |         S.run()
34 |     except RequestException as e:
35 |         logging.error("%s: %s", str(type(e)), e)
36 |         logging.info("Waiting a bit before restarting...")
37 |         time.sleep(15)
38 |         logging.info("Restarting data logger")
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Enphase Envoy data logging service
2 | ==================================
3 | 
4 | Log your solar production locally and feed it into an InfluxDB instance.
5 | 
6 | This Python-based logging application handles the following:
7 | * Automatically fetch the Envoy authentication token from enphaseenergy.com
8 |   * Enphase has this idiotic token-based authentication method that makes it
9 |     impossible to operate completely offline. This application will fetch a
10 |     new token automatically, and refresh it as-needed upon expiration.
11 | * Authenticate a session with your local Envoy hardware
12 | * Scrape solar production data:
13 |   * Per-phase production, consumption, and net
14 |   * Per-phase voltage, phase angle, etc.
15 |   * Per-panel production
16 | * Push samples to an InfluxDB database
17 | 
18 | 
19 | Once the data is in InfluxDB, you can display it on a Grafana dashboard.
20 | 
21 | Some examples:
22 | 
23 | ![live](docs/dashboard-live.png)
24 | 
25 | ![daily](docs/dashboard-daily-totals.png)
26 | 
27 | 
28 | ## Set-up instructions
29 | 
30 | I have written up a [general guide](docs/Setup-Instructions.md) in case you're stuck.
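If you would rather drive the logger from your own script than run `python3 -m envoy_logger`, the same modules can be used directly. Below is a minimal sketch of polling one power sample; the email, password, serial number, and URL are placeholder values from the example config, so substitute your own:

    # Minimal sketch: fetch a token, authenticate, and poll one sample.
    # Credentials, serial number, and URL below are placeholders.
    from envoy_logger import enphaseenergy, envoy

    token = enphaseenergy.get_token("name@example.com", "mypassword123", "123456789012")
    session_id = envoy.login("https://envoy.local", token)

    sample = envoy.get_power_data("https://envoy.local", session_id)
    if sample.total_production is not None:  # None if no "eim" production measurement is reported
        for i, line in enumerate(sample.total_production.lines):
            print(f"line{i}: {line.wNow:.1f} W")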
31 | -------------------------------------------------------------------------------- /docs/flux_queries/daily-totals.flux: -------------------------------------------------------------------------------- 1 | // Daily totals of production and consumption 2 | from(bucket: "low_rate") 3 | |> range(start: v.timeRangeStart, stop: v.timeRangeStop) 4 | |> filter(fn: (r) => r["source"] == "power-meter") 5 | |> filter(fn: (r) => r["measurement-type"] == "consumption") 6 | |> filter(fn: (r) => r["interval"] == "24h") 7 | |> filter(fn: (r) => r["_field"] == "Wh") 8 | // Shift back by 12 hours so that the bar chart shows up mid-day instead of at midnight 9 | |> timeShift(duration: -12h) 10 | |> group(columns: ["_time"], mode:"by") 11 | |> sum(column: "_value") 12 | |> group() 13 | |> yield(name: "consumed") 14 | 15 | from(bucket: "low_rate") 16 | |> range(start: v.timeRangeStart, stop: v.timeRangeStop) 17 | |> filter(fn: (r) => r["source"] == "power-meter") 18 | |> filter(fn: (r) => r["measurement-type"] == "production") 19 | |> filter(fn: (r) => r["interval"] == "24h") 20 | |> filter(fn: (r) => r["_field"] == "Wh") 21 | // Shift back by 12 hours so that the bar chart shows up mid-day instead of at midnight 22 | |> timeShift(duration: -12h) 23 | |> group(columns: ["_time"], mode:"by") 24 | |> sum(column: "_value") 25 | |> group() 26 | |> yield(name: "produced") 27 | -------------------------------------------------------------------------------- /docs/flux_queries/line-voltage-minmax.flux: -------------------------------------------------------------------------------- 1 | DOWNSAMPLE_BY = 20 2 | WINDOW_DURATION = duration(v: int(v: v.windowPeriod) * DOWNSAMPLE_BY) 3 | TIME_SHIFT = duration(v: - int(v: v.windowPeriod) * DOWNSAMPLE_BY / 2) 4 | 5 | // Min voltage over the window 6 | from(bucket: "high_rate") 7 | |> range(start: v.timeRangeStart, stop: v.timeRangeStop) 8 | |> filter(fn: (r) => r["source"] == "power-meter") 9 | |> filter(fn: (r) => r["measurement-type"] == "net") 10 | |> filter(fn: (r) => r["_field"] == "V_rms") 11 | // Use a lower resolution window 12 | |> aggregateWindow(every: WINDOW_DURATION, fn: min, createEmpty: false) 13 | // shift back in time to 'center' the downsampled points 14 | |> timeShift(duration: TIME_SHIFT) 15 | // use the min of the two line voltages 16 | |> group(columns: ["_time"], mode:"by") 17 | |> min(column: "_value") 18 | |> map(fn: (r) => ({r with name: "min"})) 19 | |> group(columns: ["name"]) 20 | |> keep(columns: ["_value", "_time", "name"]) 21 | |> yield(name: "min") 22 | 23 | // Max voltage over the window 24 | from(bucket: "high_rate") 25 | |> range(start: v.timeRangeStart, stop: v.timeRangeStop) 26 | |> filter(fn: (r) => r["source"] == "power-meter") 27 | |> filter(fn: (r) => r["measurement-type"] == "net") 28 | |> filter(fn: (r) => r["_field"] == "V_rms") 29 | // Use a lower resolution window 30 | |> aggregateWindow(every: WINDOW_DURATION, fn: max, createEmpty: false) 31 | // shift back in time to 'center' the downsampled points 32 | |> timeShift(duration: TIME_SHIFT) 33 | // use the max of the two line voltages 34 | |> group(columns: ["_time"], mode:"by") 35 | |> max(column: "_value") 36 | |> map(fn: (r) => ({r with name: "max"})) 37 | |> group(columns: ["name"]) 38 | |> keep(columns: ["_value", "_time", "name"]) 39 | |> yield(name: "max") 40 | -------------------------------------------------------------------------------- /docs/examples/cfg.yaml: -------------------------------------------------------------------------------- 1 | # Config file for 
envoy-logger
2 | 
3 | # Your login info for enphaseenergy.com
4 | enphaseenergy:
5 |   email: name@example.com
6 |   password: mypassword123
7 | 
8 | # Information about your specific Envoy instance.
9 | # https://enlighten.enphaseenergy.com will report the serial number under the "IQ-Gateway" information
10 | envoy:
11 |   serial: 123456789012
12 |   url: https://envoy.local
13 | 
14 |   # Give your envoy a name.
15 |   # All points logged will be tagged with "source=<tag>"
16 |   # Useful if you have multiple envoys
17 |   tag: power-meter
18 | 
19 | # How to access InfluxDB
20 | influxdb:
21 |   url: http://localhost:8086
22 |   token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
23 |   org: home
24 | 
25 |   # Which InfluxDB bucket to send measurements to.
26 |   # This can be useful to control different data-retention rules.
27 |   # Alternatively, use the "bucket" key if you want everything to be sent to the
28 |   # same bucket.
29 |   bucket_hr: high_rate
30 |   bucket_lr: low_rate
31 |   # bucket: all_data
32 | 
33 | # Since the Envoy only tracks panel-level inverter production by serial number,
34 | # it can be useful to provide InfluxDB measurements with additional tags that
35 | # further describe your panels. This is completely optional, but can be useful
36 | # metadata for your dashboard.
37 | # You can figure out which inverter is which by logging into https://enlighten.enphaseenergy.com/
38 | # and viewing: https://enlighten.enphaseenergy.com/pv/systems/<system-id>/array_layout_x.json
39 | # This will contain the X/Y coordinates of each inverter/panel.
40 | # Since my layout is pretty simple, I chose to tag them with simple row/column numbers.
41 | # Up to you to define a labeling scheme that works for you.
42 | inverters:
43 |   "202212345600": # This is the inverter's serial number.
NOT the inverter ID, module ID, or SKU as displayed in some places 44 | tags: 45 | row: 1 46 | col: 1 47 | "202212345601": 48 | tags: 49 | row: 1 50 | col: 2 51 | "202212345602": 52 | tags: 53 | row: 2 54 | col: 1 55 | "202212345603": 56 | tags: 57 | row: 2 58 | col: 2 59 | -------------------------------------------------------------------------------- /envoy_logger/cfg.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import sys 3 | from typing import Dict 4 | 5 | import yaml 6 | from influxdb_client import Point 7 | 8 | LOG = logging.getLogger("cfg") 9 | 10 | class Config: 11 | def __init__(self, data) -> None: 12 | try: 13 | self.enphase_email = data['enphaseenergy']['email'] # type: str 14 | self.enphase_password = data['enphaseenergy']['password'] # type: str 15 | 16 | self.envoy_serial = str(data['envoy']['serial']) 17 | self.envoy_url = data['envoy'].get('url', 'https://envoy.local') # type: str 18 | self.source_tag = data['envoy'].get('tag', 'envoy') # type: str 19 | 20 | self.influxdb_url = data['influxdb']['url'] # type: str 21 | self.influxdb_token = data['influxdb']['token'] # type: str 22 | self.influxdb_org = data['influxdb'].get('org', 'home') # type: str 23 | 24 | bucket = data['influxdb'].get('bucket', None) 25 | bucket_lr = data['influxdb'].get('bucket_lr', None) 26 | bucket_hr = data['influxdb'].get('bucket_hr', None) 27 | self.influxdb_bucket_lr = bucket_lr or bucket 28 | self.influxdb_bucket_hr = bucket_hr or bucket 29 | 30 | self.inverters = {} # type: Dict[str, InverterConfig] 31 | for serial, inverter_data in data.get("inverters", {}).items(): 32 | serial = str(serial) 33 | self.inverters[serial] = InverterConfig(inverter_data, serial) 34 | 35 | except KeyError as e: 36 | LOG.error("Missing required config key: %s", e.args[0]) 37 | sys.exit(1) 38 | 39 | 40 | def apply_tags_to_inverter_point(self, p: Point, serial: str) -> None: 41 | if serial in self.inverters.keys(): 42 | self.inverters[serial].apply_tags_to_point(p) 43 | 44 | 45 | 46 | class InverterConfig: 47 | def __init__(self, data, serial) -> None: 48 | self.serial = serial 49 | self.tags = data.get("tags", {}) 50 | 51 | def apply_tags_to_point(self, p: Point) -> None: 52 | for k, v in self.tags.items(): 53 | p.tag(k, v) 54 | 55 | 56 | def load_cfg(path: str): 57 | LOG.info("Loading config: %s", path) 58 | with open(path, "r", encoding="utf-8") as f: 59 | data = yaml.load(f.read(), Loader=yaml.FullLoader) 60 | 61 | return Config(data) 62 | -------------------------------------------------------------------------------- /envoy_logger/envoy.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import urllib3 3 | from datetime import datetime, timezone 4 | import logging 5 | from typing import Dict 6 | 7 | from . import model 8 | 9 | # Local envoy access uses self-signed certificate. Ignore the warning 10 | urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) 11 | 12 | LOG = logging.getLogger("envoy") 13 | 14 | def login(url: str, token: str) -> str: 15 | """ 16 | Login to local envoy and return the session id 17 | """ 18 | headers = { 19 | 'Authorization': f'Bearer {token}', 20 | } 21 | response = requests.get( 22 | f'{url}/auth/check_jwt', 23 | headers=headers, 24 | verify=False, 25 | timeout=30, 26 | ) 27 | response.raise_for_status() # raise HTTPError if one occurred 28 | session_id = response.cookies['sessionId'] 29 | LOG.info("Logged into envoy. 
SessionID: %s", session_id) 30 | return session_id 31 | 32 | 33 | def get_power_data(url: str, session_id: str) -> model.SampleData: 34 | LOG.debug("Fetching power data") 35 | ts = datetime.now(timezone.utc) 36 | cookies = { 37 | 'sessionId': session_id, 38 | } 39 | response = requests.get( 40 | f'{url}/production.json?details=1', 41 | cookies=cookies, 42 | verify=False, 43 | timeout=30, 44 | ) 45 | response.raise_for_status() # raise HTTPError if one occurred 46 | json_data = response.json() 47 | data = model.SampleData(json_data, ts) 48 | return data 49 | 50 | 51 | def get_inverter_data(url: str, session_id: str) -> Dict[str, model.InverterSample]: 52 | LOG.debug("Fetching inverter data") 53 | ts = datetime.now(timezone.utc) 54 | cookies = { 55 | 'sessionId': session_id, 56 | } 57 | response = requests.get( 58 | f'{url}/api/v1/production/inverters', 59 | cookies=cookies, 60 | verify=False, 61 | timeout=30, 62 | ) 63 | response.raise_for_status() # raise HTTPError if one occurred 64 | json_data = response.json() 65 | data = model.parse_inverter_data(json_data, ts) 66 | return data 67 | 68 | 69 | def get_inventory(url: str, session_id: str): 70 | cookies = { 71 | 'sessionId': session_id, 72 | } 73 | response = requests.get( 74 | f'{url}/inventory.json?deleted=1', 75 | cookies=cookies, 76 | verify=False, 77 | timeout=30, 78 | ) 79 | response.raise_for_status() # raise HTTPError if one occurred 80 | json_data = response.json() 81 | # TODO: Convert to objects 82 | return json_data 83 | -------------------------------------------------------------------------------- /envoy_logger/enphaseenergy.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | from datetime import datetime, timedelta 3 | import json 4 | import base64 5 | import os 6 | import logging 7 | 8 | import requests 9 | from appdirs import user_cache_dir 10 | 11 | LOG = logging.getLogger("enphaseenergy") 12 | 13 | def _login_enphaseenergy(email: str, password: str) -> str: 14 | LOG.info("Logging into enphaseenergy.com as %s", email) 15 | # Login and get session ID 16 | files = { 17 | 'user[email]': (None, email), 18 | 'user[password]': (None, password), 19 | } 20 | url = 'https://enlighten.enphaseenergy.com/login/login.json?' 21 | response = requests.post( 22 | url, 23 | files=files, 24 | timeout=30, 25 | ) 26 | response.raise_for_status() # raise HTTPError if one occurred 27 | resp = response.json() 28 | return resp['session_id'] 29 | 30 | def get_new_token(email: str, password: str, envoy_serial: str) -> str: 31 | """ 32 | Login to enphaseenergy.com and return an access token for the envoy. 
33 |     """
34 |     session_id = _login_enphaseenergy(email, password)
35 | 
36 |     LOG.info("Downloading new access token for envoy S/N: %s", envoy_serial)
37 |     # Get the token
38 |     json_data = {
39 |         'session_id': session_id,
40 |         'serial_num': envoy_serial,
41 |         'username': email,
42 |     }
43 |     response = requests.post(
44 |         'https://entrez.enphaseenergy.com/tokens',
45 |         json=json_data,
46 |         timeout=30,
47 |     )
48 |     response.raise_for_status()  # raise HTTPError if one occurred
49 |     return response.text
50 | 
51 | def token_expiration_date(token: str) -> datetime:
52 |     jwt = {}
53 |     for s in token.split(".")[0:2]:
54 |         # Pad up the segment
55 |         res = len(s) % 4
56 |         if res != 0:
57 |             s += "=" * (4 - res)
58 | 
59 |         d = base64.urlsafe_b64decode(s)  # JWT segments are base64url-encoded
60 |         jwt.update(json.loads(d))
61 |     exp = datetime.fromtimestamp(jwt['exp'])
62 |     return exp
63 | 
64 | def get_token_cache_path(envoy_serial: str) -> str:
65 |     return os.path.join(user_cache_dir("enphase-envoy"), f"{envoy_serial}.token")
66 | 
67 | def get_cached_token(envoy_serial: str) -> Optional[str]:
68 |     path = get_token_cache_path(envoy_serial)
69 |     if not os.path.exists(path):
70 |         return None
71 |     with open(path, 'r', encoding='utf-8') as f:
72 |         LOG.info("Using cached token from: %s", path)
73 |         return f.read()
74 | 
75 | 
76 | def save_token_to_cache(envoy_serial: str, token: str) -> None:
77 |     path = get_token_cache_path(envoy_serial)
78 |     LOG.info("Caching token to: %s", path)
79 |     parent_dir = os.path.dirname(path)
80 |     if not os.path.exists(parent_dir):
81 |         os.makedirs(parent_dir, exist_ok=True)
82 |     with open(path, 'w', encoding='utf-8') as f:
83 |         f.write(token)
84 | 
85 | def get_token(email: str, password: str, envoy_serial: str) -> str:
86 |     """
87 |     Do whatever it takes to get a token
88 |     """
89 |     token = get_cached_token(envoy_serial)
90 |     if token is None:
91 |         # Cached token does not exist. Get a new one
92 |         token = get_new_token(email, password, envoy_serial)
93 |         save_token_to_cache(envoy_serial, token)
94 | 
95 |     exp = token_expiration_date(token)
96 |     time_left = exp - datetime.now()
97 |     if time_left < timedelta(days=1):
98 |         # Token will expire soon. Get a new one
99 |         LOG.info("Token will expire soon. Getting a new one")
100 |         token = get_new_token(email, password, envoy_serial)
101 |         save_token_to_cache(envoy_serial, token)
102 | 
103 |     return token
104 | 
--------------------------------------------------------------------------------
/docs/system-service.md:
--------------------------------------------------------------------------------
1 | # Run as a System Service (on Linux) Instructions
2 | 
3 | This guide may be used to set up a service account and run `envoy_logger` (with
4 | auto-restart) on a modern Linux system.
5 | 
6 | See the [setup instructions](https://github.com/amykyta3/envoy-logger/blob/main/docs/Setup-Instructions.md)
7 | for an overall guide.
8 | 
9 | *** WARNING ***
10 | 
11 | Your Enlighten username and password as well as an API token that can be used to
12 | access your Envoy/Gateway will be stored in plain text in files in the home
13 | directory of the service account created in this guide. Do not use this method
14 | on shared servers, servers exposed to the Internet, or otherwise in environments
15 | where access to the server is not well controlled.
16 | 
17 | Additionally, it would be a good idea to set up a second Enlighten account and
18 | grant access to it instead of using your "Owner" account.
19 | 
20 | https://support.enphase.com/s/article/How-can-I-add-users-to-my-Enlighten-account
21 | 
22 | ## 1. Prepare Python
23 | 
24 | Generally speaking, the more Python modules you can install via your OS package
25 | manager, the better. Modules installed via your OS package manager will
26 | automatically be updated as you update your OS.
27 | 
28 | The list of modules required by `envoy_logger` can be found in the top-level
29 | [setup.py](https://github.com/amykyta3/envoy-logger/blob/main/setup.py) file.
30 | 
31 | Your OS may not supply all required modules, and that's okay. On a Rocky Linux
32 | 9 server with the EPEL repository enabled, most required modules can be
33 | installed via:
34 | 
35 |     sudo dnf install \
36 |         python3-pip \
37 |         python3-reactivex \
38 |         python3-certifi \
39 |         python3-influxdb \
40 |         python3-appdirs
41 | 
42 | At a minimum, ensure that pip is installed.
43 | 
44 | ## 2. Create Service Account
45 | 
46 | Create a dedicated user account (service account) under which the `envoy_logger`
47 | service will run. The username doesn't matter - just remember what you chose
48 | and substitute as appropriate.
49 | 
50 |     sudo useradd -c 'Envoy Logger' envoylog
51 | 
52 | Restrict access to the service account home directory.
53 | 
54 |     sudo chmod 700 ~envoylog
55 | 
56 | `~envoylog` is a shortcut to the home directory of the `envoylog` user. Be
57 | careful that you don't accidentally put a space between the `~` and user name
58 | (`~` on its own is a shortcut to the current user's home directory).
59 | 
60 | ## 3. Install and Configure `envoy_logger` Under the Service Account
61 | 
62 | The remaining Python modules, including `envoy_logger` itself, will be installed
63 | locally in the service account's home directory. Become the service account,
64 | then install the remaining software.
65 | 
66 |     sudo -u envoylog -i
67 |     pip3 install --user git+https://github.com/amykyta3/envoy-logger
68 | 
69 | While still logged in as the service account, create a `config.yml` file using
70 | [the example configuration](https://github.com/amykyta3/envoy-logger/blob/main/docs/examples/cfg.yaml)
71 | as a base.
72 | 
73 | The name of the file doesn't matter - just remember what you chose and
74 | substitute as appropriate. Once done, you can exit from the service account
75 | login session.
76 | 
77 | ## 4. Configure Systemd
78 | 
79 | Create a new Systemd service by running:
80 | 
81 |     sudo systemctl edit --force --full envoy-logger.service
82 | 
83 | The name of the service (in the example above, `envoy-logger`) doesn't matter -
84 | just remember what you chose and substitute as appropriate. When the editor
85 | opens, paste the following text:
86 | 
87 |     [Unit]
88 |     Description=Envoy Logger
89 |     After=multi-user.target
90 | 
91 |     [Service]
92 |     User=envoylog
93 |     Group=envoylog
94 |     RestartSec=5
95 |     Restart=always
96 |     ExecStart=python3 -m envoy_logger /home/envoylog/config.yml
97 | 
98 |     [Install]
99 |     WantedBy=multi-user.target
100 | 
101 | Save/exit from the editor.
102 | 
103 | Finally, enable the service to start at boot time and start it:
104 | 
105 |     sudo systemctl enable --now envoy-logger
106 | 
107 | You can view status with:
108 | 
109 |     sudo systemctl status envoy-logger
110 | 
111 | You can view logs with:
112 | 
113 |     sudo journalctl -u envoy-logger
114 | 
115 | (add a `-f` if you want to tail the logs)
116 | 
--------------------------------------------------------------------------------
/docs/Setup-Instructions.md:
--------------------------------------------------------------------------------
1 | # Set-up Instructions
2 | 
3 | This is not intended to be an exhaustive guide, but rather to provide some
4 | additional detail on how things fit together. This assumes you have
5 | a basic understanding of Grafana, InfluxDB, and general scripting. There are
6 | plenty of great tutorials for all of these should you need additional help.
7 | 
8 | ## 1. Set up an InfluxDB instance
9 | This is where your time-series data gets stored. The logging script featured in
10 | this repository writes into this database, and the Grafana front-end reads from
11 | it.
12 | 
13 | * Install InfluxDB
14 |   * Local install, or a Docker container. Really depends on what makes sense in your home setup.
15 | * Create an InfluxDB organization, and at least one data bucket
16 |   * I created two buckets: "low_rate" and "high_rate". This is entirely up to you, depending on how you want to control data retention policies/etc.
17 | * Create an access token for the logging script so that it is able to read/write the database.
18 | * Create a separate read-only access token for Grafana to read from the database.
19 |   * ... or just share the read/write access token.
20 | 
21 | ## 2. Set up the `envoy_logger` logging script
22 | 
23 | * Create a config file that describes your Envoy, how to connect to InfluxDB, and a few other things.
24 |   * Use this example file as a starting point: https://github.com/amykyta3/envoy-logger/blob/main/docs/examples/cfg.yaml
25 | * Locally test that the logging script can read from your Envoy, and push data to InfluxDB
26 |   * You may want to do this locally first before moving to your home automation server, docker container, or whatever your preferred environment is. This will let you tweak settings faster.
27 | * Clone and pip install, or pip install directly:
28 |   * `python3 -m pip install --force-reinstall git+https://github.com/amykyta3/envoy-logger`
29 | * Launch the logging script: `python3 -m envoy_logger path/to/your/cfg.yaml`
30 |   * If all goes well, you should see it go through authentication with both your Envoy and InfluxDB, and no error messages from the script.
31 | * Assuming that goes well, log in to your InfluxDB back-end and start exploring the data using their "Data Explorer" tool. If it's working properly, you should start seeing the data flow in. I recommend that you poke around and get familiar with how the data is structured, since it will help you build queries for your dashboard later.
32 | * Once you have proven it works decently when running locally, it is up to you to figure out how to have the logging script run in your home setup.
33 |   * If running in a home automation server, you could wrap this into a docker container.
34 |     * I have a rudimentary dockerfile you can use here: https://github.com/amykyta3/envoy-logger/blob/main/docs/examples/dockerfile
35 |     * ...
and a docker compose file too (volume paths will definitely be different for you): https://github.com/amykyta3/envoy-logger/blob/main/docs/examples/envoy-logger.yaml
36 |   * Alternatively, you could run as a system service: https://github.com/amykyta3/envoy-logger/blob/main/docs/system-service.md
37 | * However you choose to run the script, be sure to have it restart automatically if it exits.
38 | 
39 | ## 3. Set up Grafana
40 | Grafana is the front-end visualization tool where you can design dashboards to display your data.
41 | When you view a dashboard, Grafana pulls data from the InfluxDB database to display it.
42 | 
43 | * Install Grafana
44 |   * Local install, or a Docker container. Really depends on what makes sense in your home setup.
45 | * Add a connection to InfluxDB
46 |   * Using the authentication token created earlier.
47 | * Start building dashboards from your data!
48 |   * You will need to define some Flux queries to tell Grafana what data to fetch and how to organize it.
49 |   * I have shared the queries I use as a reference: https://github.com/amykyta3/envoy-logger/tree/main/docs/flux_queries
50 | 
51 | ## Common Issues
52 | 
53 | ### No "Totals" Data
54 | 
55 | #### Symptoms
56 | 
57 | Grafana panels for "Daily Totals" and "Per Panel Daily Totals" return "No Data"
58 | and the script exits every day around midnight with a stack trace and "Not Found"
59 | message such as:
60 | 
61 |     influxdb_client.rest.ApiException: (404)
62 |     Reason: Not Found
63 |     HTTP response headers: HTTPHeaderDict({'Content-Type': 'application/json; charset=utf-8', 'Vary': 'Accept-Encoding', 'X-Influxdb-Build': 'OSS', 'X-Influxdb-Version': 'v2.7.1', 'X-Platform-Error-Code': 'not found', 'Date': 'Thu, 06 Jul 2023 05:00:02 GMT', 'Transfer-Encoding': 'chunked'})
64 |     HTTP response body: b'{"code":"not found","message":"failed to initialize execute state: could not find bucket \\"low_rate\\""}'
65 | 
66 | #### Cause
67 | 
68 | The Influx API token in use has "WRITE" permissions but not "READ" permissions.
69 | The app requires "READ" permissions to calculate the daily data points that are
70 | inserted.
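A quick way to confirm the token can actually read the low-rate bucket is a one-off query from Python. This is only a sketch; the URL, org, bucket name, and token below follow the example cfg.yaml and are placeholders, so substitute your own values:

    # Sketch of a read-permission check (all values are placeholders).
    from influxdb_client import InfluxDBClient

    client = InfluxDBClient(url="http://localhost:8086", token="<your-token>", org="home")
    tables = client.query_api().query('from(bucket: "low_rate") |> range(start: -24h) |> limit(n: 1)')
    print(f"READ OK ({len(tables)} table(s) returned)")
    # An ApiException like the 404 above means the token lacks READ permission
    # (or the bucket name is wrong).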
71 | -------------------------------------------------------------------------------- /envoy_logger/model.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from typing import Optional, Dict 3 | import logging 4 | LOG = logging.getLogger("envoy") 5 | 6 | class PowerSample: 7 | """ 8 | A generic power sample 9 | """ 10 | def __init__(self, data, ts: datetime) -> None: 11 | self.ts = ts 12 | 13 | # Instantaneous measurements 14 | self.wNow = data['wNow'] # type: float 15 | self.rmsCurrent = data['rmsCurrent'] # type: float 16 | self.rmsVoltage = data['rmsVoltage'] # type: float 17 | self.reactPwr = data['reactPwr'] # type: float 18 | self.apprntPwr = data['apprntPwr'] # type: float 19 | 20 | # Historical measurements (Today) 21 | self.whToday = data['whToday'] # type: float 22 | self.vahToday = data['vahToday'] # type: float 23 | self.varhLagToday = data['varhLagToday'] # type: float 24 | self.varhLeadToday = data['varhLeadToday'] # type: float 25 | 26 | # Historical measurements (Lifetime) 27 | self.whLifetime = data['whLifetime'] # type: float 28 | self.vahLifetime = data['vahLifetime'] # type: float 29 | self.varhLagLifetime = data['varhLagLifetime'] # type: float 30 | self.varhLeadLifetime = data['varhLeadLifetime'] # type: float 31 | 32 | # Historical measurements (Other) 33 | self.whLastSevenDays = data['whLastSevenDays'] # type: float 34 | 35 | @property 36 | def pwrFactor(self) -> float: 37 | # calculate power factor locally for better precision 38 | if self.apprntPwr < 10.0: 39 | return 1.0 40 | return self.wNow / self.apprntPwr 41 | 42 | 43 | class EIMSample: 44 | """ 45 | "EIM" measurement. 46 | 47 | Intentionally discard all total measurements. 48 | Envoy firmware has a bug where it miscalculates apparent power. 49 | Better to recalculate the values locally 50 | """ 51 | def __init__(self, data, ts: datetime) -> None: 52 | assert data['type'] == "eim" 53 | 54 | # Do not use JSON data's timestamp. Envoy's clock is wrong 55 | self.ts = ts 56 | 57 | self.lines = [] 58 | for line_data in data['lines']: 59 | line = EIMLineSample(self, line_data) 60 | self.lines.append(line) 61 | 62 | LOG.debug(f"Sampled {len(self.lines)} power lines of type: {data['measurementType']}") 63 | 64 | class EIMLineSample(PowerSample): 65 | """ 66 | Sample for a Single "EIM" line sensor 67 | """ 68 | def __init__(self, parent: EIMSample, data) -> None: 69 | self.parent = parent 70 | super().__init__(data, parent.ts) 71 | 72 | 73 | class SampleData: 74 | def __init__(self, data, ts: datetime) -> None: 75 | 76 | # Do not use JSON data's timestamp. 
Envoy's clock is wrong
77 |         self.ts = ts
78 | 
79 |         self.net_consumption = None  # type: Optional[EIMSample]
80 |         self.total_consumption = None  # type: Optional[EIMSample]
81 |         self.total_production = None  # type: Optional[EIMSample]
82 | 
83 |         for consumption_data in data['consumption']:
84 |             if consumption_data['type'] == 'eim':
85 |                 if consumption_data['measurementType'] == 'net-consumption':
86 |                     self.net_consumption = EIMSample(consumption_data, self.ts)
87 |                 if consumption_data['measurementType'] == 'total-consumption':
88 |                     self.total_consumption = EIMSample(consumption_data, self.ts)
89 | 
90 |         for production_data in data['production']:
91 |             if production_data['type'] == 'eim':
92 |                 if production_data['measurementType'] == 'production':
93 |                     self.total_production = EIMSample(production_data, self.ts)
94 |             if production_data['type'] == 'inverters':
95 |                 # TODO: Parse this data too
96 |                 pass
97 | 
98 | 
99 | #===============================================================================
100 | class InverterSample:
101 |     def __init__(self, data, ts: datetime) -> None:
102 |         # envoy time is not particularly accurate. Use my own ts
103 |         self.ts = ts
104 | 
105 |         self.serial = data['serialNumber']  # type: str
106 |         self.report_ts = data['lastReportDate']  # type: int
107 |         self.watts = data['lastReportWatts']  # type: int
108 | 
109 | def parse_inverter_data(data, ts: datetime) -> Dict[str, InverterSample]:
110 |     """
111 |     Parse inverter JSON list and return a dictionary of inverter samples, keyed
112 |     by their serial number
113 |     """
114 |     inverters = {}
115 | 
116 |     for inverter_data in data:
117 |         inverter = InverterSample(inverter_data, ts)
118 |         inverters[inverter.serial] = inverter
119 | 
120 |     return inverters
121 | 
122 | def filter_new_inverter_data(new_data: Dict[str, InverterSample], prev_data: Dict[str, InverterSample]) -> Dict[str, InverterSample]:
123 |     """
124 |     Inverter measurements only update if the inverter actually sends a reported
125 |     value.
126 |     Compare against a prior sample, and return a new dict of inverter samples
127 |     that only contains the unique measurements
128 |     """
129 |     unique_inverters = {}  # type: Dict[str, InverterSample]
130 |     for serial, inverter in new_data.items():
131 |         if serial not in prev_data.keys():
132 |             unique_inverters[serial] = inverter
133 |             continue
134 | 
135 |         if inverter.report_ts != prev_data[serial].report_ts:
136 |             unique_inverters[serial] = inverter
137 |             continue
138 | 
139 |     return unique_inverters
140 | 
--------------------------------------------------------------------------------
/envoy_logger/sampling_loop.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, date
2 | import time
3 | from typing import List, Dict
4 | import logging
5 | from requests.exceptions import ReadTimeout, ConnectTimeout
6 | 
7 | from influxdb_client import WritePrecision, InfluxDBClient, Point
8 | from influxdb_client.client.write_api import SYNCHRONOUS
9 | 
10 | from . import envoy
11 | from .model import SampleData, PowerSample, InverterSample, filter_new_inverter_data
12 | from .cfg import Config
13 | 
14 | class SamplingLoop:
15 |     interval = 5
16 | 
17 |     def __init__(self, token: str, cfg: Config) -> None:
18 |         self.cfg = cfg
19 |         self.session_id = envoy.login(self.cfg.envoy_url, token)
20 | 
21 |         influxdb_client = InfluxDBClient(
22 |             url=cfg.influxdb_url,
23 |             token=cfg.influxdb_token,
24 |             org=cfg.influxdb_org
25 |         )
26 |         self.influxdb_write_api = influxdb_client.write_api(write_options=SYNCHRONOUS)
27 |         self.influxdb_query_api = influxdb_client.query_api()
28 | 
29 |         # Used to track the transition to the next day for daily measurements
30 |         self.todays_date = date.today()
31 | 
32 |         self.prev_inverter_data = None
33 | 
34 |     def run(self):
35 |         timeout_count = 0
36 |         while True:
37 |             try:
38 |                 data = self.get_sample()
39 |                 inverter_data = self.get_inverter_data()
40 |             except (ReadTimeout, ConnectTimeout):
41 |                 # Envoy gets REALLY MAD if you block its access to enphaseenergy.com
42 |                 # using a VLAN.
43 |                 # Its software gets hung up for some reason, and some requests will stall.
44 |                 # Allow envoy requests to time out (and skip this sample iteration)
45 |                 timeout_count += 1
46 |                 logging.warning("Envoy request timed out (%d/10)", timeout_count)
47 |                 if timeout_count >= 10:
48 |                     # Give up after a while
49 |                     raise
50 | 
51 |             else:
52 |                 self.write_to_influxdb(data, inverter_data)
53 |                 timeout_count = 0
54 | 
55 |     def get_sample(self) -> SampleData:
56 |         # Determine how long until the next sample needs to be taken
57 |         now = datetime.now()
58 |         time_to_next = self.interval - (now.timestamp() % self.interval)
59 | 
60 |         # wait!
61 |         time.sleep(time_to_next)
62 | 
63 |         data = envoy.get_power_data(self.cfg.envoy_url, self.session_id)
64 | 
65 |         return data
66 | 
67 |     def get_inverter_data(self) -> Dict[str, InverterSample]:
68 |         data = envoy.get_inverter_data(self.cfg.envoy_url, self.session_id)
69 | 
70 |         if self.prev_inverter_data is None:
71 |             self.prev_inverter_data = data
72 |             # Hard to know how stale inverter data is, so discard this sample
73 |             # since I have nothing to compare to yet
74 |             return {}
75 | 
76 |         # filter out stale inverter samples
77 |         filtered_data = filter_new_inverter_data(data, self.prev_inverter_data)
78 |         if filtered_data:
79 |             logging.debug("Got %d unique inverter measurements", len(filtered_data))
80 |         self.prev_inverter_data = data
81 |         return filtered_data
82 | 
83 |     def write_to_influxdb(self, data: SampleData, inverter_data: Dict[str, InverterSample]) -> None:
84 |         hr_points = self.get_high_rate_points(data, inverter_data)
85 |         lr_points = self.low_rate_points(data)
86 |         self.influxdb_write_api.write(bucket=self.cfg.influxdb_bucket_hr, record=hr_points)
87 |         if lr_points:
88 |             self.influxdb_write_api.write(bucket=self.cfg.influxdb_bucket_lr, record=lr_points)
89 | 
90 |     def get_high_rate_points(self, data: SampleData, inverter_data: Dict[str, InverterSample]) -> List[Point]:
91 |         points = []
92 |         for i, line in enumerate(data.total_consumption.lines):
93 |             p = self.idb_point_from_line("consumption", i, line)
94 |             points.append(p)
95 |         for i, line in enumerate(data.total_production.lines):
96 |             p = self.idb_point_from_line("production", i, line)
97 |             points.append(p)
98 |         for i, line in enumerate(data.net_consumption.lines):
99 |             p = self.idb_point_from_line("net", i, line)
100 |             points.append(p)
101 | 
102 |         for inverter in inverter_data.values():
103 |             p = self.point_from_inverter(inverter)
104 |             points.append(p)
105 | 
106 |         return points
107 | 
108 |     def idb_point_from_line(self, measurement_type: str, idx: int, data: PowerSample) -> Point:
109 |         p = Point(f"{measurement_type}-line{idx}")
110 |         p.time(data.ts, WritePrecision.S)
111 |         p.tag("source", self.cfg.source_tag)
112 |         p.tag("measurement-type", measurement_type)
113 |         p.tag("line-idx", idx)
114 | 
115 |         p.field("P", data.wNow)
116 |         p.field("Q", data.reactPwr)
117 |         p.field("S", data.apprntPwr)
118 | 
119 |         p.field("I_rms", data.rmsCurrent)
120 |         p.field("V_rms", data.rmsVoltage)
121 | 
122 |         return p
123 | 
124 |     def point_from_inverter(self, inverter: InverterSample) -> Point:
125 |         p = Point(f"inverter-production-{inverter.serial}")
126 |         p.time(inverter.ts, WritePrecision.S)
127 |         p.tag("source", self.cfg.source_tag)
128 |         p.tag("measurement-type", "inverter")
129 |         p.tag("serial", inverter.serial)
130 |         self.cfg.apply_tags_to_inverter_point(p, inverter.serial)
131 | 
132 |         p.field("P", inverter.watts)
133 | 
134 |         return p
135 | 
136 |     def low_rate_points(self, data: SampleData) -> List[Point]:
137 |         # First check if the day rolled over
138 |         new_date = date.today()
139 |         if self.todays_date == new_date:
140 |             # still the same date. No summary
141 |             return []
142 | 
143 |         # it is a new day!
144 |         self.todays_date = new_date
145 | 
146 |         # Collect points that summarize prior day
147 |         points = self.compute_daily_Wh_points(data.ts)
148 | 
149 |         return points
150 | 
151 |     def compute_daily_Wh_points(self, ts: datetime) -> List[Point]:
152 |         # Not using integral(interpolate: "linear") since it does not do what you
153 |         # might expect. Without the "interpolate" arg, it still does
154 |         # linear interpolation correctly.
155 |         # https://github.com/influxdata/flux/issues/4782
156 |         query = f"""
157 |         from(bucket: "{self.cfg.influxdb_bucket_hr}")
158 |             |> range(start: -24h, stop: 0h)
159 |             |> filter(fn: (r) => r["source"] == "{self.cfg.source_tag}")
160 |             |> filter(fn: (r) => r["_field"] == "P")
161 |             |> integral(unit: 1h)
162 |             |> keep(columns: ["_value", "line-idx", "measurement-type", "serial"])
163 |             |> yield(name: "total")
164 |         """
165 |         result = self.influxdb_query_api.query(query=query)
166 |         unreported_inverters = set(self.cfg.inverters.keys())
167 |         points = []
168 |         for table in result:
169 |             for record in table.records:
170 |                 measurement_type = record['measurement-type']
171 |                 if measurement_type == "inverter":
172 |                     serial = record['serial']
173 |                     unreported_inverters.discard(serial)
174 |                     p = Point(f"inverter-daily-summary-{serial}")
175 |                     p.tag("serial", serial)
176 |                     self.cfg.apply_tags_to_inverter_point(p, serial)
177 |                 else:
178 |                     idx = record['line-idx']
179 |                     p = Point(f"{measurement_type}-daily-summary-line{idx}")
180 |                     p.tag("line-idx", idx)
181 | 
182 |                 p.time(ts, WritePrecision.S)
183 |                 p.tag("source", self.cfg.source_tag)
184 |                 p.tag("measurement-type", measurement_type)
185 |                 p.tag("interval", "24h")
186 | 
187 |                 p.field("Wh", record.get_value())
188 |                 points.append(p)
189 | 
190 |         # If any inverters did not report in for the day, fill in a 0 Wh measurement
191 |         for serial in unreported_inverters:
192 |             p = Point(f"inverter-daily-summary-{serial}")
193 |             p.tag("serial", serial)
194 |             self.cfg.apply_tags_to_inverter_point(p, serial)
195 |             p.time(ts, WritePrecision.S)
196 |             p.tag("source", self.cfg.source_tag)
197 |             p.tag("measurement-type", "inverter")
198 |             p.tag("interval", "24h")
199 |             p.field("Wh", 0.0)
200 |             points.append(p)
201 | 
202 |         return points
203 | 
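A note on the nightly query in `compute_daily_Wh_points` above: `integral(unit: 1h)` turns the 5-second power samples (in W) into energy (in Wh) by, in effect, trapezoidal integration over the 24-hour window. A toy Python equivalent, with invented sample values, shows the arithmetic:

    # Trapezoidal Wh integration, analogous to Flux's integral(unit: 1h).
    # The (seconds, watts) samples below are made up for illustration.
    samples = [(0.0, 1000.0), (5.0, 1100.0), (10.0, 900.0)]

    wh = 0.0
    for (t0, p0), (t1, p1) in zip(samples, samples[1:]):
        wh += (p0 + p1) / 2.0 * (t1 - t0) / 3600.0  # average W times hours

    print(f"{wh:.3f} Wh")  # ~2.847 Wh for this 10-second window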
--------------------------------------------------------------------------------