├── .gitignore ├── .travis.yml ├── LICENSE.txt ├── SDS011 ├── .gitignore ├── Dockerfile ├── SDS011.py ├── SDS011.service ├── default.env └── readme.md ├── archive ├── RaspberryPi_Build_Bash │ └── raspi_emoncms_setup.sh ├── accountmonitor.php ├── backup │ ├── backup_method2.php │ ├── backup_timestore.php │ ├── backupemoncms.php │ └── emonbackup ├── bootstrap │ └── css │ │ └── bootstrap.css ├── feedstats.php ├── install_emoncms.sh ├── replication │ ├── import_full.php │ └── import_inputs.php ├── set_emoncms_settings.sh ├── update_pi_emoncms.sh └── user_diskuse.php ├── backup ├── backup.php └── lib │ ├── inputs.php │ ├── metadata.php │ ├── mysql.php │ ├── phpfina.php │ ├── phpfiwa.php │ ├── phptimeseries.php │ └── phptimestore.php ├── backup_py ├── convert_to_csv.py ├── data_downloader.py └── readme.md ├── carbonintensity ├── carbonintensity.py ├── default.carbonintensity.conf └── readme.md ├── composer.json ├── convertdata ├── Lib │ ├── EmonLogger.php │ └── PHPFina.php ├── archive │ ├── lib │ │ ├── PHPTimeSeries.php │ │ └── common.php │ ├── migrate.php │ ├── phpfina_to_phptimeseries.php │ ├── phptimestore_to_phpfina.php │ ├── phptimestore_to_phpfina_mv.php │ ├── sql_to_phptimeseries_mv.php │ └── sql_to_phptimeseries_mv_fast.php ├── check_emoncms_feeds_for_conversion.php ├── phpfina_convert_interval.php ├── phpfiwa_to_phpfina.php ├── phptimeseries_to_phpfina.php └── phptimestore_to_phpfina.php ├── create_users_and_devices_add_to_group └── create_users_and_devices_add_to_group.php ├── datarecovery ├── lib │ ├── common.php │ ├── phpfina.php │ ├── phpfiwa.php │ ├── phptimeseries.php │ └── phptimestore.php └── recover.php ├── enginereaders ├── phpfina.php ├── phpfiwa.php ├── phptimeseries.php └── phptimeseries.py ├── integritycheck ├── integritycheck.php ├── lib │ ├── phpfina.php │ ├── phpfiwa.php │ ├── phptimeseries.php │ └── phptimestore.php └── missingcheck.php ├── mbus ├── example.config.ini ├── load_config.py ├── mbus_app_reset.py ├── mbus_check_address.py ├── mbus_request_data_5b.py ├── mbus_request_data_7b.py ├── mbus_scan.py ├── mbus_set_address.py ├── mbus_set_baud.py └── mbus_set_page.py ├── module.json ├── mysql_backup └── mysql_backup_emoncms.sh ├── octopus ├── agile.py ├── default.agile.conf ├── get_octopus_mpan_serial.py ├── load_agile_rates.py └── readme.md ├── process ├── Lib │ ├── EmonLogger.php │ ├── PHPFina.php │ ├── PHPFiwa.php │ ├── PHPTimeSeries.php │ └── PHPTimestore.php ├── accumulating_kwh_reprocessor.php ├── power_to_kwh.php └── power_to_kwh_manual.php ├── readme.md ├── removespike └── remove_spike.php ├── resetpassword.php ├── sdpart ├── sdpart_imagefile └── sdpart_selfbuild ├── tests └── bulk_input_test.php ├── update_emoncms.sh └── writeloadtest ├── writeloadtest.php └── writeloadtest.service /.gitignore: -------------------------------------------------------------------------------- 1 | octopus/agile.conf 2 | carbonintensity/carbonintensity.conf 3 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: php 2 | php: 3 | - "7.0" 4 | - "7.1" 5 | - "7.2" 6 | - "7.3" 7 | env: 8 | global: 9 | - COMPOSER_DISABLE_XDEBUG_WARN=1 10 | install: 11 | - composer install 12 | script: 13 | - composer test 14 | -------------------------------------------------------------------------------- /SDS011/.gitignore: -------------------------------------------------------------------------------- 1 | env 2 | 
-------------------------------------------------------------------------------- /SDS011/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:2 2 | 3 | ADD SDS011.py / 4 | 5 | RUN pip install pyserial paho-mqtt 6 | 7 | # CMD [ "python", "./SDS011.py" ] 8 | CMD python -u SDS011.py 9 | -------------------------------------------------------------------------------- /SDS011/SDS011.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: UTF-8 -*- 3 | # ---------------------------------------------------------------------------------- 4 | # SDS011 Nova PM Sensor to Emoncms bridge 5 | # ---------------------------------------------------------------------------------- 6 | 7 | import serial, time, struct, time, urllib2, os 8 | import paho.mqtt.client as mqtt 9 | 10 | emoncms_nodename = os.environ['EMONCMS_NODENAME'] 11 | print 'emoncms nodename: ' + emoncms_nodename 12 | 13 | emoncms_host = os.environ['EMONCMS_HOST'] 14 | print 'emoncms host: ' + emoncms_host 15 | emoncms_apikey = os.environ['EMONCMS_APIKEY'] 16 | print 'emoncms apikey: ' + emoncms_apikey 17 | http_enable = os.environ['HTTP_ENABLE'] 18 | print 'http enable: ' + http_enable 19 | 20 | mqtt_user = os.environ['MQTT_USER'] 21 | print 'mqtt user: ' + mqtt_user 22 | mqtt_passwd = os.environ['MQTT_PASSWD'] 23 | print 'mqtt pass: ' + mqtt_passwd 24 | mqtt_host = os.environ['MQTT_HOST'] 25 | print 'mqtt host: ' + mqtt_host 26 | mqtt_port = os.environ['MQTT_PORT'] 27 | print 'mqtt port: ' + mqtt_port 28 | mqtt_enable = os.environ['MQTT_ENABLE'] 29 | print 'mqtt enable: ' + mqtt_enable 30 | 31 | serial_port = os.environ['SERIAL_PORT'] 32 | print 'serial port: ' + serial_port 33 | 34 | if mqtt_enable=='True': 35 | print 'Starting MQTT...' 36 | mqttc = mqtt.Client() 37 | mqttc.username_pw_set(mqtt_user, mqtt_passwd) 38 | mqttc.connect(mqtt_host, mqtt_port, 60) 39 | 40 | print 'connecting to ' + serial_port 41 | ser = serial.Serial(serial_port, baudrate=9600, stopbits=1, parity="N", timeout=2) 42 | 43 | ser.flushInput() 44 | 45 | byte, lastbyte = "\x00", "\x00" 46 | 47 | # Reading arrive from SDS011 every second, we average 10 readings every 10 seconds and send the result to emoncms 48 | pm_25_sum = 0 49 | pm_10_sum = 0 50 | count = 0 51 | 52 | lasttime = 0 53 | 54 | print 'starting loop..' 55 | while True: 56 | lastbyte = byte 57 | byte = ser.read(size=1) 58 | 59 | # Valid packet header 60 | if lastbyte == "\xAA" and byte == "\xC0": 61 | sentence = ser.read(size=8) # Read 8 more bytes 62 | readings = struct.unpack('=10.0: 74 | lasttime = time.time() 75 | if count>0: 76 | pm_25 = round(pm_25_sum/count,3) 77 | pm_10 = round(pm_10_sum/count,3) 78 | pm_25_sum = 0 79 | pm_10_sum = 0 80 | count = 0 81 | print "PM 2.5:",pm_25,"μg/m^3 PM 10:",pm_10,"μg/m^3" 82 | if http_enable=='True': 83 | print 'http post..' 84 | contents = urllib2.urlopen(emoncms_host+'/input/post?node='+emoncms_nodename+'&fulljson={"pm_25":'+str(pm_25)+',"pm_10":'+str(pm_10)+'}&apikey='+emoncms_apikey).read() 85 | if mqtt_enable=='True': 86 | print 'mqtt post..' 
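The frame-decoding lines of SDS011.py (the struct.unpack call and the code that accumulates pm_25_sum / pm_10_sum) are truncated in this listing, so the sketch below shows how an SDS011 data frame is typically decoded. It assumes the documented frame layout (0xAA 0xC0 header, PM2.5 low/high bytes, PM10 low/high bytes, two ID bytes, checksum, 0xAB tail); the function name and the sample payload are illustrative and not part of the original script.

```
import struct

def decode_sds011_frame(sentence):
    # "sentence" is the 8 bytes read after the 0xAA 0xC0 header:
    # PM2.5 (uint16 LE), PM10 (uint16 LE), 2 ID bytes, checksum, 0xAB tail.
    readings = struct.unpack("<HHxxBB", sentence)
    pm_25 = readings[0] / 10.0
    pm_10 = readings[1] / 10.0
    # Checksum is the low byte of the sum of the six data bytes (PM + ID).
    if (sum(bytearray(sentence[0:6])) & 0xFF) != readings[2] or readings[3] != 0xAB:
        return None
    return pm_25, pm_10

# Example frame reporting PM2.5 = 12.5 and PM10 = 30.0 ug/m^3
payload = struct.pack("<HHBB", 125, 300, 0x01, 0x02)
payload += struct.pack("BB", sum(bytearray(payload)) & 0xFF, 0xAB)
print(decode_sds011_frame(payload))  # (12.5, 30.0)
```

As the comment in the script notes, readings arrive roughly once per second and are averaged over ten seconds (the pm_25_sum, pm_10_sum and count variables) before being posted to emoncms.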
87 | mqttc.publish("emon/"+emoncms_nodename+"/pm_25",pm_25) 88 | mqttc.publish("emon/"+emoncms_nodename+"/pm_10",pm_10) 89 | 90 | if mqtt_enable=='True': 91 | mqttc.loop(0.0) 92 | 93 | time.sleep(0.01) 94 | 95 | 96 | 97 | -------------------------------------------------------------------------------- /SDS011/SDS011.service: -------------------------------------------------------------------------------- 1 | # Systemd unit file for SDS011 script 2 | 3 | # INSTALL: 4 | 5 | # sudo ln -s /opt/emoncms/modules/usefulscripts/SDS011/SDS011.service /lib/systemd/system 6 | 7 | # RUN AT STARTUP 8 | # sudo systemctl daemon-reload 9 | # sudo systemctl enable SDS011.service 10 | 11 | # START / STOP With: 12 | # sudo systemctl start SDS011 13 | # sudo systemctl stop SDS011 14 | 15 | # VIEW STATUS / LOG 16 | # If Using Syslog: 17 | # sudo systemctl status SDS011 -n50 18 | # where -nX is the number of log lines to view 19 | # sudo journalctl -f -u SDS011 20 | # Otherwise: 21 | # Specify 22 | # StandardOutput=file:/var/log/SDS011.log 23 | # tail -f /var/log/SDS011.log 24 | 25 | [Unit] 26 | Description=SDS011 script 27 | Wants=mysql.service redis.service 28 | After=mysql.service redis.service 29 | Documentation=https://github.com/emoncms/usefulscripts 30 | 31 | # Uncomment this line to use a dedicated log file for StdOut and StdErr. 32 | # NOTE: only works in systemd v236+ 33 | # Debain "stretch" includes v232, "buster" includes v239 34 | StandardOutput=file:/var/log/emoncms/SDS011.log 35 | 36 | [Service] 37 | Type=idle 38 | ExecStart=/usr/bin/python /opt/emoncms/modules/usefulscripts/SDS011/SDS011.py 39 | 40 | # Restart script if stopped on a failure. Will not restart if not configured correctly 41 | Restart=on-failure 42 | # Wait 60s before restart 43 | RestartSec=60 44 | 45 | # Tag things in the log 46 | # If you want to use the journal instead of the file above, uncomment SyslogIdentifier below 47 | # View with: sudo journalctl -f -u SDS011 -o cat 48 | SyslogIdentifier=SDS011 49 | 50 | [Install] 51 | WantedBy=multi-user.target 52 | 53 | -------------------------------------------------------------------------------- /SDS011/default.env: -------------------------------------------------------------------------------- 1 | HTTP_ENABLE=False 2 | EMONCMS_HOST=https://emoncms.org 3 | EMONCMS_APIKEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 4 | EMONCMS_NODENAME=SDS011 5 | MQTT_ENABLE=False 6 | MQTT_PORT=1883 7 | MQTT_HOST=localhost 8 | MQTT_USER=emonpi 9 | MQTT_PASSWD=emonpimqtt 10 | SERIAL_PORT=/dev/ttyUSB1 11 | 12 | 13 | -------------------------------------------------------------------------------- /SDS011/readme.md: -------------------------------------------------------------------------------- 1 | # SDS011 Nova PM Sensor to Emoncms bridge 2 | 3 | e.g https://www.amazon.co.uk/gp/product/B07911ZY9W 4 | 5 | 6 | ## Run using docker (Recomended) 7 | 8 | Tested on `emonSD-17Oct19`, Ubuntu 18.04 and Synology DSM6 9 | 10 | Copy the enviroment file: 11 | 12 | `cp default.env env` 13 | 14 | Edit your enviroment file to set your mqtt / emoncms http settings: 15 | 16 | `nano env` 17 | 18 | Build and run the docker container 19 | 20 | ``` 21 | docker build -t dust . 
22 | docker run --privileged --env-file=env dust 23 | ``` 24 | 25 | Run in the background on boot and always restart 26 | 27 | `docker run -d --privileged --restart=always --env-file=env dust` 28 | 29 | 30 | **** 31 | 32 | ## Run locally 33 | 34 | Copy the enviroment file: 35 | 36 | `cp default.env env` 37 | 38 | Edit your enviroment file to set your mqtt / emoncms http settings: 39 | 40 | `nano env` 41 | 42 | Export enviroment variables: 43 | 44 | `export $(xargs 2GB, if your SD card root partition is 2GB or less stop now and download and run wget http://raw.github.com/dweeber/rpiwiggle/master/rpi-wiggle to expand root to fill SD card (mins a little). Current SD card size:" 8 | echo " " 9 | df -h 10 | echo " " 11 | read -p 'If your SD card root partition is > 2GB press [Enter] to continue or [Ctrl + C] to exit' 12 | 13 | hostname="emoncmspi" 14 | DISABLE_LOGS=1 15 | 16 | echo "Updating.." 17 | apt-get update -y 18 | echo "Update..done!" 19 | 20 | echo "Intalling Mysql....." 21 | echo "Create a MYSQL root password...make note of this, you will need it later!" 22 | apt-get install -y mysql-server mysql-client 23 | echo "Mysql intall compleate!" 24 | 25 | echo "Intalling apache2....." 26 | apt-get install -y apache2 27 | echo "apache2 intall compleate!" 28 | 29 | echo "Intalling php....." 30 | apt-get install -y php5 libapache2-mod-php5 31 | apt-get install -y php5-mysql 32 | echo "php intall compleate!" 33 | 34 | echo "Enable mod rewrite.." 35 | a2enmod rewrite 36 | cp /etc/apache2/sites-enabled/000-default /etc/apache2/sites-enabled/000-default_backup -n 37 | sed 's/AllowOverride None/AllowOverride All/' /etc/apache2/sites-enabled/000-default_backup >/etc/apache2/sites-enabled/000-default 38 | 39 | echo "Enable mod rewrite done!" 40 | 41 | if [ $DISABLE_LOGS -eq 1 ] 42 | then 43 | echo "Turn off apache logs.." 44 | cp /etc/apache2/apache2.conf /etc/apache2/apache2.conf_backup -n 45 | sed -e 's\ErrorLog ${APACHE_LOG_DIR}/error.log\ErrorLog /dev/null\' -e 's\LogLevel warn\# LogLevel warn\' /etc/apache2/apache2.conf_backup >/etc/apache2/apache2.conf 46 | cp /etc/apache2/conf.d/other-vhosts-access-log /etc/apache2/conf.d/other-vhosts-access-log_backup -n 47 | sed 's\CustomLog ${APACHE_LOG_DIR}/other_vhosts_access.log vhost_combined\#CustomLog ${APACHE_LOG_DIR}/other_vhosts_access.log vhost_combined\' /etc/apache2/conf.d/other-vhosts-access-log_backup >/etc/apache2/conf.d/other-vhosts-access-log 48 | cp /etc/apache2/sites-enabled/000-default /etc/apache2/sites-enabled/000-default_backup2 -n 49 | sed -e 's\ErrorLog ${APACHE_LOG_DIR}/error.log\# ErrorLog ${APACHE_LOG_DIR}/error.log\' -e 's\LogLevel warn\# LogLevel warn\' -e 's\CustomLog ${APACHE_LOG_DIR}/access.log combined\# CustomLog ${APACHE_LOG_DIR}/access.log combined\' /etc/apache2/sites-enabled/000-default_backup2 >/etc/apache2/sites-enabled/000-default 50 | echo "Turn off apache logs..done!" 51 | fi 52 | 53 | echo "Setting Pi Local Network Hostname to "$hostname 54 | cp /etc/hostname /etc/hostname_backup -n 55 | sed "s/raspberrypi/$hostname/" /etc/hostname_backup >/etc/hostname 56 | cp /etc/hosts /etc/hosts_backup -n 57 | sed "s/raspberrypi/$hostname/" /etc/hosts_backup >/etc/hosts 58 | 59 | echo "Installing Git.." 60 | apt-get install -y git-core 61 | echo "Installing Git..done!" 62 | 63 | echo "Installing emoncms core.." 64 | cd /var/ 65 | chown pi www 66 | cd www 67 | git clone https://github.com/emoncms/emoncms.git 68 | echo "Installing emoncms core..done!" 69 | 70 | echo "Installing emoncms Raspberry Pi Module.." 
71 | cd /var/www/emoncms/Modules 72 | git clone https://github.com/emoncms/raspberrypi.git 73 | echo "Installing emoncms Raspberry Pi Module..done!" 74 | 75 | echo "Create an emoncms MYSQL database.." 76 | echo "enter msql root user password.." 77 | mysql -uroot -p -e "CREATE DATABASE IF NOT EXISTS emoncms;" 78 | echo "emoncms MYSQL database..done" 79 | 80 | echo "Set emoncms database settings.." 81 | cp /var/www/emoncms/default.settings.php /var/www/emoncms/settings_backup.php -n 82 | sed -e 's\$username = "";\$username = "root";\' -e 's\$password = "";\$password = "raspberry";\' -e 's\$database = "";\$database = "emoncms";\' /var/www/emoncms/settings_backup.php > /var/www/emoncms/settings.php 83 | echo "Set emoncms database settings..done" 84 | 85 | echo "RFM12BPi Setup" 86 | cp /boot/cmdline.txt /boot/cmdline_backup.txt -n 87 | sed 's\console=ttyAMA0,115200 kgdboc=ttyAMA0,115200 console=tty1 root=/dev/mmcblk0p2\console=tty1 root=/dev/mmcblk0p2\' /boot/cmdline_backup.txt >/boot/cmdline.txt 88 | cp /etc/inittab /etc/inittab_backup -n 89 | sed 's\T0:23:respawn:/sbin/getty -L ttyAMA0 115200 vt100\#T0:23:respawn:/sbin/getty -L ttyAMA0 115200 vt100\' /etc/inittab_backup > /etc/inittab 90 | 91 | echo "Install rfm12pi gateway service" 92 | apt-get install php-pear php5-dev -y 93 | apt-get install minicom -y 94 | pecl install channel://pecl.php.net/dio-0.0.6 95 | cp /etc/php5/cli/php.ini /etc/php5/cli/php_backup.ini -n 96 | 97 | 98 | echo "add dio.so as a dynamic extension to serial php library.." 99 | cp /etc/php5/cli/php.ini /etc/php5/cli/php_backup.ini -n 100 | sed 's/; extension=modulename.extension/extension=dio.so/' /etc/php5/cli/php_backup.ini > /etc/php5/cli/php.ini 101 | 102 | echo "Install rfm12piphp gateway service" 103 | cp /var/www/emoncms/Modules/raspberrypi/rfm12piphp /etc/init.d/ 104 | chmod 755 /etc/init.d/rfm12piphp 105 | update-rc.d rfm12piphp defaults 106 | 107 | echo "Add redirect index.php in /var/www" 108 | 109 | 110 | echo "Create index.php file in webserver root folder with re-direct to emoncms" 111 | cat > /var/www/index.php << EOF 112 | 113 |

Welcome

114 |

Goto Emoncms

115 | 116 | EOF 117 | 118 | echo "rename index.html if exists" 119 | cp /var/www/index.html /var/www/old_index.html -n 120 | rm /var/www/index.html 121 | 122 | 123 | 124 | 125 | -------------------------------------------------------------------------------- /archive/accountmonitor.php: -------------------------------------------------------------------------------- 1 | 14 | 15 | 16 | 17 |
18 |
19 | 20 |

Account monitor

21 | 22 | 23 | "User1", "apikey"=>"User1apikey"), 27 | array("name"=>"User2", "apikey"=>"User2apikey") 28 | ); 29 | 30 | foreach ($users as $user) 31 | { 32 | $feeds = json_decode(file_get_contents("http://emoncms.org/feed/list.json?apikey=".$user['apikey'])); 33 | 34 | $active = 0; 35 | foreach ($feeds as $feed) { 36 | 37 | $timeupdated = (time() - $feed->time/1000); 38 | if ($timeupdated<3600) $active++; 39 | } 40 | $total = count($feeds); 41 | 42 | echo ""; 51 | } 52 | 53 | ?> 54 | 55 |
".$user['name'].""; 43 | if ($active==$total) { 44 | echo ''.$active."/".$total.""; 45 | } elseif ($active>0) { 46 | echo ''.$active."/".$total.""; 47 | } else { 48 | echo ''.$active."/".$total.""; 49 | } 50 | echo "
56 |
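accountmonitor.php polls each listed account's feeds over the emoncms API and flags how many have updated within the last hour. A minimal standalone version of that check is sketched below, assuming the requests library and, as in the PHP above, that the feed list reports its time field in milliseconds; the server URL and API key are placeholders.

```
import time
import requests

def count_active_feeds(server, apikey, max_age=3600):
    # Return (active, total): feeds updated within the last max_age seconds.
    feeds = requests.get(server + "/feed/list.json", params={"apikey": apikey}).json()
    now = time.time()
    active = sum(1 for feed in feeds if (now - feed["time"] / 1000.0) < max_age)
    return active, len(feeds)

active, total = count_active_feeds("https://emoncms.org", "READ_APIKEY_HERE")
print("%d/%d feeds active in the last hour" % (active, total))
```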
57 | -------------------------------------------------------------------------------- /archive/backup/backup_method2.php: -------------------------------------------------------------------------------- 1 | query("SELECT id,name FROM feeds WHERE `datatype` = 1 ORDER BY id Asc"); 19 | $n = 0; 20 | 21 | while ($row = $result->fetch_array()) 22 | { 23 | $n++; 24 | 25 | $feedid = $row['id']; 26 | 27 | $verbose = true; 28 | 29 | $starttime = 0; 30 | $backupsize = 0; 31 | 32 | $backupfeedname = "/home/username/backup/feeds/feed_$feedid.MYD"; 33 | $primaryfeedname = "/var/lib/mysql/emoncms/feed_$feedid.MYD"; 34 | 35 | $primarysize = filesize($primaryfeedname); 36 | $backupsize = 0; 37 | 38 | if ($primarysize<(1024*1024*100)) // 100 MiB 39 | { 40 | $trate = 0; 41 | echo $n." "; 42 | // 1) Does backup MYD exist? 43 | if (file_exists($backupfeedname)) { 44 | $backupsize = filesize($backupfeedname); 45 | if ($verbose) echo "E "; 46 | } else echo "- "; 47 | 48 | if ($primarysize>$backupsize) 49 | { 50 | $dnsize = $primarysize-$backupsize; 51 | $dnstart = microtime(true); 52 | echo "DN "; 53 | $primary = fopen($primaryfeedname, 'rb'); 54 | $backup = fopen($backupfeedname, 'a'); 55 | fseek($primary,$backupsize); 56 | 57 | $left_to_read = $dnsize; 58 | do 59 | { 60 | if ($left_to_read>147456) $readsize = 147456; else $readsize = $left_to_read; 61 | $left_to_read -= $readsize; 62 | 63 | $data = fread($primary,$readsize); 64 | fwrite($backup,$data); 65 | if ($dnsize>147456) sleep(2); 66 | 67 | echo $left_to_read."\n"; 68 | } 69 | while ($left_to_read>0); 70 | 71 | fclose($backup); 72 | fclose($primary); 73 | 74 | $trate = ($dnsize / (microtime(true) - $dnstart))/1000.0; 75 | } else { echo "-- "; } 76 | 77 | echo "feed ".$row['id'].":".$row['name']; 78 | if ($trate>0) echo " ".number_format($trate,0)." kB/s ($dnsize)"; 79 | echo "\n"; 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /archive/backup/backup_timestore.php: -------------------------------------------------------------------------------- 1 | query("SELECT id,name FROM feeds WHERE `engine` = 1 ORDER BY id Asc"); 19 | $n = 0; 20 | 21 | while ($row = $result->fetch_array()) 22 | { 23 | $n++; 24 | 25 | $feedid = $row['id']; 26 | 27 | $verbose = true; 28 | 29 | $starttime = 0; 30 | $backupsize = 0; 31 | 32 | $backupfeedname = "/home/username/backup/feeds/feed_$feedid.MYD"; 33 | $primaryfeedname = "/var/lib/mysql/emoncms/feed_$feedid.MYD"; 34 | 35 | $primarysize = filesize($primaryfeedname); 36 | $backupsize = 0; 37 | 38 | if ($primarysize<(1024*1024*100)) // 100 MiB 39 | { 40 | $trate = 0; 41 | echo $n." "; 42 | // 1) Does backup MYD exist? 
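backup_method2.php (and the backup_timestore.php variant that follows) appends only the bytes a feed's MySQL .MYD file has gained since the last run, sleeping between chunks so the copy does not load the live server. A rough Python sketch of that resume-and-throttle pattern, with placeholder paths and the same 147456-byte block size used above:

```
import os
import time

CHUNK = 147456  # same block size as the PHP script

def incremental_copy(src, dst, delay=2):
    # Append whatever src has gained beyond dst's current size, throttled.
    done = os.path.getsize(dst) if os.path.exists(dst) else 0
    remaining = os.path.getsize(src) - done
    if remaining <= 0:
        return 0
    copied = 0
    with open(src, "rb") as fsrc, open(dst, "ab") as fdst:
        fsrc.seek(done)
        while copied < remaining:
            data = fsrc.read(min(CHUNK, remaining - copied))
            if not data:
                break
            fdst.write(data)
            copied += len(data)
            if remaining > CHUNK:
                time.sleep(delay)  # rate-limit large transfers, as the PHP does
    return copied

# incremental_copy("/var/lib/mysql/emoncms/feed_1.MYD",
#                  "/home/username/backup/feeds/feed_1.MYD")
```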
43 | if (file_exists($backupfeedname)) { 44 | $backupsize = filesize($backupfeedname); 45 | if ($verbose) echo "E "; 46 | } else echo "- "; 47 | 48 | if ($primarysize>$backupsize) 49 | { 50 | $dnsize = $primarysize-$backupsize; 51 | $dnstart = microtime(true); 52 | echo "DN "; 53 | $primary = fopen($primaryfeedname, 'rb'); 54 | $backup = fopen($backupfeedname, 'a'); 55 | fseek($primary,$backupsize); 56 | 57 | $left_to_read = $dnsize; 58 | do 59 | { 60 | if ($left_to_read>147456) $readsize = 147456; else $readsize = $left_to_read; 61 | $left_to_read -= $readsize; 62 | 63 | $data = fread($primary,$readsize); 64 | fwrite($backup,$data); 65 | if ($dnsize>147456) sleep(2); 66 | 67 | echo $left_to_read."\n"; 68 | } 69 | while ($left_to_read>0); 70 | 71 | fclose($backup); 72 | fclose($primary); 73 | 74 | $trate = ($dnsize / (microtime(true) - $dnstart))/1000.0; 75 | } else { echo "-- "; } 76 | 77 | echo "feed ".$row['id'].":".$row['name']; 78 | if ($trate>0) echo " ".number_format($trate,0)." kB/s ($dnsize)"; 79 | echo "\n"; 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /archive/backup/backupemoncms.php: -------------------------------------------------------------------------------- 1 | feeds.sql 28 | mysqldump -u root -p --single-transaction emoncms users > users.sql 29 | 30 | 2) Log into backup machine 31 | ssh username@ipaddress 32 | 33 | 3) Enter mysql terminal 34 | mysql -u username -p -A emoncms 35 | 36 | 4) Drop existing users and feeds tables: 37 | DROP TABLE feeds; 38 | DROP TABLE users; 39 | exit; 40 | 41 | 5) Copy over feeds and users table export from master server 42 | scp username@ipaddress:users.sql /home/username 43 | scp username@ipaddress:feeds.sql /home/username 44 | 45 | 6) Import users and feeds table into backup mysql database 46 | mysql -u username -p emoncms < users.sql 47 | mysql -u username -p emoncms < feeds.sql 48 | 49 | */ 50 | 51 | set_time_limit (10000); 52 | 53 | // Ensure only one instance of the script can run at any one time. 54 | $fp = fopen("importlock", "w"); 55 | if (! 
flock($fp, LOCK_EX | LOCK_NB)) { echo "Already running\n"; die; } 56 | 57 | // Connect to the database 58 | define('EMONCMS_EXEC', 1); 59 | 60 | require "process_settings.php"; 61 | $mysqli = @new mysqli( 62 | $settings["sql"]["server"], 63 | $settings["sql"]["username"], 64 | $settings["sql"]["password"], 65 | $settings["sql"]["database"], 66 | $settings["sql"]["port"] 67 | ); 68 | 69 | // Fetch the import queue 70 | $result = $mysqli->query("SELECT * FROM importqueue ORDER BY `queid` Desc LIMIT 640"); 71 | 72 | // If there are no rows then re fill import queue 73 | if ($result->num_rows==0) { 74 | echo "No rows in import queue, CREATING NEW IMPORT QUEUE"; 75 | 76 | $result = $mysqli->query("SELECT id, userid FROM feeds ORDER BY id ASC"); 77 | 78 | echo "Feeds to process: ".$result->num_rows."\n"; 79 | while ($row = $result->fetch_array()) 80 | { 81 | // 1) check if feed data table already exists 82 | $feedid = $row['id']; 83 | $userid = $row['userid']; 84 | 85 | $user_result = $mysqli->query("SELECT apikey_write FROM users WHERE id='$userid'"); 86 | $user_row = $user_result->fetch_array(); 87 | 88 | $remotekey = $user_row['apikey_write']; 89 | 90 | $mysqli->query("INSERT INTO importqueue (`userid`,`remoteurl`,`remotekey`,`remotefeedid`,`localfeedid`) VALUES ('$userid','','$remotekey','$feedid','$feedid')"); 91 | 92 | } 93 | 94 | // now that we have refilled the importqueue, select 600 rows for this sync run 95 | $result = $mysqli->query("SELECT * FROM importqueue ORDER BY `queid` Desc LIMIT 640"); 96 | } 97 | 98 | $starttime = time(); 99 | 100 | while ($row = $result->fetch_array()) 101 | { 102 | $queid = $row['queid']; 103 | 104 | $feedid = $row['localfeedid']; 105 | $userid = $row['userid']; 106 | $feedname = "feed_".$feedid; 107 | echo "Downloading $feedname\n"; 108 | 109 | // Get datatype 110 | $result_datatype = $mysqli->query("SELECT datatype FROM feeds WHERE `id`='$feedid'"); 111 | $datatype_row = $result_datatype->fetch_array(); 112 | $datatype = $datatype_row['datatype']; 113 | 114 | echo "Datatype is: ".$datatype."\n"; 115 | 116 | // check if feed data table already exists 117 | $resultB = $mysqli->query("SHOW TABLES LIKE '$feedname'"); 118 | 119 | if (!$resultB->num_rows) 120 | { 121 | if ($datatype!=3) { 122 | echo "New feed created\n"; 123 | $mysqli->query("CREATE TABLE $feedname (time INT UNSIGNED, data float,INDEX ( `time` ))"); 124 | } 125 | 126 | if ($datatype==3) { 127 | echo "Histogram feed created\n"; 128 | $mysqli->query( 129 | "CREATE TABLE $feedname (time INT UNSIGNED, data float, data2 float,INDEX ( `time` ))"); 130 | } 131 | } 132 | 133 | 134 | // Check if we have already downloaded part of the feed and get the last 135 | // value entered so that we dont download and insert data that has already 136 | // been inserted this makes this utility useful for syncing in general 137 | // and in particlar backup that only downloads the latest changes. 
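The comment above describes the key idea of the incremental sync: start from the newest timestamp already stored locally and ask /feed/export.json only for the rows after it. A minimal sketch of that request in Python, assuming the requests library; the server and API key are placeholders, and rows are simply collected into a list rather than inserted into MySQL in 400-row batches as the code below does.

```
import csv
import io
import requests

def fetch_new_rows(server, apikey, feedid, last_time):
    # Fetch (time, value) rows newer than last_time from the export API (CSV).
    params = {"apikey": apikey, "id": feedid, "start": last_time}
    resp = requests.get(server + "/feed/export.json", params=params, timeout=60)
    resp.raise_for_status()
    rows = []
    for line in csv.reader(io.StringIO(resp.text)):
        if len(line) >= 2 and line[0] != "" and line[1] != "":
            rows.append((int(line[0]), float(line[1])))
    return rows

# rows = fetch_new_rows("https://emoncms.org", "READ_APIKEY_HERE", 1, last_time=0)
```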
138 | $start = 0; 139 | $feed_result = $mysqli->query("SELECT * FROM $feedname ORDER BY time Desc LIMIT 1"); 140 | if ($feed_result) 141 | { 142 | $feed_row = $feed_result->fetch_array(); 143 | if ($feed_row[0]) $start = $feed_row[0]; 144 | } 145 | 146 | // Open the file served from the export page on the remote server 147 | $url = 'http://emoncms.org/feed/export.json?apikey='.$row['remotekey'].'&id=' 148 | .$feedid.'&start='.$start; 149 | 150 | echo "Opening file $url\n"; 151 | $fh = @fopen( $url, 'r' ); 152 | 153 | $histogram = false; 154 | 155 | if ($fh) 156 | { 157 | // Read through the file 158 | $i = 0; $vals = ""; 159 | while (($data = fgetcsv($fh, 0, ",")) !== FALSE) 160 | { 161 | $feedtime = $data[0]; 162 | $value = $data[1]; 163 | 164 | if (count($data)==3) $histogram = true; 165 | 166 | if ($feedtime!='' && $value!='') 167 | { 168 | $i++; 169 | //Contruct values part of the query 170 | if ($i!=1) $vals .= ','; 171 | 172 | if (!$histogram) { 173 | $vals .= "('$feedtime','$value')"; 174 | } else { 175 | if (isset($data[2])){ 176 | $value2 = $data[2]; 177 | $vals .= "('$feedtime','$value','$value2')";} 178 | } 179 | 180 | // Execute query every 400 rows (same block size as export script) 181 | if ($i>400) 182 | { 183 | $i = 0; 184 | if ($vals && !$histogram) $mysqli->query("INSERT INTO $feedname (`time`,`data`) VALUES ".$vals); 185 | if ($vals && $histogram) $mysqli->query("INSERT INTO $feedname (`time`,`data`,`data2`) VALUES ".$vals); 186 | $vals = ""; 187 | } 188 | } 189 | } 190 | 191 | // If there are lines to be inserted left over insert them here at the end 192 | if ($vals && !$histogram) $mysqli->query("INSERT INTO $feedname (`time`,`data`) VALUES ".$vals); 193 | if ($vals && $histogram) $mysqli->query("INSERT INTO $feedname (`time`,`data`,`data2`) VALUES ".$vals); 194 | $vals = ""; 195 | fclose($fh); 196 | } 197 | 198 | echo "Transfer complete\n"; 199 | echo "Deleting item $queid from queue\n"; 200 | $mysqli->query("DELETE FROM importqueue WHERE queid = $queid"); 201 | 202 | //if ((time()-$starttime)>120) die; 203 | } 204 | -------------------------------------------------------------------------------- /archive/backup/emonbackup: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ### 4 | # 5 | # All Emoncms code is released under the GNU Affero General Public License. 6 | # See COPYRIGHT.txt and LICENSE.txt. 
7 | # 8 | # --------------------------------------------------------------------- 9 | # Emoncms - open source energy visualisation 10 | # Part of the OpenEnergyMonitor project: 11 | # http://openenergymonitor.org 12 | # 13 | # Script below thanks to https://github.com/myheathub/mhh 14 | # 15 | ### 16 | 17 | ### BEGIN INIT INFO 18 | # Provides: emonbackup 19 | # Required-Start: $remote_fs 20 | # Required-Stop: $remote_fs 21 | # Should-Start: apache2 mysql 22 | # Default-Start: 2 3 4 5 23 | # Default-Stop: 0 1 6 24 | # Short-Description: Start emonbackup at startup 25 | # Description: RPI script daemon 26 | ### END INIT INFO 27 | 28 | # RPI sync control 29 | ########### SETTINGS ########## 30 | 31 | ## the user that must be used to run the bot 32 | USER=root 33 | 34 | ## where rpi is located 35 | RPI_BIN="/home/username/backup/backup.php" 36 | RPI_SCRIPT="backup.php" 37 | 38 | ## where the binary is located 39 | RPI_EXEC=/usr/bin/php 40 | 41 | if [ "$2" = "log" ]; then 42 | echo "Log is turned on" 43 | LOG=1 44 | else 45 | echo "Log is turned off" 46 | LOG=0 47 | fi 48 | 49 | ########### SETTINGS END ############ 50 | 51 | 52 | 53 | 54 | set -e 55 | DEBUG=off 56 | RPI_PID_FILE="/run/emonbackup.pid" 57 | 58 | if [ ! -f "$RPI_BIN" ]; then 59 | echo "ERROR: file not found : '$RPI_BIN'" 60 | exit 1 61 | fi 62 | if [ ! -f "$RPI_EXEC" ]; then 63 | echo "ERROR: file not found : '$RPI_EXEC'" 64 | exit 1 65 | fi 66 | if [ ! -x "$RPI_EXEC" ]; then 67 | echo "ERROR: cannot execute '$RPI_EXEC'" 68 | exit 1 69 | fi 70 | 71 | if [ "$(whoami)" != "$USER" ]; then 72 | echo "ERROR: you have to run that script as $USER" 73 | exit 1 74 | fi 75 | 76 | function debug() { 77 | if [ "$DEBUG" = "on" ]; then 78 | echo DEBUG: $@ 79 | fi 80 | } 81 | 82 | function do_start { 83 | cd $(dirname $RPI_BIN) 84 | sleep 7 85 | 86 | if [ "$LOG" = 1 ]; then 87 | $RPI_EXEC -f $RPI_BIN >> /home/username/backup/backup.log & 88 | echo $! > $RPI_PID_FILE 89 | else 90 | $RPI_EXEC -f $RPI_BIN > /dev/null 2>&1 & 91 | echo $! > $RPI_PID_FILE 92 | fi 93 | } 94 | 95 | function do_stop { 96 | NB_PROCESS=`ps ax | grep $RPI_SCRIPT | grep -v grep | wc -l` 97 | if [ $NB_PROCESS -gt 1 ]; then 98 | echo "ERROR: multiple $RPI_SCRIP processes found, you'd better kill thoses processes by hand." 99 | elif [ $NB_PROCESS -eq 1 ]; then 100 | if [ -f $RPI_PID_FILE ]; then 101 | PID=$(cat $RPI_PID_FILE) 102 | NB_PROCESS=`ps hax $PID | grep $RPI_SCRIPT | grep -v grep | wc -l` 103 | if [ $NB_PROCESS -eq 1 ]; then 104 | kill -15 $PID 105 | else 106 | echo "ERROR: process $PID does not seem to be $RPI_SCRIPT" 107 | echo "kill $RPI_SCRIPT by hand" 108 | fi 109 | fi 110 | else 111 | echo "WARNING: are you sure $RPI_SCRIPT is running ?" 
112 | fi 113 | } 114 | 115 | kill_script() { 116 | PID=`ps hax | grep "$RPI_SCRIPT" | grep -v grep | cut -d' ' -f1 | head -n1` 117 | echo "killing process [$PID]" 118 | kill -9 $PID 119 | } 120 | 121 | case "$1" in 122 | start) 123 | echo "Starting RPI" 124 | NB_PROCESS=`ps ax | grep $RPI_SCRIPT | grep -v grep | wc -l` 125 | if [ $NB_PROCESS -eq 0 ]; then 126 | do_start 127 | else 128 | echo "ERROR: RPI is already running" 129 | fi 130 | ;; 131 | stop) 132 | echo -n "Stopping RPI: " 133 | do_stop 134 | echo "stopped" 135 | ;; 136 | 137 | restart) 138 | echo -n "Restarting RPI" 139 | do_stop 140 | sleep 1 141 | do_start 142 | ;; 143 | 144 | status) 145 | debug "status:" 146 | NB_PROCESS=`ps ax | grep $RPI_SCRIPT | grep -v grep | wc -l` 147 | debug "NB_PROCESS: $NB_PROCESS" 148 | if [ $NB_PROCESS -gt 1 ]; then 149 | echo "WARNING: multiple $RPI_SCRIPT processes found !" 150 | elif [ $NB_PROCESS -eq 1 ]; then 151 | echo "running" 152 | else 153 | echo "stopped" 154 | fi 155 | ;; 156 | 157 | kill) 158 | kill_script 159 | ;; 160 | *) 161 | PROG_NAME=`basename $0` 162 | echo "Usage: $PROG_NAME {start|stop|restart|status|kill}" 163 | exit 1 164 | esac 165 | 166 | exit 0 167 | -------------------------------------------------------------------------------- /archive/feedstats.php: -------------------------------------------------------------------------------- 1 | query("SELECT id,datatype FROM feeds"); 6 | while ($feedsrow = $feedsresult->fetch_array()) 7 | { 8 | $feedid = $feedsrow['id']; 9 | 10 | $result = $mysqli->query("SHOW TABLE STATUS LIKE 'feed_$feedid'"); 11 | $row = $result->fetch_array(); 12 | 13 | $size = ($row['Data_length'] + $row['Index_length']); 14 | $datapoints = ($row['Data_length'] / 9); 15 | 16 | if ($datapoints>0) 17 | { 18 | $result = $mysqli->query("SELECT * FROM feed_$feedid WHERE time>2012 ORDER BY time Asc LIMIT 1"); 19 | $row = $result->fetch_array(); 20 | $start = $row['time']; 21 | 22 | $result = $mysqli->query("SELECT * FROM feed_$feedid ORDER BY time Desc LIMIT 1"); 23 | $row = $result->fetch_array(); 24 | $end = $row['time']; 25 | 26 | $totaltime = $end-$start; 27 | $interval = round($totaltime / $datapoints); 28 | 29 | if ($feedsrow['datatype']==2) $interval = 3600 * 24; 30 | if ($feedsrow['datatype']==3) $interval = 3600 * 24; 31 | 32 | //echo "Interval: $interval\n"; 33 | $result = $mysqli->query("UPDATE feeds SET `size` = '$size', `dpinterval` = '$interval' WHERE `id` = '$feedid'"); 34 | 35 | echo "feed_".$feedid." 
".$interval."s ".$size."b\n"; 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /archive/install_emoncms.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/emoncms/usefulscripts/b5c5df772aeae864fc5d8bbc9f871dd2e68c83d2/archive/install_emoncms.sh -------------------------------------------------------------------------------- /archive/replication/import_inputs.php: -------------------------------------------------------------------------------- 1 | query("DELETE FROM input WHERE `id` = '".$input->id."'"); 16 | $mysqli->query("INSERT INTO input (`id`,`userid`,`name`,`processList`,`time`,`value`,`nodeid`,`description`) VALUES ('".$input->id."','".$userid."','".$input->name."','".$input->processList."','".$input->time."','".$input->value."','".$input->nodeid."','".$input->description."')"); 17 | 18 | } 19 | 20 | -------------------------------------------------------------------------------- /archive/set_emoncms_settings.sh: -------------------------------------------------------------------------------- 1 | cd /var/www/emoncms 2 | 3 | echo "---------------------------------------------------" 4 | echo "Loading current settings" 5 | echo "---------------------------------------------------" 6 | 7 | # Fetch current settings from settings.php 8 | tmp=$(sed -n '/username/p' settings.php) 9 | mysql_username=$(echo $tmp | awk -F\" '{print $(NF-1)}') 10 | 11 | tmp=$(sed -n '/password/p' settings.php) 12 | mysql_password=$(echo $tmp | awk -F\" '{print $(NF-1)}') 13 | 14 | tmp=$(sed -n '/server/p' settings.php) 15 | mysql_server=$(echo $tmp | awk -F\" '{print $(NF-1)}') 16 | 17 | tmp=$(sed -n '/database/p' settings.php) 18 | mysql_database=$(echo $tmp | awk -F\" '{print $(NF-1)}') 19 | 20 | tmp=$(sed -n '/dbtest/p' settings.php) 21 | 22 | # check if dbtest is true 23 | dbtest=false 24 | 25 | if echo "$tmp" | egrep -q "true" ; then 26 | dbtest=true 27 | fi 28 | 29 | if echo "$tmp" | egrep -q "TRUE" ; then 30 | dbtest=true 31 | fi 32 | 33 | # if settings dont exist then ask for user entry 34 | if [ -z "$mysql_username" ]; then 35 | echo "No mysql username set, please enter your mysql username: (usually: root)" 36 | read mysql_username 37 | fi 38 | 39 | if [ -z "$mysql_password" ]; then 40 | echo "No mysql password set, please enter your mysql password: " 41 | read mysql_password 42 | fi 43 | 44 | if [ -z "$mysql_server" ]; then 45 | echo "No mysql server set, please enter your mysql server (usually: localhost): " 46 | read mysql_server 47 | fi 48 | 49 | if [ -z "$mysql_database" ]; then 50 | echo "No mysql database set, please enter your mysql database: (usually: emoncms)" 51 | read mysql_database 52 | fi 53 | 54 | echo "MYSQL username: "$mysql_username 55 | echo "MYSQL password: "$mysql_password 56 | echo "MYSQL server: "$mysql_server 57 | echo "MYSQL database: "$mysql_database 58 | echo 59 | if [ $dbtest ]; then 60 | echo "dbcheck: true" 61 | echo "YOU MAY WANT TO SET dbcheck in settings.php to FALSE when your sure your database is setup correctly" 62 | else 63 | echo "dbcheck: false" 64 | fi 65 | echo "" 66 | 67 | timestore_adminkey=$(cat "/var/lib/timestore/adminkey.txt") 68 | 69 | echo "Fetching timestore adminkey from /var/lib/timestore/adminkey.txt: " 70 | echo $timestore_adminkey 71 | 72 | echo "---------------------------------------------------" 73 | echo "Creating new settings.php file" 74 | echo "---------------------------------------------------" 75 | 76 | echo "Deleting 
old settings.php file" 77 | rm settings.php 78 | echo "copying default.settings.php to settings.php" 79 | cp default.settings.php settings.php 80 | 81 | echo "inserting settings as above" 82 | sed -i 's/^ $username.*/ $username = "'$mysql_username'";/' settings.php 83 | sed -i 's/^ $password.*/ $password = "'$mysql_password'";/' settings.php 84 | sed -i 's/^ $server.*/ $server = "'$mysql_server'";/' settings.php 85 | sed -i 's/^ $database.*/ $database = "'$mysql_database'";/' settings.php 86 | 87 | timestore_adminkey=$(echo "$timestore_adminkey"|sed 's!\([]\*\$\/&[]\)!\\\1!g') 88 | sed -i 's/^ $timestore_adminkey.*/ $timestore_adminkey = "'$timestore_adminkey'";/' settings.php 89 | 90 | if [ -z $dbtest ]; then 91 | sed -i 's/^ $dbtest.*/ $dbtest = FALSE;/' settings.php 92 | fi 93 | 94 | 95 | -------------------------------------------------------------------------------- /archive/update_pi_emoncms.sh: -------------------------------------------------------------------------------- 1 | 2 | echo "---------------------------------------------------" 3 | echo "Installing timestore" 4 | echo "---------------------------------------------------" 5 | # Install timestore 6 | 7 | cd /home/pi 8 | git clone https://github.com/TrystanLea/timestore 9 | cd timestore 10 | sudo sh install 11 | cd /home/pi 12 | 13 | echo "---------------------------------------------------" 14 | echo "Installing php5-curl" 15 | echo "---------------------------------------------------" 16 | # Install php curl 17 | sudo apt-get install php5-curl 18 | 19 | # Update emoncms 20 | cd /var/www/emoncms 21 | git pull 22 | 23 | # Create new settings.php file and copy settings over 24 | cd /home/pi/usefulscripts/ 25 | sh set_emoncms_settings.sh 26 | 27 | echo "---------------------------------------------------" 28 | echo "Updating raspberrypi module" 29 | echo "---------------------------------------------------" 30 | # Update raspberrypi module 31 | 32 | if [ -d "/var/www/emoncms/Modules/raspberrypi" ]; then 33 | echo "Updating raspberrypi module" 34 | cd /var/www/emoncms/Modules/raspberrypi 35 | git pull 36 | 37 | # Update raspberrypi init script 38 | sudo cp /var/www/emoncms/Modules/raspberrypi/rfm12piphp /etc/init.d/ 39 | sudo chmod 755 /etc/init.d/rfm12piphp 40 | sudo update-rc.d rfm12piphp defaults 41 | 42 | sudo service rfm12piphp stop 43 | sudo service rfm12piphp start 44 | 45 | else 46 | echo "Raspberrypi module is not installed" 47 | fi 48 | 49 | 50 | echo "---------------------------------------------------" 51 | echo "Updating event module" 52 | echo "---------------------------------------------------" 53 | # Update event module 54 | 55 | if [ -d "/var/www/emoncms/Modules/event" ]; then 56 | echo "Updating event module" 57 | cd /var/www/emoncms/Modules/event 58 | git pull 59 | else 60 | echo "Event module is not installed" 61 | fi 62 | 63 | 64 | echo "---------------------------------------------------" 65 | echo "Updating or installing converttotimestore module" 66 | echo "---------------------------------------------------" 67 | # Download converttotimestore 68 | 69 | if [ -d "/var/www/emoncms/Modules/converttotimestore" ]; then 70 | echo "Updating converttotimestore module" 71 | cd /var/www/emoncms/Modules/converttotimestore 72 | git pull 73 | else 74 | echo "Downloading converttotimestore module" 75 | cd /var/www/emoncms/Modules 76 | git clone https://github.com/emoncms/converttotimestore.git 77 | fi 78 | -------------------------------------------------------------------------------- /archive/user_diskuse.php: 
-------------------------------------------------------------------------------- 1 | query("SELECT id FROM users"); 6 | while ($user = $userq->fetch_array()) 7 | { 8 | $userid = $user['id']; 9 | $feedq = $mysqli->query("SELECT size FROM feeds WHERE `userid` = '$userid'"); 10 | 11 | $total = 0; 12 | 13 | while ($feed = $feedq->fetch_array()) 14 | { 15 | $total += $feed['size']; 16 | } 17 | 18 | $result = $mysqli->query("UPDATE users SET `diskuse` = '$total' WHERE `id` = '$userid'"); 19 | echo "user_".$userid." ".$total."\n"; 20 | } 21 | -------------------------------------------------------------------------------- /backup/backup.php: -------------------------------------------------------------------------------- 1 | array( 43 | 'datadir'=> "$dir/phpfiwa/" 44 | ), 45 | 'phpfina'=>array( 46 | 'datadir'=> "$dir/phpfina/" 47 | ), 48 | 'phptimeseries'=>array( 49 | 'datadir'=> "$dir/phptimeseries/" 50 | ), 51 | 'phptimestore'=>array( 52 | 'datadir'=> "$dir/timestore/" 53 | ) 54 | ); 55 | 56 | $mysqli = false; 57 | $redis = false; 58 | 59 | if ($link_to_local_emoncms) 60 | { 61 | define('EMONCMS_EXEC', 1); 62 | chdir($local_emoncms_location); 63 | require "process_settings.php"; 64 | $mysqli = @new mysqli( 65 | $settings["sql"]["server"], 66 | $settings["sql"]["username"], 67 | $settings["sql"]["password"], 68 | $settings["sql"]["database"], 69 | $settings["sql"]["port"] 70 | ); 71 | 72 | if ($settings['redis']['enabled']) { 73 | $redis = new Redis(); 74 | $connected = $redis->connect($settings['redis']['host'], $settings['redis']['port']); 75 | if (!$connected) { echo "Can't connect to redis at ".$settings['redis']['host'].":".$settings['redis']['port']." , it may be that redis-server is not installed or started see readme for redis installation"; die; } 76 | if (!empty($settings['redis']['prefix'])) $redis->setOption(Redis::OPT_PREFIX, $settings['redis']['prefix']); 77 | if (!empty($settings['redis']['auth'])) { 78 | if (!$redis->auth($settings['redis']['auth'])) { 79 | echo "Can't connect to redis at ".$settings['redis']['host'].", autentication failed"; die; 80 | } 81 | } 82 | if (!empty($settings['redis']['dbnum'])) { 83 | $redis->select($settings['redis']['dbnum']); 84 | } 85 | } else { 86 | $redis = false; 87 | } 88 | 89 | $engines = $settings['feed']; 90 | 91 | if ($backup_inputs) backup_inputs($mysqli,$remote_server,$remote_apikey,$local_emoncms_userid); 92 | } 93 | 94 | // Fetch remote server feed list 95 | $feeds = file_get_contents($remote_server."/feed/list.json?apikey=$remote_apikey"); 96 | $feeds = json_decode($feeds); 97 | 98 | $number_of_feeds = count($feeds); 99 | echo $number_of_feeds." 
Emoncms.org feeds found\n"; 100 | 101 | if ($number_of_feeds==0) { 102 | echo "No feeds found at remote account\n"; 103 | die; 104 | } 105 | 106 | foreach ($feeds as $feed) 107 | { 108 | $feed->userid = $local_emoncms_userid; 109 | if ($link_to_local_emoncms) register_emoncms_feed($mysqli,$redis,$feed); 110 | 111 | if ($feed->engine==0 && $mysqli) { 112 | import_mysql($feed,$remote_server,$remote_apikey,$mysqli); 113 | } 114 | 115 | if ($feed->engine==1) { 116 | import_phptimestore($feed->id,$remote_server,$remote_apikey,$engines['phptimestore']['datadir']); 117 | } 118 | 119 | if ($feed->engine==2) { 120 | import_phptimeseries($feed->id,$remote_server,$remote_apikey,$engines['phptimeseries']['datadir']); 121 | } 122 | 123 | if ($feed->engine==5) { 124 | import_phpfina($feed->id,$remote_server,$remote_apikey,$engines['phpfina']['datadir']); 125 | } 126 | 127 | if ($feed->engine==6) { 128 | import_phpfiwa($feed->id,$remote_server,$remote_apikey,$engines['phpfiwa']['datadir']); 129 | } 130 | 131 | if ($feed->engine==4) { 132 | import_phptimestore($feed->id,$remote_server,$remote_apikey,$engines['phptimestore']['datadir']); 133 | } 134 | } 135 | 136 | if ($link_to_local_emoncms) reload_emoncms_feeds($mysqli,$redis,$local_emoncms_userid); 137 | 138 | -------------------------------------------------------------------------------- /backup/lib/inputs.php: -------------------------------------------------------------------------------- 1 | query("DELETE FROM input WHERE `id` = '".$input->id."'"); 12 | $mysqli->query("INSERT INTO input (`id`,`userid`,`name`,`processList`,`time`,`value`,`nodeid`,`description`) VALUES ('".$input->id."','".$userid."','".$input->name."','".$input->processList."','".$input->time."','".$input->value."','".$input->nodeid."','".$input->description."')"); 13 | 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /backup/lib/metadata.php: -------------------------------------------------------------------------------- 1 | query("SELECT id FROM feeds WHERE `id` = '".$feed->id."'"); 7 | if (!$result->num_rows) 8 | { 9 | echo "Create feed ".$feed->id."\n"; 10 | 11 | if ($feed->public=="") $feed->public = "0"; 12 | 13 | $mysqli->query("INSERT INTO feeds (id,userid,name,tag,datatype,public,size,engine) VALUES ('".$feed->id."','".$feed->userid."','".$feed->name."','".$feed->tag."','1','".$feed->public."','".$feed->size."','".$feed->engine."')"); 14 | 15 | if ($redis) { 16 | $redis->hMSet("feed:$feed->id",array( 17 | 'id'=>$feed->id, 18 | 'userid'=>$feed->userid, 19 | 'name'=>$feed->name, 20 | 'datatype'=>1, 21 | 'tag'=>$feed->tag, 22 | 'public'=>$feed->public, 23 | 'size'=>$feed->size, 24 | 'engine'=>$feed->engine 25 | )); 26 | } 27 | } else { 28 | // echo "feed exists ".$feed->id."\n"; 29 | } 30 | } 31 | 32 | function reload_emoncms_feeds($mysqli,$redis,$userid) 33 | { 34 | if (!$redis) return false; 35 | 36 | $result = $mysqli->query("SELECT id,userid,name,datatype,tag,public,size,engine FROM feeds WHERE `userid` = '$userid'"); 37 | 38 | while ($row = $result->fetch_object()) 39 | { 40 | $redis->sAdd("user:feeds:$userid", $row->id); 41 | $redis->hMSet("feed:$row->id",array( 42 | 'id'=>$row->id, 43 | 'userid'=>$row->userid, 44 | 'name'=>$row->name, 45 | 'datatype'=>1, 46 | 'tag'=>$row->tag, 47 | 'public'=>$row->public, 48 | 'size'=>$row->size, 49 | 'engine'=>$row->engine 50 | )); 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /backup/lib/mysql.php: 
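reload_emoncms_feeds above repopulates the redis cache emoncms keeps alongside MySQL: a set user:feeds:<userid> holding the user's feed ids, plus one feed:<id> hash per feed with its metadata. The sketch below shows the same key layout through the redis-py client (3.5+ for hset with a mapping); it assumes a local, unprefixed redis, and the field values are placeholders.

```
import redis

r = redis.Redis(host="localhost", port=6379, db=0)

def cache_feed(userid, feed):
    # Mirror one row of the feeds table into the keys emoncms reads at runtime.
    r.sadd("user:feeds:%d" % userid, feed["id"])
    r.hset("feed:%d" % feed["id"], mapping={
        "id": feed["id"], "userid": userid, "name": feed["name"],
        "datatype": 1, "tag": feed["tag"], "public": feed["public"],
        "size": feed["size"], "engine": feed["engine"],
    })

cache_feed(1, {"id": 7, "name": "use", "tag": "house",
               "public": 0, "size": 0, "engine": 5})
```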
-------------------------------------------------------------------------------- 1 | id."\n"; 9 | 10 | $feedname = "feed_".trim($feed->id).""; 11 | 12 | $result = $mysqli->query("SELECT id FROM feeds WHERE `id` = '".$feed->id."'"); 13 | 14 | if (!$result->num_rows) 15 | { 16 | // Create feed 17 | echo "create feed ".$feed->id."\n"; 18 | //$result = $mysqli->query("INSERT INTO feeds (id,userid,name,tag,datatype,public,engine) VALUES ('".$feed->id."','$userid','".$feed->name."','".$feed->tag."','".$feed->datatype."','false','0')"); 19 | 20 | if (!isset($feed->datatype)) $feed->datatype = 1; 21 | 22 | if ($feed->datatype==1 || $feed->datatype==2) 23 | { 24 | $result = $mysqli->query( 25 | "CREATE TABLE $feedname ( 26 | time INT UNSIGNED, data float, 27 | INDEX ( `time` ))"); 28 | } 29 | 30 | elseif ($feed->datatype==3) { 31 | $result = $mysqli->query( 32 | "CREATE TABLE $feedname ( 33 | time INT UNSIGNED, data float, data2 float, 34 | INDEX ( `time` ))"); 35 | } 36 | } 37 | else 38 | { 39 | $feedid = $feed->id; 40 | $result = $mysqli->query("SHOW TABLES LIKE 'feed_$feedid'"); 41 | 42 | if (!$result->num_rows) 43 | { 44 | echo "Creating data table feed:$feedid\n"; 45 | 46 | if ($feed->datatype==1 || $feed->datatype==2) 47 | { 48 | $result = $mysqli->query("CREATE TABLE $feedname (time INT UNSIGNED, data float,INDEX ( `time` ))"); 49 | } 50 | 51 | elseif ($feed->datatype==3) { 52 | $result = $mysqli->query("CREATE TABLE $feedname (time INT UNSIGNED, data float, data2 float,INDEX ( `time` ))"); 53 | } 54 | 55 | } 56 | 57 | } 58 | 59 | // Check if we have already downloaded part of the feed and get the last 60 | // value entered so that we dont download and insert data that has already 61 | // been inserted this makes this utility useful for syncing in general 62 | // and in particlar backup that only downloads the latest changes. 
63 | $start = 0; 64 | $feed_result = $mysqli->query("SELECT * FROM $feedname ORDER BY time Desc LIMIT 1"); 65 | if ($feed_result) 66 | { 67 | $feed_row = $feed_result->fetch_array(); 68 | if ($feed_row[0]) $start = $feed_row[0]; 69 | } 70 | 71 | // Open the file served from the export page on the remote server 72 | $url = $server.'/feed/export.json?apikey='.$apikey.'&id='.$feed->id.'&start='.$start; 73 | 74 | // echo "Opening file $url\n"; 75 | $fh = @fopen( $url, 'r' ); 76 | 77 | $histogram = false; 78 | 79 | if ($fh) 80 | { 81 | // The first line is to be updated 82 | if ($start!=0) 83 | { 84 | $data = fgetcsv($fh, 0, ","); 85 | if (isset($data[0]) && isset($data[1]) && count($data)!=3) 86 | { 87 | $feedtime = $data[0]; $value = $data[1]; 88 | $mysqli->query("UPDATE $feedname SET `data` = '$value' WHERE `time` = '$feedtime'"); 89 | } 90 | 91 | if (isset($data[0]) && isset($data[1]) && count($data)==3) 92 | { 93 | $feedtime = $data[0]; $value = $data[1]; $value2 = $data[2]; 94 | $mysqli->query("UPDATE $feedname SET `data` = '$value', `data2`='$value2' WHERE `time` = '$feedtime'"); 95 | } 96 | } 97 | 98 | $lines = 0; 99 | // Read through the file 100 | $i = 0; $vals = ""; 101 | while (($data = fgetcsv($fh, 0, ",")) !== FALSE) 102 | { 103 | $feedtime = $data[0]; 104 | 105 | if (isset($data[1])) { 106 | $value = $data[1]; 107 | 108 | if (count($data)==3) $histogram = true; 109 | 110 | if ($feedtime!='' && $value!='') 111 | { 112 | $i++; 113 | //Contruct values part of the query 114 | if ($i!=1) $vals .= ','; 115 | 116 | if (!$histogram) { 117 | $vals .= "('$feedtime','$value')"; 118 | } else { 119 | $value2 = $data[2]; 120 | $vals .= "('$feedtime','$value','$value2')"; 121 | } 122 | 123 | // Execute query every 400 rows (same block size as export script) 124 | if ($i>400) 125 | { 126 | $i = 0; 127 | if ($vals && !$histogram) $mysqli->query("INSERT INTO $feedname (`time`,`data`) VALUES ".$vals); 128 | if ($vals && $histogram) $mysqli->query("INSERT INTO $feedname (`time`,`data`,`data2`) VALUES ".$vals); 129 | $vals = ""; 130 | $lines++; 131 | } 132 | } 133 | } 134 | } 135 | 136 | // If there are lines to be inserted left over insert them here at the end 137 | if ($vals && !$histogram) $mysqli->query("INSERT INTO $feedname (`time`,`data`) VALUES ".$vals); 138 | if ($vals && $histogram) $mysqli->query("INSERT INTO $feedname (`time`,`data`,`data2`) VALUES ".$vals); 139 | $vals = ""; 140 | fclose($fh); 141 | } 142 | 143 | echo "--lines: ".$lines."\n"; 144 | } 145 | 146 | -------------------------------------------------------------------------------- /backup/lib/phpfina.php: -------------------------------------------------------------------------------- 1 | start_time) || !isset($remote_meta->interval)) { 11 | echo "ERROR: Invalid remote meta, returned false\n"; 12 | echo json_encode($remote_meta)."\n"; 13 | return false; 14 | } 15 | 16 | // Load local meta data file 17 | if (file_exists($datadir.$id.".meta")) 18 | { 19 | $local_meta = new stdClass(); 20 | 21 | if (!$metafile = @fopen($datadir.$id.".meta", 'rb')) { 22 | echo "Cannot open local metadata file\n"; 23 | return false; 24 | } 25 | 26 | fseek($metafile,8); 27 | 28 | $tmp = unpack("I",fread($metafile,4)); 29 | $local_meta->interval = $tmp[1]; 30 | $tmp = unpack("I",fread($metafile,4)); 31 | $local_meta->start_time = $tmp[1]; 32 | 33 | fclose($metafile); 34 | 35 | } else { 36 | $local_meta = $remote_meta; 37 | 38 | if (!$metafile = @fopen($datadir.$id.".meta", 'wb')) { 39 | echo "Cannot open local metadata file\n"; 40 | return false; 41 
| } 42 | 43 | // First 8 bytes used to hold id and npoints but are now removed. 44 | fwrite($metafile,pack("I",0)); 45 | fwrite($metafile,pack("I",0)); 46 | fwrite($metafile,pack("I",$local_meta->interval)); 47 | fwrite($metafile,pack("I",$local_meta->start_time)); 48 | fclose($metafile); 49 | } 50 | 51 | // We now check if the local meta data is the same as the remote meta data. 52 | // Given that the starttime, the interval and the feedname is the same we assume 53 | // that we are dealing with the same feed 54 | if ($local_meta->start_time == $remote_meta->start_time && $local_meta->interval == $remote_meta->interval) 55 | { 56 | if (file_exists($datadir.$id.".dat")) { 57 | $downloadfrom = filesize($datadir.$id.".dat"); 58 | if (intval($downloadfrom/4.0)!=($downloadfrom/4.0)) { 59 | echo "ERROR: local datafile filesize is not an integer number of 4 bytes\n"; 60 | die; 61 | } 62 | } else { 63 | $downloadfrom = 0; 64 | } 65 | 66 | $url = $server."/feed/export.json?apikey=$apikey&id=$id&start=$downloadfrom"; 67 | 68 | if (!$primary = @fopen( $url, 'r' )) { 69 | echo "Cannot access remote server\n"; 70 | return false; 71 | } 72 | 73 | if ($downloadfrom>=4) { 74 | // update last datapoint 75 | $firstdp = fread($primary,4); 76 | if (!$backup = @fopen($datadir.$id.".dat", 'c')) { 77 | echo "Cannot open local data file - to update last datapoint\n"; 78 | return false; 79 | } 80 | fseek($backup,$downloadfrom-4); 81 | fwrite($backup,$firstdp); 82 | fclose($backup); 83 | } 84 | 85 | if (!$backup = @fopen($datadir.$id.".dat", 'a')) { 86 | echo "Cannot open local data file - to append data\n"; 87 | return false; 88 | } 89 | 90 | $dnsize = 0; 91 | if ($primary) 92 | { 93 | for (;;) 94 | { 95 | $data = fread($primary,8192); 96 | fwrite($backup,$data); 97 | $dnsize += strlen($data); 98 | if (feof($primary)) break; 99 | } 100 | } 101 | 102 | fclose($backup); 103 | fclose($primary); 104 | 105 | echo "--downloaded: ".$dnsize." bytes\n"; 106 | } 107 | else 108 | { 109 | echo "ERROR: Local and remote meta data do not match\n"; 110 | echo "-- local->start = ".$local_meta->start_time." remote->start = ".$remote_meta->start_time."\n"; 111 | echo "-- local->interval = ".$local_meta->interval." 
remote->interval = ".$remote_meta->interval."\n"; 112 | 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /backup/lib/phpfiwa.php: -------------------------------------------------------------------------------- 1 | start_time) || !isset($remote_meta->interval[0])) { 11 | echo "ERROR: Invalid remote meta, returned false\n"; 12 | echo json_encode($remote_meta)."\n"; 13 | return false; 14 | } 15 | 16 | // Load local meta data file 17 | if (file_exists($datadir.$id.".meta")) 18 | { 19 | $local_meta = new stdClass(); 20 | if (!$metafile = @fopen($datadir.$id.".meta", 'rb')) { 21 | echo "Cannot open local meta data file\n"; 22 | return false; 23 | } 24 | 25 | fseek($metafile,4); 26 | 27 | $tmp = unpack("I",fread($metafile,4)); 28 | $local_meta->start_time = $tmp[1]; 29 | $tmp = unpack("I",fread($metafile,4)); 30 | $local_meta->nlayers = $tmp[1]; 31 | 32 | $local_meta->npoints = array(); 33 | for ($i=0; $i<$local_meta->nlayers; $i++) 34 | { 35 | $tmp = unpack("I",fread($metafile,4)); 36 | $local_meta->npoints[$i] = $tmp[1]; 37 | } 38 | 39 | $local_meta->interval = array(); 40 | for ($i=0; $i<$local_meta->nlayers; $i++) 41 | { 42 | $tmp = unpack("I",fread($metafile,4)); 43 | $local_meta->interval[$i] = $tmp[1]; 44 | } 45 | 46 | fclose($metafile); 47 | } else { 48 | $local_meta = $remote_meta; 49 | 50 | if (!$metafile = @fopen($datadir.$id.".meta", 'wb')) { 51 | echo "Cannot open local meta data file: Does the phpfiwa folder exist?\n"; 52 | return false; 53 | } 54 | fwrite($metafile,pack("I",0)); 55 | fwrite($metafile,pack("I",$remote_meta->start_time)); 56 | fwrite($metafile,pack("I",$remote_meta->nlayers)); 57 | foreach ($remote_meta->npoints as $n) fwrite($metafile,pack("I",0)); // Legacy 58 | foreach ($remote_meta->interval as $d) fwrite($metafile,pack("I",$d)); 59 | fclose($metafile); 60 | } 61 | 62 | // We now check if the local meta data is the same as the remote meta data. 
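Both the phpfina.php importer above and this phpfiwa.php importer resume a .dat download at the local file's byte size, but first rewrite the last 4-byte value already on disk, since that datapoint may have changed on the server after the previous sync. Below is a compact Python sketch of that overwrite-then-append step, mirroring the byte arithmetic of the PHP; the URL layout follows the export calls used in this library and the parameter values are placeholders.

```
import os
try:
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib2 import urlopen         # Python 2

def sync_dat(server, apikey, feedid, local_path):
    # Append new 4-byte values to local_path, refreshing the last one first.
    offset = os.path.getsize(local_path) if os.path.exists(local_path) else 0
    url = ("%s/feed/export.json?apikey=%s&id=%d&start=%d"
           % (server, apikey, feedid, offset))
    remote = urlopen(url)

    if offset >= 4:
        # The stream resends the datapoint just before the requested offset;
        # overwrite the local copy in case it was updated since the last sync.
        last = remote.read(4)
        with open(local_path, "r+b") as f:
            f.seek(offset - 4)
            f.write(last)

    with open(local_path, "ab") as f:
        while True:
            chunk = remote.read(8192)
            if not chunk:
                break
            f.write(chunk)
```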
63 | // Given that the starttime, the interval and the feedname is the same we assume 64 | // that we are dealing with the same feed 65 | if ($local_meta->start_time == $remote_meta->start_time && $local_meta->interval[0] == $remote_meta->interval[0]) 66 | { 67 | for ($layer=0; $layer<$local_meta->nlayers; $layer++) 68 | { 69 | echo "--layer: $layer "; 70 | 71 | if (file_exists($datadir.$id."_".$layer.".dat")) { 72 | $downloadfrom = filesize($datadir.$id."_".$layer.".dat"); 73 | if (intval($downloadfrom/4.0)!=($downloadfrom/4.0)) { 74 | echo "ERROR: local datafile filesize is not an integer number of 4 bytes\n"; 75 | die; 76 | } 77 | } else { 78 | $downloadfrom = 0; 79 | } 80 | 81 | $url = $server."/feed/export.json?apikey=$apikey&id=$id&layer=$layer&start=$downloadfrom"; 82 | if (!$primary = @fopen( $url, 'r' )) { 83 | echo "Failed to access remote server\n"; 84 | return false; 85 | } 86 | 87 | if ($downloadfrom>=4) { 88 | // update last datapoint 89 | $firstdp = fread($primary,4); 90 | if (!$backup = @fopen($datadir.$id."_".$layer.".dat", 'c')) { 91 | echo "Cannot open local data file - to update last datapoint\n"; 92 | return false; 93 | } 94 | fseek($backup,$downloadfrom-4); 95 | fwrite($backup,$firstdp); 96 | fclose($backup); 97 | } 98 | 99 | if (!$backup = @fopen($datadir.$id."_".$layer.".dat", 'a')) { 100 | echo "Cannot open local data file - to append data\n"; 101 | return false; 102 | } 103 | 104 | $dnsize = 0; 105 | if ($primary) 106 | { 107 | for (;;) 108 | { 109 | $data = fread($primary,8192); 110 | $p1 = ftell($backup); 111 | fwrite($backup,$data); 112 | $dnsize += ftell($backup) - $p1; 113 | if (feof($primary)) break; 114 | } 115 | } 116 | 117 | fclose($backup); 118 | fclose($primary); 119 | 120 | echo $dnsize." bytes\n"; 121 | } 122 | } 123 | else 124 | { 125 | echo "ERROR: Local and remote meta data do not match\n"; 126 | echo "-- local->start = ".$local_meta->start_time." remote->start = ".$remote_meta->start_time."\n"; 127 | echo "-- local->interval = ".$local_meta->interval[0]." 
remote->interval = ".$remote_meta->interval[0]."\n"; 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /backup/lib/phptimeseries.php: -------------------------------------------------------------------------------- 1 | start) || !isset($remote_meta->interval)) 18 | { 19 | echo "Error in fetching remote meta data, received: $result\n"; 20 | return false; 21 | } 22 | 23 | // Check if there is a local timestore feed (REPEAT OF ABOVE) 24 | $feedname = $datadir.str_pad($id, 16, '0', STR_PAD_LEFT).".tsdb"; 25 | 26 | if (file_exists($feedname)) 27 | { 28 | $local_meta = new stdClass(); 29 | if (!$metafile = @fopen($feedname, 'rb')) { 30 | echo "Cannot open local meta data file\n"; 31 | return false; 32 | } 33 | fseek($metafile,8); 34 | $d = fread($metafile,8); 35 | $tmp = unpack("h*",$d); // id no longer used 36 | $tmp = unpack("I",fread($metafile,4)); 37 | $local_meta->nmetrics = $tmp[1]; 38 | $tmp = unpack("I",fread($metafile,4)); // npoints no longer used 39 | $tmp = unpack("I",fread($metafile,8)); 40 | $local_meta->start = $tmp[1]; 41 | $tmp = unpack("I",fread($metafile,4)); 42 | $local_meta->interval = $tmp[1]; 43 | fclose($metafile); 44 | } 45 | else 46 | { 47 | $local_meta = $remote_meta; 48 | 49 | if (!$metafile = @fopen($feedname, 'wb')) { 50 | echo "Cannot open local meta data file\n"; 51 | return false; 52 | } 53 | fwrite($metafile,pack("I",0)); 54 | fwrite($metafile,pack("I",0)); 55 | fwrite($metafile,pack("h*",strrev(str_pad(0, 16, '0', STR_PAD_LEFT)))); 56 | fwrite($metafile,pack("I",$local_meta->nmetrics)); 57 | fwrite($metafile,pack("I",0)); // Legacy 58 | fwrite($metafile,pack("I",$local_meta->start)); 59 | fwrite($metafile,pack("I",0)); 60 | fwrite($metafile,pack("I",$local_meta->interval)); 61 | fclose($metafile); 62 | } 63 | 64 | // We now check if the local meta data is the same as the remote meta data. 65 | // Given that the starttime, the interval and the feedname is the same we assume 66 | // that we are dealing with the same feed 67 | if ($local_meta->start == $remote_meta->start && $local_meta->interval == $remote_meta->interval) 68 | { 69 | // Download all 6 timestore layers 70 | for ($layer=0; $layer<6; $layer++) 71 | { 72 | 73 | echo "--layer: $layer "; 74 | $feedname = $datadir.str_pad($id, 16, '0', STR_PAD_LEFT)."_".$layer."_.dat"; 75 | 76 | if (file_exists($feedname)) { 77 | $downloadfrom = filesize($feedname); 78 | 79 | if (intval($downloadfrom/4.0)!=($downloadfrom/4.0)) { 80 | echo "Timestore feed ".$id." 
corrupt\n"; 81 | die; 82 | } 83 | } else { 84 | $downloadfrom = 0; 85 | } 86 | 87 | $url = $server."/feed/export.json?apikey=$apikey&id=".$id."&layer=$layer&start=$downloadfrom"; 88 | if (!$primary = @fopen( $url, 'r' )) { 89 | echo "Failed to access remote server\n"; 90 | return false; 91 | } 92 | 93 | if ($downloadfrom>=4) { 94 | // update last datapoint 95 | $firstdp = fread($primary,4); 96 | 97 | if (!$backup = @fopen($feedname, 'c')) { 98 | echo "Cannot open local data file\n"; 99 | return false; 100 | } 101 | fseek($backup,$downloadfrom-4); 102 | fwrite($backup,$firstdp); 103 | fclose($backup); 104 | } 105 | 106 | if (!$backup = @fopen($feedname, 'a')) { 107 | echo "Cannot open local data file\n"; 108 | return false; 109 | } 110 | 111 | $dnsize = 0; 112 | if ($primary) 113 | { 114 | for (;;) 115 | { 116 | $data = fread($primary,8192); 117 | $p1 = ftell($backup); 118 | fwrite($backup,$data); 119 | $dnsize += ftell($backup) - $p1; 120 | if (feof($primary)) break; 121 | } 122 | } 123 | 124 | fclose($backup); 125 | fclose($primary); 126 | 127 | echo $dnsize." bytes\n"; 128 | } 129 | } 130 | else 131 | { 132 | echo "local and remote meta data do not match\n"; 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /backup_py/convert_to_csv.py: -------------------------------------------------------------------------------- 1 | import os, json, struct 2 | from pathlib import Path 3 | 4 | username = "" 5 | 6 | def phpfina_get_meta(datadir,feedid): 7 | if os.path.isfile(datadir+str(feedid)+".meta"): 8 | fh = open(datadir+str(feedid)+".meta","rb") 9 | tmp = struct.unpack("IIII",fh.read(16)) 10 | fh.close() 11 | meta = {'start_time': tmp[2], 'interval': tmp[3], 'npoints':0} 12 | 13 | if os.path.isfile(datadir+str(feedid)+".dat"): 14 | bytesize = os.stat(datadir+str(feedid)+".dat").st_size 15 | meta['npoints'] = int(bytesize/4.0) 16 | return meta 17 | else: 18 | return False 19 | 20 | def phpfina_convert(datadir,csvdir,feedid,tag,name): 21 | meta = phpfina_get_meta(datadir,feedid) 22 | csv_fh = open(csvdir+str(feedid)+"-"+str(tag)+"-"+str(name)+".csv","w") 23 | fh = open(datadir+str(feedid)+".dat","rb") 24 | for i in range(0,meta['npoints']): 25 | time = meta['start_time'] + i*meta['interval'] 26 | val = struct.unpack("f",fh.read(4)) 27 | csv_fh.write(str(time)+","+str(val[0])+"\n") 28 | fh.close() 29 | csv_fh.close() 30 | 31 | def phptimeseries_convert(datadir,csvdir,feedid,tag,name): 32 | if os.path.isfile(datadir+"feed_"+str(feedid)+".MYD"): 33 | bytesize = os.stat(datadir+"feed_"+str(feedid)+".MYD").st_size 34 | npoints = int(bytesize/9.0) 35 | 36 | csv_fh = open(csvdir+str(feedid)+"-"+str(tag)+"-"+str(name)+".csv","w") 37 | fh = open(datadir+"feed_"+str(feedid)+".MYD","rb") 38 | 39 | for i in range(0,npoints): 40 | tmp = struct.unpack('0: 33 | end_time = meta['start_time'] + (meta['interval'] * meta['npoints']) 34 | start = datetime.fromtimestamp(end_time) 35 | print("Request from:\t\t"+start.isoformat()) 36 | else: 37 | start = datetime(2017,10,1) # First available date 38 | 39 | headers = { 40 | 'Accept': 'application/json' 41 | } 42 | 43 | while start /dev/null 2>&1 24 | 25 | *Set to 2:15am here, specific time not crucial but good to randomise to reduce load on carbon intensity server* 26 | -------------------------------------------------------------------------------- /composer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "emoncms/usefulscripts", 3 | "homepage": 
"https://emoncms.org", 4 | "support": { 5 | "forum": "https://community.openenergymonitor.org/" 6 | }, 7 | "require-dev": { 8 | "php-parallel-lint/php-parallel-lint": "^1.2.0" 9 | }, 10 | "scripts": { 11 | "test": [ 12 | "parallel-lint . --exclude vendor" 13 | ] 14 | }, 15 | "license": "AGPL-3.0-or-later" 16 | } 17 | -------------------------------------------------------------------------------- /convertdata/Lib/EmonLogger.php: -------------------------------------------------------------------------------- 1 | logenabled = false; 26 | } 27 | else if ($settings['log']['location']) { 28 | $this->logfile = $settings['log']['location']."/emoncms.log"; 29 | $this->caller = basename($clientFileName); 30 | if (!file_exists($this->logfile)) 31 | { 32 | $fh = @fopen($this->logfile,"a"); 33 | @fclose($fh); 34 | } 35 | if (is_writable($this->logfile)) $this->logenabled = true; 36 | } 37 | } 38 | 39 | public function info ($message){ 40 | $this->write("INFO",$message); 41 | } 42 | 43 | public function warn ($message){ 44 | $this->write("WARN",$message); 45 | } 46 | 47 | public function error ($message){ 48 | $this->write("ERROR",$message); 49 | } 50 | 51 | private function write($type,$message){ 52 | if (!$this->logenabled) return; 53 | 54 | $now = microtime(true); 55 | $micro = sprintf("%03d",($now - ($now >> 0)) * 1000); 56 | $now = DateTime::createFromFormat('U', (int)$now); // Only use UTC for logs 57 | $now = $now->format("Y-m-d H:i:s").".$micro"; 58 | // Clear log file if more than 256MB (temporary solution) 59 | if (filesize($this->logfile)>(1024*1024*256)) { 60 | $fh = @fopen($this->logfile,"w"); 61 | @fclose($fh); 62 | } 63 | if ($fh = @fopen($this->logfile,"a")) { 64 | @fwrite($fh,$now."|$type|$this->caller|".$message."\n"); 65 | @fclose($fh); 66 | } 67 | } 68 | 69 | } 70 | -------------------------------------------------------------------------------- /convertdata/archive/lib/common.php: -------------------------------------------------------------------------------- 1 | $onepercent) { 68 | $pos = 0; 69 | $percent ++; 70 | print "$percent%\n"; 71 | } 72 | } 73 | 74 | fclose($fhr); 75 | fclose($fhw); 76 | } 77 | 78 | function attach_to_user($userid,$feeds,$enginename) 79 | { 80 | global $mysqli; 81 | 82 | foreach ($feeds as $id) 83 | { 84 | $name = $enginename."_".$id; 85 | $datatype = 1; 86 | 87 | if ($enginename=='phptimeseries') $engine = 2; 88 | if ($enginename=='phptimestore') $engine = 4; 89 | if ($enginename=='phpfina') $engine = 5; 90 | if ($enginename=='phpfiwa') $engine = 6; 91 | 92 | $result = $mysqli->query("SELECT id FROM feeds WHERE id = '$id'"); 93 | if ($result->num_rows==0) { 94 | $result = $mysqli->query("INSERT INTO feeds (id,userid,name,datatype,public,engine) VALUES ('$id','$userid','$name','$datatype',false,'$engine')"); 95 | print "Adding feed $id\n"; 96 | } 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /convertdata/archive/phpfina_to_phptimeseries.php: -------------------------------------------------------------------------------- 1 | interval = $tmp[1]; 17 | $tmp = unpack("I",fread($metafile,4)); 18 | $meta->start_time = $tmp[1]; 19 | fclose($metafile); 20 | 21 | 22 | $phpfina_fh = fopen($phpfina_dir.$phpfina_feedid.".dat", 'rb'); 23 | $filesize = filesize($phpfina_dir.$phpfina_feedid.".dat"); 24 | 25 | $npoints = floor($filesize / 4.0); 26 | 27 | $phptimeseries_fh = fopen($phptimeseries_dir."feed_".$phptimeseries_feedid.".MYD", 'a'); 28 | 29 | for ($i=0; $i<$npoints; $i++) 30 | { 31 | $val = 
unpack("f",fread($phpfina_fh,4)); 32 | 33 | $time = $meta->start_time + $i * $meta->interval; 34 | $value = $val[1]; 35 | 36 | //print $time." ".$value."\n"; 37 | 38 | if (!is_nan($value)) fwrite($phptimeseries_fh, pack("CIf",249,$time,$value)); 39 | } 40 | -------------------------------------------------------------------------------- /convertdata/archive/phptimestore_to_phpfina.php: -------------------------------------------------------------------------------- 1 | query("SELECT * FROM feeds WHERE `engine`= 4"); 28 | 29 | $sourcedir = "/var/lib/timestore/"; 30 | $targetdir = "/var/lib/phpfina/"; 31 | 32 | while($row = $result->fetch_array()) 33 | { 34 | print $row['id']." ".$row['name']."\n"; 35 | 36 | $id = $row['id']; 37 | 38 | $sourcefile = $sourcedir.str_pad($id, 16, '0', STR_PAD_LEFT)."_0_.dat"; 39 | $targetfile = $targetdir.$id.".dat"; 40 | 41 | copy_data(array( 42 | 'sourcefile'=>$sourcefile, 43 | 'targetfile'=>$targetfile, 44 | 'bytelength'=>4 45 | )); 46 | 47 | $feedname = str_pad($id, 16, '0', STR_PAD_LEFT).".tsdb"; 48 | $meta = new stdClass(); 49 | $metafile = fopen($sourcedir.$feedname, 'rb'); 50 | 51 | fseek($metafile,8); 52 | $d = fread($metafile,8); 53 | $tmp = unpack("h*",$d); 54 | $tmp = unpack("I",fread($metafile,4)); 55 | $meta->nmetrics = $tmp[1]; 56 | $tmp = unpack("I",fread($metafile,4)); 57 | $tmp = unpack("I",fread($metafile,8)); 58 | $meta->start = $tmp[1]; 59 | $tmp = unpack("I",fread($metafile,4)); 60 | $meta->interval = $tmp[1]; 61 | fclose($metafile); 62 | 63 | $metafile = fopen($targetdir.$id.".meta", 'wb'); 64 | fwrite($metafile,pack("I",0)); 65 | fwrite($metafile,pack("I",0)); 66 | fwrite($metafile,pack("I",$meta->interval)); 67 | fwrite($metafile,pack("I",$meta->start)); 68 | fclose($metafile); 69 | 70 | $mysqli->query("UPDATE feeds SET `engine`=5 WHERE `id`='$id'"); 71 | } 72 | -------------------------------------------------------------------------------- /convertdata/archive/phptimestore_to_phpfina_mv.php: -------------------------------------------------------------------------------- 1 | connect("127.0.0.1"); 33 | if (!$connected) { 34 | print "Could not connect to redis\n"; 35 | $redis = false; 36 | } 37 | } 38 | 39 | $result = $mysqli->query("Show columns from feeds like 'timestore'"); 40 | $row = $result->fetch_array(); 41 | if ($row) $mysqli->query("UPDATE feeds SET `engine`='1' WHERE `timestore`='1' AND `engine`<4"); 42 | 43 | $result = $mysqli->query("SELECT * FROM feeds WHERE `engine`= 4 OR `engine`=1"); 44 | print "There are ".$result->num_rows." feeds to convert, would you like to continue?"; 45 | $handle = fopen ("php://stdin","r"); 46 | $line = fgets($handle); 47 | if(trim($line) != 'y'){ 48 | exit; 49 | } 50 | 51 | while($row = $result->fetch_array()) 52 | { 53 | print "userid:".$row['userid']." feed:".$row['id']." 
name:".$row['name']."\n"; 54 | 55 | $id = $row['id']; 56 | 57 | // read meta data 58 | $meta = new stdClass(); 59 | 60 | $feedname = str_pad($id, 16, '0', STR_PAD_LEFT).".tsdb"; 61 | $metafile = fopen($timestore_dir.$feedname, 'rb'); 62 | 63 | fseek($metafile,(8+8+4+4)); 64 | 65 | $tmp = unpack("I",fread($metafile,8)); 66 | $meta->start_time = $tmp[1]; 67 | $tmp = unpack("I",fread($metafile,4)); 68 | $meta->interval = $tmp[1]; 69 | 70 | fclose($metafile); 71 | 72 | $metafile = fopen($phpfina_dir.$id.".meta", 'wb'); 73 | fwrite($metafile,pack("I",0)); 74 | fwrite($metafile,pack("I",0)); 75 | fwrite($metafile,pack("I",$meta->interval)); 76 | fwrite($metafile,pack("I",$meta->start_time)); 77 | fclose($metafile); 78 | 79 | $sourcedata = $timestore_dir.str_pad($id, 16, '0', STR_PAD_LEFT)."_0_.dat"; 80 | $targetdata = $phpfina_dir.$id.".dat"; 81 | 82 | print "cp $sourcedata $targetdata\n"; 83 | exec("cp $sourcedata $targetdata"); 84 | 85 | if ($redis) $redis->hset("feed:$id","engine",5); 86 | $mysqli->query("UPDATE feeds SET `engine`=5 WHERE `id`='$id'"); 87 | print "Feed $id is now PHPFina\n"; 88 | } 89 | -------------------------------------------------------------------------------- /convertdata/archive/sql_to_phptimeseries_mv.php: -------------------------------------------------------------------------------- 1 | $timeseries_dir)); 29 | 30 | $result = $mysqli->query("SELECT * FROM feeds WHERE `engine`= 0 AND `datatype`= 2"); 31 | print "There are ".$result->num_rows." feeds to convert, would you like to continue?(y/n) "; 32 | $handle = fopen ("php://stdin","r"); 33 | $line = fgets($handle); 34 | if(trim($line) != 'y'){ 35 | exit; 36 | } 37 | print "\n"; 38 | 39 | print "Do you want to create a CSV backup in your PHPTimeSeries folder?(y/n) "; 40 | $handle = fopen ("php://stdin","r"); 41 | $do_backup = fgets($handle); 42 | print "\n"; 43 | 44 | 45 | while($row = $result->fetch_array()) 46 | { 47 | print "userid:".$row['userid']." feed:".$row['id']." name:".$row['name']."\n"; 48 | 49 | $id = $row['id']; 50 | $current_feed_value = $row['value']; 51 | 52 | $engineresult = $engine->create($id,0); 53 | 54 | if ($engineresult == true) { 55 | 56 | if (trim($do_backup) == 'y') { 57 | $backupfile = fopen("{$timeseries_dir}feed_{$id}backup.csv", "w") or die("Unable to open backup file!"); 58 | 59 | fwrite($backupfile, "timestamp;data\n"); 60 | 61 | } 62 | 63 | 64 | $data = $mysqli->query("SELECT * FROM feed_{$id}"); 65 | 66 | while($row = $data->fetch_array()) { 67 | //print "timestamp:".$row['time']." 
data:".$row['data']."\n"; 68 | $postresult = $engine->post($id,$row['time'],$row['data']); 69 | if (trim($do_backup) == 'y') {fwrite($backupfile, "{$row['time']};{$row['data']}\n");} 70 | //print $postresult."\n"; 71 | } 72 | 73 | if (trim($do_backup) == 'y') {fclose($backupfile);} 74 | 75 | $mysqli->query("UPDATE feeds SET `engine`=2 WHERE `id`='$id'"); 76 | $mysqli->query("UPDATE feeds SET `value`=$current_feed_value WHERE `id`='$id'"); 77 | 78 | exec("chown {$uid}:{$gid} {$timeseries_dir}feed_{$id}.MYD"); 79 | 80 | if (trim($do_backup) == 'y') {exec("chown {$uid}:{$gid} {$timeseries_dir}feed_{$id}backup.csv");} 81 | 82 | print "Feed $id is now PHPTimeseries\n"; 83 | 84 | 85 | 86 | } 87 | else { 88 | print("PHPTimeSeries file create failed for feedid=$id"); 89 | } 90 | 91 | 92 | } 93 | 94 | 95 | ?> 96 | -------------------------------------------------------------------------------- /convertdata/archive/sql_to_phptimeseries_mv_fast.php: -------------------------------------------------------------------------------- 1 | $timeseries_dir)); 37 | 38 | $result = $mysqli->query("SELECT * FROM feeds WHERE `engine`= 0"); 39 | print "There are ".$result->num_rows." feeds to convert\n"; 40 | 41 | print "Do you want to create a CSV backup in your PHPTimeSeries folder?(y/n) "; 42 | $handle = fopen ("php://stdin","r"); 43 | $do_backup = (trim(fgets($handle)) == "y" ? true : false); 44 | print "\n"; 45 | 46 | $fast_engine= true; 47 | 48 | while($rowfeed = $result->fetch_array()) 49 | { 50 | print "userid:".$rowfeed['userid']." feed:".$rowfeed['id']." name:".$rowfeed['name']."\n"; 51 | 52 | $id = $rowfeed['id']; 53 | $current_feed_value = $rowfeed['value']; 54 | 55 | $engineresult = $engine->create($id,0); 56 | 57 | if ($engineresult == true) { 58 | 59 | $data = $mysqli->query("SELECT * FROM feed_{$id} order by time", MYSQLI_USE_RESULT); 60 | if ($data) { 61 | print "### Feed $id converting...\n"; 62 | if ($do_backup) { 63 | $backupfile = fopen("{$timeseries_dir}feed_{$id}backup.csv", "w") or die("Unable to open backup file!"); 64 | fwrite($backupfile, "timestamp;data\n"); 65 | } 66 | 67 | if ($fast_engine) { 68 | $fh = fopen($timeseries_dir."feed_$id.MYD", 'w'); 69 | if (!$fh) { 70 | print("Could not open data file"); 71 | exit; 72 | } 73 | } 74 | 75 | $last_time = 0; 76 | while ($row = $data->fetch_assoc()) { 77 | //print "timestamp:".$row['time']." data:".$row['data']."\n"; 78 | if ($fast_engine) { 79 | $time = (int) $row['time']; 80 | if ($time == $last_time) { 81 | print "\nRepeated time: $time " . 
date('Y-m-d H:m:s', $time); 82 | $last_time = $time; 83 | continue; 84 | } 85 | $last_time = $time; 86 | $postresult = fwrite($fh, pack("CIf",249,$time,(float) $row['data'])); 87 | } else { 88 | $postresult = $engine->post($id,$row['time'],$row['data']); 89 | } 90 | 91 | if ($postresult == false) print "Error at ".$row['time']."\n"; 92 | else if ($row['time'] % 86400 == 0) print "."; 93 | if ($do_backup) {fwrite($backupfile, "{$row['time']};{$row['data']}\n");} 94 | } 95 | 96 | if ($do_backup) {fclose($backupfile);} 97 | 98 | $mysqli->query("UPDATE feeds SET `engine`=2 WHERE `id`='$id'"); 99 | //$mysqli->query("UPDATE feeds SET `value`=$current_feed_value WHERE `id`='$id'"); 100 | 101 | //exec("chown {$uid}:{$gid} {$timeseries_dir}feed_{$id}.MYD"); 102 | //if ($do_backup) {exec("chown {$uid}:{$gid} {$timeseries_dir}feed_{$id}backup.csv");} 103 | 104 | print "Feed $id is now PHPTimeseries\n"; 105 | 106 | } else { 107 | print "Feed $id no data\n"; 108 | } 109 | 110 | 111 | } 112 | else { 113 | print("PHPTimeSeries file create failed for feedid=$id"); 114 | } 115 | 116 | 117 | } 118 | 119 | 120 | ?> 121 | 122 | -------------------------------------------------------------------------------- /convertdata/check_emoncms_feeds_for_conversion.php: -------------------------------------------------------------------------------- 1 | query("SELECT * FROM feeds"); 37 | 38 | while($row = $result->fetch_object()) 39 | { 40 | print "feed:".$row->id." engine:"; 41 | 42 | if ($row->engine==0) { 43 | print "MYSQL"; 44 | } 45 | if ($row->engine==1) { 46 | print "TIMESTORE"; 47 | } 48 | if ($row->engine==2) { 49 | print "PHPTIMESERIES "; 50 | $datadir = "/var/lib/phptimeseries/"; 51 | if (isset($settings["feed"]["phptimeseries"])) $datadir = $settings["feed"]["phptimeseries"]["datadir"]; 52 | print "datadir:".$datadir; 53 | print " datafile:"; if (file_exists($datadir."feed_".$row->id.".MYD")) print "yes"; else print "no"; 54 | } 55 | if ($row->engine==3) { 56 | print "GRAPHITE (Conversion not supported)"; 57 | } 58 | if ($row->engine==4) { 59 | print "PHPTIMESTORE "; 60 | $datadir = "/var/lib/phptimestore/"; 61 | if (isset($settings["feed"]["phptimestore"])) $datadir = $settings["feed"]["phptimestore"]["datadir"]; 62 | print "datadir:".$datadir; 63 | } 64 | if ($row->engine==5) { 65 | print "PHPFINA "; 66 | $datadir = "/var/lib/phpfina/"; 67 | if (isset($settings["feed"]["phpfina"])) $datadir = $settings["feed"]["phpfina"]["datadir"]; 68 | print "datadir:".$datadir; 69 | print " metafile:"; if (file_exists($datadir.$row->id.".meta")) print "yes"; else print "no"; 70 | print " datafile:"; if (file_exists($datadir.$row->id.".dat")) print "yes"; else print "no"; 71 | } 72 | if ($row->engine==6) { 73 | print "PHPFIWA "; 74 | $datadir = "/var/lib/phpfiwa/"; 75 | if (isset($settings["feed"]["phpfiwa"])) $datadir = $settings["feed"]["phpfiwa"]["datadir"]; 76 | print "datadir:".$datadir; 77 | print " metafile:"; if (file_exists($datadir.$row->id.".meta")) print "yes"; else print "no"; 78 | print " datafile:"; if (file_exists($datadir.$row->id."_0.dat")) print "yes"; else print "no"; 79 | } 80 | 81 | if ($row->engine==7) print "VIRTUALFEED"; 82 | if ($row->engine==8) print "MYSQLMEMORY"; 83 | if ($row->engine==9) print "REDISBUFFER"; 84 | 85 | print "\n"; 86 | } 87 | -------------------------------------------------------------------------------- /convertdata/phpfina_convert_interval.php: -------------------------------------------------------------------------------- 1 | -n \n"; 15 | } 16 | 17 | $opts = 
getopt("i:n:"); 18 | 19 | foreach (array_keys($opts) as $opt) 20 | switch ($opt) { 21 | // feed id 22 | case 'i': $feed_id = $opts['i']; break; 23 | // new interval 24 | case 'n': $new_interval = $opts['n']; break; 25 | } 26 | 27 | if (isset($feed_id) && isset($new_interval)) { 28 | print "Feed ID : ".$feed_id ."\n"; 29 | print "New interval : ".$new_interval ."\n"; 30 | } else { 31 | usage($argv[0]); 32 | exit(1); 33 | } 34 | 35 | $new_feed_id = -$feed_id; 36 | 37 | if (!file_exists($dir.$feed_id.".dat") || !file_exists($dir.$feed_id.".meta")) { 38 | print "Couldn't find data for that feed!\n"; 39 | usage($argv[0]); 40 | exit(1); 41 | } 42 | if (file_exists($dir.$new_feed_id.".dat") || file_exists($dir.$new_feed_id.".meta")) { 43 | print "Existing temporary feed exists for this feed! Did you already run this?\n"; 44 | print "Try deleting the temporary file(s) -- `" . $dir . $new_feed_id . ".(dat|meta)` and try again.\n"; 45 | exit(1); 46 | } 47 | 48 | $engine = new PHPFina(array("datadir" => $dir)); 49 | 50 | $meta = $engine->get_meta($feed_id); 51 | $old_interval = $meta->interval; 52 | $npoints = $engine->get_npoints($feed_id); 53 | 54 | print "Old interval : ".$old_interval ."\n"; 55 | print "Total points : ".$npoints ."\n"; 56 | 57 | if ($old_interval > $new_interval) { 58 | print "Don't support shrinking the interval yet!\n"; 59 | exit(1); 60 | } 61 | 62 | $points_processed = 0; 63 | $interval_factor = $new_interval / $old_interval; 64 | 65 | if (!$engine->create($new_feed_id, array("interval" => $new_interval))) { 66 | print "Failed to create new feed.\n"; 67 | exit(1); 68 | } 69 | 70 | while ($points_processed < $npoints) { 71 | $to_process = $interval_factor; 72 | if ($points_processed + $to_process > $npoints) { 73 | $to_process = $npoints - $points_processed; 74 | } 75 | 76 | $start_point = floor($points_processed); 77 | $end_point = floor($points_processed + $to_process - 1); 78 | 79 | $start = ($meta->start_time + ($old_interval * $start_point)) * 1000; 80 | $end = ($meta->start_time + ($old_interval * $end_point)) * 1000; 81 | 82 | print "Processing " . ($end_point - $start_point + 1) . " points from " . $start . " to " . $end . "\n"; 83 | 84 | $data = $engine->get_data($feed_id, $start, $end, $old_interval); 85 | $count = count($data); 86 | if ($count > 0) { 87 | $total = 0; 88 | foreach ($data as $point) { 89 | $total += $point[1]; 90 | } 91 | $average = $total / count($data); 92 | 93 | $timestamp = $start / 1000; 94 | print "New point : {time = " . $timestamp . ", value = " . $average . "}\n"; 95 | $engine->prepare($new_feed_id, $timestamp, $average, false); 96 | } else { 97 | print "Skipping point at time " . $start . " as there was no data.\n"; 98 | } 99 | 100 | $points_processed += $to_process; 101 | } 102 | 103 | $engine->save(); 104 | 105 | 106 | if ( 107 | !rename($dir.$feed_id.".dat", $dir.$feed_id."-old.dat") || 108 | !rename($dir.$feed_id.".meta", $dir.$feed_id."-old.meta") || 109 | !rename($dir.$new_feed_id.".dat", $dir.$feed_id.".dat") || 110 | !rename($dir.$new_feed_id.".meta", $dir.$feed_id.".meta") 111 | ) { 112 | print "Uh oh! Failed to replace old feed files with new feed files!\n"; 113 | exit(1); 114 | } 115 | -------------------------------------------------------------------------------- /convertdata/phpfiwa_to_phpfina.php: -------------------------------------------------------------------------------- 1 | connect_error ) { 50 | echo "Can't connect to database, please verify credentials/configuration in settings.php
"; 51 | if ( $display_errors ) { 52 | echo "Error message: " . $mysqli->connect_error . ""; 53 | } 54 | die(); 55 | } 56 | 57 | if ($settings['redis']['enabled']) { 58 | $redis = new Redis(); 59 | $connected = $redis->connect($settings['redis']['host'], $settings['redis']['port']); 60 | if (!$connected) { echo "Can't connect to redis at ".$settings['redis']['host'].":".$settings['redis']['port']." , it may be that redis-server is not installed or started see readme for redis installation"; die; } 61 | if (!empty($settings['redis']['prefix'])) $redis->setOption(Redis::OPT_PREFIX, $settings['redis']['prefix']); 62 | if (!empty($settings['redis']['auth'])) { 63 | if (!$redis->auth($settings['redis']['auth'])) { 64 | echo "Can't connect to redis at ".$settings['redis']['host'].", autentication failed"; die; 65 | } 66 | } 67 | if (!empty($settings['redis']['dbnum'])) { 68 | $redis->select($settings['redis']['dbnum']); 69 | } 70 | } else { 71 | $redis = false; 72 | } 73 | 74 | // Either use default phpfiwa and phpfina data directories 75 | // or use user specified directory from emoncms/settings.php 76 | $sourcedir = "/var/lib/phpfiwa/"; 77 | if (isset($settings["feed"]["phpfiwa"])) $sourcedir = $settings["feed"]["phpfiwa"]["datadir"]; 78 | $targetdir = "/var/lib/phpfina/"; 79 | if (isset($settings["feed"]["phpfina"])) $targetdir = $settings["feed"]["phpfina"]["datadir"]; 80 | 81 | // Find all PHPFiwa feeds 82 | $result = $mysqli->query("SELECT * FROM feeds WHERE `engine`=6"); 83 | 84 | // Quick check at this point so that conversion can be aborted 85 | if ($result->num_rows>0) { 86 | print "There are ".$result->num_rows." feeds to convert, would you like to continue? (y/n): "; 87 | } else { 88 | print "There are no feeds to convert\n"; 89 | die; 90 | } 91 | $handle = fopen ("php://stdin","r"); 92 | $line = fgets($handle); 93 | if(trim($line) != 'y') exit; 94 | 95 | $phpfiwafeeds = array(); 96 | // For each PHPFIWA feed 97 | while($row = $result->fetch_array()) 98 | { 99 | print $row['id']." ".$row['name']."\n"; 100 | $id = $row['id']; 101 | $sourcefile = $sourcedir.$id."_0.dat"; 102 | 103 | $new_or_overwrite = (int) stdin("- Create a new feed or replace? (enter 1:new, 2:replace) "); 104 | 105 | if ($new_or_overwrite==1) { 106 | $userid = $row['userid']; 107 | $new_feed = $row['name']." (new)"; 108 | $datatype = DataType::REALTIME; 109 | $setengine = Engine::PHPFINA; 110 | $mysqli->query("INSERT INTO feeds (userid,name,datatype,public,engine) VALUES ('$userid','$new_feed','$datatype',false,'$setengine')"); 111 | $outid = $mysqli->insert_id; 112 | 113 | if ($redis) { 114 | $redis->sAdd("user:feeds:$userid", $outid); 115 | $redis->hMSet("feed:$outid",array('id'=>$outid,'userid'=>$userid,'name'=>$new_feed,'datatype'=>$datatype,'tag'=>"",'public'=>false,'size'=>0,'engine'=>$setengine)); 116 | } 117 | } else { 118 | $outid = $id; 119 | } 120 | 121 | $targetfile = $targetdir.$outid.".dat"; 122 | //----------------------------------- 123 | // META FILE COPY 124 | //----------------------------------- 125 | // 1. 
Read PHPFiwa meta file 126 | $meta = new stdClass(); 127 | if (!$metafile = @fopen($sourcedir.$id.".meta", 'rb')) { 128 | print "error opening phpfiwa meta file to read\n"; 129 | die; 130 | } 131 | $tmp = unpack("I",fread($metafile,4)); 132 | $tmp = unpack("I",fread($metafile,4)); 133 | $meta->start_time = $tmp[1]; 134 | $tmp = unpack("I",fread($metafile,4)); 135 | $meta->nlayers = $tmp[1]; 136 | 137 | $meta->npoints = array(); 138 | for ($i=0; $i<$meta->nlayers; $i++) 139 | { 140 | $tmp = unpack("I",fread($metafile,4)); 141 | } 142 | 143 | $meta->interval = array(); 144 | for ($i=0; $i<$meta->nlayers; $i++) 145 | { 146 | $tmp = unpack("I",fread($metafile,4)); 147 | $meta->interval[$i] = $tmp[1]; 148 | } 149 | fclose($metafile); 150 | 151 | // 2. Write PHPFina meta file 152 | if (file_exists($targetdir.$outid.".meta")) { 153 | print $targetdir.$id.".meta already exists?\n"; 154 | die; 155 | } 156 | 157 | if (!$metafile = @fopen($targetdir.$outid.".meta", 'wb')) { 158 | print "- error opening phpfina meta file to write\n"; 159 | die; 160 | } 161 | 162 | fwrite($metafile,pack("I",0)); 163 | fwrite($metafile,pack("I",0)); 164 | fwrite($metafile,pack("I",$meta->interval[0])); 165 | fwrite($metafile,pack("I",$meta->start_time)); 166 | fclose($metafile); 167 | print "- metafile created: start_time=".$meta->start_time.", interval=".$meta->interval[0]."\n"; 168 | 169 | //----------------------------------- 170 | // DATA FILE COPY 171 | //----------------------------------- 172 | print "- cp $sourcefile $targetfile\n"; 173 | exec("cp $sourcefile $targetfile"); 174 | 175 | // Confirm that copy is the same size 176 | $s1 = filesize($sourcefile); 177 | $s2 = filesize($targetfile); 178 | if ($s1==$s2) { 179 | print "- $id phpfiwa to phpfina complete\n"; 180 | $mysqli->query("UPDATE feeds SET `engine`=5 WHERE `id`='$outid'"); 181 | $redis->hSet("feed:".$outid,"engine",5); 182 | 183 | exec("chown www-data:www-data ".$targetdir.$outid.".meta"); 184 | exec("chown www-data:www-data ".$targetdir.$outid.".dat"); 185 | 186 | // Register feeds to delete 187 | $phpfiwafeeds[] = $id; 188 | } else { 189 | print "- copy not exact $s1 $s2\n"; 190 | } 191 | } 192 | 193 | if ($outid==$id) { 194 | print "---------------------------------------------------\n"; 195 | print "Delete phpfiwa data files from $sourcedir? (y/n): "; 196 | $handle = fopen ("php://stdin","r"); 197 | $line = fgets($handle); 198 | if(trim($line) != 'y') die; 199 | 200 | foreach ($phpfiwafeeds as $id) { 201 | print "Deleting feed $id\n"; 202 | if (file_exists($sourcedir.$id.".meta")) unlink($sourcedir.$id.".meta"); 203 | if (file_exists($sourcedir.$id."_0.dat")) unlink($sourcedir.$id."_0.dat"); 204 | if (file_exists($sourcedir.$id."_1.dat")) unlink($sourcedir.$id."_1.dat"); 205 | if (file_exists($sourcedir.$id."_2.dat")) unlink($sourcedir.$id."_2.dat"); 206 | if (file_exists($sourcedir.$id."_3.dat")) unlink($sourcedir.$id."_3.dat"); 207 | } 208 | } 209 | 210 | 211 | function stdin($prompt = null){ 212 | if($prompt){ 213 | echo $prompt; 214 | } 215 | $fp = fopen("php://stdin","r"); 216 | $line = rtrim(fgets($fp, 1024)); 217 | return $line; 218 | } 219 | -------------------------------------------------------------------------------- /convertdata/phptimeseries_to_phpfina.php: -------------------------------------------------------------------------------- 1 | connect_error ) { 49 | echo "Can't connect to database, please verify credentials/configuration in settings.php
"; 50 | if ( $display_errors ) { 51 | echo "Error message: " . $mysqli->connect_error . ""; 52 | } 53 | die(); 54 | } 55 | 56 | if ($settings['redis']['enabled']) { 57 | $redis = new Redis(); 58 | $connected = $redis->connect($settings['redis']['host'], $settings['redis']['port']); 59 | if (!$connected) { echo "Can't connect to redis at ".$settings['redis']['host'].":".$settings['redis']['port']." , it may be that redis-server is not installed or started see readme for redis installation"; die; } 60 | if (!empty($settings['redis']['prefix'])) $redis->setOption(Redis::OPT_PREFIX, $settings['redis']['prefix']); 61 | if (!empty($settings['redis']['auth'])) { 62 | if (!$redis->auth($settings['redis']['auth'])) { 63 | echo "Can't connect to redis at ".$settings['redis']['host'].", autentication failed"; die; 64 | } 65 | } 66 | if (!empty($settings['redis']['dbnum'])) { 67 | $redis->select($settings['redis']['dbnum']); 68 | } 69 | } else { 70 | $redis = false; 71 | } 72 | 73 | // Either use default phptimeseries and phpfina data directories 74 | // or use user specified directory from emoncms/settings.php 75 | $sourcedir = "/var/lib/phptimeseries/"; 76 | if (isset($settings["feed"]["phptimeseries"])) $sourcedir = $settings["feed"]["phptimeseries"]["datadir"]; 77 | $targetdir = "/var/lib/phpfina/"; 78 | if (isset($settings["feed"]["phpfina"])) $targetdir = $settings["feed"]["phpfina"]["datadir"]; 79 | 80 | $phpfina = new PHPFina($settings["feed"]['phpfina']); 81 | 82 | // Find all phptimeseries feeds 83 | $result = $mysqli->query("SELECT * FROM feeds WHERE `engine`=2"); 84 | 85 | // Quick check at this point so that conversion can be aborted 86 | if ($result->num_rows>0) { 87 | print "There are ".$result->num_rows." feeds to convert, would you like to continue? (y/n): "; 88 | } else { 89 | print "There are no feeds to convert\n"; 90 | die; 91 | } 92 | $handle = fopen ("php://stdin","r"); 93 | $line = fgets($handle); 94 | if(trim($line) != 'y') exit; 95 | 96 | print "\n"; 97 | $phptimeseriesfeeds = array(); 98 | // For each PHPTIMESERIES feed 99 | while($row = $result->fetch_array()) 100 | { 101 | print "Feedid:".$row['id']." name:".$row['name']."\n"; 102 | $id = $row['id']; 103 | $targetfile = $targetdir.$id.".dat"; 104 | 105 | $new_or_overwrite = (int) stdin("- Create a new feed or replace? (enter 1:new, 2:replace) "); 106 | 107 | if ($new_or_overwrite==1) { 108 | $userid = $row['userid']; 109 | $new_feed = $row['name']." 
(new)"; 110 | $datatype = DataType::REALTIME; 111 | $setengine = Engine::PHPFINA; 112 | $mysqli->query("INSERT INTO feeds (userid,name,datatype,public,engine) VALUES ('$userid','$new_feed','$datatype',false,'$setengine')"); 113 | $outid = $mysqli->insert_id; 114 | 115 | if ($redis) { 116 | $redis->sAdd("user:feeds:$userid", $outid); 117 | $redis->hMSet("feed:$outid",array('id'=>$outid,'userid'=>$userid,'name'=>$new_feed,'datatype'=>$datatype,'tag'=>"",'public'=>false,'size'=>0,'engine'=>$setengine)); 118 | } 119 | } else { 120 | $outid = $id; 121 | } 122 | 123 | print "- Enter interval for PHPFina feed, i.e enter 10 for 10 seconds: "; 124 | $handle = fopen ("php://stdin","r"); 125 | $interval = (int) trim(fgets($handle)); 126 | 127 | if (($interval%5)!=0 && $interval>5) { 128 | print "Interval must be an integer multiple of 5 and more than 10s\n"; 129 | } 130 | 131 | $phpfina->create($outid,array("interval"=>$interval)); 132 | 133 | // Open phptimeseries data to read 134 | if (!$fh = @fopen($sourcedir."feed_$id.MYD", 'rb')) { 135 | print "error opening phptimeseries data file to read\n"; 136 | die; 137 | } 138 | 139 | $filesize = filesize($sourcedir."feed_$id.MYD"); 140 | $npoints = floor($filesize / 9.0); 141 | 142 | // Read through file 143 | for ($i=0; $i<$npoints; $i++) 144 | { 145 | // Read next datapoint 146 | $d = fread($fh,9); 147 | 148 | // Itime = unsigned integer (I) assign to 'time' 149 | // fvalue = float (f) assign to 'value' 150 | $array = unpack("x/Itime/fvalue",$d); 151 | 152 | $time = $array['time']; 153 | $value = $array['value']; 154 | 155 | $phpfina->prepare($outid,$time,$value); 156 | if ($low_memory_mode) $phpfina->save(); 157 | } 158 | 159 | $phpfina->save(); 160 | 161 | // Update last time/value 162 | /* 163 | if ($redis) { 164 | $redis->hMset("feed:$id", array('value' => $value, 'time' => $time)); 165 | } else { 166 | $mysqli->query("UPDATE feeds SET `time` = '$time', `value` = $value WHERE `id`= '$id'"); 167 | } 168 | */ 169 | 170 | print "- Coversion complete, $npoints datapoints\n"; 171 | $mysqli->query("UPDATE feeds SET `engine`=5 WHERE `id`='$outid'"); 172 | $redis->hSet("feed:".$outid,"engine",5); 173 | 174 | exec("chown www-data:www-data ".$targetdir.$outid.".meta"); 175 | exec("chown www-data:www-data ".$targetdir.$outid.".dat"); 176 | 177 | // Register feeds to delete 178 | $phptimeseriesfeeds[] = $id; 179 | } 180 | 181 | if ($outid==$id) { 182 | print "---------------------------------------------------\n"; 183 | print "Delete phptimeseries data files from $sourcedir? 
(y/n): "; 184 | $handle = fopen ("php://stdin","r"); 185 | if(trim(fgets($handle)) != 'y') die; 186 | 187 | foreach ($phptimeseriesfeeds as $id) { 188 | print "Deleting feed $id\n"; 189 | if (file_exists($sourcedir."feed_$id.MYD")) unlink($sourcedir."feed_$id.MYD"); 190 | } 191 | } 192 | 193 | function stdin($prompt = null){ 194 | if($prompt){ 195 | echo $prompt; 196 | } 197 | $fp = fopen("php://stdin","r"); 198 | $line = rtrim(fgets($fp, 1024)); 199 | return $line; 200 | } 201 | -------------------------------------------------------------------------------- /datarecovery/lib/common.php: -------------------------------------------------------------------------------- 1 | $onepercent) { 71 | $pos = 0; 72 | $percent ++; 73 | print "$percent%\n"; 74 | } 75 | } 76 | 77 | fclose($fhr); 78 | fclose($fhw); 79 | } 80 | 81 | function attach_to_user($userid,$feeds,$enginename) 82 | { 83 | global $mysqli; 84 | 85 | foreach ($feeds as $id) 86 | { 87 | $name = $enginename."_".$id; 88 | $datatype = 1; 89 | 90 | if ($enginename=='phptimeseries') $engine = 2; 91 | if ($enginename=='phptimestore') $engine = 4; 92 | if ($enginename=='phpfina') $engine = 5; 93 | if ($enginename=='phpfiwa') $engine = 6; 94 | 95 | $result = $mysqli->query("SELECT id FROM feeds WHERE id = '$id'"); 96 | if ($result->num_rows==0) { 97 | $result = $mysqli->query("INSERT INTO feeds (id,userid,name,datatype,public,engine) VALUES ('$id','$userid','$name','$datatype',false,'$engine')"); 98 | print "Adding feed $id\n"; 99 | } 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /datarecovery/lib/phpfina.php: -------------------------------------------------------------------------------- 1 | 0 && !in_array($feedid,$feeds)) $feeds[] = $feedid; 27 | } 28 | 29 | foreach ($feeds as $id) 30 | { 31 | print "Copying feed $id: \n"; 32 | 33 | copy_data(array( 34 | 'sourcefile'=>$source.$id.".dat", 35 | 'targetfile'=>$target.$id.".dat", 36 | 'bytelength'=>4 37 | )); 38 | 39 | clearstatcache(); 40 | $npoints = filesize($target.$id.".dat") / 4.0; 41 | if ((int)$npoints!=$npoints) { 42 | print "filesize error\n"; 43 | } 44 | 45 | $meta = new stdClass(); 46 | $metafile = fopen($source.$id.".meta", 'rb'); 47 | fseek($metafile,8); 48 | $tmp = unpack("I",fread($metafile,4)); 49 | $meta->interval = $tmp[1]; 50 | $tmp = unpack("I",fread($metafile,4)); 51 | $meta->start_time = $tmp[1]; 52 | fclose($metafile); 53 | 54 | if ($meta->start_time==0) print "Feed start time error!\n"; 55 | if ($meta->interval<5) print "Feed interval error!\n"; 56 | 57 | $metafile = fopen($target.$id.".meta", 'wb'); 58 | fwrite($metafile,pack("I",0)); 59 | fwrite($metafile,pack("I",0)); 60 | fwrite($metafile,pack("I",$meta->interval)); 61 | fwrite($metafile,pack("I",$meta->start_time)); 62 | fclose($metafile); 63 | } 64 | 65 | return $feeds; 66 | } 67 | -------------------------------------------------------------------------------- /datarecovery/lib/phpfiwa.php: -------------------------------------------------------------------------------- 1 | 0 && !in_array($feedid,$feeds)) $feeds[] = $feedid; 28 | } 29 | 30 | foreach ($feeds as $id) 31 | { 32 | $npoints = array(); 33 | print $id."\n"; 34 | 35 | for ($l=0; $l<10; $l++) 36 | { 37 | if (file_exists($source.$id."_".$l.".dat")) 38 | { 39 | print "Copying data file layer $id $l\n"; 40 | 41 | copy_data(array( 42 | 'sourcefile'=>$source.$id."_".$l.".dat", 43 | 'targetfile'=>$target.$id."_".$l.".dat", 44 | 'bytelength'=>4 45 | )); 46 | 47 | clearstatcache($target.$id."_".$l.".dat"); 
48 | $npoints[] = filesize($target.$id."_".$l.".dat") / 4.0; 49 | } 50 | } 51 | 52 | foreach ($npoints as $np) { 53 | if ((int)$np!=$np) { 54 | print "filesize error\n"; 55 | } 56 | } 57 | 58 | $meta = new stdClass(); 59 | $metafile = fopen($source.$id.".meta", 'rb'); 60 | fseek($metafile,4); 61 | $tmp = unpack("I",fread($metafile,4)); 62 | $meta->start_time = $tmp[1]; 63 | $tmp = unpack("I",fread($metafile,4)); 64 | $meta->nlayers = $tmp[1]; 65 | 66 | for ($i=0; $i<$meta->nlayers; $i++) 67 | { 68 | $tmp = unpack("I",fread($metafile,4)); 69 | } 70 | 71 | $meta->interval = array(); 72 | for ($i=0; $i<$meta->nlayers; $i++) 73 | { 74 | $tmp = unpack("I",fread($metafile,4)); 75 | $meta->interval[$i] = $tmp[1]; 76 | } 77 | 78 | fclose($metafile); 79 | 80 | if ($meta->start_time==0) print "Feed start time error!\n"; 81 | if ($meta->interval[0]<5) print "Feed interval error!".$meta->interval[0]."\n"; 82 | 83 | $metafile = fopen($target.$id.".meta", 'wb'); 84 | fwrite($metafile,pack("I",0)); 85 | fwrite($metafile,pack("I",$meta->start_time)); 86 | fwrite($metafile,pack("I",$meta->nlayers)); 87 | foreach ($meta->interval as $n) fwrite($metafile,pack("I",0)); // Legacy 88 | foreach ($meta->interval as $d) fwrite($metafile,pack("I",$d)); 89 | fclose($metafile); 90 | 91 | } 92 | return $feeds; 93 | } 94 | -------------------------------------------------------------------------------- /datarecovery/lib/phptimeseries.php: -------------------------------------------------------------------------------- 1 | 0 && !in_array($feedid,$feeds)) $feeds[] = $feedid; 28 | } 29 | 30 | foreach ($feeds as $id) 31 | { 32 | print "Copying feed $id: \n"; 33 | copy_data(array( 34 | 'sourcefile'=>$source."feed_".$id.".MYD", 35 | 'targetfile'=>$target."feed_".$id.".MYD", 36 | 'bytelength'=>9 37 | )); 38 | } 39 | return $feeds; 40 | } 41 | -------------------------------------------------------------------------------- /datarecovery/lib/phptimestore.php: -------------------------------------------------------------------------------- 1 | 0 && !in_array($feedid,$feeds)) $feeds[] = $feedid; 29 | } 30 | 31 | foreach ($feeds as $id) 32 | { 33 | $npoints = array(); 34 | print $id."\n"; 35 | 36 | for ($l=0; $l<10; $l++) 37 | { 38 | $name = str_pad($id, 16, '0', STR_PAD_LEFT); 39 | if (file_exists($source.$name."_".$l."_.dat")) 40 | { 41 | print "Copying data file layer $id $l\n"; 42 | 43 | copy_data(array( 44 | 'sourcefile'=>$source.$name."_".$l."_.dat", 45 | 'targetfile'=>$target.$name."_".$l."_.dat", 46 | 'bytelength'=>4 47 | )); 48 | 49 | clearstatcache($target.$name."_".$l."_.dat"); 50 | $npoints[] = filesize($target.$name."_".$l."_.dat") / 4.0; 51 | } 52 | } 53 | 54 | $feedname = str_pad($id, 16, '0', STR_PAD_LEFT).".tsdb"; 55 | $meta = new stdClass(); 56 | $metafile = fopen($source.$feedname, 'rb'); 57 | 58 | fseek($metafile,8); 59 | $d = fread($metafile,8); 60 | $tmp = unpack("h*",$d); // no longer used 61 | $tmp = unpack("I",fread($metafile,4)); 62 | $meta->nmetrics = $tmp[1]; 63 | $tmp = unpack("I",fread($metafile,4)); // no longer used 64 | $tmp = unpack("I",fread($metafile,8)); 65 | $meta->start = $tmp[1]; 66 | $tmp = unpack("I",fread($metafile,4)); 67 | $meta->interval = $tmp[1]; 68 | fclose($metafile); 69 | 70 | $metafile = fopen($target.$feedname, 'wb'); 71 | fwrite($metafile,pack("I",0)); 72 | fwrite($metafile,pack("I",0)); 73 | fwrite($metafile,pack("h*",strrev(str_pad(0, 16, '0', STR_PAD_LEFT)))); 74 | fwrite($metafile,pack("I",$meta->nmetrics)); 75 | fwrite($metafile,pack("I",0)); // Legacy 76 | 
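// the start time occupies 8 bytes in the .tsdb meta file: the uint32 value followed by a zero padding word,
// matching the 8-byte fread() used when the meta file is read back above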
fwrite($metafile,pack("I",$meta->start)); 77 | fwrite($metafile,pack("I",0)); 78 | fwrite($metafile,pack("I",$meta->interval)); 79 | fclose($metafile); 80 | } 81 | return $feeds; 82 | } 83 | -------------------------------------------------------------------------------- /datarecovery/recover.php: -------------------------------------------------------------------------------- 1 | array( 39 | 'source'=> "$sourcelocation/phpfiwa/", 40 | 'target'=> "$targetlocation/phpfiwa/" 41 | ), 42 | 'phpfina'=>array( 43 | 'source'=> "$sourcelocation/phpfina/", 44 | 'target'=> "$targetlocation/phpfina/" 45 | ), 46 | 'phptimeseries'=>array( 47 | 'source'=> "$sourcelocation/phptimeseries/", 48 | 'target'=> "$targetlocation/phptimeseries/" 49 | ), 50 | 'phptimestore'=>array( 51 | 'source'=> "$sourcelocation/timestore/", 52 | 'target'=> "$targetlocation/timestore/" 53 | ) 54 | ); 55 | 56 | print "Emoncms data recovery tool\n"; 57 | 58 | foreach ($engines as $engine=>$engine_properties) 59 | { 60 | print "Recovering $engine:\n"; 61 | $function = $engine."_recover"; 62 | $feeds = $function($engine_properties); 63 | // Uncomment to attach recovered feeds to a target emoncms account: 64 | // attach_to_user($userid,$feeds,$engine); 65 | } 66 | -------------------------------------------------------------------------------- /enginereaders/phpfina.php: -------------------------------------------------------------------------------- 1 | interval = $tmp[1]; 17 | $tmp = unpack("I",fread($metafile,4)); 18 | $meta->start_time = $tmp[1]; 19 | fclose($metafile); 20 | 21 | 22 | $fh = fopen($dir."$feedid.dat", 'rb'); 23 | $filesize = filesize($dir."$feedid.dat"); 24 | 25 | $npoints = floor($filesize / 4.0); 26 | 27 | for ($i=0; $i<$npoints; $i++) 28 | { 29 | $val = unpack("f",fread($fh,4)); 30 | 31 | $time = $meta->start_time + $i * $meta->interval; 32 | $value = $val[1]; 33 | 34 | print $time." ".$value."\n"; 35 | } 36 | -------------------------------------------------------------------------------- /enginereaders/phpfiwa.php: -------------------------------------------------------------------------------- 1 | id = $feedid; 13 | 14 | $metafile = fopen($dir."$feedid.meta", 'rb'); 15 | $tmp = unpack("I",fread($metafile,4)); 16 | $tmp = unpack("I",fread($metafile,4)); 17 | $meta->start_time = $tmp[1]; 18 | $tmp = unpack("I",fread($metafile,4)); 19 | $meta->nlayers = $tmp[1]; 20 | 21 | $meta->npoints = array(); 22 | for ($i=0; $i<$meta->nlayers; $i++) 23 | { 24 | $tmp = unpack("I",fread($metafile,4)); 25 | $meta->npoints[$i] = $tmp[1]; 26 | } 27 | 28 | $meta->interval = array(); 29 | for ($i=0; $i<$meta->nlayers; $i++) 30 | { 31 | $tmp = unpack("I",fread($metafile,4)); 32 | $meta->interval[$i] = $tmp[1]; 33 | } 34 | 35 | fclose($metafile); 36 | 37 | 38 | $fh = fopen($dir.$feedid."_0.dat", 'rb'); 39 | $filesize = filesize($dir.$feedid."_0.dat"); 40 | 41 | $npoints = floor($filesize / 4.0); 42 | 43 | for ($i=0; $i<$npoints; $i++) 44 | { 45 | $val = unpack("f",fread($fh,4)); 46 | 47 | $time = $meta->start_time + $i * $meta->interval[0]; 48 | $value = $val[1]; 49 | 50 | print $time." 
".$value."\n"; 51 | } 52 | -------------------------------------------------------------------------------- /enginereaders/phptimeseries.php: -------------------------------------------------------------------------------- 1 | array( 25 | 'dir'=> "$dir/phpfiwa/" 26 | ), 27 | 'phpfina'=>array( 28 | 'dir'=> "$dir/phpfina/" 29 | ), 30 | 'phptimeseries'=>array( 31 | 'dir'=> "$dir/phptimeseries/" 32 | ), 33 | 'phptimestore'=>array( 34 | 'dir'=> "$dir/timestore/" 35 | ) 36 | ); 37 | 38 | foreach ($engines as $engine=>$engine_properties) 39 | { 40 | print "\nCHECKING INTEGRITY OF $engine META DATA:\n"; 41 | $function = $engine."_check"; 42 | $feeds = $function($engine_properties); 43 | } 44 | -------------------------------------------------------------------------------- /integritycheck/lib/phpfina.php: -------------------------------------------------------------------------------- 1 | 0 && !in_array($feedid,$feeds)) $feeds[] = $feedid; 25 | } 26 | 27 | $error_count = 0; 28 | $n = 0; 29 | 30 | foreach ($feeds as $id) 31 | { 32 | $error = false; 33 | $errormsg = ""; 34 | 35 | // 1) Analyse meta file 36 | 37 | $feedname = "$id.meta"; 38 | 39 | // CHECK 1: META FILE EXISTS 40 | if (!file_exists($dir.$feedname)) { 41 | print "[Meta file does not exist: $id]\n"; 42 | $error = true; 43 | } 44 | else 45 | { 46 | $meta = new stdClass(); 47 | $metafile = fopen($dir.$feedname, 'rb'); 48 | fseek($metafile,8); 49 | $tmp = unpack("I",fread($metafile,4)); 50 | $meta->interval = $tmp[1]; 51 | $tmp = unpack("I",fread($metafile,4)); 52 | $meta->start_time = $tmp[1]; 53 | fclose($metafile); 54 | 55 | clearstatcache($dir.$id.".dat"); 56 | $npoints = filesize($dir.$id.".dat") / 4; 57 | 58 | if ($meta->interval < 5){ 59 | $errormsg .= "[interval: ".$meta->interval."]"; 60 | $error = true; 61 | } 62 | 63 | if (intval($npoints) != $npoints){ 64 | $errormsg .= "[integer npoints:".intval($npoints)." != npoints:$npoints]"; 65 | $error = true; 66 | } 67 | 68 | if ($meta->start_time==0 && $npoints>0) { 69 | $errormsg .= "[start==0]"; 70 | $error = true; 71 | } 72 | 73 | if ($error) print "Feed $id ".$errormsg." 
[".date("d:m:Y G:i",filemtime($dir.$feedname))."]\n"; 74 | } 75 | 76 | if ($error) $error_count ++; 77 | $n++; 78 | } 79 | 80 | print "Error count: ".$error_count."\n"; 81 | print "Number of feeds: $n\n"; 82 | } 83 | -------------------------------------------------------------------------------- /integritycheck/lib/phpfiwa.php: -------------------------------------------------------------------------------- 1 | 0 && !in_array($feedid,$feeds)) $feeds[] = $feedid; 25 | } 26 | 27 | $error_count = 0; 28 | $n = 0; 29 | 30 | foreach ($feeds as $id) 31 | { 32 | $error = false; 33 | $errormsg = ""; 34 | 35 | // 1) Analyse meta file 36 | 37 | $feedname = "$id.meta"; 38 | 39 | // CHECK 1: META FILE EXISTS 40 | if (!file_exists($dir.$feedname)) { 41 | print "[Meta file does not exist: $id]\n"; 42 | $error = true; 43 | } 44 | else 45 | { 46 | $meta = new stdClass(); 47 | $metafile = fopen($dir.$feedname, 'rb'); 48 | fseek($metafile,4); 49 | $tmp = unpack("I",fread($metafile,4)); 50 | $meta->start_time = $tmp[1]; 51 | $tmp = unpack("I",fread($metafile,4)); 52 | $meta->nlayers = $tmp[1]; 53 | for ($i=0; $i<$meta->nlayers; $i++) { 54 | $tmp = unpack("I",fread($metafile,4)); 55 | } 56 | $meta->interval = array(); 57 | for ($i=0; $i<$meta->nlayers; $i++) { 58 | $tmp = unpack("I",fread($metafile,4)); 59 | $meta->interval[$i] = $tmp[1]; 60 | } 61 | fclose($metafile); 62 | 63 | if ($meta->nlayers<1 || $meta->nlayers>4) { 64 | $errormsg .= "[nlayers out of range: ".$meta->nlayers."]"; 65 | $error = true; 66 | } 67 | 68 | if ($meta->start_time>0 && filesize($dir.$id."_0.dat")==0) 69 | { 70 | $errormsg .= "[Start time set but datafile is empty]"; 71 | $error = true; 72 | } 73 | 74 | if ($error) print "Feed $id ".$errormsg." [".date("d:m:Y G:i",filemtime($dir.$feedname))."]\n"; 75 | } 76 | 77 | if ($error) $error_count ++; 78 | $n++; 79 | } 80 | 81 | print "Error count: ".$error_count."\n"; 82 | print "Number of feeds: $n\n"; 83 | } 84 | -------------------------------------------------------------------------------- /integritycheck/lib/phptimeseries.php: -------------------------------------------------------------------------------- 1 | 0 && !in_array($feedid,$feeds)) $feeds[] = $feedid; 27 | } 28 | 29 | $n = 0; 30 | $errorcount = 0; 31 | 32 | foreach ($feeds as $id) 33 | { 34 | $error = false; 35 | $errormsg = ""; 36 | 37 | $feedname = "feed_$id.MYD"; 38 | 39 | $size = filesize($dir.$feedname); 40 | 41 | if (($size / 9.0)!=(int)($size / 9.0)) 42 | { 43 | $error = true; 44 | $errormsg .= "[SIZE ERROR]"; 45 | } 46 | 47 | if ($error) 48 | { 49 | print "Feed $id $errormsg ".date("d:m:Y G:i",filemtime($dir.$feedname))."\n"; 50 | $errorcount ++; 51 | } 52 | $n++; 53 | } 54 | 55 | 56 | print "Error count: ".$errorcount."\n"; 57 | print "Number of feeds: $n\n"; 58 | } 59 | -------------------------------------------------------------------------------- /integritycheck/lib/phptimestore.php: -------------------------------------------------------------------------------- 1 | 0 && !in_array($feedid,$feeds)) $feeds[] = $feedid; 26 | } 27 | 28 | $error_count = 0; 29 | $n = 0; 30 | 31 | foreach ($feeds as $id) 32 | { 33 | $error = false; 34 | $errormsg = ""; 35 | 36 | $feedname = str_pad($id, 16, '0', STR_PAD_LEFT).".tsdb"; 37 | $meta = new stdClass(); 38 | $size = filesize($dir.$feedname); 39 | 40 | if (!($size==36 || $size == 272)) { 41 | $errormsg .= "[feed:$id metadata filesize error, size = $size]"; 42 | $error = true; 43 | } 44 | 45 | $metafile = fopen($dir.$feedname, 'rb'); 46 | 47 | fseek($metafile,8); 48 | $d = 
fread($metafile,8); 49 | $tmp = unpack("h*",$d); 50 | $tmp = unpack("I",fread($metafile,4)); 51 | $meta->nmetrics = $tmp[1]; 52 | $tmp = unpack("I",fread($metafile,4)); 53 | $tmp = unpack("I",fread($metafile,8)); 54 | $meta->start = $tmp[1]; 55 | $tmp = unpack("I",fread($metafile,4)); 56 | $meta->interval = $tmp[1]; 57 | fclose($metafile); 58 | 59 | if ($meta->nmetrics!=1) { 60 | $errormsg .= "[nmetrics is not 1]"; 61 | $error = true; 62 | } 63 | 64 | if ($meta->interval<5 || $meta->interval>(24*3600)) 65 | { 66 | $errormsg .= "[interval is out of range = ".$meta->interval."]"; 67 | $error = true; 68 | } 69 | 70 | $npoints = filesize($dir.str_pad($id, 16, '0', STR_PAD_LEFT)."_0_.dat") / 4; 71 | if (intval($npoints)!=$npoints) { 72 | $errormsg .= "[npoints:".intval($npoints)." != npoints:$npoints]"; 73 | $error = true; 74 | } 75 | 76 | $feedname = str_pad($id, 16, '0', STR_PAD_LEFT).".tsdb"; 77 | if ($error) print "Feed $id ".$errormsg." [".date("d:m:Y G:i",filemtime($dir.$feedname))."]\n"; 78 | if ($error) $error_count ++; 79 | $n++; 80 | } 81 | 82 | print "Error count: ".$error_count."\n"; 83 | print "Number of feeds: $n\n"; 84 | } 85 | -------------------------------------------------------------------------------- /integritycheck/missingcheck.php: -------------------------------------------------------------------------------- 1 | /path/to/emoncms_backup_`date +"%d%m%Y"`.sql 10 | sudo service emonhub start 11 | -------------------------------------------------------------------------------- /octopus/agile.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import sys, os, requests, json, time 4 | from datetime import datetime 5 | from configobj import ConfigObj 6 | 7 | script_path = os.path.dirname(os.path.realpath(__file__)) 8 | settings = ConfigObj(script_path+"/agile.conf", file_error=True) 9 | 10 | # Step 1: Create feed via API call or use input interface in emoncms to create manually 11 | result = requests.get(settings['emoncms']['server']+"/feed/getid.json",params={'tag':settings['emoncms']['tag'],'name':settings['emoncms']['name'],'apikey':settings['emoncms']['apikey']}) 12 | if not result.text: 13 | # Create feed 14 | params = {'tag':settings['emoncms']['tag'],'name':settings['emoncms']['name'],'datatype':1,'engine':5,'options':'{"interval":1800}','unit':'kWh','apikey':settings['emoncms']['apikey']} 15 | result = requests.get(settings['emoncms']['server']+"/feed/create.json",params) 16 | result = json.loads(result.text) 17 | if result['success']: 18 | feedid = int(result['feedid']) 19 | print("Emoncms feed created:\t"+str(feedid)) 20 | else: 21 | print("Error creating feed") 22 | sys.exit(0) 23 | else: 24 | feedid = int(result.text) 25 | print("Using emoncms feed:\t"+str(feedid)) 26 | 27 | 28 | 29 | # Step 2: Fetch feed meta data to find last data point time and value 30 | result = requests.get(settings['emoncms']['server']+"/feed/getmeta.json",params={'id':feedid,'apikey':settings['emoncms']['apikey']}) 31 | meta = json.loads(result.text) 32 | print("Feed meta data:\t\t"+result.text) 33 | 34 | 35 | end_time = 0 36 | if meta['npoints']>0: 37 | end_time = meta['start_time'] + (meta['interval'] * meta['npoints']) 38 | # params['period_from'] = datetime.fromtimestamp(end_time).astimezone().isoformat() 39 | # print("Request from:\t\t"+params['period_from']) 40 | 41 | # Calculate number of half hours since last data point 42 | now = time.time() 43 | diff = now - end_time 44 | half_hours = int(diff / 1800) 45 | 
page_size = half_hours 46 | 47 | if page_size > 3000: 48 | page_size = 3000 49 | 50 | if page_size < 100: 51 | page_size = 100 52 | 53 | data = [] 54 | 55 | # Start fetching data from page 1 56 | page = 1 57 | max_page_load = 200 58 | mpan = settings['octopus']['mpan'] 59 | serial_number = settings['octopus']['serial_number'] 60 | 61 | while True: 62 | print(f"Fetching page {page}") 63 | 64 | url = f"https://api.octopus.energy/v1/electricity-meter-points/{mpan}/meters/{settings['octopus']['serial_number']}/consumption/?page={page}&page_size={page_size}" 65 | response = requests.get(url, auth=(settings['octopus']['agile_apikey'], '')) 66 | 67 | if response.status_code == 200: 68 | result = response.json() 69 | 70 | if result and 'results' in result: 71 | print(f"- Number of data points: {len(result['results'])}") 72 | 73 | if len(result['results']) == 0: 74 | break # No more data to fetch 75 | 76 | # Print from to dates 77 | print (f"- From: {result['results'][0]['interval_start']} to {result['results'][-1]['interval_start']}") 78 | 79 | for row in result['results']: 80 | date = datetime.fromisoformat(row['interval_start'].replace('Z', '+00:00')) 81 | timestamp = int(date.timestamp()) 82 | 83 | if timestamp < end_time: 84 | max_page_load = 0 85 | break # Stop if we reach older data than required 86 | 87 | data.append([timestamp, row['consumption']]) # Store timestamp and consumption value 88 | else: 89 | break 90 | else: 91 | print(f"Error fetching data: {response.status_code}") 92 | print(response.text) 93 | break 94 | 95 | page += 1 96 | if page > max_page_load: 97 | break 98 | 99 | # Print number of data points 100 | print(f"Total number of data points: {len(data)}") 101 | 102 | # If data exists, sort and send to EmonCMS 103 | if data: 104 | data.sort(key=lambda x: x[0]) # Sort by timestamp in ascending order 105 | 106 | # Send data to EmonCMS 107 | url = f"{settings['emoncms']['server']}/feed/post.json?id={feedid}&apikey={settings['emoncms']['apikey']}" 108 | response = requests.post(url, data={'data': json.dumps(data)}) 109 | 110 | if response.status_code == 200: 111 | print("Data uploaded successfully") 112 | else: 113 | print("Error uploading data") 114 | print(response.text) 115 | -------------------------------------------------------------------------------- /octopus/default.agile.conf: -------------------------------------------------------------------------------- 1 | # [emoncms] apikey = read/write API key 2 | # tag = node name 3 | # name = Feed Name 4 | # Neither can exist 5 | # AGILE-18-02-21, AGILE-22-07-22, AGILE-22-08-31, AGILE-23-12-06, AGILE-VAR-22-10-19, AGILE-FLEX-22-11-25 6 | 7 | [emoncms] 8 | apikey = 9 | server = http://localhost 10 | tag = agile 11 | name = consumption 12 | 13 | [octopus] 14 | account_number = 15 | mpan = 16 | serial_number = 17 | agile_apikey = 18 | tariff_name = AGILE-23-12-06 19 | gsp_id = D -------------------------------------------------------------------------------- /octopus/get_octopus_mpan_serial.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Fetches the MPAN and Serial number from the Octopus API 4 | # https://api.octopus.energy/v1/accounts// 5 | 6 | import sys, os, requests, json 7 | from datetime import datetime 8 | from configobj import ConfigObj 9 | 10 | script_path = os.path.dirname(os.path.realpath(__file__)) 11 | settings = ConfigObj(script_path+"/agile.conf", file_error=True) 12 | 13 | # Step 1: Fetch account details 14 | url = 
"https://api.octopus.energy/v1/accounts/%s/" % (settings['octopus']['account_number']) 15 | result = requests.get(url,auth=(settings['octopus']['agile_apikey'],'')) 16 | data = json.loads(result.text) 17 | 18 | if not data: sys.exit(0) 19 | 20 | # List all the MPANs and Serial numbers 21 | for property in data['properties']: 22 | print (f"Property: {property['address_line_1']}, {property['postcode']}") 23 | for meter in property['electricity_meter_points']: 24 | print(f"MPAN: {meter['mpan']}, Serial: {meter['meters'][0]['serial_number']}") 25 | 26 | # Print most recent tariff 27 | most_recent_agreement = property['electricity_meter_points'][0]['agreements'][-1] 28 | tariff = most_recent_agreement['tariff_code'] 29 | valid_from = most_recent_agreement['valid_from'] 30 | valid_to = most_recent_agreement['valid_to'] 31 | print(f"Tariff: {tariff}, Valid from: {valid_from}, Valid to: {valid_to}") -------------------------------------------------------------------------------- /octopus/load_agile_rates.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from datetime import datetime 3 | import time 4 | import sys 5 | import json 6 | import os 7 | from configobj import ConfigObj 8 | 9 | script_path = os.path.dirname(os.path.realpath(__file__)) 10 | settings = ConfigObj(script_path+"/agile.conf", file_error=True) 11 | 12 | # Emoncms account details 13 | host = settings['emoncms']['server'] 14 | apikey = settings['emoncms']['apikey'] 15 | 16 | # AGILE-18-02-21, AGILE-22-07-22, AGILE-22-08-31, AGILE-23-12-06, AGILE-VAR-22-10-19, AGILE-FLEX-22-11-25 17 | tariff_name = settings['octopus']['tariff_name'] 18 | 19 | # Regions 20 | """ 21 | "A": "Eastern_England", 22 | "B": "East_Midlands", 23 | "C": "London", 24 | "D": "Merseyside_and_Northern_Wales", 25 | "E": "West_Midlands", 26 | "F": "North_Eastern_England", 27 | "G": "North_Western_England", 28 | "H": "Southern_England", 29 | "J": "South_Eastern_England", 30 | "K": "Southern_Wales", 31 | "L": "South_Western_England", 32 | "M": "Yorkshire", 33 | "N": "Southern_Scotland", 34 | "P": "Northern_Scotland" 35 | """ 36 | gsp_id = settings['octopus']['gsp_id'] 37 | 38 | feed_name = f"{tariff_name}-{gsp_id}" 39 | 40 | # Get feedid of agile rates feed from emoncms 41 | url = f"{host}/feed/list.json?apikey={apikey}" 42 | response = requests.get(url) 43 | 44 | if response.status_code == 200: 45 | feeds = response.json() 46 | feedid = None 47 | for feed in feeds: 48 | if feed['name'] == feed_name: 49 | feedid = feed['id'] 50 | break 51 | 52 | end_time = 0 53 | 54 | # if feed not found, create a new feed 55 | # https://emoncms.org/feed/create.json?tag=Octopus&name=AGILE-23-12-06-D&engine=5&options={"interval":1800} 56 | if feedid is None: 57 | print ("Creating agile feed") 58 | options = { "interval": 1800 } 59 | # convert options to json 60 | options = json.dumps(options) 61 | url = f"{host}/feed/create.json?tag=Octopus&name={feed_name}&engine=5&options={options}&apikey={apikey}" 62 | print (url) 63 | 64 | response = requests.get(url) 65 | 66 | if response.status_code == 200: 67 | feedid = response.json()['feedid'] 68 | else: 69 | # If the feed exists get the last data point 70 | url = f"{host}/feed/getmeta.json?id={feedid}&apikey={apikey}" 71 | response = requests.get(url) 72 | if response.status_code == 200: 73 | meta = response.json() 74 | if meta['npoints'] > 0: 75 | end_time = meta['start_time'] + (meta['interval'] * meta['npoints']) 76 | 77 | # Calculate number of half hours since last data point 78 | 
now = time.time() 79 | diff = now - end_time 80 | half_hours = int(diff / 1800) 81 | page_size = half_hours 82 | 83 | if page_size > 1500: 84 | page_size = 1500 85 | 86 | if page_size < 100: 87 | page_size = 100 88 | 89 | print (f"Number of half hours since last data point: {half_hours}, page size: {page_size}") 90 | 91 | data = [] 92 | 93 | # start from page 1 and go through all pages 94 | page = 1 95 | max_page_load = 30 96 | while True: 97 | 98 | complete = False 99 | 100 | print (f"Fetching page {page}") 101 | url = f"https://api.octopus.energy/v1/products/{tariff_name}/electricity-tariffs/E-1R-{tariff_name}-{gsp_id}/standard-unit-rates/?page={page}&page_size={page_size}" 102 | response = requests.get(url) 103 | 104 | if response.status_code == 200: 105 | result = response.json() 106 | 107 | if result is not None and 'results' in result: 108 | 109 | print (f"Number of data points: {len(result['results'])}") 110 | if len(result['results']) == 0: 111 | complete = True 112 | break 113 | 114 | for row in result['results']: 115 | date = datetime.fromisoformat(row['valid_from'].replace('Z', '+00:00')) 116 | timestamp = int(date.timestamp()) 117 | 118 | if timestamp < end_time: 119 | complete = True 120 | break 121 | 122 | data.append([int(date.timestamp()), row['value_exc_vat']]) 123 | else: 124 | complete = True 125 | break 126 | else: 127 | complete = True 128 | break 129 | 130 | if complete: 131 | break 132 | 133 | page += 1 134 | if page > max_page_load: 135 | break 136 | 137 | 138 | 139 | # print number of data points 140 | print(f"Number of data points: {len(data)}") 141 | 142 | # sys.exit(0) 143 | 144 | if len(data): 145 | # sort by timestamp asc 146 | data.sort(key=lambda x: x[0]) 147 | 148 | # Send data to emoncms feed/post API 149 | url = f"{host}/feed/post.json?id={feedid}&apikey={apikey}" 150 | response = requests.post(url, data={'data': json.dumps(data)}) 151 | 152 | if response.status_code == 200: 153 | print("Data uploaded successfully") 154 | else: 155 | print("Error uploading data") 156 | print(response.text) 157 | 158 | -------------------------------------------------------------------------------- /octopus/readme.md: -------------------------------------------------------------------------------- 1 | # agile.py: python script to import Octopus Agile consumption data into emoncms 2 | 3 | This script can be ran daily from cron to pull in the latest data automatically. The script requests new data since the last valid reading. 4 | 5 | ## Setup 6 | 7 | 1\. Create config file from default: 8 | 9 | cd /opt/emoncms/modules/usefulscripts/octopus/ 10 | cp default.agile.conf agile.conf 11 | 12 | 2\. Enter configuration including emoncms apikey and authentication and meter details from Octopus 13 | 14 | nano agile.conf 15 | 16 | 3\. Run script for the first time to pull in data: 17 | 18 | python3 agile.py 19 | 20 | 4\. Run script from crontab every morning to pull in latest data: 21 | 22 | 30 9 * * * /usr/bin/python3 /opt/emoncms/modules/usefulscripts/octopus/agile.py > /dev/null 2>&1 23 | 24 | *This example runs the script at 9:30am every morning, consider changing the exact timing to reduce simultaneous load on the octopus servers* 25 | 26 | # load_agile_rates.py: python script to load the rates for a particular agile tariff 27 | 28 | This script can be ran daily from cron to pull in the latest data automatically. 29 | 30 | ## Setup 31 | 32 | 1\. 
As above, if you have not created the agile.conf file already, create config file from default: 33 | 34 | cd /opt/emoncms/modules/usefulscripts/octopus/ 35 | cp default.agile.conf agile.conf 36 | 37 | 2\. Enter configuration including emoncms server, write apikey, tariff_name and gsp_id (region): 38 | 39 | nano agile.conf 40 | 41 | 3\. Run script manually: 42 | 43 | python3 load_agile_rates.py 44 | 45 | 4\. Run script from crontab every evening at 8pm to pull in latest data: 46 | 47 | 03 17 * * * /usr/bin/python3 /opt/emoncms/modules/usefulscripts/octopus/load_agile_rates.py > /dev/null 2>&1 -------------------------------------------------------------------------------- /process/Lib/EmonLogger.php: -------------------------------------------------------------------------------- 1 | logenabled = false; 26 | } 27 | else if ($settings['log']['location']) { 28 | $this->logfile = $settings['log']['location']."/emoncms.log"; 29 | $this->caller = basename($clientFileName); 30 | if (!file_exists($this->logfile)) 31 | { 32 | $fh = @fopen($this->logfile,"a"); 33 | @fclose($fh); 34 | } 35 | if (is_writable($this->logfile)) $this->logenabled = true; 36 | } 37 | } 38 | 39 | public function info ($message){ 40 | $this->write("INFO",$message); 41 | } 42 | 43 | public function warn ($message){ 44 | $this->write("WARN",$message); 45 | } 46 | 47 | public function error ($message){ 48 | $this->write("ERROR",$message); 49 | } 50 | 51 | private function write($type,$message){ 52 | if (!$this->logenabled) return; 53 | 54 | $now = microtime(true); 55 | $micro = sprintf("%03d",($now - ($now >> 0)) * 1000); 56 | $now = DateTime::createFromFormat('U', (int)$now); // Only use UTC for logs 57 | $now = $now->format("Y-m-d H:i:s").".$micro"; 58 | // Clear log file if more than 256MB (temporary solution) 59 | if (filesize($this->logfile)>(1024*1024*256)) { 60 | $fh = @fopen($this->logfile,"w"); 61 | @fclose($fh); 62 | } 63 | if ($fh = @fopen($this->logfile,"a")) { 64 | @fwrite($fh,$now."|$type|$this->caller|".$message."\n"); 65 | @fclose($fh); 66 | } 67 | } 68 | 69 | } 70 | -------------------------------------------------------------------------------- /process/Lib/PHPFiwa.php: -------------------------------------------------------------------------------- 1 | dir = $settings['datadir']; 26 | 27 | $this->log = new EmonLogger(__FILE__); 28 | } 29 | 30 | public function get_meta($filename) 31 | { 32 | // Load metadata from cache if it exists 33 | if (isset($this->metadata_cache[$filename])) 34 | { 35 | return $this->metadata_cache[$filename]; 36 | } 37 | elseif (file_exists($this->dir.$filename.".meta")) 38 | { 39 | $meta = new stdClass(); 40 | $meta->id = $filename; 41 | 42 | $metafile = fopen($this->dir.$filename.".meta", 'rb'); 43 | 44 | $tmp = unpack("I",fread($metafile,4)); 45 | $tmp = unpack("I",fread($metafile,4)); 46 | $meta->start_time = $tmp[1]; 47 | $tmp = unpack("I",fread($metafile,4)); 48 | $meta->nlayers = $tmp[1]; 49 | 50 | for ($i=0; $i<$meta->nlayers; $i++) { 51 | $tmp = unpack("I",fread($metafile,4)); 52 | } 53 | 54 | $meta->interval = array(); 55 | for ($i=0; $i<$meta->nlayers; $i++) 56 | { 57 | $tmp = unpack("I",fread($metafile,4)); 58 | $meta->interval[$i] = $tmp[1]; 59 | } 60 | fclose($metafile); 61 | 62 | // Save to metadata_cache so that we dont need to open the file next time 63 | $this->metadata_cache[$filename] = $meta; 64 | 65 | return $meta; 66 | } 67 | else 68 | { 69 | return false; 70 | } 71 | } 72 | 73 | public function readnext($filename) 74 | { 75 | if 
(!isset($this->filehandle[$filename])) { 76 | $this->filehandle[$filename] = fopen($this->dir.$filename."_0.dat", 'rb'); 77 | $this->dpposition[$filename] = 0; 78 | } 79 | $fh = $this->filehandle[$filename]; 80 | if (feof($fh)) return false; 81 | 82 | $meta = $this->get_meta($filename); 83 | 84 | $d = fread($fh,4); 85 | if (strlen($d)!=4) return false; 86 | 87 | $val = unpack("f",$d); 88 | $value = $val[1]; 89 | 90 | $time = $meta->start_time + $this->dpposition[$filename] * $meta->interval[0]; 91 | $this->dpposition[$filename] += 1; 92 | 93 | return array('time'=>$time, 'value'=>$value); 94 | } 95 | } 96 | 97 | -------------------------------------------------------------------------------- /process/Lib/PHPTimeSeries.php: -------------------------------------------------------------------------------- 1 | dir = $settings['datadir']; 22 | 23 | $this->log = new EmonLogger(__FILE__); 24 | } 25 | 26 | public function readnext($id) 27 | { 28 | if (!isset($this->filehandle[$id])) { 29 | $this->filehandle[$id] = fopen($this->dir."feed_$id.MYD", 'rb'); 30 | } 31 | $fh = $this->filehandle[$id]; 32 | if (feof($fh)) return false; 33 | 34 | $d = fread($fh,9); 35 | if (strlen($d)!=9) return false; 36 | 37 | $array = unpack("x/Itime/fvalue",$d); 38 | 39 | return array('time'=>$array['time'], 'value'=>$array['value']); 40 | } 41 | } 42 | 43 | -------------------------------------------------------------------------------- /process/Lib/PHPTimestore.php: -------------------------------------------------------------------------------- 1 | dir = $settings['datadir']; 26 | 27 | $this->log = new EmonLogger(__FILE__); 28 | } 29 | 30 | public function get_meta($feedid) 31 | { 32 | $feedid = (int) $feedid; 33 | $metafile = $this->dir.str_pad($feedid, 16, '0', STR_PAD_LEFT).".tsdb"; 34 | $datafile = $this->dir.str_pad($feedid, 16, '0', STR_PAD_LEFT)."_0_.dat"; 35 | 36 | // Load metadata from cache if it exists 37 | if (isset($this->metadata_cache[$feedid])) 38 | { 39 | return $this->metadata_cache[$feedid]; 40 | } 41 | elseif (file_exists($metafile)) 42 | { 43 | $meta = new stdClass(); 44 | $meta->feedid = $feedid; 45 | $size = filesize($metafile); 46 | $fh = fopen($metafile, 'rb'); 47 | 48 | fseek($fh,8); 49 | $d = fread($fh,8); 50 | $tmp = unpack("h*",$d); 51 | //$meta->feedid = (int) strrev($tmp[1]); 52 | $tmp = unpack("I",fread($fh,4)); 53 | $meta->nmetrics = $tmp[1]; 54 | $tmp = unpack("I",fread($fh,4)); 55 | // $legacy_npoints = $tmp[1]; 56 | $tmp = unpack("I",fread($fh,8)); 57 | $meta->start_time = $tmp[1]; 58 | $tmp = unpack("I",fread($fh,4)); 59 | $meta->interval = $tmp[1]; 60 | fclose($fh); 61 | 62 | // Double verification of npoints 63 | clearstatcache($datafile); 64 | $filesize = filesize($datafile); 65 | $meta->npoints = floor($filesize / 4.0); 66 | 67 | return $meta; 68 | } 69 | else 70 | { 71 | return false; 72 | } 73 | } 74 | 75 | public function readnext($feedid) 76 | { 77 | $datafile = $this->dir.str_pad($feedid, 16, '0', STR_PAD_LEFT)."_0_.dat"; 78 | if (!isset($this->filehandle[$feedid])) { 79 | $this->filehandle[$feedid] = fopen($datafile, 'rb'); 80 | $this->dpposition[$feedid] = 0; 81 | } 82 | 83 | $fh = $this->filehandle[$feedid]; 84 | if (feof($fh)) return false; 85 | 86 | $meta = $this->get_meta($feedid); 87 | 88 | $d = fread($fh,4); 89 | if (strlen($d)!=4) return false; 90 | 91 | $val = unpack("f",$d); 92 | $value = $val[1]; 93 | 94 | $time = $meta->start_time + $this->dpposition[$feedid] * $meta->interval; 95 | $this->dpposition[$feedid] += 1; 96 | 97 | return array('time'=>$time, 
'value'=>$value); 98 | } 99 | } 100 | 101 | -------------------------------------------------------------------------------- /process/power_to_kwh_manual.php: -------------------------------------------------------------------------------- 1 | feed $target\n"; 32 | 33 | 34 | if ($source_engine==Engine::PHPFINA) { 35 | echo "Deleting data for ".$settings['feed']['phpfina']['datadir'].$target.".dat\n"; 36 | unlink($settings['feed']['phpfina']['datadir'].$target.".dat"); 37 | 38 | echo "Creating new data file\n"; 39 | $fh = fopen($settings['feed']['phpfina']['datadir'].$target.".dat", 'wb'); 40 | fclose($fh); 41 | } 42 | 43 | // Starting kWh of feed, default: 0 44 | $kwh = 0; 45 | $time = 0; 46 | 47 | while ($dp = $engine[$source_engine]->readnext($source)) 48 | { 49 | $last_time = $time; 50 | 51 | if (!is_nan($dp['value'])) { 52 | 53 | $time = $dp['time']; 54 | $power = $dp['value']; 55 | 56 | //------------------------------------------------ 57 | // 2) Calculate increase in kWh and next total kWh value 58 | //------------------------------------------------ 59 | 60 | // only update if the last datapoint was less than 2 hours old 61 | // this is to reduce the effect of monitor down time creating 62 | // spuriously large kwh readings. 63 | if ($last_time && ($time-$last_time)<7200) 64 | { 65 | // kWh calculation 66 | $time_elapsed = ($time - $last_time); 67 | $kwh_inc = ($time_elapsed * $power) / 3600000.0; 68 | $kwh += $kwh_inc; 69 | } else { 70 | // in the event that redis is flushed the last time will 71 | // likely be > 7200s ago and so the kwh increment is not calculated; 72 | // rather than enter 0 we keep the last value 73 | $kwh = $kwh; 74 | } 75 | 76 | // print $time." ".$kwh."\n"; 77 | //------------------------------------------------ 78 | // 3) Save value to phpfina feed 79 | //------------------------------------------------ 80 | 81 | // Save $kwh to feed 82 | // print $time." ".$kwh."\n"; 83 | $engine[Engine::PHPFINA]->prepare($target,$time,$kwh); 84 | if ($low_memory_mode) $engine[Engine::PHPFINA]->save(); 85 | } 86 | } 87 | $engine[Engine::PHPFINA]->save(); 88 | 89 | print "Recalculated in ".round(microtime(true)-$start)."s\n"; 90 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # Useful scripts 2 | 3 | Useful scripts for managing your emoncms installation. 4 | 5 | ## Update Emoncms 6 | 7 | The `update_emoncms.sh` script can be used to update Emoncms; it pulls the latest changes from the Emoncms GitHub and Emoncms Modules repos. The script assumes the Emoncms path `/var/www/html/emoncms`; edit to match your setup. Contributed by @pb66 (Paul) [see forum thread](https://community.openenergymonitor.org/t/emoncms-9-8-3-released-to-emonpi-stable-branch/3759/6). 8 | 9 | ## Backup/Replication 10 | 11 | This is a tool for backing up the data in an emoncms.org account or other remote emoncms server to a local computer. It can be used with or without a local installation of emoncms. To use, open Backup/backup.php in a text editor. 12 | 13 | - Set $remote_server and $remote_apikey to correspond to the remote emoncms account you wish to download from. 14 | - Set $link\_to\_local\_emoncms to true if you wish to access your data within a local installation of emoncms. Set $local\_emoncms\_location and $local\_emoncms\_userid to link to your local emoncms installation. 
15 | - Set $link\_to\_local\_emoncms to false if you just want to download the data without linking to a local emoncms install (non-mysql data only). Set $dir to the directory on your computer to which you wish to download the data. Manually create the folders: phpfina, phpfiwa, phptimeseries, phptimestore within this folder. 16 | 17 | - Run the backup script from terminal with: 18 | 19 | php backup.php 20 | 21 | Tested with emoncms.org (v8.0.9: 4 July 2014) and local emoncms v8.2.8. 22 | 23 | ## Data recovery 24 | 25 | If backing up feed data via the normal directory copy method fails due to a disk problem, this data recovery tool may help you extract your feed data, skipping the problematic sectors on your disk. See the forum post for further information: [Data Recovery forum post](http://openenergymonitor.org/emon/node/5213) 26 | 27 | To use the data recovery tool, open recover.php and set both source and target directories for the data to recover. Run the recovery tool from the terminal with: 28 | 29 | php recover.php 30 | 31 | ## Integrity check 32 | 33 | A tool for checking the integrity of your emoncms phpfiwa, phpfina, phptimeseries and phptimestore feeds. To use, open integritycheck.php and set the engine data directories. Run the integrity check from the terminal with: 34 | 35 | php integritycheck.php 36 | 37 | ## Convert data 38 | 39 | There are two scripts for converting phpfiwa or phptimestore to phpfina. PHPFina has a much lower write load when used as the storage engine on SD cards. Using these conversion scripts you can convert your data to PHPFina so that it can be used with the new SD card branch of emoncms called 'bufferedwrite'. 40 | 41 | phpfiwa_to_phpfina.php 42 | phptimestore_to_phpfina.php 43 | 44 | Set the source and target directory as well as the emoncms install location and then run as above. 45 | Once the script has completed the conversion, flush redis for the changes to take effect: 46 | 47 | `redis-cli flushall` 48 | 49 | ## Remove spikes 50 | 51 | This script is useful for removing irregularities in feed data. It works through the phpfiwa data files searching for values that are larger or smaller than the given limits; when it finds one it sets it to NAN, which means it is ignored in the graph. 52 | 53 | To run the script: 54 | 55 | sudo php remove_spike.php -i FeedId -n MinValue -x MaxValue 56 | 57 | For example, to remove values above 5.5V and below 1V (a battery feed in V, for example) on feed 22 the syntax is: 58 | 59 | sudo php remove_spike.php -i 22 -n 1 -x 5.5 60 | 61 | ONLY WORKS WITH PHPFIWA DATA! 62 | 63 | ## Emoncms MYSQL backup script 64 | 65 | This shell script can be called manually, or automated using Node-red or Cron, to create a MYSQL dump of your emoncms MYSQL database. 66 | Firstly, edit the mysql_backup_emoncms.sh script as follows: 67 | -u 'username' = your emoncms MYSQL username (default is emoncms) 68 | -p'password' = your emoncms MYSQL password (Note - there is no space after the leading 'p'!!) 69 | 'database' = the name of your emoncms MYSQL database (default is emoncms) 70 | Replace '/path/to/' with the absolute path to where your backup will be stored. 71 | 72 | Make the file executable: 73 | 74 | `chmod +x mysql_backup_emoncms.sh` 75 | 76 | To run, from the script installation directory: 77 | 78 | `./mysql_backup_emoncms.sh` 79 | 80 | ## sdpart - A script to set up SD cards for emoncms 81 | 82 | There are two variants of this script: sdpart_imagefile and sdpart_selfbuild. 
83 | **sdpart_selfbuild** is a script intended to be used to prepare an SD card for users wishing to build their system step-by-step using the emoncms self-build guides. 84 | **sdpart_imagefile** is a script to be used **only** with the official emoncms imagefile, and will make the necessary changes to your SD card for emoncms to run effectively. 85 | 86 | #### sdpart_selfbuild 87 | 88 | This shell script can be run under two different scenarios: 89 | 1) The script is run immediately after installing the operating system - **BEFORE** running `sudo raspi-config` and expanding the filesystem. This is the preferred option! 90 | 2) If the filesystem has already been expanded, you must unmount the root partition and reduce its size using Gparted or other similar tools, by at least 350Mb if using a 4Gb SD card, or 1.5Gb if using a larger card. 91 | 92 | 93 | The script will check your SD card's size and will make the following changes: 94 | **On SD cards of less than 5Gb capacity** 95 | * create a 300Mb data partition formatted to ext2 with a blocksize of 1024 bytes 96 | * create a partition 'buffer' of 10Mb 97 | * expand the root filesystem to fill the available space. 98 | 99 | 100 | **On SD cards of more than 5Gb capacity** 101 | * create a 1Gb data partition formatted to ext2 with a blocksize of 1024 bytes 102 | * create a partition 'buffer' of 50Mb 103 | * expand the root filesystem to fill the available space. 104 | 105 | To run the script, navigate to the usefulscripts/sdpart folder and run `sudo ./sdpart_selfbuild`, following the prompts. 106 | 107 | #### sdpart_imagefile 108 | 109 | Firstly, write the emoncms imagefile to your SD card, and boot emoncms on your Raspberry Pi. 110 | **Do not run raspi-config** but run the sdpart_imagefile script as follows: 111 | 112 | ``` 113 | rpi-rw 114 | cd usefulscripts 115 | git pull 116 | sudo sdpart/./sdpart_imagefile 117 | ``` 118 | Symlink to use `sudo emonSDexpand` with: 119 | 120 | `sudo ln -s /home/pi/usefulscripts/sdpart/sdpart_imagefile /sbin/emonSDexpand` 121 | 122 | The script will determine and make the necessary changes; it will take 20 minutes or so to complete, and it is important that the process is not interrupted. 123 | So leave well alone! Once complete, your Raspberry Pi will power off and shut down. 124 | 125 | 126 | ## Password Reset 127 | 128 | [Forum thread discussion](http://openenergymonitor.org/emon/node/12155) 129 | 130 | Reset the Emoncms password. Run with: 131 | 132 | php resetpassword.php 133 | 134 | It then asks for the userid (default: 1) and for a new password, or the option to auto-generate one: 135 | 136 | ``` 137 | ======================================= 138 | EMONCMS PASSWORD RESET 139 | ======================================= 140 | Select userid, or press enter for default: 141 | Using default user 1 142 | Enter new password, or press enter to auto generate: 143 | Auto generated password: 9f7599c8da 144 | ``` 145 | 146 | ## Create users and devices, add the users to a group 147 | 148 | Script for bulk creation of users. If the device module is installed it can create one device for each user. If the group module is installed it can add each user to a group as a passive member. 149 | 150 | The script outputs a CSV table including: username, userid, password, apikey_read, apikey_write, device_key. This table can be copied and pasted into a CSV file for importing into a spreadsheet. 
151 | 152 | `php create_users_and_devices_add_to_group.php -d device_template -g group_name --dnode device_id --dname device_name -u username1 -u username2` 153 | 154 | To get some basic help: 155 | 156 | `php create_users_and_devices_add_to_group.php -h` 157 | 158 | -------------------------------------------------------------------------------- /removespike/remove_spike.php: -------------------------------------------------------------------------------- 1 | $max) 55 | { 56 | echo $val." > ".$max ."\n"; 57 | fseek($fh,$i*4); 58 | fwrite($fh,pack("f",NAN)); 59 | } 60 | 61 | if ($val<$min) 62 | { 63 | echo $val." < ".$min . "\n"; 64 | fseek($fh,$i*4); 65 | fwrite($fh,pack("f",NAN)); 66 | } 67 | 68 | } 69 | } 70 | 71 | fclose($fh); 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /resetpassword.php: -------------------------------------------------------------------------------- 1 | connect_error ) { 20 | echo "Can't connect to database, please verify credentials/configuration in settings.php
"; 21 | if ( $display_errors ) { 22 | echo "Error message: " . $mysqli->connect_error . ""; 23 | } 24 | die(); 25 | } 26 | 27 | $v9 = (int)stdin("Are you running emoncms v9? (y/n): "); 28 | if ($v9=="n") $old = true; 29 | 30 | $userid = (int)stdin("Select userid, or press enter for default: "); 31 | if ($userid==0) { 32 | echo "Using default user 1\n"; 33 | $userid = 1; 34 | } 35 | 36 | $newpass = stdin("Enter new password, or press enter to auto generate: "); 37 | if ($newpass=="") { 38 | // Generate new random password 39 | $newpass = hash('sha256',md5(uniqid(rand(), true))); 40 | $newpass = substr($newpass, 0, 10); 41 | print "Auto generated password: $newpass\n"; 42 | } 43 | 44 | // Hash and salt 45 | $hash = hash('sha256', $newpass); 46 | $salt = md5(uniqid(rand(), true)); 47 | if ($old) $salt = substr($salt, 0, 3); 48 | $password = hash('sha256', $salt . $hash); 49 | 50 | // Save password and salt 51 | $mysqli->query("UPDATE users SET password = '$password', salt = '$salt' WHERE id = '$userid'"); 52 | 53 | echo "Complete: new password set\n"; 54 | 55 | 56 | function stdin($prompt = null){ 57 | if($prompt){ 58 | echo $prompt; 59 | } 60 | $fp = fopen("php://stdin","r"); 61 | $line = rtrim(fgets($fp, 1024)); 62 | return $line; 63 | } 64 | -------------------------------------------------------------------------------- /sdpart/sdpart_imagefile: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ################################################################### 3 | # All Emoncms code is released under the GNU Affero General Public License. 4 | # See COPYRIGHT.txt and LICENSE.txt. 5 | # --------------------------------------------------------------------- 6 | # Emoncms - open source energy visualisation 7 | # Part of the OpenEnergyMonitor project: http://openenergymonitor.org 8 | ################################################################### 9 | # This script's purpose is to prepare a SD Card after a emoncms imagefile 10 | # has been written to a SD card. 11 | # It will calculate and expand the /data partition to the preferred size, 12 | # and then also expand the partition filesystem. 13 | # The whole process takes about 20 minutes to complete, depending upon the 14 | # size of the SD card, and its important that the script is not interupted. 15 | # Upon completion, your Raspberry Pi will poweroff. 16 | # 17 | # Author: Paul Reed 18 | ################################################################### 19 | if [ $(id -u) -ne 0 ]; then 20 | printf "Script must be run as root. Try 'sudo ./sdpart'\n" 21 | exit 1 22 | fi 23 | 24 | data_dir="/var/opt/emoncms" 25 | 26 | if [ -d "/home/pi" ]; then 27 | if [ -d "/home/pi/data" ]; then 28 | data_dir="/home/pi/data" 29 | elif [ -d "/var/opt/emoncms" ]; then 30 | data_dir="/var/opt/emoncms" 31 | else 32 | echo This does not appear to be an emoncms imagefile installation 33 | echo the script is aborting. 
34 | exit 1 35 | fi 36 | fi 37 | 38 | echo "data_dir: $data_dir" 39 | 40 | ################################################################### 41 | if [ -f /usr/bin/rpi-rw ]; then 42 | rpi-rw 43 | fi 44 | echo 45 | PART_END="$(($(blockdev --getsz /dev/mmcblk0)))" 46 | DATA_START="$(parted /dev/mmcblk0 -ms unit s p | grep "^3" | cut -f2 -d: | sed 's/[^0-9]*//g')" 47 | [ "$DATA_START" ] || exit 1 48 | # Create smaller data partition for 4Gb cards 49 | if [ $PART_END -lt 10485760 ] 50 | then 51 | # Creates a 300Mb data partition and 10Mb buffer 52 | DATA_END="$(((PART_END)-20480))" 53 | else 54 | # Creates a 1Gb data partition and 50Mb buffer 55 | DATA_END="$(((PART_END)-102400))" 56 | fi 57 | ################################################################### 58 | # Display current SD card data 59 | echo ====================================================== 60 | echo 61 | echo Current Disk Info: 62 | fdisk -l /dev/mmcblk0 63 | echo 64 | echo ====================================================== 65 | echo 66 | echo Proposed changes to be made: 67 | echo " SD card total disk size = "$(python -c "print $PART_END * 512.0 / 1073741824")"Gb" 68 | echo " Data Partition size = "$(python -c "print ($DATA_END - $DATA_START + 1) * 512.0 / 1073741824")"Gb" 69 | echo 70 | read -r -p "Are you sure you want to proceed? [Y/n] " response 71 | if [[ $response =~ ^([nN][oO]|[nN])$ ]] 72 | then 73 | exit 1 74 | else 75 | 76 | if [ -f /usr/bin/rpi-rw ]; then 77 | rpi-rw 78 | fi 79 | ################################################################### 80 | # Create a systemd unit file to extend filesystem after a reboot 81 | cat <<\EOF > /lib/systemd/system/resize2fs_once.service && 82 | [Unit] 83 | Description=Resize2fs_once Service 84 | 85 | [Service] 86 | Type=idle 87 | ExecStart=/home/pi/resize2fs_once.sh 88 | StandardOutput=null 89 | TimeoutStartSec=infinity 90 | 91 | [Install] 92 | WantedBy=multi-user.target 93 | Alias=resize2fs_once.service 94 | 95 | EOF 96 | ################################################################### 97 | # Enable the unit file so it runs after a reboot 98 | systemctl enable resize2fs_once.service 99 | # 100 | # Fdisk will now make the changes.. 101 | printf "d\n3\nn\np\n3\n$DATA_START\n$DATA_END\np\nw\n" | fdisk /dev/mmcblk0 102 | echo This error message can however be disregarded, because your system 103 | echo is about to be rebooted, and the new partition table will then be 104 | echo read by your operating system. 105 | echo 106 | ################################################################### 107 | echo Writing the resize2fs_once script 108 | cat <<\EOF > /home/pi/resize2fs_once.sh && 109 | #!/bin/sh 110 | log_daemon_msg "Starting resize2fs_once script" 111 | umount /dev/mmcblk0p3 112 | e2fsck -yf /dev/mmcblk0p3 113 | resize2fs -f /dev/mmcblk0p3 114 | if [ -f /usr/bin/rpi-rw ]; then 115 | rpi-rw 116 | fi 117 | rm /lib/systemd/system/resize2fs_once.service 118 | systemctl disable resize2fs_once.service 119 | rm /home/pi/resize2fs_once.sh 120 | sed -i "s~^#\(.*$data_dir\)~\1~" /etc/fstab 121 | /bin/sync 122 | /sbin/shutdown -h now 123 | 124 | EOF 125 | ################################################################### 126 | # Make the resize2fs_once script executable 127 | chmod +x /home/pi/resize2fs_once.sh 128 | # Ensure that the /data partition is unmounted after a reboot 129 | sed -i "s~^[^#]*$data_dir~#&~" /etc/fstab 130 | 131 | echo 132 | echo 133 | echo ====================================================================== 134 | echo 135 | echo So far, so good... 
in 20s your system will reboot 136 | echo and resize the $data_dir filesystem to fill the resized partition. 137 | echo **THIS OPERATION WILL TAKE UP TO 20 MINUTES** 138 | echo When completed the Pi will poweroff and close down. 139 | echo PLEASE DO NOT UNPLUG THE POWER FOR AT LEAST 30min 140 | echo because it will likely result in an unusable filesystem. 141 | echo 142 | echo ====================================================================== 143 | sleep 20 144 | /bin/sync 145 | /sbin/reboot 146 | fi 147 | ################################################################### 148 | # END OF SCRIPT 149 | ################################################################### 150 | -------------------------------------------------------------------------------- /sdpart/sdpart_selfbuild: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ################################################################### 3 | # All Emoncms code is released under the GNU Affero General Public License. 4 | # See COPYRIGHT.txt and LICENSE.txt. 5 | # --------------------------------------------------------------------- 6 | # Emoncms - open source energy visualisation 7 | # Part of the OpenEnergyMonitor project: http://openenergymonitor.org 8 | ################################################################### 9 | # This script is intended to prepare SD card partitioning as follows; 10 | # * Calculate partitioning to accomodate a Data partition 11 | # * Create a data partition 12 | # * Expand the root partition to a preset size 13 | # * Install a suitable file system to the data partition 14 | # * Expand the root filesystem 15 | # 16 | # Author: Paul Reed 17 | # Credits: Kevin Reed (Dweeber) 18 | # jojopi on Raspberry Pi Forum who provided sample code 19 | # MrEngman on Raspberry Pi Forum for testing 20 | # Examples from http://github.com/asb/raspi-config 21 | # 22 | ################################################################### 23 | if [ $(id -u) -ne 0 ]; then 24 | printf "Script must be run as root. Try 'sudo ./sdpart'\n" 25 | exit 1 26 | fi 27 | ################################################################### 28 | echo 29 | PART_END="$(($(blockdev --getsz /dev/mmcblk0)))" 30 | ROOT_START="$(parted /dev/mmcblk0 -ms unit s p | grep "^2" | cut -f2 -d: | sed 's/[^0-9]*//g')" 31 | [ "$ROOT_START" ] || exit 1 32 | # Create smaller data partition for 4Gb cards 33 | if [ $PART_END -lt 10485760 ] 34 | then 35 | # Creates a 300Mb data partition and 10Mb buffer 36 | ROOT_END="$(((PART_END)-634880))" 37 | DATA_END="$(((PART_END)-20480))" 38 | else 39 | # Creates a 1Gb data partition and 50Mb buffer 40 | ROOT_END="$(((PART_END)-2199552))" 41 | DATA_END="$(((PART_END)-102400))" 42 | fi 43 | DATA_START="$(((ROOT_END)+1))" 44 | ################################################################### 45 | # Display current SD card data 46 | echo ====================================================== 47 | echo 48 | echo Current Disk Info: 49 | fdisk -l /dev/mmcblk0 50 | echo 51 | echo ====================================================== 52 | echo 53 | echo Proposed changes to be made: 54 | echo " SD card total disk size = "$(python -c "print $PART_END * 512.0 / 1073741824")"Gb" 55 | echo " Root Partition size = "$(python -c "print ($ROOT_END - $ROOT_START + 1) * 512.0 / 1048576")"Mb" 56 | echo " Data Partition size = "$(python -c "print ($DATA_END - $DATA_START + 1) * 512.0 / 1048576")"Mb" 57 | echo 58 | read -r -p "Are you sure you want to proceed? 
[Y/n] " response 59 | if [[ $response =~ ^([nN][oO]|[nN])$ ]] 60 | then 61 | exit 1 62 | else 63 | # Fdisk now to make the changes 64 | printf "d\n2\nn\np\n2\n$ROOT_START\n$ROOT_END\nn\np\n3\n$DATA_START\n$DATA_END\np\nw\n" | fdisk /dev/mmcblk0 65 | echo 66 | echo Setting up init.d resize2fs_once script 67 | 68 | cat <<\EOF > /etc/init.d/resize2fs_once && 69 | #!/bin/sh 70 | ### BEGIN INIT INFO 71 | # Provides: resize2fs_once 72 | # Required-Start: 73 | # Required-Stop: 74 | # Default-Start: 2 3 4 5 75 | # Default-Stop: 0 1 6 76 | # Short-Description: Run resize2fs_once on boot 77 | # Description: 78 | ### END INIT INFO 79 | 80 | . /lib/lsb/init-functions 81 | 82 | case "$1" in 83 | start) 84 | log_daemon_msg "Starting resize2fs_once, THIS WILL TAKE A FEW MINUTES " && 85 | resize2fs /dev/mmcblk0p2 && 86 | mkfs.ext2 -b 1024 /dev/mmcblk0p3 && 87 | 88 | # Remove the script 89 | rm /etc/init.d/resize2fs_once && 90 | update-rc.d resize2fs_once remove && 91 | log_end_msg $? 92 | ;; 93 | *) 94 | echo "Usage: $0 start" >&2 95 | exit 3 96 | ;; 97 | esac 98 | EOF 99 | chmod +x /etc/init.d/resize2fs_once && 100 | update-rc.d resize2fs_once defaults && 101 | 102 | echo 103 | echo ##################################################################### 104 | echo System is now ready to resize your system. A REBOOT IS REQUIRED NOW! 105 | echo "Press ENTER to reboot : \c" 106 | read aok 107 | echo REBOOTING.... 108 | /bin/sync 109 | /sbin/reboot 110 | echo 111 | echo Script Complete... 112 | 113 | fi 114 | ################################################################### 115 | # END OF SCRIPT 116 | ################################################################### 117 | -------------------------------------------------------------------------------- /update_emoncms.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # emoncms updater script 3 | 4 | EMONCMS_DIR=/var/www/html/emoncms 5 | printf "\nUpdate emoncms....\n" 6 | git -C $EMONCMS_DIR pull 7 | for M in $EMONCMS_DIR/Modules/* 8 | do 9 | if [ -d "$M/.git" ]; then 10 | printf "\nUpdate emoncms/$(basename $M)....\n" 11 | git -C $M pull 12 | git -C $M status 13 | git -C $M describe --tags 14 | fi 15 | done 16 | -------------------------------------------------------------------------------- /writeloadtest/writeloadtest.php: -------------------------------------------------------------------------------- 1 | =60.0) 13 | { 14 | $last = time(); 15 | 16 | $last_sectors_written2 = $total_sectors_written2; 17 | $total_sectors_written2 = exec("awk '/mmcblk0p2/ {print $10}' /proc/diskstats"); 18 | $sectors_written2 = $total_sectors_written2 - $last_sectors_written2; 19 | 20 | $last_sectors_written3 = $total_sectors_written3; 21 | $total_sectors_written3 = exec("awk '/mmcblk0p3/ {print $10}' /proc/diskstats"); 22 | $sectors_written3 = $total_sectors_written3 - $last_sectors_written3; 23 | 24 | if ($last_sectors_written2!=-1 && $last_sectors_written3!=-1) { 25 | // Send to emoncms 26 | file_get_contents("$host/input/post?node=diskstats&data=totalsectorswritten2:$total_sectors_written2,sectorswritten2:$sectors_written2,totalsectorswritten3:$total_sectors_written3,sectorswritten3:$sectors_written3&apikey=$apikey"); 27 | } 28 | 29 | } 30 | 31 | sleep(1.0); 32 | } 33 | -------------------------------------------------------------------------------- /writeloadtest/writeloadtest.service: -------------------------------------------------------------------------------- 1 | # Systemd unit file for writeloadtest script 2 | 
3 | # INSTALL: 4 | 5 | # sudo ln -s /opt/emoncms/modules/usefulscripts/writeloadtest/writeloadtest.service /lib/systemd/system 6 | 7 | # RUN AT STARTUP 8 | # sudo systemctl daemon-reload 9 | # sudo systemctl enable writeloadtest.service 10 | 11 | # START / STOP with: 12 | # sudo systemctl start writeloadtest 13 | # sudo systemctl stop writeloadtest 14 | 15 | # VIEW STATUS / LOG 16 | # If using Syslog: 17 | # sudo systemctl status writeloadtest -n50 18 | # where -nX is the number of log lines to view 19 | # sudo journalctl -f -u writeloadtest 20 | # Otherwise: 21 | # Specify 22 | #StandardOutput=file:/var/log/writeloadtest.log 23 | # tail -f /var/log/writeloadtest.log 24 | 25 | ### 26 | # 27 | # All Emoncms code is released under the GNU Affero General Public License. 28 | # See COPYRIGHT.txt and LICENSE.txt. 29 | # 30 | # --------------------------------------------------------------------- 31 | # Emoncms - open source energy visualisation 32 | # Part of the OpenEnergyMonitor project: 33 | # http://openenergymonitor.org 34 | ### 35 | 36 | [Unit] 37 | Description=writeloadtest script 38 | Wants=mysql.service redis.service 39 | After=mysql.service redis.service 40 | Documentation=https://github.com/emoncms/usefulscripts 41 | 42 | # Uncomment this line to use a dedicated log file for StdOut and StdErr. 43 | # NOTE: only works in systemd v236+ 44 | # Debian "stretch" includes v232, "buster" includes v239 45 | #StandardOutput=file:/var/log/writeloadtest.log 46 | 47 | [Service] 48 | Type=idle 49 | ExecStart=/usr/bin/php /opt/emoncms/modules/usefulscripts/writeloadtest/writeloadtest.php 50 | 51 | # Restart the script if it stops on a failure. It will not restart if not configured correctly 52 | Restart=on-failure 53 | # Wait 60s before restart 54 | RestartSec=60 55 | 56 | # Tag things in the log 57 | # If you want to use the journal instead of the file above, uncomment SyslogIdentifier below 58 | # View with: sudo journalctl -f -u writeloadtest -o cat 59 | SyslogIdentifier=writeloadtest 60 | 61 | [Install] 62 | WantedBy=multi-user.target 63 | 64 | --------------------------------------------------------------------------------