├── dev-mode ├── example │ └── node.lua ├── README.md └── dev-mode ├── ib-shell ├── README.md └── ib-shell ├── rpc.lua ├── README.md ├── hosted.js ├── hosted.lua ├── p2p-helpers ├── p2pevt.py ├── README.md └── p2plib.py └── hosted.py /dev-mode/example/node.lua: -------------------------------------------------------------------------------- 1 | gl.setup(NATIVE_WIDTH, NATIVE_HEIGHT) 2 | 3 | function node.render() 4 | gl.clear(1, 0, 0, 1) 5 | end 6 | -------------------------------------------------------------------------------- /ib-shell/README.md: -------------------------------------------------------------------------------- 1 | # A terminal client for your hosted devices 2 | 3 | Like SSH, but without key management and the need to configure anything 4 | on your devices. And you can connect to them from anywhere. 5 | 6 | On Ubuntu you might have to install the package `python-websocket` 7 | and `python-requests`. 8 | 9 | Optionally set the environment variable `API_KEY` to your 10 | [info-beamer hosted API key](https://info-beamer.com/account) and start 11 | the `ib-shell` command like this. If you do not specify `API_KEY` and 12 | OAuth flow will be used and a refresh token is saved in `~/.ib-shell.token`: 13 | 14 | ``` 15 | $ ib-shell $device_id 16 | ``` 17 | 18 | with `$device_id` being the device you want to connect to. The program will open 19 | a new secure connection and log you into your device. `exit` or `Ctrl-D` will 20 | close the connection. 21 | -------------------------------------------------------------------------------- /rpc.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | 3 | ------------------------- 4 | -- Example usage from Lua 5 | ------------------------- 6 | local rpc = require "rpc" 7 | local py = rpc.create() 8 | 9 | -- Call 'bar' in connected Python client(s). 10 | py.bar(1,2,3) 11 | 12 | -- set up callable function 'fnord' 13 | py.register("fnord", function(a, b) 14 | print("fnord called with", a, b) 15 | end) 16 | 17 | 18 | ---------------------------- 19 | -- Example usage from Python 20 | -- (see also hosted.py) 21 | ---------------------------- 22 | from hosted import node 23 | lua = node.rpc() 24 | 25 | # Connect to info-beamer/Lua and run fnord function 26 | lua.fnord("a", "b") 27 | 28 | # Register callable function 'bar' 29 | @lua.call 30 | def bar(a, b, c): 31 | print("called from lua", a, b, c) 32 | 33 | ]] 34 | 35 | local function create(endpoints) 36 | endpoints = endpoints or {} 37 | local json = require "json" 38 | local clients = {} 39 | node.event("connect", function(client, prefix) 40 | if prefix == "rpc/python" then 41 | clients[client] = true 42 | end 43 | end) 44 | node.event("disconnect", function(client) 45 | clients[client] = nil 46 | end) 47 | node.event("input", function(line, client) 48 | if clients[client] then 49 | local call = json.decode(line) 50 | local fn = table.remove(call, 1) 51 | if endpoints[fn] then 52 | endpoints[fn](unpack(call)) 53 | end 54 | end 55 | end) 56 | local function send_call(call, ...) 57 | local args = {...} 58 | table.insert(args, 1, call) 59 | local pkt = json.encode(args) 60 | local sent = false 61 | for client, _ in pairs(clients) do 62 | sent = true 63 | node.client_write(client, pkt) 64 | end 65 | return sent 66 | end 67 | return setmetatable({ 68 | register = function(name, fn) 69 | endpoints[name] = fn 70 | end, 71 | }, { 72 | __index = function(t, call) 73 | return function(...) 74 | return send_call(call, ...) 
75 |             end
76 |         end
77 |     })
78 | end
79 | 
80 | return {
81 |     create = create,
82 | }
83 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # info-beamer hosted package SDK
2 | 
3 | In this repository you'll find useful files that help
4 | you develop info-beamer hosted packages.
5 | 
6 | ## dev-mode
7 | 
8 | [dev-mode](dev-mode) is a rapid development helper tool
9 | that allows you to instantly sync a package that you're
10 | developing to an info-beamer device.
11 | 
12 | ## hosted.py
13 | 
14 | [hosted.py](hosted.py) can be used as part of an
15 | [info-beamer hosted service](https://info-beamer.com/doc/package-services).
16 | Add this file to your package. Inside your Python-
17 | based service, import some of the utility objects
18 | provided. Example:
19 | 
20 | ```python
21 | from hosted import config, node, device, api
22 | ```
23 | 
24 | There are more functions that you can call from your
25 | service that allow you to, for example, turn the
26 | connected screen on/off or reboot the device:
27 | 
28 | ```python
29 | device.turn_screen_off()
30 | ```
31 | 
32 | You can access all configuration for the current node made
33 | by the user in the `config` object like this:
34 | 
35 | ```python
36 | print(config['timezone'])
37 | 
38 | # or
39 | 
40 | print(config.timezone)
41 | ```
42 | 
43 | You can automatically restart your service by calling
44 | 
45 | ```python
46 | config.restart_on_update()
47 | ```
48 | 
49 | once. If the system detects that the configuration
50 | file changed, your service will be terminated and
51 | restarted so it can use the new settings.
52 | 
53 | Additionally there are certain info-beamer provided
54 | APIs that you can call. The APIs are experimental
55 | right now and more will be added in the future.
56 | Stay tuned.
57 | 
58 | ```python
59 | print(api.list()) # gets list of APIs
60 | 
61 | # call 'weather' API for a location. Behaves like a `requests`
62 | # get request, except the url is set automatically. It returns
63 | # a dict with the API response.
64 | print(api.weather.get(params={'lat': 50, 'lon': 9}))
65 | ```
66 | 
67 | ## hosted.lua
68 | 
69 | [hosted.lua](hosted.lua) can be used in an info-beamer
70 | node that needs easier access to the configuration
71 | made by the user. In your `node.lua` file, call
72 | 
73 | ```lua
74 | util.init_hosted()
75 | ```
76 | 
77 | once at the top of your script. info-beamer will
78 | look for `hosted.lua`, `node.json` and `package.json`
79 | and will automatically parse `config.json` for you.
80 | 
81 | You can then access the configuration in the global
82 | `CONFIG` value:
83 | 
84 | ```lua
85 | print(CONFIG.timezone)
86 | ```
87 | 
88 | ## hosted.js
89 | 
90 | The mockup `hosted.js` allows you to develop
91 | [custom ui pages](https://info-beamer.com/doc/package-reference#customconfigurationinterface)
92 | without pushing your package to info-beamer hosted.
93 | Instead you can create a mockup environment to test
94 | your code locally. See the linked documentation for
95 | more information on how all of that works together.
96 | 
--------------------------------------------------------------------------------
/hosted.js:
--------------------------------------------------------------------------------
1 | /*
2 |  * info-beamer hosted.js Mockup for local development.
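 *
 * Usage note (summary of the mock behavior implemented below): define
 * window.MOCK_CONFIG, window.MOCK_ASSETS, window.MOCK_NODE_ASSETS and
 * window.MOCK_DEVICES in your local test page before including this file.
 * It then exposes window.ib / window.infobeamer with the mocked config,
 * an asset chooser mock (if window.MOCK_ASSET_CHOOSER_RESPONSE is set)
 * and a fake 'geo' API, so a custom configuration interface can be
 * developed without uploading the package.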
3 | * You can find the latest version of this file at: 4 | * 5 | * https://github.com/info-beamer/package-sdk 6 | * 7 | * BSD 2-Clause License 8 | * 9 | * Copyright (c) 2017-2019 Florian Wesch 10 | * All rights reserved. 11 | * 12 | * Redistribution and use in source and binary forms, with or without 13 | * modification, are permitted provided that the following conditions are met: 14 | * 15 | * * Redistributions of source code must retain the above copyright notice, this 16 | * list of conditions and the following disclaimer. 17 | * 18 | * * Redistributions in binary form must reproduce the above copyright notice, 19 | * this list of conditions and the following disclaimer in the documentation 20 | * and/or other materials provided with the distribution. 21 | * 22 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 25 | * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 28 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 29 | * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 | * 33 | */ 34 | (function() { 35 | 36 | var head = document.getElementsByTagName("head")[0]; 37 | var asset_root = "https://cdn.infobeamer.com/s/mock-use-latest/"; 38 | 39 | function setupResources(js, css) { 40 | for (var idx = 0; idx < js.length; idx++) { 41 | var script = document.createElement('script'); 42 | script.setAttribute("type","text/javascript"); 43 | script.setAttribute("src", asset_root + 'js/' + js[idx]); 44 | head.appendChild(script); 45 | } 46 | 47 | for (var idx = css.length-1; idx >= 0; idx--) { 48 | var link = document.createElement('link') 49 | link.setAttribute('rel', 'stylesheet') 50 | link.setAttribute('type', 'text/css') 51 | link.setAttribute('href', asset_root + 'css/' + css[idx]) 52 | head.insertBefore(link, head.firstChild); 53 | } 54 | } 55 | 56 | var style = document.createElement('style'); 57 | var rules = document.createTextNode( 58 | "body { width: 750px; margin: auto; }" 59 | ) 60 | style.type = 'text/css'; 61 | style.appendChild(rules); 62 | head.appendChild(style); 63 | 64 | if (window.MOCK_ASSETS == undefined) 65 | console.error("[MOCK HOSTED.JS] window.MOCK_ASSETS undefined"); 66 | if (window.MOCK_NODE_ASSETS == undefined) 67 | console.error("[MOCK HOSTED.JS] window.MOCK_NODE_ASSETS undefined"); 68 | if (window.MOCK_DEVICES == undefined) 69 | console.error("[MOCK HOSTED.JS] window.MOCK_DEVICES undefined"); 70 | if (window.MOCK_CONFIG == undefined) 71 | console.error("[MOCK HOSTED.JS] window.MOCK_CONFIG undefined"); 72 | 73 | var ib = { 74 | assets: window.MOCK_ASSETS, 75 | node_assets: window.MOCK_NODE_ASSETS, 76 | config: window.MOCK_CONFIG, 77 | devices: window.MOCK_DEVICES, 78 | doc_link_base: 'data:text/plain,This would have opened the package documentation for ', 79 | apis: { 80 | geo: { 81 | get: function(params) { 82 | if (!params.q) { 83 | console.error("no q parameter for weather query"); 84 | } 85 | return new Promise(function(resolve, reject) { 86 | 
setTimeout(function() { // simulate latency 87 | resolve({"hits":[ 88 | {"lat":49.00937,"lon":8.40444,"name":"Karlsruhe (Baden-W\u00fcrttemberg, Germany)"}, 89 | {"lat":48.09001,"lon":-100.62042,"name":"Karlsruhe (North Dakota, United States)"} 90 | ]}) 91 | }, 800); 92 | }) 93 | }, 94 | } 95 | } 96 | } 97 | 98 | ib.setDefaultStyle = function() { 99 | setupResources([], ['reset.css', 'bootstrap.css']) 100 | } 101 | 102 | var asset_chooser_response = window.MOCK_ASSET_CHOOSER_RESPONSE 103 | if (asset_chooser_response) { 104 | console.log("[MOCK HOSTED.JS] emulating asset chooser"); 105 | ib.assetChooser = function() { 106 | console.log("[MOCK HOSTED.JS] asset chooser mockup returns", asset_chooser_response); 107 | return new Promise(function(resolve) { 108 | resolve(asset_chooser_response); 109 | }) 110 | } 111 | } 112 | 113 | ib.setConfig = function(config) { 114 | var as_string = JSON.stringify(config); 115 | ib.config = JSON.parse(as_string); 116 | console.log("[MOCK HOSTED.JS] setConfig", as_string); 117 | } 118 | 119 | ib.getConfig = function(cb) { 120 | console.warn("[MOCK HOSTED.JS] using .getConfig is deprecated. Use .ready.then(...) instead"); 121 | cb(ib.config); 122 | } 123 | 124 | ib.getDocLink = function(name) { 125 | return ib.doc_link_base + name; 126 | } 127 | 128 | ib.onAssetUpdate = function(cb) { 129 | console.warn("[MOCK HOSTED.JS] onAssetUpdate is a no-op in the mock environment"); 130 | } 131 | 132 | ib.ready = new Promise(function(resolve) { 133 | console.log("[MOCK HOSTED.JS] ready"); 134 | resolve(ib.config); 135 | }) 136 | 137 | window.infobeamer = window.ib = ib; 138 | })(); 139 | -------------------------------------------------------------------------------- /dev-mode/README.md: -------------------------------------------------------------------------------- 1 | # A rapid package development tool for info-beamer hosted 2 | 3 | Learn more about this feature on https://info-beamer.com/lnk/dev-mode. 4 | 5 | The `dev-mode` tool allows you to rapidly develop new 6 | [packages](https://info-beamer.com/doc/building-packages). It works 7 | by syncing local changes directly to an info-beamer hosted device of 8 | your choice. That way you avoid the slower `git pull` approach 9 | and instantly see the effect of changes you make. 10 | 11 | You can rapidly build new packages or tweak existing packages 12 | that way: Just edit their node.lua file or assets and they get 13 | instantly synced to the device. 14 | 15 | ## Installation on Linux 16 | 17 | You'll have to install the python3-watchdog and python3-pathspec 18 | packages. On Ubuntu/Debian they should be available out-of-the-box: 19 | 20 | ``` 21 | apt install python3-watchdog python3-pathspec 22 | ``` 23 | 24 | ## Installation on MacOS 25 | 26 | If installing on Mac OSX please be sure to use python3 and 27 | install the python watchdog module. `pip install watchdog` 28 | should be enough. Also if you have problems, use a virtual 29 | env with python3 to run `dev-mode` or specify the full path 30 | to the interpreter on the command line `/usr/local/bin/python3 dev-mode`. 31 | 32 | ## Installation on Windows 33 | 34 | You need to install Python3 from [python.org](https://www.python.org/downloads/windows/). 35 | When installing, be sure to select the option to include python/pip in your 36 | `%PATH%`. After that, start `cmd.exe` as Administrator (enter `cmd.exe` 37 | in the start menu, right click on "Command Prompt" and select "Run as administrator") 38 | and run `pip install watchdog`. 
This will install the watchdog module
39 | used to monitor file changes.
40 | 
41 | ## Setting up your development machine
42 | 
43 | Fetch the `dev-mode` Python tool from this repository. It should work
44 | with python2 or python3 out of the box if you followed the instructions for
45 | your operating system above.
46 | 
47 | If you have any kind of firewall active you must allow incoming
48 | connections to TCP port 3333 to your development box. This is especially
49 | true on Windows where the firewall blocks incoming connections by
50 | default. When the firewall dialog opens, be sure to allow Python3.
51 | 
52 | ## Preparing your info-beamer device
53 | 
54 | The first thing you'll have to do is enable the rapid development
55 | mode on your info-beamer device. For that, place an empty file
56 | `/config/dev-mode` on the SD card of your device and restart it.
57 | The device is now ready.
58 | 
59 | **WARNING**: Never enable developer mode in an untrusted network
60 | or for production devices: developer mode allows unauthenticated
61 | access to your device and anyone with network access to the
62 | device can upload any content to it. This might result in a complete
63 | device takeover.
64 | 
65 | ## Syncing your first package
66 | 
67 | If you're working on an info-beamer package you should have all
68 | files for that package in or below a single directory. Run your
69 | `dev-mode` tool like this:
70 | 
71 | ```
72 | $ dev-mode 192.168.1.101 /path/to/package
73 | ```
74 | 
75 | where `192.168.1.101` must be replaced by the IP of the device you
76 | prepared in the above step. On OSX or Windows you might have to
77 | explicitly run the tool using Python3 like this instead:
78 | 
79 | ```
80 | c:\> python3 /path/to/dev-mode 192.168.1.101 /path/to/package
81 | ```
82 | 
83 | `dev-mode` will contact your info-beamer device and instruct it
84 | to fetch the package files from your dev machine, just like
85 | it usually fetches them from the info-beamer hosted file
86 | storage servers.
87 | 
88 | If you don't have a package ready, you can use the included
89 | minimal example in `example/`. Start `dev-mode` like this:
90 | 
91 | ```
92 | $ dev-mode 192.168.1.101 example/
93 | ```
94 | 
95 | Then open the node.lua file in another terminal, edit the
96 | `gl.clear` values and save the file. Your changes should
97 | be visible immediately on the device.
98 | 
99 | The syncing process on the device directly fetches the files and
100 | doesn't do any of the syntax or semantic checks that info-beamer
101 | hosted usually enforces when importing or updating packages on
102 | info-beamer.com. So development is less strict and it makes
103 | sense to push your code to info-beamer.com once you've completed
104 | a task using the rapid development mode.
105 | 
106 | Usually packages you develop don't include a `config.json` file.
107 | When using the `dev-mode` tool, you'll have to write your own
108 | file. You might even include it in the package source code
109 | itself (unless it contains any secrets of course), as `config.json`
110 | files are ignored by the info-beamer.com import process.
111 | 
112 | It is always recommended to `ssh` into your device to read the
113 | info-beamer log output. Have a look at the
114 | [documentation](https://info-beamer.com/doc/debugging) to learn
115 | how to do that.
116 | 
117 | ## FAQ
118 | 
119 | ### Why is "dev-mode" blinking on my device?
120 | 121 | This is there to remind you that your device is configured for 122 | developer mode and to prevent the device from accidentally used 123 | in production. 124 | 125 | ### How can I create a config.json similar to the real one generated by info-beamer? 126 | 127 | Have a look at the [package reference](https://info-beamer.com/doc/package-reference). 128 | All option types and their expected config.json output are documented. If you don't 129 | want to generate a `config.json` from scratch, the easiest way to get one is 130 | to push your package to info-beamer.com, install it on your device and then 131 | take a look at the generated `config.json` file. 132 | -------------------------------------------------------------------------------- /hosted.lua: -------------------------------------------------------------------------------- 1 | --[[ 2 | 3 | Part of info-beamer hosted. You can find the latest version 4 | of this file at: 5 | 6 | https://github.com/info-beamer/package-sdk 7 | 8 | Copyright (c) 2014,2015,2016,2017 Florian Wesch 9 | All rights reserved. 10 | 11 | Redistribution and use in source and binary forms, with or without 12 | modification, are permitted provided that the following conditions are 13 | met: 14 | 15 | Redistributions of source code must retain the above copyright 16 | notice, this list of conditions and the following disclaimer. 17 | 18 | Redistributions in binary form must reproduce the above copyright 19 | notice, this list of conditions and the following disclaimer in the 20 | documentation and/or other materials provided with the 21 | distribution. 22 | 23 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 24 | IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 25 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 26 | PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 27 | CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 28 | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 29 | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 30 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 31 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 32 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 33 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 34 | 35 | ]]-- 36 | 37 | local resource_types = { 38 | ["image"] = function(value) 39 | local surface 40 | local image = { 41 | asset_name = value.asset_name, 42 | filename = value.filename, 43 | type = value.type, 44 | } 45 | 46 | function image.ensure_loaded() 47 | if not surface then 48 | surface = resource.load_image(value.asset_name) 49 | end 50 | return surface 51 | end 52 | function image.load() 53 | image.ensure_loaded() 54 | local state = surface:state() 55 | return state ~= "loading" 56 | end 57 | function image.get_surface() 58 | return image.ensure_loaded() 59 | end 60 | function image.draw(...) 61 | image.ensure_loaded():draw(...) 
62 | end 63 | function image.unload() 64 | if surface then 65 | surface:dispose() 66 | surface = nil 67 | end 68 | end 69 | function image.get_config() 70 | return image 71 | end 72 | return image 73 | end; 74 | ["video"] = function(value) 75 | local surface 76 | local video = { 77 | asset_name = value.asset_name, 78 | filename = value.filename, 79 | type = value.type, 80 | } 81 | function video.ensure_loaded(opt) 82 | if not surface then 83 | surface = util.videoplayer(value.asset_name, opt) 84 | end 85 | return surface 86 | end 87 | function video.load(opt) 88 | video.ensure_loaded(opt) 89 | local state = surface:state() 90 | return state ~= "loading" 91 | end 92 | function video.get_surface(opt) 93 | return video.ensure_loaded(opt) 94 | end 95 | function video.draw(...) 96 | video.ensure_loaded():draw(...) 97 | end 98 | function video.unload() 99 | if surface then 100 | surface:dispose() 101 | surface = nil 102 | end 103 | end 104 | function video.get_config() 105 | return video 106 | end 107 | return video 108 | end; 109 | ["child"] = function(value) 110 | local surface 111 | local child = { 112 | asset_name = value.asset_name, 113 | filename = value.filename, 114 | type = value.type, 115 | } 116 | function child.ensure_loaded() 117 | if surface then 118 | surface:dispose() 119 | end 120 | surface = resource.render_child(value.asset_name) 121 | return surface 122 | end 123 | function child.load() 124 | return true 125 | end 126 | function child.get_surface() 127 | return child.ensure_loaded() 128 | end 129 | function child.draw(...) 130 | child.ensure_loaded():draw(...) 131 | end 132 | function child.unload() 133 | if surface then 134 | surface:dispose() 135 | surface = nil 136 | end 137 | end 138 | function child.get_config() 139 | return child 140 | end 141 | return child 142 | end; 143 | ["json"] = function(value) 144 | return require("json").decode(value) 145 | end; 146 | } 147 | 148 | local types = { 149 | ["date"] = function(value) 150 | return value 151 | end; 152 | ["json"] = function(value) 153 | return value 154 | end; 155 | ["text"] = function(value) 156 | return value 157 | end; 158 | ["string"] = function(value) 159 | return value 160 | end; 161 | ["integer"] = function(value) 162 | return value 163 | end; 164 | ["select"] = function(value) 165 | return value 166 | end; 167 | ["device"] = function(value) 168 | return value 169 | end; 170 | ["boolean"] = function(value) 171 | return value 172 | end; 173 | ["duration"] = function(value) 174 | return value 175 | end; 176 | ["custom"] = function(value) 177 | return value 178 | end; 179 | ["color"] = function(value) 180 | local color = {} 181 | color.r = value.r 182 | color.g = value.g 183 | color.b = value.b 184 | color.a = value.a 185 | color.rgba_table = {color.r, color.g, color.b, color.a} 186 | color.rgba = function() 187 | return color.r, color.g, color.b, color.a 188 | end 189 | color.rgb_with_a = function(a) 190 | return color.r, color.g, color.b, a 191 | end 192 | color.clear = function() 193 | gl.clear(color.r, color.g, color.b, color.a) 194 | end 195 | return color 196 | end; 197 | ["resource"] = function(value) 198 | return resource_types[value.type](value) 199 | end; 200 | ["font"] = function(value) 201 | return resource.load_font(value.asset_name) 202 | end; 203 | } 204 | 205 | local function parse_config(options, config) 206 | local function parse_recursive(options, config, target) 207 | for _, option in ipairs(options) do 208 | local name = option.name 209 | if name then 210 | if option.type == "list" then 211 | 
local list = {} 212 | for _, child_config in ipairs(config[name]) do 213 | local child = {} 214 | parse_recursive(option.items, child_config, child) 215 | list[#list + 1] = child 216 | end 217 | target[name] = list 218 | else 219 | target[name] = types[option.type](config[name]) 220 | end 221 | end 222 | end 223 | end 224 | local current_config = {} 225 | parse_recursive(options, config, current_config) 226 | return current_config 227 | end 228 | 229 | return { 230 | parse_config = parse_config; 231 | } 232 | -------------------------------------------------------------------------------- /ib-shell/ib-shell: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import os, sys, subprocess, json, threading, tty, termios, select, time, argparse, textwrap 3 | from binascii import hexlify 4 | try: 5 | from urllib.parse import urlencode 6 | except ImportError: 7 | from urllib import urlencode 8 | try: 9 | input = raw_input 10 | except NameError: 11 | pass 12 | import websocket # needs apt-get install python-websocket on ubuntu 13 | import requests # needs apt-get install python-requests on ubuntu 14 | 15 | class ConnectionError(Exception): 16 | pass 17 | 18 | def connect(api_key, device_id, mode, invalidate_api_key): 19 | r = requests.get( 20 | url = 'https://info-beamer.com/api/v1/device/%d' % device_id, 21 | auth = ('', api_key), 22 | ) 23 | if r.status_code != 200: 24 | print(r.reason) 25 | return 26 | try: 27 | device = r.json() 28 | except: 29 | print(r.content) 30 | return 31 | print("------------------------------") 32 | print("Device %d (Serial %s)" % (device_id, device['serial'])) 33 | print(u"%s - %s" % (device['description'], device['location'])) 34 | print("------------------------------") 35 | 36 | r = requests.post( 37 | url = 'https://info-beamer.com/api/v1/device/%d/session' % device_id, 38 | auth = ('', api_key), 39 | data = {'mode': mode}, 40 | ) 41 | r.raise_for_status() 42 | terminal = r.json() 43 | print("Connecting in %s mode" % (terminal['mode'],)) 44 | ws = websocket.create_connection( 45 | url = terminal['endpoint'], 46 | timeout = 10, 47 | ) 48 | 49 | def recv(): 50 | type, data = ws.recv_data() 51 | if type != websocket.ABNF.OPCODE_TEXT: 52 | raise ConnectionError("unexpected packet") 53 | return json.loads(data) 54 | 55 | def send(**pkt): 56 | ws.send(json.dumps(pkt)) 57 | 58 | print("Waiting for device") 59 | pkt = recv() 60 | if pkt['event'] != 'connected': 61 | raise ConnectionError("didn't get connection") 62 | 63 | size = subprocess.check_output(['stty', 'size']) 64 | rows, cols = [int(v) for v in size.strip().split()] 65 | send(event='setup', rows=rows, cols=cols) 66 | 67 | ws.settimeout(None) 68 | forwarding = True 69 | 70 | def forward_stdin(): 71 | fd = sys.stdin.fileno() 72 | old_settings = termios.tcgetattr(fd) 73 | tty.setraw(sys.stdin) 74 | stdin = os.fdopen(fd, 'rb', 0) 75 | last_keep_alive = time.time() 76 | while forwarding: 77 | s = select.select([stdin], [], [], 0.5)[0] 78 | if not s: 79 | if time.time() - last_keep_alive > 30: 80 | send(event='keepalive') 81 | last_keep_alive = time.time() 82 | continue 83 | inp = stdin.read(1) 84 | if not inp: 85 | break 86 | send(event='stdin', data=inp.decode('utf8', 'ignore')) 87 | if inp == '\x04': 88 | break 89 | termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) 90 | stdin_sender = threading.Thread(target=forward_stdin) 91 | stdin_sender.start() 92 | 93 | while 1: 94 | try: 95 | pkt = recv() 96 | except: 97 | break 98 | if pkt['event'] == 'data': 99 | 
sys.stdout.write(pkt['data']) 100 | sys.stdout.flush() 101 | elif pkt['event'] == 'eof': 102 | break 103 | forwarding = False 104 | stdin_sender.join() 105 | ws.close() 106 | if invalidate_api_key: 107 | r = requests.get( 108 | url = 'https://info-beamer.com/api/v1/session/destroy', 109 | auth = ('', api_key), 110 | ) 111 | print("Session exited") 112 | 113 | class OAuth(object): 114 | def __init__(self, client_id, scope, state_file, 115 | authorize_url="https://info-beamer.com/oauth/authorize", 116 | token_url="https://info-beamer.com/oauth/token", 117 | ): 118 | self._client_id = client_id 119 | self._authorize_url = authorize_url 120 | self._token_url = token_url 121 | self._scope = scope 122 | self._state_file = state_file 123 | self.load_refresh_token() 124 | 125 | def load_refresh_token(self): 126 | try: 127 | with open(self._state_file) as f: 128 | self._refresh_token = f.read().strip() 129 | except: 130 | self._refresh_token = None 131 | 132 | def save_refresh_token(self): 133 | with open(self._state_file, "wb") as f: 134 | f.write(self._refresh_token.encode('utf8')) 135 | 136 | def wipe_refresh_token(self): 137 | try: 138 | os.unlink(self._state_file) 139 | except: 140 | pass 141 | 142 | def oauth_flow(self): 143 | import webbrowser, base64, hashlib 144 | code_verifier = hexlify(os.urandom(16)) 145 | code_challenge = base64.urlsafe_b64encode(hashlib.sha256(code_verifier).digest()).strip(b"=") 146 | redirect_target = "%s?%s" % (self._authorize_url, urlencode(dict( 147 | response_type = 'code', 148 | client_id = self._client_id, 149 | state = hexlify(os.urandom(16)), # not used in oob 150 | scope = self._scope, 151 | redirect_uri = 'oob', 152 | code_challenge = code_challenge, 153 | code_challenge_method = 'S256', 154 | ))) 155 | if input("Open a browser for you to start the OAuth authorization? [y/N] > ") == "y": 156 | webbrowser.open(redirect_target) 157 | code = input("Using the opened browser, grant access to ib-shell, then paste the auth code here: > ").strip() 158 | else: 159 | print("Please manually open a browser and visit\n\n %s\n" % (redirect_target,)) 160 | code = input("Grant access to ib-shell, then paste the auth code here: > ").strip() 161 | r = requests.post(self._token_url, data=dict( 162 | grant_type = 'authorization_code', 163 | code = code, 164 | redirect_uri = 'oob', 165 | client_id = self._client_id, 166 | code_verifier = code_verifier, 167 | )) 168 | if r.status_code == 403: 169 | raise Exception("Auth code invalid. Please try again") 170 | r.raise_for_status() 171 | auth = r.json() 172 | self._refresh_token = auth['refresh_token'] 173 | self.save_refresh_token() 174 | print("Using api key from authorization flow") 175 | return auth['access_token'] 176 | 177 | def fetch_access_token(self): 178 | if not self._refresh_token: 179 | return self.oauth_flow() 180 | r = requests.post(self._token_url, data=dict( 181 | grant_type = 'refresh_token', 182 | refresh_token = self._refresh_token, 183 | )) 184 | if r.status_code == 403: 185 | self.wipe_refresh_token() 186 | raise Exception("Refresh token invalid. Please restart ib-shell to reauthorize") 187 | r.raise_for_status() 188 | auth = r.json() 189 | print("Using api key from refresh token request") 190 | return auth['access_token'] 191 | 192 | if __name__ == "__main__": 193 | parser = argparse.ArgumentParser(description=textwrap.dedent(""" 194 | Allows you terminal access to your info-beamer device. 195 | Uses the OAuth2 flow to request access to your account. 
196 | Alternatively you can set the environment variable 197 | API_KEY if you want to use a fixed API_KEY. You can find 198 | your API keys on https://info-beamer.com/account 199 | """)) 200 | parser.add_argument("device_id", type=int, help='device ID to connect to') 201 | group = parser.add_mutually_exclusive_group() 202 | group.add_argument("--root", dest='mode', action='store_const', const='root', help='request full root access') 203 | group.add_argument("--viewer", dest='mode', action='store_const', const='viewer', help="request limited access (default)") 204 | args = parser.parse_args() 205 | invalidate_api_key = False 206 | api_key = os.getenv('API_KEY') 207 | if api_key is not None: 208 | print("Using api key from API_KEY environment variable") 209 | else: 210 | try: 211 | oauth = OAuth( 212 | client_id = "b63449b7c0987c6b29767cde73a4e79b", 213 | scope = 'device:read device:remote', 214 | state_file = os.path.join(os.path.expanduser("~"), ".ib-shell.token"), 215 | ) 216 | api_key = oauth.fetch_access_token() 217 | invalidate_api_key = True 218 | except Exception as err: 219 | print("Cannot fetch access token: %s" % (err,)) 220 | sys.exit(1) 221 | 222 | connect(api_key, args.device_id, args.mode or 'viewer', invalidate_api_key) 223 | -------------------------------------------------------------------------------- /p2p-helpers/p2pevt.py: -------------------------------------------------------------------------------- 1 | # 2 | # Part of info-beamer hosted. You can find the latest version 3 | # of this file at: 4 | # 5 | # https://github.com/info-beamer/package-sdk 6 | # 7 | # Copyright (c) 2021 Florian Wesch 8 | # All rights reserved. 9 | # 10 | # Redistribution and use in source and binary forms, with or without 11 | # modification, are permitted provided that the following conditions are 12 | # met: 13 | # 14 | # Redistributions of source code must retain the above copyright 15 | # notice, this list of conditions and the following disclaimer. 16 | # 17 | # Redistributions in binary form must reproduce the above copyright 18 | # notice, this list of conditions and the following disclaimer in the 19 | # documentation and/or other materials provided with the 20 | # distribution. 21 | # 22 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 23 | # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 24 | # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 25 | # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 26 | # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 27 | # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 28 | # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 29 | # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 30 | # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 31 | # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 32 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
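#
# Overview (descriptive summary of the code below): OrderedEventGroup builds on
# p2plib.PeerGroup. The elected leader schedules events for a future timestamp
# and repeatedly broadcasts them to the group; followers de-duplicate events by
# id, compensate for the leader's clock offset and yield each event from
# events() once its local delivery time is reached. GroupRPC additionally
# forwards every delivered event as an RPC call into the Lua node via hosted's
# node.rpc().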
33 | 34 | VERSION = "1.0" 35 | 36 | import sys, threading, heapq, os, itertools 37 | from binascii import hexlify 38 | from collections import namedtuple 39 | from p2plib import PeerGroup, monotonic_time 40 | 41 | def log(msg, name='p2pevt.py'): 42 | print >>sys.stderr, "[{}] {}".format(name, msg) 43 | 44 | Event = namedtuple("Event", "timestamp id data") 45 | 46 | class EventQueue(object): 47 | def __init__(self): 48 | self._queue = [] 49 | self._ids = set() 50 | 51 | def add(self, event): 52 | if event.id in self._ids: 53 | return False 54 | heapq.heappush(self._queue, event) 55 | self._ids.add(event.id) 56 | return True 57 | 58 | def has_event(self, event_id): 59 | return event_id in self._ids 60 | 61 | def discard_older(self, threshold_timestamp): 62 | while self._queue: 63 | oldest_event = self._queue[0] 64 | if oldest_event.timestamp >= threshold_timestamp: 65 | break 66 | heapq.heappop(self._queue) 67 | self._ids.remove(oldest_event.id) 68 | 69 | def pop(self): 70 | if not self._queue: 71 | return 72 | oldest_event = self._queue[0] 73 | heapq.heappop(self._queue) 74 | self._ids.remove(oldest_event.id) 75 | 76 | def next(self): 77 | if not self._queue: 78 | return None 79 | return self._queue[0] 80 | 81 | class OrderedEventGroup(PeerGroup): 82 | def setup_peer(self): 83 | self._leader_running = False 84 | 85 | # on leader 86 | self._send_interval = 0.1 87 | self._leader_events = [] 88 | self._leader_events_lock = threading.Lock() 89 | self._leader_has_event = threading.Condition(self._leader_events_lock) 90 | self._leader_id = hexlify(os.urandom(8)) 91 | self._leader_event_seq = itertools.count() 92 | 93 | # on peers 94 | self._events = EventQueue() 95 | self._handled_events = EventQueue() 96 | self._events_lock = threading.Lock() 97 | self._has_event = threading.Condition(self._events_lock) 98 | self._last_leader_id = None 99 | self._leader_time_offset = None 100 | 101 | def _reset_peer(self): 102 | self._events = EventQueue() 103 | self._handled_events = EventQueue() 104 | self._leader_time_offset = None 105 | 106 | def _leader_offset(self, leader_time, our_time, ping): 107 | true_leader_time = leader_time + ping 108 | time_offset = true_leader_time - our_time 109 | if self._leader_time_offset is None: 110 | self._leader_time_offset = time_offset 111 | else: 112 | self._leader_time_offset = 0.95 * self._leader_time_offset + 0.05 * time_offset 113 | return self._leader_time_offset 114 | 115 | def leader_thread(self): 116 | while self._leader_running: 117 | with self._leader_events_lock: 118 | self._leader_has_event.wait(timeout=self._send_interval) 119 | now = self.time() 120 | while self._leader_events and self._leader_events[0].timestamp < now - 1: 121 | self._leader_events.pop(0) 122 | # Find slice of packets to sync to peers 123 | packet_events, idx = [], 0 124 | for idx, event in enumerate(self._leader_events): 125 | if event.timestamp > now + 1: 126 | packet_events = self._leader_events[:idx] 127 | else: 128 | packet_events = self._leader_events 129 | self.broadcast_to_all( 130 | lid = self._leader_id, 131 | ts = now, 132 | e = packet_events, 133 | ) 134 | 135 | def on_leader_message(self, message, peer_info): 136 | # import json; print >>sys.stderr, json.dumps(message), peer_info 137 | leader_id = (peer_info.device_id, message['lid']) 138 | with self._events_lock: 139 | now = self.time() 140 | if leader_id != self._last_leader_id: 141 | log("new leader detected. 
wiping queue") 142 | self._reset_peer() 143 | self._last_leader_id = leader_id 144 | leader_offset = self._leader_offset(message['ts'], now, peer_info.ping) 145 | new_events = False 146 | for event in message['e']: 147 | event = Event(*event) 148 | if self._handled_events.has_event(event.id): 149 | continue 150 | new_events = new_events or self._events.add(Event( 151 | timestamp = event.timestamp - leader_offset, 152 | id = event.id, 153 | data = event.data, 154 | )) 155 | if new_events: 156 | self._has_event.notify() 157 | 158 | def time(self): 159 | return monotonic_time() 160 | 161 | def send_event(self, timestamp, event): 162 | if not self._leader_running: 163 | return 164 | with self._leader_events_lock: 165 | self._leader_events.append(Event(timestamp, next(self._leader_event_seq), event)) 166 | self._leader_events.sort() 167 | self._leader_has_event.notify() 168 | 169 | def events(self): 170 | wait = 10 171 | while 1: 172 | with self._events_lock: 173 | self._has_event.wait(wait) 174 | while 1: 175 | next_event = self._events.next() 176 | if not next_event: 177 | wait = 10 178 | break 179 | now = self.time() 180 | wait = next_event.timestamp - now 181 | if wait > 0: 182 | break 183 | try: 184 | yield now - next_event.timestamp, next_event.data 185 | finally: 186 | self._handled_events.add(next_event) 187 | self._handled_events.discard_older(now - 3) 188 | self._events.pop() 189 | 190 | def promote_leader(self, peer_info): 191 | log("Now a leader: %r" % (peer_info,)) 192 | self._leader_running = True 193 | self._leader_thread = threading.Thread(target=self.leader_thread) 194 | self._leader_thread.daemon = True 195 | self._leader_thread.start() 196 | 197 | def demote_leader(self): 198 | log("No longer a leader") 199 | self._leader_running = False 200 | self._leader_thread.join() 201 | 202 | # def sender(group): 203 | # for i in xrange(10000): 204 | # group.send_event(group.time() + 0.5, dict( 205 | # fn = 'load', 206 | # args = ["%d.jpg" % (i%2+1)], 207 | # )) 208 | # group.send_event(group.time() + 1.0, dict( 209 | # fn = 'switch', 210 | # args = [], 211 | # )) 212 | # time.sleep(2) 213 | # 214 | # if __name__ == "__main__": 215 | # group = OrderedEventGroup() 216 | # 217 | # thread = threading.Thread(target=sender, args=(group,)) 218 | # thread.daemon = True 219 | # thread.start() 220 | # 221 | # lua = node.rpc() 222 | # 223 | # for delay, event in group.events(): 224 | # print >>sys.stderr, delay, event 225 | # getattr(lua, event['fn'])(*event['args']) 226 | 227 | class GroupRPC(OrderedEventGroup): 228 | def setup_peer(self): 229 | super(GroupRPC, self).setup_peer() 230 | 231 | from hosted import node 232 | self._lua = node.rpc() 233 | 234 | thread = threading.Thread(target=self.lua_forwarder) 235 | thread.daemon = True 236 | thread.start() 237 | 238 | def lua_forwarder(self): 239 | for delay, event in self.events(): 240 | log("event delivery time offset is %f" % (delay,)) 241 | getattr(self._lua, event['fn'])(*event['args']) 242 | 243 | def call(self, offset, fn, *args): 244 | if offset < 0.1: 245 | log('calls with offset < 0.1 are usually not delivered in time') 246 | self.send_event(self.time() + offset, dict( 247 | fn = fn, 248 | args = args, 249 | )) 250 | -------------------------------------------------------------------------------- /dev-mode/dev-mode: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Part of info-beamer hosted. 
You can find the latest version 4 | # of this file at: 5 | # 6 | # https://github.com/info-beamer/package-sdk/blob/master/dev-mode/dev-mode 7 | # 8 | # Copyright (c) 2018 Florian Wesch 9 | # All rights reserved. 10 | # 11 | # Redistribution and use in source and binary forms, with or without 12 | # modification, are permitted provided that the following conditions are 13 | # met: 14 | # 15 | # Redistributions of source code must retain the above copyright 16 | # notice, this list of conditions and the following disclaimer. 17 | # 18 | # Redistributions in binary form must reproduce the above copyright 19 | # notice, this list of conditions and the following disclaimer in the 20 | # documentation and/or other materials provided with the 21 | # distribution. 22 | # 23 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 24 | # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 25 | # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 26 | # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 27 | # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 28 | # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 29 | # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 30 | # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 31 | # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 32 | # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 33 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 34 | 35 | from __future__ import print_function 36 | import os 37 | import time 38 | import gzip 39 | import sys 40 | import hashlib 41 | import fnmatch 42 | import requests 43 | import threading 44 | import textwrap 45 | 46 | DEVMODE_VERSION = 1 47 | DEVMODE_PORT = 3333 # for both client and server, so don't change it and expect it to work 48 | 49 | def fatal(msg): 50 | print("ERROR: %s" % ("\r\n".join(textwrap.wrap(msg, width=80)))) 51 | sys.exit(1) 52 | 53 | def log(msg): 54 | print("[dev-mode] %s" % (msg,)) 55 | 56 | try: 57 | from cStringIO import StringIO as MemoryIO 58 | except ImportError: 59 | from io import BytesIO as MemoryIO 60 | 61 | try: 62 | from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer 63 | except ImportError: 64 | from http.server import BaseHTTPRequestHandler, HTTPServer 65 | 66 | try: 67 | from watchdog.observers import Observer 68 | from watchdog.events import FileSystemEventHandler 69 | except ImportError: 70 | fatal("dev-mode needs a way to monitor file system changes. " 71 | "You need to install the 'watchdog' module. " 72 | "On Ubuntu/Debian, try `apt install python3-watchdog`. " 73 | "On Windows/OSX, try `pip install watchdog`.") 74 | 75 | class SyncError(Exception): 76 | pass 77 | 78 | class Contents(object): 79 | def __init__(self): 80 | self._files = {} 81 | self._last_change = 0 82 | self._last_sync = 0 83 | self._lock = threading.RLock() 84 | self._ignorer = None 85 | 86 | def set_ignore_test(self, ignorer): 87 | self._ignorer = ignorer 88 | 89 | def add_file(self, path): 90 | if not os.path.isfile(path): 91 | log("ignoring added path %s: not a file" % (path,)) 92 | return 93 | 94 | if path.startswith('.git'): 95 | # files in .git don't start with ., so the 96 | # next guard won't work. 
97 | return 98 | 99 | basename, filename = os.path.split(path) 100 | if filename.startswith("."): 101 | log("ignoring file %s: starts with dot" % (path,)) 102 | return 103 | 104 | if filename != "config.json" and self._ignorer and self._ignorer(path): 105 | return 106 | 107 | try: 108 | h, size = hashlib.md5(), 0 109 | with open(path, "rb") as f: 110 | while 1: 111 | chunk = f.read(16384) 112 | if not chunk: 113 | break 114 | h.update(chunk) 115 | size += len(chunk) 116 | with self._lock: 117 | self._files[path] = h.hexdigest(), size 118 | self._last_change = time.time() 119 | log("added file %s: %s %s" % (path, h.hexdigest(), size)) 120 | except Exception as err: 121 | log("cannot add file %s: %s" % (path, err)) 122 | 123 | def del_file(self, path): 124 | with self._lock: 125 | if self._files.pop(path, None): 126 | log("removed file %s" % (path,)) 127 | self._last_change = time.time() 128 | 129 | def get_sync_file(self, my_ip): 130 | out = MemoryIO() 131 | gzipped = gzip.GzipFile(fileobj=out, mode="wb") 132 | for path, (md5, size) in sorted(self._files.items()): 133 | gzipped.write(("sync %s %s %d http://%s:%d/%s\n" % ( 134 | path, md5, size, my_ip, DEVMODE_PORT, md5 135 | )).encode("utf-8")) 136 | gzipped.close() 137 | return out.getvalue() 138 | 139 | def get_path(self, query_md5): 140 | # brute force, but whatever.. 141 | for path, (md5, size) in self._files.items(): 142 | if md5 == query_md5: 143 | return path 144 | 145 | def send_sync_file(self, my_ip, info_beamer_host): 146 | try: 147 | with self._lock: 148 | self._last_sync = time.time() 149 | sync_file = self.get_sync_file(my_ip) 150 | r = requests.post( 151 | url = "http://%s:%d/api/sync-file" % (info_beamer_host, DEVMODE_PORT), 152 | timeout = 5, 153 | data = dict( 154 | sync_file = sync_file, 155 | ) 156 | ) 157 | r.raise_for_status() 158 | resp = r.json() 159 | except Exception as err: 160 | raise SyncError(err) 161 | if not resp['ok']: 162 | raise SyncError("sync not successful") 163 | log("=> device is now syncing") 164 | 165 | def needs_sync(self, max_age): 166 | now = time.time() 167 | with self._lock: 168 | return self._last_change > self._last_sync and \ 169 | now > self._last_change + max_age 170 | 171 | contents = Contents() 172 | 173 | class EventHandler(FileSystemEventHandler): 174 | def path_from_event(self, event): 175 | return os.path.relpath(event.src_path) 176 | 177 | def on_modified(self, event): 178 | contents.add_file(self.path_from_event(event)) 179 | 180 | def on_created(self, event): 181 | contents.add_file(self.path_from_event(event)) 182 | 183 | def on_moved(self, event): 184 | contents.del_file(os.path.relpath(event.src_path)) 185 | contents.add_file(os.path.relpath(event.dest_path)) 186 | 187 | def on_deleted(self, event): 188 | contents.del_file(self.path_from_event(event)) 189 | 190 | class DevModeHandler(BaseHTTPRequestHandler): 191 | def do_GET(self): 192 | path = contents.get_path(self.path.lstrip("/")) 193 | if not path: 194 | return self.send_error(404, "Not found") 195 | 196 | try: 197 | f = open(path, "rb") 198 | except IOError as err: 199 | return self.send_error(404, "Not found") 200 | 201 | self.send_response(200) 202 | self.end_headers() 203 | while 1: 204 | chunk = f.read(16384) 205 | if not chunk: 206 | break 207 | self.wfile.write(chunk) 208 | 209 | f.close() 210 | 211 | def start_devmode_server(): 212 | def devmode_server(): 213 | server = HTTPServer(('0.0.0.0', DEVMODE_PORT), DevModeHandler) 214 | server.serve_forever() 215 | thread = threading.Thread(target=devmode_server) 216 | 
thread.daemon = True 217 | thread.start() 218 | 219 | def start_fswatcher(): 220 | def fswatcher(): 221 | handler = EventHandler() 222 | obs = Observer() 223 | obs.schedule(handler, '.', recursive=True) 224 | obs.start() 225 | thread = threading.Thread(target=fswatcher) 226 | thread.daemon = True 227 | thread.start() 228 | 229 | def main(): 230 | if len(sys.argv) != 3: 231 | fatal("command usage: %s " % sys.argv[0]) 232 | 233 | info_beamer_host, root_path = sys.argv[1], sys.argv[2] 234 | 235 | os.chdir(root_path) 236 | 237 | log("trying to contact info-beamer device @ %s" % (info_beamer_host,)) 238 | try: 239 | r = requests.get( 240 | url = "http://%s:%d/api/hello" % (info_beamer_host, DEVMODE_PORT), 241 | allow_redirects = False, 242 | timeout = 5 243 | ) 244 | r.raise_for_status() 245 | hello = r.json() 246 | except Exception as err: 247 | fatal("Cannot contact info-beamer device at %s. Make sure that devmode is " 248 | "activated (see https://info-beamer.com/lnk/dev-mode) and that you " 249 | "specified the correct device IP. You can find the device IP on its " 250 | "details page on the dashboard." % (info_beamer_host,)) 251 | 252 | if not hello['ok']: 253 | fatal("unexpected response from %s" % (info_beamer_host,)) 254 | 255 | if hello['version'] != DEVMODE_VERSION: 256 | fatal("remote info-beamer device is running a different devmode version %d. " 257 | "We only speak %d. Consider upgrading at https://github.com/info-beamer/tools" % (hello['version'], DEVMODE_VERSION)) 258 | 259 | my_ip = hello['your_ip'] 260 | log("device is ready for dev mode. device detail page is:\n\n\t%s\n" % (hello['device_url'],)) 261 | 262 | if os.path.exists(".gitignore"): 263 | try: 264 | import pathspec 265 | with open('.gitignore', 'r') as f: 266 | spec = pathspec.PathSpec.from_lines('gitwildmatch', f) 267 | contents.set_ignore_test(spec.match_file) 268 | except ImportError: 269 | log("warning: consider install 'pathspec' module for precise .gitignore matching") 270 | with open(".gitignore", "r") as f: 271 | ignores = [] 272 | for line in f: 273 | line = line.strip() 274 | if line.endswith("/"): 275 | line += "*" 276 | ignores.append(line) 277 | def test_ignore(path): 278 | for ignore in ignores: 279 | if fnmatch.fnmatch(path, ignore): 280 | return True 281 | return False 282 | contents.set_ignore_test(test_ignore) 283 | 284 | for root, dirs, files in os.walk('.'): 285 | for fname in files: 286 | path = os.path.normpath(os.path.join(root, fname)) 287 | if os.path.isfile(path): 288 | contents.add_file(path) 289 | for dir in dirs: 290 | if dir.startswith("."): 291 | dirs.remove(dir) 292 | 293 | start_devmode_server() 294 | start_fswatcher() 295 | 296 | try: 297 | while 1: 298 | # I'm too lazy to make this not poll based. 
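            # Poll roughly ten times per second: once the newest change is both
            # newer than the last sync and has settled for max_age seconds,
            # push an updated sync file to the device.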
299 |             if contents.needs_sync(max_age=0.25):
300 |                 try:
301 |                     contents.send_sync_file(my_ip, info_beamer_host)
302 |                 except SyncError as err:
303 |                     log("cannot sync: %s" % (err, ))
304 |             time.sleep(0.1)
305 |     except KeyboardInterrupt:
306 |         print()
307 |         log("restoring assigned device setup")
308 |         try:
309 |             requests.post(
310 |                 url = "http://%s:%d/api/restore" % (info_beamer_host, DEVMODE_PORT),
311 |                 timeout = 1
312 |             )
313 |         except:
314 |             pass
315 |     except:
316 |         raise
317 | 
318 | if __name__ == "__main__":
319 |     main()
320 | 
--------------------------------------------------------------------------------
/p2p-helpers/README.md:
--------------------------------------------------------------------------------
1 | # Peer-to-Peer (P2P) support helpers
2 | 
3 | The `p2plib.py` library allows easy collaboration of [package services](https://info-beamer.com/doc/package-services)
4 | running on multiple co-located devices. It uses the [Peer-to-Peer feature](https://info-beamer.com/doc/device-configuration#p2p)
5 | for device detection, automatic leader selection and secure communication setup.
6 | 
7 | The `p2pevt.py` library adds timed event distribution to a group of devices as well as a group RPC mechanism
8 | (with help of [rpc.lua](../rpc.lua)) on top of that.
9 | 
10 | It requires info-beamer OS with a version later than '202104xx'.
11 | 
12 | ## Introduction to p2plib.py
13 | 
14 | The library can be used to synchronize content playback or other state across multiple devices running the
15 | same setup. Examples might be:
16 | 
17 | * The selected leader device decides which item within a playlist to play next and notifies all follower devices.
18 | * An event (for example GPIO) triggered on a single device should trigger an action on all other devices as well.
19 | * Some data value (like a counter) should be synced across multiple devices.
20 | 
21 | A single device within the group of devices running the same setup is automatically promoted to leader. All
22 | other devices become followers. The leader can broadcast short (recommended size ~1KB) messages to all
23 | follower devices. Similarly any device can send a message to the current leader device. The library can
24 | also be used if only a single device is active: All messages are then locally delivered within the same program.
25 | This allows you to use the same code regardless of the number of active devices.
26 | 
27 | The library automatically handles device detection within the local network using features provided by the
28 | info-beamer OS. It then uses knowledge of other devices to select a single 'leader' among those devices.
29 | It automatically sets up cryptographic keys that allow secure two-way communication across the devices.
30 | 
31 | For communication it uses UDP. The protocol guarantees at-most-once delivery of messages to other devices.
32 | Due to the lossy nature of UDP it cannot guarantee delivery though. So you should be prepared to lose
33 | messages if the network isn't reliable. Using Ethernet is preferred over WiFi. Message size should not exceed
34 | around 1KB when serialized as JSON. The protocol overhead is around 40 bytes per message.
35 | 
36 | Clocks on all participating devices must be reasonably in sync (at most 3 seconds difference) for
37 | communication to work. This also means that it won't work on completely offline devices that don't
38 | have a correct time at all.
39 | 
40 | A network split might result in multiple leaders.
Usually detection of new or disappeared peers happens
41 | within 5-10 seconds and results in a newly selected leader.
42 | 
43 | ## Example Code for p2plib.py
44 | 
45 | ```python
46 | #!/usr/bin/python
47 | import time, sys, threading
48 | from p2plib import PeerGroup
49 | 
50 | class ExampleGroup(PeerGroup):
51 |     def leader_thread(self):
52 |         while self._leader_running:
53 |             self.broadcast_to_all(
54 |                 value = time.time(),
55 |             )
56 |             time.sleep(1)
57 | 
58 |     def promote_leader(self, peer_info):
59 |         print >>sys.stderr, "Now a leader"
60 |         self._leader_running = True
61 |         self._leader_thread = threading.Thread(target=self.leader_thread)
62 |         self._leader_thread.daemon = True
63 |         self._leader_thread.start()
64 | 
65 |     def demote_leader(self):
66 |         print >>sys.stderr, "No longer a leader"
67 |         self._leader_running = False
68 |         self._leader_thread.join()
69 | 
70 |     def on_leader_message(self, message, peer_info):
71 |         print >>sys.stderr, "received message from leader %r: %r" % (peer_info, message)
72 | 
73 | if __name__ == "__main__":
74 |     group = ExampleGroup()
75 |     while 1:
76 |         time.sleep(1)
77 | ```
78 | 
79 | The ExampleGroup is based on the PeerGroup class. When instantiated it
80 | automatically starts two threads handling peer detection and network
81 | communication. After a short moment the `promote_leader` callback
82 | will be called on a single device. In the example code this then starts
83 | its own worker thread that sends a short message every second to all
84 | other peers (including the leader peer itself) using `broadcast_to_all`.
85 | 
86 | On all devices running the same setup, the `on_leader_message` callback will be
87 | called for each message sent by the leader. In the example the message
88 | is written to stderr (and ends up in the log).
89 | 
90 | Should the current leader device go offline, it might take a few
91 | seconds for all remaining devices to select a new leader.
92 | The `promote_leader` callback will again be called on the selected device.
93 | 
94 | Should the previous offline leader return, the current leader will
95 | be demoted and its `demote_leader` callback will be called. In the
96 | example this shuts down the thread.
97 | 
98 | Similarly a device can also send messages to the current leader
99 | using `send_to_leader`. On the leader, such messages will
100 | be delivered to the `on_peer_message` callback.
101 | 
102 | ## Hints
103 | 
104 | If a device has Peer-to-Peer support disabled or a device
105 | is alone in its current networking environment, a PeerGroup
106 | will consist of only this one device and it will be selected
107 | as the leader. `broadcast_to_all` and `send_to_leader` will be
108 | local method calls in that case. This allows you to use the
109 | same code regardless of the Peer-to-Peer state on a device as
110 | it transparently scales back from multiple devices to just a
111 | single one.
112 | 
113 | A device should always handle lost messages. If the state you need
114 | to sync is reasonably small, it might be easiest to just sync the
115 | state from leader to follower every second. A lost message will
116 | hopefully be received on a later attempt.
117 | 
118 | For precise timing it's recommended to send an exact future
119 | timestamp within the message itself. That way all devices will
120 | eventually receive the message and can begin acting on it based
121 | on the timestamp within the message itself. The protocol itself
122 | makes no guarantee with regard to latency.
125 | 
126 | # API
127 | 
128 | The following API calls are provided by the PeerGroup class.
129 | The API is threadsafe and any property or method can be used
130 | within all callbacks or by an external caller.
131 | 
132 | ## Constructor `PeerGroup(port=None)`
133 | 
134 | Sets up a new peer group. The optional port argument can be provided
135 | to force the use of a specific UDP port for communication. Usually
136 | this is not recommended as an automatic port is selected based
137 | on the current node directory.
138 | 
139 | Currently there is no way to shut down a constructed PeerGroup.
140 | The code you write should always handle an unclean device shutdown
141 | or termination of the process.
142 | 
143 | ## Callback `group.setup_peer(self)`
144 | 
145 | This call is invoked once after the PeerGroup is fully set up.
146 | It can be used to handle one-time initializations required on
147 | all devices.
148 | 
149 | ## Callback `group.promote_leader(self, peer_info)`
150 | 
151 | Called on a newly selected leader. Set up required data structures
152 | needed to fulfil the leader role. This can include starting new background
153 | threads. `peer_info` provides information about the leader.
154 | 
155 | ## Callback `group.demote_leader(self)`
156 | 
157 | Called if the device is no longer the leader. You should shut down
158 | any resources allocated in the `promote_leader` callback.
159 | 
160 | ## Callback `group.promote_follower(self, peer_info)`
161 | 
162 | Called on a device if it's not a leader. Usually not required, as
163 | the distinction between followers (any device that is not the leader)
164 | and all devices (the leader and all followers) is often not useful:
165 | usually all devices (including the leader device itself) handle
166 | data sent by the leader and you don't need the follower distinction.
167 | `peer_info` provides information about the follower.
168 | 
169 | ## Callback `group.demote_follower(self)`
170 | 
171 | Called on a device if it's no longer a follower.
172 | 
173 | ## Callback `group.on_peer_added(self, peer_info)`
174 | 
175 | Called if a new peer is added to the group. This callback is called
176 | before a peer with the same `ip` value in `peer_info` is provided in
177 | an eventual call to `promote_follower` or `promote_leader`.
178 | 
179 | ## Callback `group.on_peer_removed(self, peer_info)`
180 | 
181 | Called if a peer is no longer available. This callback is called
182 | after a peer is demoted.
183 | 
184 | ## Callback `group.on_leader_message(self, msg, peer_info)`
185 | 
186 | Called on a device if a message from the current leader is received.
187 | `msg` is the data sent by `broadcast_to_all`. `peer_info` is some
188 | metadata about the current leader. See below.
189 | 
190 | ## Callback `group.on_peer_message(self, msg, peer_info)`
191 | 
192 | Called on the leader device if any of the other peers calls
193 | `send_to_leader`. `msg` is the sent data and `peer_info` provides
194 | information about the sender of the message.
195 | 
196 | ## Property `group.is_follower`
197 | 
198 | True if the device is a follower (so not a leader).
199 | 
200 | ## Property `group.is_leader`
201 | 
202 | True if the device is the selected leader.
203 | 
204 | ## Property `group.num_peers`
205 | 
206 | Returns the total number of devices (including this one)
207 | within the PeerGroup.
208 | 
209 | ## Property `group.peers`
210 | 
211 | Returns a list of `peer_info` objects for all peers.
212 | 
213 | ## Method `group.broadcast_to_all(**message)`
214 | 
215 | This method can be called on the leader to send the message
216 | to all devices within the PeerGroup. If the device isn't
217 | the leader, the call has no effect.
218 | 
219 | The data in `message` must be serializable as JSON and the
220 | resulting serialized version should not exceed around 1KB
221 | in total size. Otherwise message delivery might be less
222 | reliable, as the resulting UDP packet will be fragmented.
223 | It's best to only send short messages.
224 | 
225 | Note that this method also invokes `on_leader_message`
226 | on the same local PeerGroup instance itself using a local
227 | method call.
228 | 
229 | Message delivery is not guaranteed (as UDP is used for
230 | transport and devices might disappear or be unreachable
231 | at any time). The transport protocol guarantees at-most-once
232 | delivery though if the message rate does not exceed
233 | 300 messages/second.
234 | 
235 | ## Method `group.send_to_leader(**message)`
236 | 
237 | This method sends a message from any device to the current
238 | leader. It can be called on any device within the
239 | current PeerGroup. If it's called on the leader itself,
240 | a local method call to `on_peer_message` will be issued
241 | instead of using network communication.
242 | 
243 | The data in `message` must be serializable as JSON and the
244 | resulting serialized version should not exceed around 1KB
245 | in total size. Otherwise message delivery might be less
246 | reliable, as the resulting UDP packet will be fragmented.
247 | It's best to only send short messages.
248 | 
249 | Message delivery is not guaranteed (as UDP is used for
250 | transport and devices might disappear or be unreachable
251 | at any time). The transport protocol guarantees at-most-once
252 | delivery though if the message rate does not exceed
253 | 300 messages/second.
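
To complement the broadcast-only example earlier, the following minimal sketch shows the opposite direction using `send_to_leader` and `on_peer_message`: any device reports an event to the leader, which aggregates a counter and rebroadcasts it to everyone. The class name `CounterGroup`, the `report_event` helper and the chosen intervals are illustrative; a real service would trigger `report_event` from e.g. a GPIO edge and would also have to cope with lost messages and with leader changes resetting the counter:

```python
#!/usr/bin/python
import time, sys
from p2plib import PeerGroup

class CounterGroup(PeerGroup):
    def setup_peer(self):
        # Runs once on every device before any messages arrive.
        self._count = 0

    def report_event(self):
        # Can be called on any device; on the leader itself this
        # becomes a local call to on_peer_message.
        self.send_to_leader(event='pressed')

    def on_peer_message(self, msg, peer_info):
        # Only invoked on the current leader: aggregate the events
        # and rebroadcast the authoritative value to all devices.
        self._count += 1
        self.broadcast_to_all(count=self._count)

    def on_leader_message(self, msg, peer_info):
        # Invoked on all devices, including the leader itself.
        print >>sys.stderr, "group-wide count is now %d" % msg['count']

if __name__ == "__main__":
    group = CounterGroup()
    time.sleep(5)   # give peer detection a moment to settle
    while 1:
        group.report_event()   # stand-in for e.g. a GPIO edge
        time.sleep(10)
```
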
254 | 
255 | ## `peer_info` Object
256 | 
257 | The `peer_info` object provides basic information about known peers.
258 | It defines four properties:
259 | 
260 | * `ip` is the IP address of the peer. This is always '127.0.0.1' for the local device.
261 | * `device_id` is the device id of the device.
262 | * `delta` is a wall clock time delta and provides an estimated time offset to the local device's clock in seconds.
263 | * `ping` is the calculated network latency when sending data to this peer.
264 | 
265 | If you need to keep a mapping from `peer_info` to your own data, use the
266 | `ip` value as key, as it's the only value guaranteed to be unique across
267 | different peers.
268 | 
269 | ## Example Code for p2pevt.py
270 | 
271 | A package service can use the library to send RPC calls to all devices running
272 | the same setup in the local network like this. The first parameter for
273 | the `call` methods is a time offset that determines when those calls will be forwarded
274 | to the Lua code in all peers.
275 | 276 | A simple synchronized image playback looks like this: 277 | 278 | ```python 279 | #!/usr/bin/python 280 | import time 281 | from itertools import cycle 282 | from p2pevt import GroupRPC 283 | 284 | if __name__ == "__main__": 285 | group = GroupRPC() 286 | for image in cycle(['1.jpg', '2.jpg']): 287 | group.call(0.2, 'load', image) 288 | group.call(0.4, 'switch') 289 | time.sleep(5) 290 | ``` 291 | 292 | The corresponding `node.lua` code: 293 | 294 | ```lua 295 | gl.setup(NATIVE_WIDTH, NATIVE_HEIGHT) 296 | util.no_globals() 297 | 298 | local rpc = require "rpc" 299 | local py = rpc.create() 300 | 301 | local img, next_img 302 | 303 | py.register("load", function(filename) 304 | if next_img then 305 | next_img:dispose() 306 | end 307 | next_img = resource.load_image(filename) 308 | end) 309 | 310 | py.register("switch", function() 311 | local old = img 312 | img = next_img 313 | next_img = nil 314 | if old then 315 | old:dispose() 316 | end 317 | end) 318 | 319 | function node.render() 320 | gl.clear(0,0,0,1) 321 | if img then 322 | img:draw(0, 0, WIDTH, HEIGHT) 323 | end 324 | end 325 | ``` 326 | -------------------------------------------------------------------------------- /p2p-helpers/p2plib.py: -------------------------------------------------------------------------------- 1 | # 2 | # Part of info-beamer hosted. You can find the latest version 3 | # of this file at: 4 | # 5 | # https://github.com/info-beamer/package-sdk 6 | # 7 | # Copyright (c) 2021 Florian Wesch 8 | # All rights reserved. 9 | # 10 | # Redistribution and use in source and binary forms, with or without 11 | # modification, are permitted provided that the following conditions are 12 | # met: 13 | # 14 | # Redistributions of source code must retain the above copyright 15 | # notice, this list of conditions and the following disclaimer. 16 | # 17 | # Redistributions in binary form must reproduce the above copyright 18 | # notice, this list of conditions and the following disclaimer in the 19 | # documentation and/or other materials provided with the 20 | # distribution. 21 | # 22 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 23 | # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 24 | # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 25 | # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 26 | # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 27 | # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 28 | # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 29 | # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 30 | # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 31 | # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 32 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 | 34 | VERSION = "1.0" 35 | 36 | import requests, threading, time, sys, traceback, socket, json, hmac, hashlib, struct, os, ctypes 37 | from Crypto import Random 38 | from Crypto.Cipher import AES 39 | from binascii import unhexlify, hexlify 40 | from collections import namedtuple 41 | 42 | # This service uses its own UDP protocol. Multiple nodes might use 43 | # this service. To avoid port collisions, based on a base port, a 44 | # unique port number is derived for each node within the assigned 45 | # setup. 
46 | P2P_GROUP_BASE_PORT = 61234 47 | 48 | # The avoid delivering duplicate messages (either accidental or 49 | # maliciously), each peer keeps a list of recently received messages. 50 | # To avoid keeping too many of those the following limits the 51 | # saved message ids. 52 | MAX_MESSAGE_PER_SEC = 300 53 | 54 | def log(msg, name='p2plib.py'): 55 | print >>sys.stderr, "[{}] {}".format(name, msg) 56 | 57 | PeerInfo = namedtuple('PeerInfo', 'ip device_id delta ping') 58 | 59 | CLOCK_MONOTONIC_RAW = 4 # see 60 | 61 | class timespec(ctypes.Structure): 62 | _fields_ = [ 63 | ('tv_sec', ctypes.c_long), 64 | ('tv_nsec', ctypes.c_long), 65 | ] 66 | 67 | librt = ctypes.CDLL('librt.so.1') 68 | clock_gettime = librt.clock_gettime 69 | clock_gettime.argtypes = [ctypes.c_int, ctypes.POINTER(timespec)] 70 | 71 | def monotonic_time(): 72 | t = timespec() 73 | clock_gettime(CLOCK_MONOTONIC_RAW , ctypes.pointer(t)) 74 | return t.tv_sec + t.tv_nsec * 1e-9 75 | 76 | class Peer(object): 77 | def __init__(self, ip): 78 | self._ip = ip 79 | 80 | # protocol version 81 | self._version = 1 82 | 83 | # throw away packets if the device's time is off by more than x seconds 84 | self._discard_time_diff = 3 85 | 86 | # state to handle throwing away duplicate messages. The delivery 87 | # guarantees at-most-once semantics as long as no more than 88 | # MAX_MESSAGE_PER_SEC messages are sent per second. 89 | self._last_cleanup = 0 90 | self._max_msg_ids = MAX_MESSAGE_PER_SEC * self._discard_time_diff 91 | self._msg_id_order = [] 92 | self._msg_id_set = set() 93 | 94 | def update(self, device_id, pair_key, delta, ping, is_leader): 95 | self._device_id = device_id 96 | self._pair_key = pair_key 97 | self._delta = delta 98 | self._ping = ping 99 | self._is_leader = is_leader 100 | 101 | def add_seen_msg_id(self, msg_id): 102 | now = monotonic_time() 103 | if msg_id in self._msg_id_set: 104 | log('discarding message: duplicate') 105 | return False 106 | if now > self._last_cleanup + 1 or len(self._msg_id_order) > self._max_msg_ids: 107 | self.cleanup_msg_ids(now) 108 | if len(self._msg_id_order) > self._max_msg_ids: 109 | log('discarding message: in-queue overflow') 110 | return False 111 | discard_threshold = now + self._discard_time_diff 112 | if self._msg_id_order: 113 | assert discard_threshold >= self._msg_id_order[-1][0] 114 | self._msg_id_order.append((discard_threshold, msg_id)) 115 | self._msg_id_set.add(msg_id) 116 | return True 117 | 118 | def cleanup_msg_ids(self, now): 119 | deleted = 0 120 | while self._msg_id_order: 121 | discard_threshold, msg_id = self._msg_id_order[0] 122 | if now < discard_threshold: 123 | break 124 | self._msg_id_order.pop(0) 125 | self._msg_id_set.remove(msg_id) 126 | deleted += 1 127 | # log('cleaned up %d msg ids' % (deleted,)) 128 | self._last_cleanup = now 129 | 130 | @property 131 | def is_leader(self): 132 | return self._is_leader 133 | 134 | @property 135 | def device_id(self): 136 | return self._device_id 137 | 138 | @property 139 | def ip(self): 140 | return self._ip 141 | 142 | @property 143 | def peer_info(self): 144 | return PeerInfo(self._ip, self._device_id, self._delta, self._ping) 145 | 146 | def encode(self, data, direction, group_time): 147 | # 148 | # | 16 | 16 0 1 5 x (%16=0) 149 | # | | | I | timestamp | {json message} ' ' | 150 | # | | | | 151 | # | |- msg_id/IV -+-------------- Message --------------------| 152 | # | | '---------------------.----' | 153 | # | | AES CBC | 154 | # | | | | 155 | # | | v | 156 | # | |---------------------- Ciphertext 
-----------------------' 157 | # | .---------- HMAC ------------------' | 158 | # | v | | 159 | # |---- MAC ----| | 160 | # 161 | group_time = int(group_time) 162 | message = struct.pack("> 7 205 | if direction != expected_direction: 206 | return None 207 | if local_group_time != 0xFFFFFFFF and remote_group_time != 0xFFFFFFFF: 208 | # remote and our time too far off? 209 | if abs(local_group_time - remote_group_time) > self._discard_time_diff: 210 | log('discarding message: outside of expected receive group time') 211 | return None 212 | if not self.add_seen_msg_id(msg_id): 213 | return None 214 | try: 215 | return json.loads(data) 216 | except Exception as err: 217 | return None 218 | 219 | def __repr__(self): 220 | return '<%d: %s>' % (self._device_id, self._ip) 221 | 222 | ROLE_LEADER, ROLE_FOLLOWER = 1, 2 223 | DIRECTION_LEADER_TO_PEER, DIRECTION_PEER_TO_LEADER = 0, 1 224 | 225 | class PeerGroup(object): 226 | def __init__(self, port=None): 227 | # This assumed that the service is running within its node 228 | # directory. 229 | with open('config.json') as f: 230 | metadata = json.load(f)['__metadata'] 231 | 232 | if port is not None: 233 | self._port = port 234 | else: 235 | self._port = P2P_GROUP_BASE_PORT + metadata['node_idx'] 236 | 237 | # Every node on the device can calculate its own 238 | # unique node_scope value based on port, instance_id 239 | # and current work directory. Multiple devices 240 | # with the same setup assigned will all calculate 241 | # the same value. This avoid reusing the same 242 | # pairwise keys across multiple peer groups running 243 | # in different package services. 244 | self._node_scope = hashlib.sha256( 245 | '%d:%d:%s' % ( 246 | self._port, metadata['instance_id'], os.getcwd(), 247 | ) 248 | ).digest() 249 | 250 | self._no_p2p_fallback = [dict( 251 | device_id = metadata['device_id'], 252 | pair_key = '0' * 16, 253 | delta = 0, 254 | ping = 0, 255 | ip = '127.0.0.1', 256 | )] 257 | 258 | log("using peer group port %d, node scope %s" % ( 259 | self._port, hexlify(self._node_scope) 260 | )) 261 | 262 | self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 263 | self._sock.bind(('0.0.0.0', self._port)) 264 | 265 | self._role = None 266 | self._me = None 267 | self._leader = None 268 | self._peers, self._peers_lock = {}, threading.Lock() 269 | 270 | self._group_time_base = -monotonic_time() 271 | 272 | self.setup_peer() 273 | 274 | thread = threading.Thread(target=self._update_thread) 275 | thread.daemon = True 276 | thread.start() 277 | 278 | thread = threading.Thread(target=self._listen_thread) 279 | thread.daemon = True 280 | thread.start() 281 | 282 | ##----- Methods to overwrite ------ 283 | 284 | def setup_peer(self): 285 | pass 286 | 287 | def promote_leader(self, peer_info): 288 | pass 289 | 290 | def demote_leader(self): 291 | pass 292 | 293 | def promote_follower(self, peer_info): 294 | pass 295 | 296 | def demote_follower(self): 297 | pass 298 | 299 | def on_peer_added(self, peer_info): 300 | pass 301 | 302 | def on_peer_removed(self, peer_info): 303 | pass 304 | 305 | # Handle message sent by broadcast_to_all. 306 | # The message is received at most once on all peers. 307 | def on_leader_message(self, msg, peer_info): 308 | pass 309 | 310 | # Handle message sent by send_to_leader. 311 | # This callback is only called when this peer is 312 | # a leader. 
313 | def on_peer_message(self, msg, peer_info): 314 | pass 315 | 316 | ##----- Public Methods/Properties ----- 317 | 318 | @property 319 | def is_follower(self): 320 | return self._role == ROLE_FOLLOWER 321 | 322 | @property 323 | def is_leader(self): 324 | return self._role == ROLE_LEADER 325 | 326 | @property 327 | def num_peers(self): 328 | with self._peers_lock: 329 | return len(self._peers) 330 | 331 | @property 332 | def leader(self): 333 | return self._leader 334 | 335 | @property 336 | def peers(self): 337 | peers = [] 338 | with self._peers_lock: 339 | for ip, peer in self._peers.iteritems(): 340 | peers.append(peer.peer_info) 341 | peers.sort() 342 | return peers 343 | 344 | def broadcast_to_all(self, **message): 345 | if self._role != ROLE_LEADER: 346 | return 347 | local_device = None 348 | with self._peers_lock: 349 | for ip, peer in self._peers.iteritems(): 350 | if peer is self._me: 351 | local_device = self._me 352 | else: 353 | pkt = peer.encode(message, 354 | direction = DIRECTION_LEADER_TO_PEER, 355 | group_time = self._group_time, 356 | ) 357 | try: 358 | self._sock.sendto(pkt, (ip, self._port)) 359 | except socket.error: 360 | pass 361 | if local_device is not None: 362 | self.on_leader_message(json.loads(json.dumps(message)), local_device.peer_info) 363 | 364 | def send_to_leader(self, **message): 365 | local_device = None 366 | with self._peers_lock: 367 | if self._leader is self._me: 368 | local_device = self._me 369 | else: 370 | pkt = self._leader.encode(message, 371 | direction = DIRECTION_PEER_TO_LEADER, 372 | group_time = self._group_time, 373 | ) 374 | self._sock.sendto(pkt, (self._leader.ip, self._port)) 375 | if local_device is not None: 376 | self.on_peer_message(json.loads(json.dumps(message)), local_device.peer_info) 377 | 378 | ##-------------------------------- 379 | 380 | @property 381 | def _group_time(self): 382 | return self._group_time_base + monotonic_time() 383 | 384 | def _listen_thread(self): 385 | while 1: 386 | try: 387 | pkt, (ip, port) = self._sock.recvfrom(2**16) 388 | if port != self._port: 389 | continue 390 | receiver = None 391 | with self._peers_lock: 392 | peer = self._peers.get(ip) 393 | if peer is None: 394 | continue 395 | if peer.is_leader and self._role == ROLE_FOLLOWER: 396 | receiver = self.on_leader_message 397 | message = peer.decode(pkt, 398 | expected_direction = DIRECTION_LEADER_TO_PEER, 399 | arrival_group_time = self._group_time, 400 | ) 401 | elif not peer.is_leader and self._role == ROLE_LEADER: 402 | receiver = self.on_peer_message 403 | message = peer.decode(pkt, 404 | expected_direction = DIRECTION_PEER_TO_LEADER, 405 | arrival_group_time = self._group_time, 406 | ) 407 | else: 408 | continue 409 | if message is not None: 410 | receiver(message, peer.peer_info) 411 | except Exception as err: 412 | traceback.print_exc() 413 | 414 | def _update_thread(self): 415 | while 1: 416 | try: 417 | setup = requests.get( 418 | 'http://127.0.0.1:81/api/v1/peers/setup', timeout=0.5 419 | ).json() 420 | peers, group_time = setup['peers'], setup['group_time'] 421 | if not peers: 422 | # If P2P is disabled or P2P traffic completely blocked 423 | # for some reason, use a fallback list to make this 424 | # device its own leader. 
425 | peers = self._no_p2p_fallback 426 | self._group_time_base = group_time - monotonic_time() 427 | # log('group time base: %f' % (self._group_time_base,)) 428 | self._update_peers(peers) 429 | except Exception as err: 430 | log('cannot update setup peers: %s' % err) 431 | traceback.print_exc() 432 | time.sleep(3) 433 | 434 | def _update_peers(self, peers): 435 | me, leader = None, None 436 | seen = set() 437 | added, deleted = set(), set() 438 | with self._peers_lock: 439 | for idx, peer_info in enumerate(peers): 440 | device_id = peer_info['device_id'] 441 | delta = peer_info['delta'] 442 | ping = peer_info['ping'] 443 | ip = peer_info['ip'] 444 | pair_key = hmac.HMAC( 445 | unhexlify(peer_info['pair_key']), 446 | self._node_scope, 447 | hashlib.sha256 448 | ).digest()[:16] 449 | seen.add(ip) 450 | peer = self._peers.get(ip) 451 | is_added = peer is None 452 | if is_added: 453 | log('added peer %s' % ip) 454 | peer = self._peers[ip] = Peer(ip) 455 | peer.update(device_id, pair_key, delta, ping, idx == 0) 456 | if is_added: 457 | added.add(peer.peer_info) 458 | 459 | # First device is always the leader. the peers/setup 460 | # response is guaranteed to be sorted the same on all devices 461 | # assuming they all know each other. 462 | if idx == 0: 463 | leader = peer 464 | 465 | # This device will always be marked by 127.0.0.1 466 | if ip == '127.0.0.1': 467 | me = peer 468 | known = set(self._peers.keys()) 469 | for ip in known - seen: 470 | log('removed peer %s' % ip) 471 | peer = self._peers.pop(ip) 472 | deleted.add(peer.peer_info) 473 | 474 | self._me = me 475 | self._leader = leader 476 | 477 | if me is None: 478 | new_role = None 479 | else: 480 | new_role = ROLE_LEADER if me == leader else ROLE_FOLLOWER 481 | 482 | for peer_info in added: 483 | self.on_peer_added(peer_info) 484 | 485 | if new_role != self._role: 486 | if self._role == ROLE_FOLLOWER: 487 | self.demote_follower() 488 | elif self._role == ROLE_LEADER: 489 | self.demote_leader() 490 | self._role = new_role 491 | if self._role == ROLE_FOLLOWER: 492 | self.promote_follower(me.peer_info) 493 | elif self._role == ROLE_LEADER: 494 | self.promote_leader(me.peer_info) 495 | 496 | for peer_info in deleted: 497 | self.on_peer_removed(peer_info) 498 | -------------------------------------------------------------------------------- /hosted.py: -------------------------------------------------------------------------------- 1 | # 2 | # Part of info-beamer hosted. You can find the latest version 3 | # of this file at: 4 | # 5 | # https://github.com/info-beamer/package-sdk 6 | # 7 | # Copyright (c) 2014-2020 Florian Wesch 8 | # All rights reserved. 9 | # 10 | # Redistribution and use in source and binary forms, with or without 11 | # modification, are permitted provided that the following conditions are 12 | # met: 13 | # 14 | # Redistributions of source code must retain the above copyright 15 | # notice, this list of conditions and the following disclaimer. 16 | # 17 | # Redistributions in binary form must reproduce the above copyright 18 | # notice, this list of conditions and the following disclaimer in the 19 | # documentation and/or other materials provided with the 20 | # distribution. 21 | # 22 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 23 | # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 24 | # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 25 | # PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 26 | # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 27 | # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 28 | # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 29 | # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 30 | # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 31 | # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 32 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 | 34 | VERSION = "1.9" 35 | 36 | import os, re, sys, json, time, traceback, marshal, hashlib 37 | import errno, socket, select, threading, Queue, ctypes 38 | import pyinotify, requests 39 | from functools import wraps 40 | from collections import namedtuple 41 | from tempfile import NamedTemporaryFile 42 | 43 | types = {} 44 | 45 | def init_types(): 46 | def type(fn): 47 | types[fn.__name__] = fn 48 | return fn 49 | 50 | @type 51 | def color(value): 52 | return value 53 | 54 | @type 55 | def string(value): 56 | return value 57 | 58 | @type 59 | def text(value): 60 | return value 61 | 62 | @type 63 | def section(value): 64 | return value 65 | 66 | @type 67 | def boolean(value): 68 | return value 69 | 70 | @type 71 | def select(value): 72 | return value 73 | 74 | @type 75 | def duration(value): 76 | return value 77 | 78 | @type 79 | def integer(value): 80 | return value 81 | 82 | @type 83 | def float(value): 84 | return value 85 | 86 | @type 87 | def font(value): 88 | return value 89 | 90 | @type 91 | def device(value): 92 | return value 93 | 94 | @type 95 | def resource(value): 96 | return value 97 | 98 | @type 99 | def device_token(value): 100 | return value 101 | 102 | @type 103 | def json(value): 104 | return value 105 | 106 | @type 107 | def custom(value): 108 | return value 109 | 110 | @type 111 | def date(value): 112 | return value 113 | 114 | init_types() 115 | 116 | def log(msg, name='hosted.py'): 117 | sys.stderr.write("[{}] {}\n".format(name, msg)) 118 | 119 | def abort_service(reason): 120 | log("restarting service (%s)" % reason) 121 | os._exit(0) 122 | time.sleep(2) 123 | os.kill(os.getpid(), 2) 124 | time.sleep(2) 125 | os.kill(os.getpid(), 15) 126 | time.sleep(2) 127 | os.kill(os.getpid(), 9) 128 | time.sleep(100) 129 | 130 | CLOCK_MONOTONIC_RAW = 4 # see 131 | 132 | class timespec(ctypes.Structure): 133 | _fields_ = [ 134 | ('tv_sec', ctypes.c_long), 135 | ('tv_nsec', ctypes.c_long), 136 | ] 137 | 138 | librt = ctypes.CDLL('librt.so.1') 139 | clock_gettime = librt.clock_gettime 140 | clock_gettime.argtypes = [ctypes.c_int, ctypes.POINTER(timespec)] 141 | 142 | def monotonic_time(): 143 | t = timespec() 144 | clock_gettime(CLOCK_MONOTONIC_RAW , ctypes.pointer(t)) 145 | return t.tv_sec + t.tv_nsec * 1e-9 146 | 147 | class InfoBeamerQueryException(Exception): 148 | pass 149 | 150 | class InfoBeamerQuery(object): 151 | def __init__(self, host='127.0.0.1', port=4444): 152 | self._sock = None 153 | self._conn = None 154 | self._host = host 155 | self._port = port 156 | self._timeout = 2 157 | self._version = None 158 | 159 | def _reconnect(self): 160 | if self._conn is not None: 161 | return 162 | try: 163 | self._sock = socket.create_connection((self._host, self._port), self._timeout) 164 | self._conn = self._sock.makefile() 165 | intro = self._conn.readline() 166 | except socket.timeout: 167 | self._reset() 168 | raise InfoBeamerQueryException("Timeout while reopening connection") 169 | except socket.error as err: 170 | 
self._reset() 171 | raise InfoBeamerQueryException("Cannot connect to %s:%s: %s" % ( 172 | self._host, self._port, err)) 173 | m = re.match("^Info Beamer PI ([^ ]+)", intro) 174 | if not m: 175 | self._reset() 176 | raise InfoBeamerQueryException("Invalid handshake. Not info-beamer?") 177 | self._version = m.group(1) 178 | 179 | def _parse_line(self): 180 | line = self._conn.readline() 181 | if not line: 182 | return None 183 | return line.rstrip() 184 | 185 | def _parse_multi_line(self): 186 | lines = [] 187 | while 1: 188 | line = self._conn.readline() 189 | if not line: 190 | return None 191 | line = line.rstrip() 192 | if not line: 193 | break 194 | lines.append(line) 195 | return '\n'.join(lines) 196 | 197 | def _send_cmd(self, min_version, cmd, multiline=False): 198 | for retry in (1, 2): 199 | self._reconnect() 200 | if self._version <= min_version: 201 | raise InfoBeamerQueryException( 202 | "This query is not implemented in your version of info-beamer. " 203 | "%s or higher required, %s found" % (min_version, self._version) 204 | ) 205 | try: 206 | self._conn.write(cmd + "\n") 207 | self._conn.flush() 208 | response = self._parse_multi_line() if multiline else self._parse_line() 209 | if response is None: 210 | self._reset() 211 | continue 212 | return response 213 | except socket.error: 214 | self._reset() 215 | continue 216 | except socket.timeout: 217 | self._reset() 218 | raise InfoBeamerQueryException("Timeout waiting for response") 219 | except Exception: 220 | self._reset() 221 | continue 222 | raise InfoBeamerQueryException("Failed to get a response") 223 | 224 | def _reset(self, close=True): 225 | if close: 226 | try: 227 | if self._conn: self._conn.close() 228 | if self._sock: self._sock.close() 229 | except: 230 | pass 231 | self._conn = None 232 | self._sock = None 233 | 234 | @property 235 | def addr(self): 236 | return "%s:%s" % (self._host, self._port) 237 | 238 | def close(self): 239 | self._reset() 240 | 241 | @property 242 | def ping(self): 243 | "tests if info-beamer is reachable" 244 | return self._send_cmd( 245 | "0.6", "*query/*ping", 246 | ) == "pong" 247 | 248 | @property 249 | def uptime(self): 250 | "returns the uptime in seconds" 251 | return int(self._send_cmd( 252 | "0.6", "*query/*uptime", 253 | )) 254 | 255 | @property 256 | def objects(self): 257 | "returns the number of allocated info-beamer objects" 258 | return int(self._send_cmd( 259 | "0.9.4", "*query/*objects", 260 | )) 261 | 262 | @property 263 | def version(self): 264 | "returns the running info-beamer version" 265 | return self._send_cmd( 266 | "0.6", "*query/*version", 267 | ) 268 | 269 | @property 270 | def fps(self): 271 | "returns the FPS of the top level node" 272 | return float(self._send_cmd( 273 | "0.6", "*query/*fps", 274 | )) 275 | 276 | @property 277 | def display(self): 278 | "returns the display configuration" 279 | return json.loads(self._send_cmd( 280 | "1.0", "*query/*display", 281 | )) 282 | 283 | ResourceUsage = namedtuple("ResourceUsage", "user_time system_time memory") 284 | @property 285 | def resources(self): 286 | "returns information about used resources" 287 | return self.ResourceUsage._make(int(v) for v in self._send_cmd( 288 | "0.6", "*query/*resources", 289 | ).split(',')) 290 | 291 | ScreenSize = namedtuple("ScreenSize", "width height") 292 | @property 293 | def screen(self): 294 | "returns the native screen size" 295 | return self.ScreenSize._make(int(v) for v in self._send_cmd( 296 | "0.8.1", "*query/*screen", 297 | ).split(',')) 298 | 299 | @property 300 | 
def runid(self): 301 | "returns a unique run id that changes with every restart of info-beamer" 302 | return self._send_cmd( 303 | "0.9.0", "*query/*runid", 304 | ) 305 | 306 | @property 307 | def nodes(self): 308 | "returns a list of nodes" 309 | nodes = self._send_cmd( 310 | "0.9.3", "*query/*nodes", 311 | ).split(',') 312 | return [] if not nodes[0] else nodes 313 | 314 | class Node(object): 315 | def __init__(self, ib, path): 316 | self._ib = ib 317 | self._path = path 318 | 319 | @property 320 | def mem(self): 321 | "returns the Lua memory usage of this node" 322 | return int(self._ib._send_cmd( 323 | "0.6", "*query/*mem/%s" % self._path 324 | )) 325 | 326 | @property 327 | def fps(self): 328 | "returns the framerate of this node" 329 | return float(self._ib._send_cmd( 330 | "0.6", "*query/*fps/%s" % self._path 331 | )) 332 | 333 | def io(self, raw=True): 334 | "creates a tcp connection to this node" 335 | status = self._ib._send_cmd( 336 | "0.6", "%s%s" % ("*raw/" if raw else '', self._path), 337 | ) 338 | if status != 'ok!': 339 | raise InfoBeamerQueryException("Cannot connect to node %s" % self._path) 340 | sock = self._ib._sock 341 | sock.settimeout(None) 342 | return self._ib._conn 343 | 344 | @property 345 | def has_error(self): 346 | "queries the error flag" 347 | return bool(int(self._ib._send_cmd( 348 | "0.8.2", "*query/*has_error/%s" % self._path, 349 | ))) 350 | 351 | @property 352 | def error(self): 353 | "returns the last Lua traceback" 354 | return self._ib._send_cmd( 355 | "0.8.2", "*query/*error/%s" % self._path, multiline=True 356 | ) 357 | 358 | def __repr__(self): 359 | return "%s/%s" % (self._ib, self._path) 360 | 361 | def node(self, node): 362 | return self.Node(self, node) 363 | 364 | def __repr__(self): 365 | return "" % self.addr 366 | 367 | 368 | class Configuration(object): 369 | def __init__(self): 370 | self._restart = False 371 | self._options = [] 372 | self._config = {} 373 | self._parsed = {} 374 | self.parse_node_json(do_update=False) 375 | self.parse_config_json() 376 | 377 | def restart_on_update(self): 378 | log("going to restart when config is updated") 379 | self._restart = True 380 | 381 | def parse_node_json(self, do_update=True): 382 | with open("node.json") as f: 383 | self._options = json.load(f).get('options', []) 384 | if do_update: 385 | self.update_config() 386 | 387 | def parse_config_json(self, do_update=True): 388 | with open("config.json") as f: 389 | self._config = json.load(f) 390 | if do_update: 391 | self.update_config() 392 | 393 | def update_config(self): 394 | if self._restart: 395 | return abort_service("restart_on_update set") 396 | 397 | def parse_recursive(options, config, target): 398 | # print 'parsing', config 399 | for option in options: 400 | if not 'name' in option: 401 | continue 402 | if option['type'] == 'list': 403 | items = [] 404 | for item in config[option['name']]: 405 | parsed = {} 406 | parse_recursive(option['items'], item, parsed) 407 | items.append(parsed) 408 | target[option['name']] = items 409 | continue 410 | target[option['name']] = types[option['type']](config[option['name']]) 411 | 412 | parsed = {} 413 | parse_recursive(self._options, self._config, parsed) 414 | log("updated config") 415 | self._parsed = parsed 416 | 417 | @property 418 | def raw(self): 419 | return self._config 420 | 421 | @property 422 | def metadata(self): 423 | return self._config['__metadata'] 424 | 425 | @property 426 | def metadata_timezone(self): 427 | import pytz 428 | return pytz.timezone(self.metadata['timezone']) 429 
| 430 | def __getitem__(self, key): 431 | return self._parsed[key] 432 | 433 | def __getattr__(self, key): 434 | return self._parsed[key] 435 | 436 | def setup_inotify(configuration): 437 | class EventHandler(pyinotify.ProcessEvent): 438 | def process_default(self, event): 439 | basename = os.path.basename(event.pathname) 440 | if basename == 'node.json': 441 | log("node.json changed") 442 | configuration.parse_node_json() 443 | elif basename == 'config.json': 444 | log("config.json changed!") 445 | configuration.parse_config_json() 446 | elif basename.endswith('.py'): 447 | abort_service("python file changed") 448 | 449 | wm = pyinotify.WatchManager() 450 | 451 | notifier = pyinotify.ThreadedNotifier(wm, EventHandler()) 452 | notifier.daemon = True 453 | notifier.start() 454 | 455 | wm.add_watch('.', pyinotify.IN_MOVED_TO) 456 | 457 | class RPC(object): 458 | def __init__(self, path, callbacks): 459 | self._path = path 460 | self._callbacks = callbacks 461 | self._lock = threading.Lock() 462 | self._con = None 463 | thread = threading.Thread(target=self._listen_thread) 464 | thread.daemon = True 465 | thread.start() 466 | 467 | def _get_connection(self): 468 | if self._con is None: 469 | try: 470 | self._con = InfoBeamerQuery().node( 471 | self._path + "/rpc/python" 472 | ).io(raw=True) 473 | except InfoBeamerQueryException: 474 | return None 475 | return self._con 476 | 477 | def _close_connection(self): 478 | with self._lock: 479 | if self._con: 480 | try: 481 | self._con.close() 482 | except: 483 | pass 484 | self._con = None 485 | 486 | def _send(self, line): 487 | with self._lock: 488 | con = self._get_connection() 489 | if con is None: 490 | return 491 | try: 492 | con.write(line + '\n') 493 | con.flush() 494 | return True 495 | except: 496 | self._close_connection() 497 | return False 498 | 499 | def _recv(self): 500 | with self._lock: 501 | con = self._get_connection() 502 | try: 503 | return con.readline() 504 | except: 505 | self._close_connection() 506 | 507 | def _listen_thread(self): 508 | while 1: 509 | line = self._recv() 510 | if not line: 511 | self._close_connection() 512 | time.sleep(0.5) 513 | continue 514 | try: 515 | args = json.loads(line) 516 | method = args.pop(0) 517 | callback = self._callbacks.get(method) 518 | if callback: 519 | callback(*args) 520 | else: 521 | log("callback '%s' not found" % (method,)) 522 | except: 523 | traceback.print_exc() 524 | 525 | def register(self, name, fn): 526 | self._callbacks[name] = fn 527 | 528 | def call(self, fn): 529 | self.register(fn.__name__, fn) 530 | 531 | def __getattr__(self, method): 532 | def call(*args): 533 | args = list(args) 534 | args.insert(0, method) 535 | return self._send(json.dumps( 536 | args, 537 | ensure_ascii=False, 538 | separators=(',',':'), 539 | ).encode('utf8')) 540 | return call 541 | 542 | class Cache(object): 543 | def __init__(self, scope='default'): 544 | self._touched = set() 545 | self._prefix = 'cache-%s-' % scope 546 | 547 | def key_to_fname(self, key): 548 | return self._prefix + hashlib.md5(key).hexdigest() 549 | 550 | def has(self, key, max_age=None): 551 | try: 552 | stat = os.stat(self.key_to_fname(key)) 553 | if max_age is not None: 554 | now = time.time() 555 | if now > stat.st_mtime + max_age: 556 | return False 557 | return True 558 | except: 559 | return False 560 | 561 | def get(self, key, max_age=None): 562 | try: 563 | with open(self.file_ref(key)) as f: 564 | if max_age is not None: 565 | stat = os.fstat(f.fileno()) 566 | now = time.time() 567 | if now > stat.st_mtime + 
max_age: 568 | return None 569 | return f.read() 570 | except: 571 | return None 572 | 573 | def get_json(self, key, max_age=None): 574 | data = self.get(key, max_age) 575 | if data is None: 576 | return None 577 | return json.loads(data) 578 | 579 | def set(self, key, value): 580 | with open(self.file_ref(key), "wb") as f: 581 | f.write(value) 582 | 583 | def set_json(self, key, data): 584 | self.set(key, json.dumps(data)) 585 | 586 | def file_ref(self, key): 587 | fname = self.key_to_fname(key) 588 | self._touched.add(fname) 589 | return fname 590 | 591 | def start(self): 592 | self._touched = set() 593 | 594 | def prune(self): 595 | existing = set() 596 | for fname in os.listdir("."): 597 | if not fname.startswith(self._prefix): 598 | continue 599 | existing.add(fname) 600 | prunable = existing - self._touched 601 | for fname in prunable: 602 | try: 603 | log("pruning %s" % fname) 604 | os.unlink(fname) 605 | except: 606 | pass 607 | 608 | def clear(self): 609 | self.start() 610 | self.prune() 611 | 612 | def call(self, max_age=None): 613 | def deco(fn): 614 | @wraps(fn) 615 | def wrapper(*args, **kwargs): 616 | key = marshal.dumps((fn.__name__, args, kwargs), 2) 617 | cached = self.get(key, max_age) 618 | if cached is not None: 619 | return marshal.loads(cached) 620 | val = fn(*args, **kwargs) 621 | self.set(key, marshal.dumps(val, 2)) 622 | return val 623 | return wrapper 624 | return deco 625 | 626 | def file_producer(self, max_age=None): 627 | def deco(fn): 628 | @wraps(fn) 629 | def wrapper(*args, **kwargs): 630 | key = marshal.dumps((fn.__name__, args, kwargs), 2) 631 | if self.has(key, max_age): 632 | return self.file_ref(key) 633 | val = fn(*args, **kwargs) 634 | if val is None: 635 | return None 636 | self.set(key, val) 637 | return self.file_ref(key) 638 | return wrapper 639 | return deco 640 | 641 | class Node(object): 642 | def __init__(self, node): 643 | self._node = node 644 | self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 645 | 646 | def send_raw(self, raw): 647 | log("sending %r" % (raw,)) 648 | self._sock.sendto(raw, ('127.0.0.1', 4444)) 649 | 650 | def send(self, data): 651 | self.send_raw(self._node + data) 652 | 653 | def send_json(self, path, data): 654 | self.send('%s:%s' % (path, json.dumps( 655 | data, 656 | ensure_ascii=False, 657 | separators=(',',':'), 658 | ).encode('utf8'))) 659 | 660 | @property 661 | def is_top_level(self): 662 | return self._node == "root" 663 | 664 | @property 665 | def path(self): 666 | return self._node 667 | 668 | def write_file(self, filename, content): 669 | f = NamedTemporaryFile(prefix='.hosted-py-tmp', dir=os.getcwd()) 670 | try: 671 | f.write(content) 672 | except: 673 | traceback.print_exc() 674 | f.close() 675 | raise 676 | else: 677 | f.delete = False 678 | f.close() 679 | os.rename(f.name, filename) 680 | 681 | def write_json(self, filename, data): 682 | self.write_file(filename, json.dumps( 683 | data, 684 | ensure_ascii=False, 685 | separators=(',',':'), 686 | ).encode('utf8')) 687 | 688 | class Sender(object): 689 | def __init__(self, node, path): 690 | self._node = node 691 | self._path = path 692 | 693 | def __call__(self, data): 694 | if isinstance(data, (dict, list)): 695 | raw = "%s:%s" % (self._path, json.dumps( 696 | data, 697 | ensure_ascii=False, 698 | separators=(',',':'), 699 | ).encode('utf8')) 700 | else: 701 | raw = "%s:%s" % (self._path, data) 702 | self._node.send_raw(raw) 703 | 704 | def __getitem__(self, path): 705 | return self.Sender(self, self._node + path) 706 | 707 | def 
__call__(self, data): 708 | return self.Sender(self, self._node)(data) 709 | 710 | def connect(self, suffix=""): 711 | ib = InfoBeamerQuery() 712 | return ib.node(self.path + suffix).io(raw=True) 713 | 714 | def rpc(self, **callbacks): 715 | return RPC(self.path, callbacks) 716 | 717 | def cache(self, scope='default'): 718 | return Cache(scope) 719 | 720 | def scratch_cached(self, filename, generator): 721 | cached = os.path.join(os.environ['SCRATCH'], filename) 722 | 723 | if not os.path.exists(cached): 724 | f = NamedTemporaryFile(prefix='scratch-cached-tmp', dir=os.environ['SCRATCH']) 725 | try: 726 | generator(f) 727 | except: 728 | raise 729 | else: 730 | f.delete = False 731 | f.close() 732 | os.rename(f.name, cached) 733 | 734 | if os.path.exists(filename): 735 | try: 736 | os.unlink(filename) 737 | except: 738 | pass 739 | os.symlink(cached, filename) 740 | 741 | class APIError(Exception): 742 | pass 743 | 744 | class APIProxy(object): 745 | def __init__(self, apis, api_name): 746 | self._apis = apis 747 | self._api_name = api_name 748 | 749 | @property 750 | def url(self): 751 | index = self._apis.get_api_index() 752 | if not self._api_name in index: 753 | raise APIError("api '%s' not available" % (self._api_name,)) 754 | return index[self._api_name]['url'] 755 | 756 | def unwrap(self, r): 757 | r.raise_for_status() 758 | if r.status_code == 304: 759 | return None 760 | if r.headers['content-type'] == 'application/json': 761 | resp = r.json() 762 | if not resp['ok']: 763 | raise APIError(u"api call failed: %s" % ( 764 | resp.get('error', ''), 765 | )) 766 | return resp.get(self._api_name) 767 | else: 768 | return r.content 769 | 770 | def add_default_args(self, kwargs): 771 | if not 'timeout' in kwargs: 772 | kwargs['timeout'] = 10 773 | return kwargs 774 | 775 | def get(self, **kwargs): 776 | try: 777 | return self.unwrap(self._apis.session.get( 778 | url = self.url, 779 | **self.add_default_args(kwargs) 780 | )) 781 | except APIError: 782 | raise 783 | except Exception as err: 784 | raise APIError(err) 785 | 786 | def post(self, **kwargs): 787 | try: 788 | return self.unwrap(self._apis.session.post( 789 | url = self.url, 790 | **self.add_default_args(kwargs) 791 | )) 792 | except APIError: 793 | raise 794 | except Exception as err: 795 | raise APIError(err) 796 | 797 | def delete(self, **kwargs): 798 | try: 799 | return self.unwrap(self._apis.session.delete( 800 | url = self.url, 801 | **self.add_default_args(kwargs) 802 | )) 803 | except APIError: 804 | raise 805 | except Exception as err: 806 | raise APIError(err) 807 | 808 | 809 | class OnDeviceAPIs(object): 810 | def __init__(self, config): 811 | self._config = config 812 | self._index = None 813 | self._valid_until = 0 814 | self._lock = threading.Lock() 815 | self._session = requests.Session() 816 | self._session.headers.update({ 817 | 'User-Agent': 'hosted.py version/%s' % (VERSION,) 818 | }) 819 | 820 | def update_apis(self): 821 | log("fetching api index") 822 | r = self._session.get( 823 | url = self._config.metadata['api'], 824 | timeout = 5, 825 | ) 826 | r.raise_for_status() 827 | resp = r.json() 828 | if not resp['ok']: 829 | raise APIError("cannot retrieve api index") 830 | self._index = resp['apis'] 831 | self._valid_until = resp['valid_until'] - 300 832 | 833 | def get_api_index(self): 834 | with self._lock: 835 | now = time.time() 836 | if now > self._valid_until: 837 | self.update_apis() 838 | return self._index 839 | 840 | @property 841 | def session(self): 842 | return self._session 843 | 844 | def 
list(self): 845 | try: 846 | index = self.get_api_index() 847 | return sorted(index.keys()) 848 | except Exception as err: 849 | raise APIError(err) 850 | 851 | def __getitem__(self, api_name): 852 | return APIProxy(self, api_name) 853 | 854 | def __getattr__(self, api_name): 855 | return APIProxy(self, api_name) 856 | 857 | class HostedAPI(object): 858 | def __init__(self, api, on_device_token): 859 | self._api = api 860 | self._on_device_token = on_device_token 861 | self._lock = threading.Lock() 862 | self._next_refresh = 0 863 | self._api_key = None 864 | self._uses = 0 865 | self._expire = 0 866 | self._base_url = None 867 | self._session = requests.Session() 868 | self._session.headers.update({ 869 | 'User-Agent': 'hosted.py version/%s - on-device' % (VERSION,) 870 | }) 871 | 872 | def use_api_key(self): 873 | with self._lock: 874 | now = time.time() 875 | self._uses -= 1 876 | if self._uses <= 0: 877 | log('hosted API adhoc key used up') 878 | self._api_key = None 879 | elif now > self._expire: 880 | log('hosted API adhoc key expired') 881 | self._api_key = None 882 | else: 883 | log('hosted API adhoc key usage: %d uses, %ds left' %( 884 | self._uses, self._expire - now 885 | )) 886 | if self._api_key is None: 887 | if time.time() < self._next_refresh: 888 | return None 889 | log('refreshing hosted API adhoc key') 890 | self._next_refresh = time.time() + 15 891 | try: 892 | r = self._api['api_key'].get( 893 | params = dict( 894 | on_device_token = self._on_device_token 895 | ), 896 | timeout = 5, 897 | ) 898 | except: 899 | return None 900 | self._api_key = r['api_key'] 901 | self._uses = r['uses'] 902 | self._expire = now + r['expire'] - 1 903 | self._base_url = r['base_url'] 904 | return self._api_key 905 | 906 | def add_default_args(self, kwargs): 907 | if not 'timeout' in kwargs: 908 | kwargs['timeout'] = 10 909 | return kwargs 910 | 911 | def ensure_api_key(self, kwargs): 912 | api_key = self.use_api_key() 913 | if api_key is None: 914 | raise APIError('cannot retrieve API key') 915 | kwargs['auth'] = ('', api_key) 916 | 917 | def get(self, endpoint, **kwargs): 918 | try: 919 | self.ensure_api_key(kwargs) 920 | r = self._session.get( 921 | url = self._base_url + endpoint, 922 | **self.add_default_args(kwargs) 923 | ) 924 | r.raise_for_status() 925 | return r.json() 926 | except APIError: 927 | raise 928 | except Exception as err: 929 | raise APIError(err) 930 | 931 | def post(self, endpoint, **kwargs): 932 | try: 933 | self.ensure_api_key(kwargs) 934 | r = self._session.post( 935 | url = self._base_url + endpoint, 936 | **self.add_default_args(kwargs) 937 | ) 938 | r.raise_for_status() 939 | return r.json() 940 | except APIError: 941 | raise 942 | except Exception as err: 943 | raise APIError(err) 944 | 945 | def delete(self, endpoint, **kwargs): 946 | try: 947 | self.ensure_api_key(kwargs) 948 | r = self._session.delete( 949 | url = self._base_url + endpoint, 950 | **self.add_default_args(kwargs) 951 | ) 952 | r.raise_for_status() 953 | return r.json() 954 | except APIError: 955 | raise 956 | except Exception as err: 957 | raise APIError(err) 958 | 959 | class DeviceKV(object): 960 | def __init__(self, api): 961 | self._api = api 962 | self._cache = {} 963 | self._cache_complete = False 964 | self._use_cache = True 965 | 966 | def cache_enabled(self, enabled): 967 | self._use_cache = enabled 968 | self._cache = {} 969 | self._cache_complete = False 970 | 971 | def __setitem__(self, key, value): 972 | if self._use_cache: 973 | if key in self._cache and self._cache[key] == 
value: 974 | return 975 | self._api['kv'].post( 976 | data = { 977 | key: value 978 | } 979 | ) 980 | if self._use_cache: 981 | self._cache[key] = value 982 | 983 | def __getitem__(self, key): 984 | if self._use_cache: 985 | if key in self._cache: 986 | return self._cache[key] 987 | result = self._api['kv'].get( 988 | params = dict( 989 | keys = key, 990 | ), 991 | timeout = 5, 992 | )['v'] 993 | if key not in result: 994 | raise KeyError(key) 995 | value = result[key] 996 | if self._use_cache: 997 | self._cache[key] = value 998 | return value 999 | 1000 | # http api cannot reliably determine if a key has 1001 | # been deleted, so __delitem__ always succeeds and 1002 | # does not throw KeyError for missing keys. 1003 | def __delitem__(self, key): 1004 | if self._use_cache and self._cache_complete: 1005 | if key not in self._cache: 1006 | return 1007 | self._api['kv'].delete( 1008 | params = dict( 1009 | keys = key, 1010 | ), 1011 | timeout = 5, 1012 | ) 1013 | if self._use_cache and key in self._cache: 1014 | if key in self._cache: 1015 | del self._cache[key] 1016 | 1017 | def update(self, dct): 1018 | if self._use_cache: 1019 | for key, value in dct.items(): 1020 | if key in self._cache and self._cache[key] == value: 1021 | dct.pop(key) 1022 | if not dct: 1023 | return 1024 | self._api['kv'].post( 1025 | data = dct 1026 | ) 1027 | if self._use_cache: 1028 | for key, value in dct.iteritems(): 1029 | self._cache[key] = value 1030 | 1031 | def get(self, key, default=None): 1032 | try: 1033 | return self[key] 1034 | except KeyError: 1035 | return default 1036 | 1037 | def items(self): 1038 | if self._use_cache and self._cache_complete: 1039 | return self._cache.items() 1040 | result = self._api['kv'].get( 1041 | timeout = 5, 1042 | )['v'] 1043 | if self._use_cache: 1044 | for key, value in result.iteritems(): 1045 | self._cache[key] = value 1046 | self._cache_complete = True 1047 | return result.items() 1048 | 1049 | iteritems = items 1050 | 1051 | def clear(self): 1052 | self._api['kv'].delete() 1053 | if self._use_cache: 1054 | self._cache = {} 1055 | self._cache_complete = False 1056 | 1057 | class GPIO(object): 1058 | def __init__(self): 1059 | self._pin_fd = {} 1060 | self._state = {} 1061 | self._fd_2_pin = {} 1062 | self._poll = select.poll() 1063 | self._lock = threading.Lock() 1064 | 1065 | def setup_pin(self, pin, direction="in", invert=False): 1066 | if not os.path.exists("/sys/class/gpio/gpio%d" % pin): 1067 | with open("/sys/class/gpio/export", "wb") as f: 1068 | f.write(str(pin)) 1069 | # mdev is giving the newly create GPIO directory correct permissions. 
1070 | for i in range(10): 1071 | try: 1072 | with open("/sys/class/gpio/gpio%d/active_low" % pin, "wb") as f: 1073 | f.write("1" if invert else "0") 1074 | break 1075 | except IOError as err: 1076 | if err.errno != errno.EACCES: 1077 | raise 1078 | time.sleep(0.1) 1079 | log("waiting for GPIO permissions") 1080 | else: 1081 | raise IOError(errno.EACCES, "Cannot access GPIO") 1082 | with open("/sys/class/gpio/gpio%d/direction" % pin, "wb") as f: 1083 | f.write(direction) 1084 | 1085 | def set_pin_value(self, pin, high): 1086 | with open("/sys/class/gpio/gpio%d/value" % pin, "wb") as f: 1087 | f.write("1" if high else "0") 1088 | 1089 | def monitor(self, pin, invert=False): 1090 | if pin in self._pin_fd: 1091 | return 1092 | self.setup_pin(pin, direction="in", invert=invert) 1093 | with open("/sys/class/gpio/gpio%d/edge" % pin, "wb") as f: 1094 | f.write("both") 1095 | fd = os.open("/sys/class/gpio/gpio%d/value" % pin, os.O_RDONLY) 1096 | self._state[pin] = bool(int(os.read(fd, 5))) 1097 | self._fd_2_pin[fd] = pin 1098 | self._pin_fd[pin] = fd 1099 | self._poll.register(fd, select.POLLPRI | select.POLLERR) 1100 | 1101 | def poll(self, timeout=1000): 1102 | changes = [] 1103 | for fd, evt in self._poll.poll(timeout): 1104 | os.lseek(fd, 0, 0) 1105 | state = bool(int(os.read(fd, 5))) 1106 | pin = self._fd_2_pin[fd] 1107 | with self._lock: 1108 | prev_state, self._state[pin] = self._state[pin], state 1109 | if state != prev_state: 1110 | changes.append((pin, state)) 1111 | return changes 1112 | 1113 | def poll_forever(self): 1114 | while 1: 1115 | for event in self.poll(): 1116 | yield event 1117 | 1118 | def on(self, pin): 1119 | with self._lock: 1120 | return self._state.get(pin, False) 1121 | 1122 | class SyncerAPI(object): 1123 | def __init__(self): 1124 | self._session = requests.Session() 1125 | 1126 | def unwrap(self, r): 1127 | r.raise_for_status() 1128 | return r.json() 1129 | 1130 | def get(self, path, params={}): 1131 | return self.unwrap(self._session.get( 1132 | 'http://127.0.0.1:81%s' % path, 1133 | params=params, timeout=10 1134 | )) 1135 | 1136 | def post(self, path, data={}): 1137 | return self.unwrap(self._session.post( 1138 | 'http://127.0.0.1:81%s' % path, 1139 | data=data, timeout=10 1140 | )) 1141 | 1142 | class ProofOfPlay(object): 1143 | def __init__(self, api, dirname): 1144 | self._api = api 1145 | self._prefix = os.path.join(os.environ['SCRATCH'], dirname) 1146 | try: 1147 | os.makedirs(self._prefix) 1148 | except: 1149 | pass 1150 | 1151 | pop_info = self._api.pop.get() 1152 | 1153 | self._max_delay = pop_info['max_delay'] 1154 | self._max_lines = pop_info['max_lines'] 1155 | self._submission_min_delay = pop_info['submission']['min_delay'] 1156 | self._submission_error_delay = pop_info['submission']['error_delay'] 1157 | 1158 | self._q = Queue.Queue() 1159 | self._log = None 1160 | 1161 | thread = threading.Thread(target=self._submit_thread) 1162 | thread.daemon = True 1163 | thread.start() 1164 | 1165 | thread = threading.Thread(target=self._writer_thread) 1166 | thread.daemon = True 1167 | thread.start() 1168 | 1169 | def _submit(self, fname, queue_size): 1170 | with open(fname, 'rb') as f: 1171 | return self._api.pop.post( 1172 | timeout = 10, 1173 | data = { 1174 | 'queue_size': queue_size, 1175 | }, 1176 | files={ 1177 | 'pop-v1': f, 1178 | } 1179 | ) 1180 | 1181 | def _submit_thread(self): 1182 | time.sleep(3) 1183 | while 1: 1184 | delay = self._submission_min_delay 1185 | try: 1186 | log('[pop][submit] gathering files') 1187 | files = [ 1188 | fname for 
fname 1189 | in os.listdir(self._prefix) 1190 | if fname.startswith('submit-') 1191 | ] 1192 | log('[pop][submit] %d files' % len(files)) 1193 | for fname in files: 1194 | fullname = os.path.join(self._prefix, fname) 1195 | if os.stat(fullname).st_size == 0: 1196 | os.unlink(fullname) 1197 | continue 1198 | try: 1199 | log('[pop][submit] submitting %s' % fullname) 1200 | status = self._submit(fullname, len(files)) 1201 | if status['disabled']: 1202 | log('[pop][submit] WARNING: Proof of Play disabled for this device. Submission discarded') 1203 | else: 1204 | log('[pop][submit] success') 1205 | except APIError as err: 1206 | log('[pop][submit] failure to submit log %s: %s' % ( 1207 | fullname, err 1208 | )) 1209 | delay = self._submission_error_delay 1210 | break 1211 | os.unlink(fullname) 1212 | break 1213 | if not files: 1214 | delay = 10 1215 | except Exception as err: 1216 | log('[pop][submit] error: %s' % err) 1217 | log('[pop][submit] sleeping %ds' % delay) 1218 | time.sleep(delay) 1219 | 1220 | def reopen_log(self): 1221 | log_name = os.path.join(self._prefix, 'current.log') 1222 | if self._log is not None: 1223 | self._log.close() 1224 | self._log = None 1225 | if os.path.exists(log_name): 1226 | os.rename(log_name, os.path.join( 1227 | self._prefix, 'submit-%s.log' % os.urandom(16).encode('hex') 1228 | )) 1229 | self._log = open(log_name, 'wb') 1230 | return self._log 1231 | 1232 | def _writer_thread(self): 1233 | submit, log_file, lines = monotonic_time() + self._max_delay, self.reopen_log(), 0 1234 | while 1: 1235 | reopen = False 1236 | max_wait = max(0.1, submit - monotonic_time()) 1237 | log('[pop] got %d lines. waiting %ds for more log lines' % (lines, max_wait)) 1238 | try: 1239 | line = self._q.get(block=True, timeout=max_wait) 1240 | log_file.write(line + '\n') 1241 | log_file.flush() 1242 | os.fsync(log_file.fileno()) 1243 | lines += 1 1244 | log('[pop] line added: %r' % line) 1245 | except Queue.Empty: 1246 | if lines == 0: 1247 | submit += self._max_delay # extend deadline 1248 | else: 1249 | reopen = True 1250 | except Exception as err: 1251 | log("[pop] error writing pop log line") 1252 | if lines >= self._max_lines: 1253 | reopen = True 1254 | if reopen: 1255 | log('[pop] closing log of %d lines' % lines) 1256 | submit, log_file, lines = monotonic_time() + self._max_delay, self.reopen_log(), 0 1257 | 1258 | def log(self, play_start, duration, asset_id, asset_filename): 1259 | uuid = "%08x%s" % ( 1260 | time.time(), os.urandom(12).encode('hex') 1261 | ) 1262 | self._q.put(json.dumps([ 1263 | uuid, 1264 | play_start, 1265 | duration, 1266 | 0 if asset_id is None else asset_id, 1267 | asset_filename, 1268 | ], 1269 | ensure_ascii = False, 1270 | separators = (',',':'), 1271 | ).encode('utf8')) 1272 | 1273 | class Device(object): 1274 | def __init__(self, kv, api): 1275 | self._socket = None 1276 | self._gpio = GPIO() 1277 | self._kv = kv 1278 | self._api = api 1279 | 1280 | @property 1281 | def kv(self): 1282 | return self._kv 1283 | 1284 | @property 1285 | def gpio(self): 1286 | return self._gpio 1287 | 1288 | @property 1289 | def serial(self): 1290 | return os.environ['SERIAL'] 1291 | 1292 | @property 1293 | def screen_resolution(self): 1294 | with open("/sys/class/graphics/fb0/virtual_size", "rb") as f: 1295 | return [int(val) for val in f.read().strip().split(',')] 1296 | 1297 | @property 1298 | def screen_w(self): 1299 | return self.screen_resolution[0] 1300 | 1301 | @property 1302 | def screen_h(self): 1303 | return self.screen_resolution[1] 1304 | 1305 | 
@property 1306 | def syncer_api(self): 1307 | return SyncerAPI() 1308 | 1309 | def ensure_connected(self): 1310 | if self._socket: 1311 | return True 1312 | try: 1313 | log("establishing upstream connection") 1314 | self._socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) 1315 | self._socket.connect(os.getenv('SYNCER_SOCKET', "/tmp/syncer")) 1316 | return True 1317 | except Exception as err: 1318 | log("cannot connect to upstream socket: %s" % (err,)) 1319 | return False 1320 | 1321 | def send_raw(self, raw): 1322 | try: 1323 | if self.ensure_connected(): 1324 | self._socket.send(raw + '\n') 1325 | except Exception as err: 1326 | log("cannot send to upstream: %s" % (err,)) 1327 | if self._socket: 1328 | self._socket.close() 1329 | self._socket = None 1330 | 1331 | def send_upstream(self, **data): 1332 | self.send_raw(json.dumps(data)) 1333 | 1334 | def turn_screen_off(self): 1335 | self.send_raw("tv off") 1336 | 1337 | def turn_screen_on(self): 1338 | self.send_raw("tv on") 1339 | 1340 | def screen(self, on=True): 1341 | if on: 1342 | self.turn_screen_on() 1343 | else: 1344 | self.turn_screen_off() 1345 | 1346 | def reboot(self): 1347 | self.send_raw("system reboot") 1348 | 1349 | def halt_until_powercycled(self): 1350 | self.send_raw("system halt") 1351 | 1352 | def restart_infobeamer(self): 1353 | self.send_raw("infobeamer restart") 1354 | 1355 | def verify_cache(self): 1356 | self.send_raw("syncer verify_cache") 1357 | 1358 | def pop(self, dirname='pop'): 1359 | return ProofOfPlay(self._api, dirname) 1360 | 1361 | def hosted_api(self, on_device_token): 1362 | return HostedAPI(self._api, on_device_token) 1363 | 1364 | if __name__ == "__main__": 1365 | print("nothing to do here") 1366 | sys.exit(1) 1367 | else: 1368 | log("starting version %s" % (VERSION,)) 1369 | 1370 | node = NODE = Node(os.environ['NODE']) 1371 | config = CONFIG = Configuration() 1372 | api = API = OnDeviceAPIs(CONFIG) 1373 | device = DEVICE = Device( 1374 | kv = DeviceKV(api), 1375 | api = api, 1376 | ) 1377 | 1378 | setup_inotify(CONFIG) 1379 | log("ready to go!") 1380 | --------------------------------------------------------------------------------