├── backend
│   ├── backend
│   │   ├── __init__.py
│   │   ├── main.py
│   │   ├── utils.py
│   │   ├── app.py
│   │   └── restapi.py
│   ├── tests
│   │   ├── __init__.py
│   │   ├── conftest.py
│   │   ├── database_plugin.py
│   │   ├── test_all.py
│   │   └── uvicorn_asyncio_plugin.py
│   ├── requirements.txt
│   └── sql
│       ├── owm.sql
│       └── owm.rollback.sql
├── .gitignore
└── README.md

/backend/backend/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/backend/tests/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/backend/backend/main.py:
--------------------------------------------------------------------------------
import uvicorn

from backend.app import create_app

app = create_app()

if __name__ == "__main__":
    print("Run using 'gunicorn backend.main:app -w 4 -k uvicorn.workers.UvicornWorker' in production.")
    uvicorn.run(app, host="127.0.0.1", port=8000)
--------------------------------------------------------------------------------
/backend/requirements.txt:
--------------------------------------------------------------------------------
fastapi
uvicorn
gunicorn
uvloop
httptools
httpx
asyncpg
regex
pyyaml
python-dateutil
python-json-logger
pytz
pytest
pytest-cov
flake8
pytest-flake8
pytest-mypy
pytest-profiling
freezegun
async-timeout
yoyo-migrations
psycopg2
aiofiles
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
**/*$py.class
*/.coverage
**/.DS_Store
**/*.egg
**/*.egg-info/
**/.egg-info/
**/.eggs/
**/eggs/
**/.env
**/env/
**/.git
**/.installed.cfg
**/.mypy_cache/
pip-wheel-metadata/
*/prof/
**/__pycache__/
**/*.py[cod]
**/.venv
**/venv/
**/.vscode
**/.tmp/
**/.idea
--------------------------------------------------------------------------------
/backend/tests/conftest.py:
--------------------------------------------------------------------------------
import logging

import pytest

from backend.app import create_app

LOG = logging.getLogger(__name__)


pytest_plugins = [
    "tests.database_plugin",
    "tests.uvicorn_asyncio_plugin"
]


@pytest.fixture
def test_app(uvicorn_client, database, monkeypatch):
    app = create_app()
    yield uvicorn_client(app, config_args={"proxy_headers": False})
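
# `test_app` glues the two plugins below together: `database`
# (tests/database_plugin.py) migrates a clean schema and drops it again after
# the test, while `uvicorn_client` (tests/uvicorn_asyncio_plugin.py) boots the
# app on a free localhost port and hands back an httpx-based client.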
--------------------------------------------------------------------------------
/backend/backend/utils.py:
--------------------------------------------------------------------------------
import os

if os.getenv("OWM_DB_PWD", None) is None:
    raise Exception("OWM_DB_PWD not set")
DB_URL = f"postgresql://owmuser:{os.getenv('OWM_DB_PWD')}@localhost/owmdb"
DB_TIMEOUT = 5
NODE_DATA_DIR = "/var/opt/ffmapdata/"
if not os.path.isdir(NODE_DATA_DIR):
    NODE_DATA_DIR = "/dev/shm/ffmapdata/"
    os.makedirs(NODE_DATA_DIR, exist_ok=True)
    print("******************** DEVELOPMENT MODE! Using temp dir for storing node data.")
--------------------------------------------------------------------------------
/backend/sql/owm.sql:
--------------------------------------------------------------------------------
CREATE EXTENSION IF NOT EXISTS earthdistance CASCADE;

CREATE TABLE nodes (
    id VARCHAR PRIMARY KEY,
    hostname VARCHAR NOT NULL,
    lat double precision NOT NULL,
    lng double precision NOT NULL,
    links VARCHAR NOT NULL,
    ctime TIMESTAMP WITHOUT TIME ZONE DEFAULT (NOW() AT TIME ZONE 'utc'),
    mtime TIMESTAMP WITHOUT TIME ZONE DEFAULT (NOW() AT TIME ZONE 'utc')
);

CREATE INDEX idx_nodes_id ON nodes (id);
CREATE INDEX idx_nodes_lat ON nodes (lat);
CREATE INDEX idx_nodes_lng ON nodes (lng);
CREATE INDEX idx_nodes_mtime ON nodes (mtime);
CREATE INDEX idx_nodes_location ON nodes USING gist (ll_to_earth(lat, lng));
--------------------------------------------------------------------------------
/backend/sql/owm.rollback.sql:
--------------------------------------------------------------------------------
DO 'DECLARE r RECORD;
BEGIN
    FOR r IN (SELECT tablename FROM pg_tables WHERE schemaname = current_schema() AND tablename NOT LIKE $$%yoyo_%$$) LOOP
        EXECUTE $$DROP TABLE IF EXISTS $$ || quote_ident(r.tablename) || $$ CASCADE$$;
    END LOOP;
    FOR r IN (SELECT n.nspname AS "schema", t.typname
              FROM pg_catalog.pg_type t
              JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
              JOIN pg_catalog.pg_enum e ON t.oid = e.enumtypid
              WHERE n.nspname = $$public$$
              GROUP BY 1, 2) LOOP
        EXECUTE $$DROP TYPE IF EXISTS $$ || quote_ident(r.typname) || $$ CASCADE$$;
    END LOOP;
END';
--------------------------------------------------------------------------------
/backend/backend/app.py:
--------------------------------------------------------------------------------
import logging
import os

from asyncpg import create_pool
from starlette.middleware.cors import CORSMiddleware
from starlette.requests import Request
from starlette.responses import Response
from yoyo import get_backend, read_migrations

from fastapi import FastAPI

from backend import restapi
from backend.utils import DB_URL

LOG = logging.getLogger(__name__)


def create_app():
    app = FastAPI(title="OWM Backend")

    async def startup():
        database_url = DB_URL
        backend = get_backend(database_url)
        # __file__ is backend/backend/app.py, so this resolves to backend/sql
        sqlfolder = os.path.abspath(os.path.join(__file__, '..', '..', "sql"))
        migrations = read_migrations(sqlfolder)
        with backend.lock():
            backend.apply_migrations(backend.to_apply(migrations))
        app.extra["db"] = await create_pool(dsn=database_url, max_size=25)

    async def shutdown():
        await app.extra["db"].close()

    app.add_middleware(
        CORSMiddleware,
        allow_origins=["https://openwifimap.net", "http://localhost:3000"],
        allow_headers=["*"],
        allow_methods=["*"]
    )

    app.add_event_handler('startup', startup)
    app.add_event_handler('shutdown', shutdown)

    app.include_router(restapi.router, prefix="")

    @app.exception_handler(Exception)
    async def custom_exception_handler(request: Request, exception: Exception):
        LOG.exception("uncaught exception")
        return Response(status_code=500)

    return app
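
# `app.extra` serves as a minimal application registry here: startup() stashes
# the asyncpg pool in it, and the `pool` dependency in backend/restapi.py
# fetches it back via request.app.extra["db"].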
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# openwifimap-api (Python)
OpenWiFiMap is a database and map for free network WiFi routers (freifunk and others, too!).

This is the database/backend part of openwifimap.
Make sure to also take a look at its web frontend, the [openwifimap HTML5 app](https://github.com/freifunk/openwifimap-html5).

The original backend was written in JavaScript using CouchDB.
Since maintaining that was problematic, it was rewritten in 2020/2021 to use Python/FastAPI/PostgreSQL.

# API
See the Swagger API docs at `/docs` on a running backend (at [api.openwifimap.net/docs](https://api.openwifimap.net/docs), for example).

The somewhat more verbose old API doc can be found in the [old API.md](https://github.com/freifunk/openwifimap-api/blob/f9001452f4f4a72c4dbd59dd736436b6c5733775/API.md).

# License
openwifimap is licensed under the [MIT license](http://opensource.org/licenses/MIT).

# Development info
The backend basically keeps a list of JSON documents on disk which can be queried and updated via a web API.
The database is used as a search index only.
PostgreSQL is total overkill for this but ¯\_(ツ)_/¯

The interesting part of the code is in [restapi.py](/backend/backend/restapi.py).

In case you wonder: the endpoint definitions are a bit involved (BaseModel, response_model, Field, Query, ...) because FastAPI generates the nice Swagger API docs from them.

## Dev notes
* `sudo docker run --name owm_psql -p 127.0.0.1:5432:5432 -e POSTGRES_PASSWORD="$OWM_DB_PWD" -e POSTGRES_USER=owmuser -e POSTGRES_DB=owmdb -d postgres:latest`
* PostgreSQL earthdistance extension: https://postindustria.com/postgresql-geo-queries-made-easy/ (not used yet)
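
## Example requests
A quick smoke test against a local instance (assuming the dev server from `backend/backend/main.py` is listening on `127.0.0.1:8000`; the sample node mirrors what `backend/tests/test_all.py` sends):

```sh
curl -X PUT http://127.0.0.1:8000/update_node/test_node.olsr \
  -H 'Content-Type: application/json' \
  -d '{"type": "node", "hostname": "test_node", "latitude": 52.520791, "longitude": 13.40951, "links": [{"id": "other_node.olsr", "quality": 1}]}'
curl 'http://127.0.0.1:8000/view_nodes_spatial?bbox=12.9,52.27,14.12,52.7'
curl http://127.0.0.1:8000/db/test_node.olsr
```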
tables") 54 | loop.run_until_complete(drop_all(pool)) 55 | loop.run_until_complete(pool.close()) 56 | -------------------------------------------------------------------------------- /backend/tests/test_all.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | 4 | from backend.utils import NODE_DATA_DIR 5 | 6 | LOG = logging.getLogger(__name__) 7 | 8 | 9 | async def test_all( 10 | test_app, database 11 | ): 12 | LOG.info("**** PUTing node info") 13 | data = { 14 | "type": "node", 15 | "hostname": "test_node", 16 | "latitude": 52.520791, 17 | "longitude": 13.40951, 18 | "links": [{"id": "other_node.olsr", "quality": 1}] 19 | } 20 | resp = await test_app.put("/update_node/test_node.olsr", json=data) 21 | assert resp.status_code == 200 22 | 23 | LOG.info("**** PUTing node info again") 24 | resp = await test_app.put("/update_node/test_node.olsr", json=data) 25 | assert resp.status_code == 200 26 | 27 | LOG.info("**** Testing view_nodes_spatial") 28 | resp = await test_app.get("/view_nodes_spatial?bbox=12.9,52.27,14.12,52.7") 29 | assert resp.status_code == 200 30 | assert len(resp.json()["rows"]) == 1 31 | assert len(resp.json()["rows"][0]["value"]["links"]) == 1 32 | 33 | LOG.info("**** Testing view_nodes_spatial, count only") 34 | resp = await test_app.get("/view_nodes_spatial?bbox=12.9,52.27,14.12,52.7&count=true") 35 | assert resp.status_code == 200 36 | assert resp.json()["count"] == 1 37 | 38 | LOG.info("**** Testing view_nodes") 39 | resp = await test_app.post("/view_nodes", json={"keys": ["test_node.olsr", "unknown.olsr"]}) 40 | assert resp.status_code == 200 41 | assert resp.json()["rows"][0]["id"] == "test_node.olsr" 42 | 43 | LOG.info("**** Testing view_nodes_coarse") 44 | resp = await test_app.post("/view_nodes_coarse", json={"keys": [[8, 137, 83]]}) 45 | assert resp.status_code == 200 46 | assert len(resp.json()["rows"]) == 1 47 | 48 | LOG.info("**** Testing getting detailed node info") 49 | resp = await test_app.get("/db/test_node.olsr") 50 | assert resp.status_code == 200 51 | assert resp.json()["hostname"] == "test_node" 52 | 53 | LOG.info("**** Testing removing node data") 54 | with open(f"{NODE_DATA_DIR}/test_node%2Eolsr.json", "r") as f: 55 | data_str = f.read() 56 | del data["latitude"] 57 | del data["longitude"] 58 | resp = await test_app.put("/update_node/test_node.olsr", json=data) 59 | assert resp.status_code == 200 60 | 61 | LOG.info("**** Testing node is gone") 62 | resp = await test_app.get("/view_nodes_spatial?bbox=12.9,52.27,14.12,52.7") 63 | assert resp.status_code == 200 64 | assert len(resp.json()["rows"]) == 0 65 | 66 | LOG.info("**** Testing syncing db from disk") 67 | with open(f"{NODE_DATA_DIR}/test_node%2Eolsr.json", "w") as f: 68 | f.write(data_str) 69 | resp = await test_app.post("/sync_db_from_disk") 70 | assert resp.json()["total_nodes"] > 0 71 | 72 | LOG.info("**** Testing view_nodes_spatial, count only") 73 | resp = await test_app.get("/view_nodes_spatial?bbox=12.9,52.27,14.12,52.7&count=true") 74 | assert resp.status_code == 200 75 | assert resp.json()["count"] > 0 76 | -------------------------------------------------------------------------------- /backend/tests/uvicorn_asyncio_plugin.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import os 3 | import sys 4 | import logging 5 | import gc 6 | import contextlib 7 | import warnings 8 | import socket 9 | from typing import Optional, Dict, Any 10 | import uvloop 11 | from 
--------------------------------------------------------------------------------
/backend/tests/uvicorn_asyncio_plugin.py:
--------------------------------------------------------------------------------
import asyncio
import os
import sys
import logging
import gc
import contextlib
import warnings
import socket
from typing import Optional, Dict, Any
import uvloop
from pytest import fixture
import uvicorn
import httpx

# prevent asyncio debug logs
asyncio_logger = logging.getLogger("asyncio")
asyncio_logger.setLevel(logging.INFO)

# pytest async helper code is partially taken from
# https://github.com/aio-libs/aiohttp/blob/master/aiohttp/pytest_plugin.py
# Copyright 2013-2020 aio-libs collaboration.
# Licensed under the Apache License, Version 2.0


def pytest_addoption(parser):  # type: ignore
    parser.addoption(
        '--disable-extra-checks', action='store_true', default=False,
        help='run tests faster by disabling extra checks')
    parser.addoption(
        '--enable-loop-debug', action='store_true', default=False,
        help='enable event loop debug mode')


def teardown_test_loop(loop, fast=False):
    closed = loop.is_closed()
    if not closed:
        pending = asyncio.all_tasks(loop)
        loop.run_until_complete(asyncio.gather(*pending))
        loop.call_soon(loop.stop)
        loop.run_forever()
        loop.close()
    if not fast:
        gc.collect()
    asyncio.set_event_loop(None)


@contextlib.contextmanager
def _passthrough_loop_context(loop, fast=False):  # type: ignore
    """
    Sets up and tears down a loop unless one is passed in via the `loop`
    argument, in which case it is passed straight through.
    """
    if loop:
        # loop already exists, pass it straight through
        yield loop
    else:
        # this shadows loop_context's standard behavior
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        yield loop
        teardown_test_loop(loop, fast=fast)


@contextlib.contextmanager
def _runtime_warning_context():  # type: ignore
    """
    Context manager which checks for RuntimeWarnings, specifically to
    avoid "coroutine 'X' was never awaited" warnings being missed.
    If RuntimeWarnings occur in the context a RuntimeError is raised.
    """
    with warnings.catch_warnings(record=True) as _warnings:
        yield
        rw = ['{w.filename}:{w.lineno}:{w.message}'.format(w=w)
              for w in _warnings
              if w.category == RuntimeWarning]
        if rw:
            raise RuntimeError('{} Runtime Warning{},\n{}'.format(
                len(rw),
                '' if len(rw) == 1 else 's',
                '\n'.join(rw)
            ))
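

# pytest_pyfunc_call is a standard pytest hook. Returning True tells pytest
# that the test call has been handled here, which lets plain `async def` tests
# run on our uvloop event loop without needing pytest-asyncio.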
87 | """ 88 | fast = pyfuncitem.config.getoption("--disable-extra-checks") 89 | if asyncio.iscoroutinefunction(pyfuncitem.function): 90 | existing_loop = pyfuncitem.funcargs.get('loop', None) 91 | with _runtime_warning_context(): 92 | with _passthrough_loop_context(existing_loop, fast=fast) as _loop: 93 | testargs = {arg: pyfuncitem.funcargs[arg] 94 | for arg in pyfuncitem._fixtureinfo.argnames} 95 | _loop.run_until_complete(pyfuncitem.obj(**testargs)) 96 | return True 97 | 98 | 99 | @fixture(scope="session") 100 | def fast(request): # type: ignore 101 | """--fast config option""" 102 | return request.config.getoption('--disable-extra-checks') 103 | 104 | 105 | @fixture(scope="session") 106 | def loop_debug(request): # type: ignore 107 | """--enable-loop-debug config option""" 108 | return request.config.getoption('--enable-loop-debug') 109 | 110 | 111 | @fixture(scope="session") 112 | def loop(fast, loop_debug): 113 | policy = uvloop.EventLoopPolicy() 114 | asyncio.set_event_loop_policy(policy) 115 | _loop = asyncio.new_event_loop() 116 | if loop_debug: 117 | _loop.set_debug(True) # pragma: no cover 118 | asyncio.set_event_loop(_loop) 119 | yield _loop 120 | teardown_test_loop(_loop, fast=fast) 121 | 122 | 123 | class TestClient: 124 | def __init__(self, host, port): 125 | self.base_url = f'http://{host}:{port}' 126 | self.client = httpx.AsyncClient() 127 | 128 | def get(self, url, *args, **kwargs): 129 | url = f'{self.base_url}{url}' 130 | return self.client.get(url, *args, **kwargs) 131 | 132 | def post(self, url, *args, **kwargs): 133 | url = f'{self.base_url}{url}' 134 | return self.client.post(url, *args, **kwargs) 135 | 136 | def put(self, url, *args, **kwargs): 137 | url = f'{self.base_url}{url}' 138 | return self.client.put(url, *args, **kwargs) 139 | 140 | def delete(self, url, *args, **kwargs): 141 | url = f'{self.base_url}{url}' 142 | return self.client.delete(url, *args, **kwargs) 143 | 144 | def options(self, url, *args, **kwargs): 145 | url = f'{self.base_url}{url}' 146 | return self.client.options(url, *args, **kwargs) 147 | 148 | 149 | @fixture 150 | def uvicorn_client(loop): 151 | servers = [] 152 | 153 | async def _init_server(config): 154 | server = uvicorn.Server(config=config) 155 | config.load() 156 | server.lifespan = config.lifespan_class(config) 157 | await server.startup() 158 | return server 159 | 160 | def create_client(app, config_args: Optional[Dict[str,Any]] = None): 161 | """ 162 | Run server for app on an unused port and return client for testing. 
163 | """ 164 | sock = get_port_socket('127.0.0.1', 0) 165 | host, port = sock.getsockname()[:2] 166 | if config_args is None: 167 | config_args = {} 168 | config = uvicorn.Config(app, port=port, log_config={ 169 | "version": 1, 170 | "disable_existing_loggers": False 171 | }, **config_args) 172 | server = loop.run_until_complete(_init_server(config)) 173 | assert not server.should_exit, "Failed to start server process" 174 | task = loop.create_task(server.main_loop()) 175 | client = TestClient(host, port) 176 | servers.append((client, server, task)) 177 | return client 178 | 179 | yield create_client 180 | 181 | async def _shutdown(client, server, task): 182 | await client.client.aclose() 183 | server.should_exit = True 184 | await task 185 | await server.shutdown() 186 | 187 | for (client, server, task) in servers: 188 | loop.create_task(_shutdown(client, server, task)) 189 | 190 | 191 | REUSE_ADDRESS = os.name == 'posix' and sys.platform != 'cygwin' 192 | 193 | 194 | def get_port_socket(host: str, port: int) -> socket.socket: 195 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 196 | if REUSE_ADDRESS: 197 | s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 198 | s.bind((host, port)) 199 | return s 200 | -------------------------------------------------------------------------------- /backend/backend/restapi.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import json 3 | import logging 4 | import math 5 | import os 6 | import re 7 | import urllib.parse 8 | from typing import List 9 | 10 | import asyncpg 11 | from asyncpg.pool import Pool 12 | from datetime import datetime, timedelta 13 | import dateutil.parser 14 | from fastapi import APIRouter, Depends, Query 15 | from fastapi.responses import FileResponse 16 | from pydantic import BaseModel, Field 17 | from starlette.requests import Request 18 | from starlette.responses import JSONResponse, Response 19 | 20 | from backend.utils import DB_TIMEOUT, NODE_DATA_DIR 21 | 22 | LOG = logging.getLogger(__name__) 23 | 24 | router = APIRouter() 25 | 26 | 27 | async def pool(request: Request) -> asyncpg.pool.Pool: 28 | return request.app.extra["db"] 29 | 30 | 31 | @router.put("/update_node/{node_id}", status_code=200) 32 | async def update_node( 33 | node_id: str, # with ".olsr" 34 | request: Request, 35 | response: Response, 36 | pool: Pool = Depends(pool) 37 | ): 38 | try: 39 | r_json = await request.json() 40 | except Exception as e: 41 | return Response(status_code=400, content=f"Error parsing JSON: {str(e)}") 42 | 43 | if len(node_id) > 50: 44 | return Response(status_code=400, content="node_id too long") 45 | 46 | # make sure hostname, id, _id, ctime, mtime are in JSON file 47 | if "hostname" not in r_json: 48 | return Response(status_code=400, content="hostname missing in JSON") 49 | r_json["id"] = node_id 50 | r_json["_id"] = node_id 51 | f_path = f"{NODE_DATA_DIR}/{safe_file_name_from_node_id(node_id)}.json" 52 | if os.path.isfile(f_path): 53 | with open(f_path, 'r') as json_file: 54 | data = json_file.read() 55 | r_json["ctime"] = json.loads(data)["ctime"] 56 | else: 57 | r_json["ctime"] = datetime.utcnow().isoformat() + "Z" 58 | r_json["mtime"] = datetime.utcnow().isoformat() + "Z" 59 | 60 | await upsert_node_in_db(node_id, r_json["hostname"], 61 | r_json.get("latitude", None), r_json.get("longitude", None), 62 | build_node_link_list_short(r_json), 63 | r_json["ctime"], r_json["mtime"], pool) 64 | 65 | with open(f"{f_path}.new", "w") as json_file: 66 | data_s = 
    with open(f"{f_path}.new", "w") as json_file:
        data_s = json.dumps(r_json, indent=4)
        data_s = re.sub(r'\"mac\": \"..:..:XX:XX:..:..\"', '\"mac\": \"redacted\"', data_s)
        json_file.write(data_s)
    os.replace(f"{f_path}.new", f_path)

    return Response(status_code=200, content="OK")


@router.get("/view_nodes_spatial", status_code=200)
async def view_nodes_spatial(
    bbox: str = Query(..., description="Bounding box ('minLng,minLat,maxLng,maxLat')"),
    count: bool = Query(False, description="If true, return the node count only instead of a list containing node data"),
    pool: Pool = Depends(pool)
):
    try:
        b_coords = [float(x) for x in bbox.split(",")]
        assert len(b_coords) == 4
    except Exception as e:
        return Response(status_code=400, content=f"Error parsing bbox: {str(e)}")

    if not count:
        nodes = await pool.fetch(
            """
            SELECT * FROM nodes
            WHERE lng >= $1 AND lat >= $2 AND lng <= $3 AND lat <= $4 AND mtime > $5
            """,
            b_coords[0],
            b_coords[1],
            b_coords[2],
            b_coords[3],
            datetime.utcnow() - timedelta(days=7),
            timeout=DB_TIMEOUT
        )
        data = {"rows": [get_node_data_from_node_row(node) for node in nodes]}
        return JSONResponse(status_code=200, content=data)
    else:
        node_count = await pool.fetchval(
            """
            SELECT count(id) FROM nodes
            WHERE lng >= $1 AND lat >= $2 AND lng <= $3 AND lat <= $4 AND mtime > $5
            """,
            b_coords[0],
            b_coords[1],
            b_coords[2],
            b_coords[3],
            datetime.utcnow() - timedelta(days=7),
            timeout=DB_TIMEOUT
        )
        data = {"count": int(node_count)}
        return JSONResponse(status_code=200, content=data)


class ViewNodesRequestData(BaseModel):
    keys: List[str] = Field(..., description="List of node IDs to return data for")


class ViewNodeInfoData(BaseModel):
    hostname: str = Field(..., description="Node hostname")
    ctime: str = Field(..., description="Creation time (ISO format)")
    mtime: str = Field(..., description="Last modified time (ISO format)")
    id: str = Field(..., description="Node ID")
    links: List[dict] = Field(..., description="Short link list ('id' and 'quality' per link)")
    latlng: List[float] = Field(..., description="Latitude/longitude of node")


class ViewNodeResponseData(BaseModel):
    id: str = Field(..., description="Node ID")
    key: str = Field(..., description="Node ID (Couch legacy)")
    value: ViewNodeInfoData


class ViewNodesResponseData(BaseModel):
    rows: List[ViewNodeResponseData]


@router.post("/view_nodes", response_model=ViewNodesResponseData, status_code=200)
async def view_nodes(
    view_nodes_data: ViewNodesRequestData,
    pool: Pool = Depends(pool)
):
    data = []
    for node_id in view_nodes_data.keys:
        node = await pool.fetchrow(
            """
            SELECT * FROM nodes
            WHERE id = $1
            """,
            node_id,
            timeout=DB_TIMEOUT
        )
        if node is not None:
            node_data = get_node_data_from_node_row(node)
            data.append(node_data)
    return JSONResponse(status_code=200, content={"rows": data})


def get_node_data_from_node_row(node):
    node_data = {
        "id": node["id"],
        "key": node["id"],
        "value": {
            "hostname": node["hostname"],
            "ctime": node["ctime"].isoformat() + "Z",
            "mtime": node["mtime"].isoformat() + "Z",
            "id": node["id"],
            "links": json.loads(node["links"]),
            "latlng": [node["lat"], node["lng"]]
        }
    }
    return node_data
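

# Worked example for the coarse view below, using num2deg() with the tile key
# [8, 137, 83] from tests/test_all.py (values rounded): the tile spans roughly
# lng 12.66..14.06 and lat 52.48..53.33, so it contains the Berlin test node
# at (52.520791, 13.40951).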
class ViewNodesCoarseRequestData(BaseModel):
    keys: List[List[int]] = Field(..., description="List of [zoom, x, y] Slippy Map tile IDs to return node counts for")


class VncCoarseTileResponseData(BaseModel):
    key: List[int] = Field(..., description="Slippy Map tile ID")
    value: int = Field(..., description="Number of active nodes in this tile")


class ViewNodesCoarseResponseData(BaseModel):
    rows: List[VncCoarseTileResponseData]


@router.post("/view_nodes_coarse", response_model=ViewNodesCoarseResponseData, status_code=200)
async def view_nodes_coarse(
    view_nodes_coarse_data: ViewNodesCoarseRequestData,
    pool: Pool = Depends(pool)
):
    def num2deg(zoom, x_tile, y_tile):
        # Slippy Map tile number -> lat/lng of the tile's NW corner
        n = 2.0 ** zoom
        lon_deg = x_tile / n * 360.0 - 180.0
        lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * y_tile / n)))
        lat_deg = math.degrees(lat_rad)
        return lat_deg, lon_deg

    data = []
    for key in view_nodes_coarse_data.keys:
        if len(key) != 3:
            return Response(status_code=400, content="Invalid Slippy Map tile ID")
        nw = num2deg(key[0], key[1], key[2])
        se = num2deg(key[0], key[1] + 1, key[2] + 1)
        node_count = await pool.fetchval(
            """
            SELECT count(id) FROM nodes
            WHERE lng >= $1 AND lng <= $2 AND lat >= $3 AND lat <= $4 AND mtime > $5
            """,
            nw[1],
            se[1],
            se[0],
            nw[0],
            datetime.utcnow() - timedelta(days=7),
            timeout=DB_TIMEOUT
        )
        if node_count > 0:
            data.append({"key": key, "value": node_count})

    return JSONResponse(status_code=200, content={"rows": data})


@router.get("/db/{node_id}", status_code=200)
async def get_node_by_id(
    node_id: str
):
    f_path = f"{NODE_DATA_DIR}/{safe_file_name_from_node_id(node_id)}.json"
    return FileResponse(f_path)


@router.post("/sync_db_from_disk", status_code=200)
async def sync_db_from_disk(
    request: Request,
    pool: Pool = Depends(pool)
):
    if request.client.host != "127.0.0.1":
        return Response(status_code=403, content="Only localhost is allowed to do this")
    q_total_nodes = 0
    q_nodes_with_latlng = 0
    errors = []
    async with pool.acquire() as connection:
        async with connection.transaction():
            await connection.execute("TRUNCATE nodes")
            node_files = glob.glob(f"{NODE_DATA_DIR}/*.json")
            for node_file in node_files:
                try:
                    with open(node_file, 'r') as json_file:
                        data = json_file.read()
                    r_json = json.loads(data)
                    n_lat = r_json.get("latitude", None)
                    n_lng = r_json.get("longitude", None)
                    q_total_nodes += 1
                    if n_lat is not None and n_lng is not None:
                        q_nodes_with_latlng += 1
                    await upsert_node_in_db(r_json["id"], r_json["hostname"], n_lat, n_lng,
                                            build_node_link_list_short(r_json),
                                            r_json["ctime"], r_json["mtime"], connection)
                except Exception as e:
                    LOG.exception(f"Exception reading {node_file}")
                    errors.append({"node_file": node_file, "Exception": str(e)})
    return JSONResponse(
        status_code=200,
        content={"total_nodes": q_total_nodes, "nodes_with_latlng": q_nodes_with_latlng, "errors": errors}
    )


def safe_file_name_from_node_id(node_id: str) -> str:
    return urllib.parse.quote_plus(node_id).replace(".", "%2E")
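# Example (matches the escaped file name used in tests/test_all.py):
#   safe_file_name_from_node_id("test_node.olsr") -> "test_node%2Eolsr"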


async def upsert_node_in_db(node_id: str, hostname: str, lat: float, lng: float, links,
                            c_time: str, m_time: str, pool):
    # strip the trailing "Z" before parsing; timestamps are stored as naive UTC
    c_time_dt = dateutil.parser.parse(c_time[:-1])
    m_time_dt = dateutil.parser.parse(m_time[:-1])
    if lat is not None and lng is not None:
        assert await pool.execute(
            """
            INSERT INTO nodes (id, lat, lng, hostname, links, ctime, mtime)
            VALUES ($1, $2, $3, $4, $5, $6, $7)
            ON CONFLICT (id) DO
                UPDATE SET lat = $2, lng = $3, hostname = $4, links = $5, ctime = $6, mtime = $7;
            """,
            node_id,
            lat,
            lng,
            hostname,
            json.dumps(links),
            c_time_dt,
            m_time_dt,
            timeout=DB_TIMEOUT
        ) == "INSERT 0 1"
    else:
        # a node without coordinates cannot be shown on the map, so drop it
        # from the search index (the JSON document on disk is kept)
        await pool.execute(
            """
            DELETE FROM nodes
            WHERE id = $1
            """,
            node_id
        )


def build_node_link_list_short(r_json):
    links = []
    for link in r_json.get("links", []):
        if "id" in link and "quality" in link:
            links.append({"id": link["id"], "quality": link["quality"]})
    return links
--------------------------------------------------------------------------------