├── .envrc ├── .github └── FUNDING.yml ├── .gitignore ├── .ruff.toml ├── LICENSE.md ├── README.md ├── biome.jsonc ├── config ├── .env.sample ├── cloudflare.ini.sample └── rclone.conf.sample ├── docs ├── _not_used │ ├── ext4-ideas.txt │ ├── extract_ext4.sh │ ├── fs_stats │ │ ├── README.md │ │ ├── btrfs_after_delete.txt │ │ ├── btrfs_before_delete.txt │ │ ├── btrfs_not_used.txt │ │ ├── dedupl_fixed.log │ │ ├── ext4.md │ │ ├── naive_mbutil_ext4.md │ │ └── planet_version │ ├── kernel-ideas.txt │ ├── loop_test.sh │ └── nginx-ideas.txt ├── assets │ ├── name-maputnik-details.png │ ├── name-maputnik-view.png │ └── name-osm-search.png ├── benchmark │ ├── README.md │ ├── nginx_to_path_list.py │ ├── results.md │ └── wrk_custom_list.lua ├── debugging_names.md ├── dev_setup.md └── self_hosting.md ├── init-server.py ├── lint.sh ├── modules ├── debug_proxy │ ├── .gitignore │ ├── package.json │ ├── pnpm-lock.yaml │ ├── src │ │ └── index.js │ └── wrangler.toml ├── http_host │ ├── cron.d │ │ ├── ofm_http_host │ │ └── ofm_roundrobin_reader │ ├── http_host.py │ ├── http_host_lib │ │ ├── __init__.py │ │ ├── assets.py │ │ ├── btrfs.py │ │ ├── config.py │ │ ├── mount.py │ │ ├── nginx.py │ │ ├── nginx_confs │ │ │ ├── le.conf │ │ │ ├── location_static.conf │ │ │ └── roundrobin.conf │ │ ├── shared.py │ │ ├── sync.py │ │ ├── utils.py │ │ └── versions.py │ ├── scripts │ │ └── metadata_to_tilejson.py │ └── setup.py ├── loadbalancer │ ├── cron.d │ │ └── ofm_loadbalancer │ ├── loadbalancer.py │ ├── loadbalancer_lib │ │ ├── __init__.py │ │ ├── cloudflare.py │ │ ├── config.py │ │ ├── loadbalance.py │ │ ├── shared.py │ │ └── telegram_.py │ └── setup.py ├── prepare-virtualenv.sh ├── roundrobin │ └── rclone_write.sh └── tile_gen │ ├── cron.d │ └── ofm_tile_gen │ ├── scripts │ ├── README.md │ ├── extract_mbtiles.py │ └── shrink_btrfs.py │ ├── setup.py │ ├── tile_gen.py │ └── tile_gen_lib │ ├── __init__.py │ ├── btrfs.py │ ├── config.py │ ├── planetiler.py │ ├── rclone.py │ ├── set_version.py │ ├── 
shared.py │ └── utils.py ├── package.json ├── prepare-virtualenv.sh ├── setup.py ├── ssh_lib ├── __init__.py ├── assets │ └── nginx │ │ ├── cloudflare.conf │ │ ├── default_disable.conf │ │ ├── mime.types │ │ └── nginx.conf ├── benchmark.py ├── kernel.py ├── nginx.py ├── pkg_base.py ├── planetiler.py ├── rclone.py ├── tasks.py └── utils.py └── website ├── .gitignore ├── README.md ├── astro.config.mjs ├── package.json ├── pnpm-lock.yaml ├── public ├── berlin.webp ├── bsky.svg ├── favicon.ico ├── github.svg ├── logo.jpg ├── robots.txt ├── scripts │ └── map.js └── x.svg ├── src ├── components │ ├── Donate.astro │ ├── Logo.astro │ ├── Map.astro │ └── StyleUrlBug.astro ├── content │ ├── donate │ │ ├── pro.md │ │ └── sponsor.md │ ├── how_to_use │ │ ├── custom_styles.md │ │ ├── leaflet.md │ │ ├── mapbox.md │ │ ├── maplibre.md │ │ ├── mobile.md │ │ ├── openlayers.md │ │ └── self_hosting.md │ └── index │ │ ├── after_donate.md │ │ ├── before_donate.md │ │ └── whatis.md ├── env.d.ts ├── examples │ └── cluster.html ├── layouts │ └── Layout.astro ├── pages │ ├── 404.astro │ ├── index.astro │ ├── privacy.md │ ├── quick_start.astro │ └── tos.md └── styles │ ├── _style.css │ ├── global.css │ ├── map.css │ └── reset.css └── tsconfig.json /.envrc: -------------------------------------------------------------------------------- 1 | # used by direnv to 2 | # auto-activate python virtualenv 3 | # https://github.com/direnv/direnv 4 | 5 | source .venv/bin/activate 6 | 7 | unset PS1 8 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [hyperknot] 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.egg-info 3 | 4 | *.sqlite 5 | *.mbtiles 6 | *.pbf 7 | 8 
| rclone.conf 9 | .env 10 | .DS_Store 11 | 12 | node_modules 13 | tmp 14 | temp 15 | _temp 16 | 17 | /.idea 18 | venv 19 | 20 | 21 | /pnpm-lock.yaml 22 | 23 | /deploy-*.sh 24 | -------------------------------------------------------------------------------- /.ruff.toml: -------------------------------------------------------------------------------- 1 | target-version = "py310" 2 | line-length = 100 3 | extend-exclude = ["temp"] 4 | 5 | 6 | lint.select = [ 7 | "E", # pycodestyle errors 8 | "W", # pycodestyle warnings 9 | "F", # pyflakes 10 | "I", # isort 11 | 'UP', # pyupgrade 12 | 'A', # flake8-builtins 13 | "C4", # flake8-comprehensions 14 | 'EXE', # flake8-executable 15 | 'FA', # flake8-future-annotations 16 | 'PT', # flake8-pytest-style 17 | 'RSE', # flake8-raise 18 | 'SIM', # flake8-simplify 19 | 'DTZ', # flake8-datetimez, https://beta.ruff.rs/docs/rules/#flake8-datetimez-dtz 20 | ] 21 | 22 | lint.ignore = [ 23 | 'A003', 24 | 'E501', 25 | 'E711', 26 | 'E712', 27 | # 'E721', # type comparison 28 | 'E741', 29 | 'F401', # unused imports 30 | 'F841', 31 | 'SIM102', 32 | #'SIM103', # needless-bool, return the condition {condition} directly 33 | 'SIM105', 34 | 'SIM108', 35 | 'SIM115', 36 | # 'DTZ007', # Naive datetime constructed using `datetime.datetime.strptime()` without %z 37 | ] 38 | 39 | [format] 40 | quote-style = "single" 41 | 42 | [lint.isort] 43 | known-first-party = ["ssh_lib"] 44 | lines-after-imports = 2 45 | 46 | [lint.flake8-comprehensions] 47 | allow-dict-calls-with-keyword-arguments = true 48 | 49 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | ## Licenses 2 | 3 | The license of this project is [MIT](https://www.tldrlegal.com/license/mit-license). 4 | 5 | Map data is from [OpenStreetMap](https://www.openstreetmap.org/copyright). 
6 | 7 | [OpenMapTiles](https://github.com/openmaptiles/openmaptiles) code is released under the [BSD 3-Clause License](), design under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/). [LICENSE.md](https://github.com/openmaptiles/openmaptiles/blob/master/LICENSE.md). 8 | 9 | ### Styles 10 | 11 | #### Bright 12 | 13 | Fork from [openmaptiles/osm-bright-gl-style](https://github.com/openmaptiles/osm-bright-gl-style). The OSM Bright GL Style is derived from [Mapbox Open Styles](https://github.com/mapbox/mapbox-gl-styles). The OSM Bright GL Style’s code is released under the [BSD 3-Clause License](). The OSM Bright GL Style’s design is released under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/). [LICENSE.md](https://github.com/openmaptiles/osm-bright-gl-style/blob/master/LICENSE.md). 14 | 15 | #### Liberty 16 | 17 | Fork from [maputnik/osm-liberty](https://github.com/maputnik/osm-liberty). OSM Liberty is a fork of OSM Bright. The OSM Bright GL Style is derived from [Mapbox Open Styles](https://github.com/mapbox/mapbox-gl-styles). The OSM Bright GL Style’s code is released under the [BSD 3-Clause License](). The OSM Bright GL Style’s design is released under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/). [LICENSE.md](https://github.com/maputnik/osm-liberty/blob/gh-pages/LICENSE.md). 18 | 19 | #### Positron 20 | 21 | Fork from [openmaptiles/positron-gl-style](https://github.com/openmaptiles/positron-gl-style). The Positron GL Style is derived from [CartoDB Basemaps](https://github.com/CartoDB/CartoDB-basemaps) designed by Stamen and Paul Norman for CartoDB Inc., licensed under [CC BY 3.0](https://creativecommons.org/licenses/by/3.0/). The Positron GL style’s code is released under the [BSD 3-Clause License](). The Positron GL Style’s design is released under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/). [LICENSE.md](https://github.com/openmaptiles/positron-gl-style/blob/master/LICENSE.md). 
22 | 23 | ### Fonts 24 | 25 | [Noto Sans](https://www.google.com/get/noto/) is released under the [SIL Open Font License, Version 1.1](https://openfontlicense.org/) 26 | 27 | ### **Icons** 28 | 29 | The [Maki POI icon set](https://github.com/mapbox/maki/blob/master/LICENSE.txt) is licensed under CC0 1.0 Universal. 30 | 31 | The right arrow was derived from [Wikipedia](https://commons.wikimedia.org/wiki/File:Arrowright.svg), it is public domain. 32 | 33 | ### Natural Earth 34 | 35 | [Natural Earth](https://www.naturalearthdata.com/) map data is in the public domain. 36 | 37 | ### Tools 38 | 39 | spritezero-cli is from [mapbox/spritezero-cli](https://github.com/mapbox/spritezero-cli), licensed [ISC](https://github.com/mapbox/spritezero-cli/blob/master/LICENSE.md). 40 | 41 | --- 42 | 43 | ## Licence of this repo 44 | 45 | MIT License 46 | 47 | Copyright (c) 2023 Zsolt Ero 48 | 49 | Permission is hereby granted, free of charge, to any person obtaining a copy 50 | of this software and associated documentation files (the "Software"), to deal 51 | in the Software without restriction, including without limitation the rights 52 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 53 | copies of the Software, and to permit persons to whom the Software is 54 | furnished to do so, subject to the following conditions: 55 | 56 | The above copyright notice and this permission notice shall be included in all 57 | copies or substantial portions of the Software. 58 | 59 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 60 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 61 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 62 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 63 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 64 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 65 | SOFTWARE. 
66 | -------------------------------------------------------------------------------- /biome.jsonc: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://biomejs.dev/schemas/1.8.3/schema.json", 3 | "formatter": { 4 | "indentStyle": "space", 5 | "lineWidth": 100 6 | }, 7 | "organizeImports": { 8 | "enabled": true, 9 | "ignore": [] 10 | }, 11 | "linter": { 12 | "enabled": true, 13 | "rules": { 14 | "recommended": true, 15 | "complexity": { 16 | "noForEach": "off" 17 | } 18 | }, 19 | "ignore": [] 20 | }, 21 | "javascript": { 22 | "formatter": { 23 | "semicolons": "asNeeded", 24 | "quoteStyle": "single" 25 | } 26 | }, 27 | "files": { 28 | "maxSize": 100000, 29 | "ignore": ["venv", "dist", ".astro"] 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /config/.env.sample: -------------------------------------------------------------------------------- 1 | # Leave this empty if you use SSH keys 2 | SSH_PASSWD= 3 | 4 | # domain/subdomain 5 | # Set up an A record pointing to your server's IP address and 6 | # write the full domain here 7 | DOMAIN_DIRECT=maps.example.com 8 | 9 | # Your email address to be used for the Let's Encrypt certificates 10 | LETSENCRYPT_EMAIL= 11 | 12 | # Skip the full planet download, useful for testing (true/false) 13 | SKIP_PLANET=false 14 | 15 | # Use self-signed certs / skip the certificate management part. 16 | # If you are using a custom solution like VPN, Traefik, 17 | # or Cloudflare managed certificates, set this to true. 18 | # In this case, you'll have self-signed certificates after the script completes. 19 | SELF_SIGNED_CERTS=false 20 | 21 | 22 | 23 | ### --- Advanced setup below this line --- ### 24 | ### --- 99.9% you don't need any of this! 
--- ### 25 | 26 | # DOMAIN_ROUNDROBIN is a very special feature for getting certificates on one server, 27 | # uploading them to a bucket, and then downloading them to multiple http-host servers. 28 | # For a single host, you don't need it! 29 | DOMAIN_ROUNDROBIN= 30 | 31 | # Variables used by the load balancer script - you don't need these! 32 | HTTP_HOST_LIST= 33 | TELEGRAM_TOKEN= 34 | TELEGRAM_CHAT_ID= 35 | 36 | -------------------------------------------------------------------------------- /config/cloudflare.ini.sample: -------------------------------------------------------------------------------- 1 | # --- Let's Encrypt DNS challenge, not needed for self-hosting 2 | 3 | dns_cloudflare_api_token = xxx 4 | -------------------------------------------------------------------------------- /config/rclone.conf.sample: -------------------------------------------------------------------------------- 1 | [remote] 2 | type = s3 3 | provider = Cloudflare 4 | access_key_id = xxx 5 | secret_access_key = xxx 6 | endpoint = https://xxx.r2.cloudflarestorage.com 7 | no_check_bucket = true 8 | -------------------------------------------------------------------------------- /docs/_not_used/ext4-ideas.txt: -------------------------------------------------------------------------------- 1 | ext4 2 | 3 | Reduce the inode size: 256 -> -I 128 4 | 5 | Reduce the inode ratio: The inode ratio is the number of blocks per inode. You can increase the inode ratio to reduce the number of inodes created. This can be done with the -i option when creating the filesystem. For example, -i 8192 will create one inode every 8192 blocks. 
-------------------------------------------------------------------------------- /docs/_not_used/extract_ext4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # reference: 4 | # https://www.kernel.org/doc/Documentation/filesystems/ext4.txt 5 | # https://wiki.archlinux.org/title/ext4 6 | # 7 | # -m reserved-blocks-percentage 8 | # -F Force mke2fs to create a filesystem, even if the specified device is not a partition on a block special device 9 | # 10 | # -O feature 11 | # from /etc/mke2fs.conf 12 | # defaults: has_journal,extent,huge_file,flex_bg,metadata_csum,64bit,dir_nlink,extra_isize 13 | # disabling journalling, since it's a read-only fs, as well as other unused features 14 | # extent is actually needed for tail packing small files 15 | # 16 | # -E extended-options 17 | # lazy_itable_init - inode table is fully initialized at the time of file system creation 18 | # nodiscard - Do not attempt to discard blocks at mkfs time. 
19 | # 20 | # inode_size = 128 (minimum) 21 | # inode_ratio = 16384 (default but experimenting) 22 | 23 | 24 | sudo umount mnt || true 25 | rm -rf mnt 26 | rm -f image.ext4 27 | 28 | 29 | # make a sparse file 30 | # make sure it's bigger then the current OSM output 31 | fallocate -l 200G image.ext4 32 | 33 | 34 | mke2fs -t ext4 -v \ 35 | -m 0 \ 36 | -F \ 37 | -O ^has_journal,^huge_file,^metadata_csum,^64bit,^extra_isize \ 38 | -E lazy_itable_init=0,nodiscard \ 39 | -I 128 \ 40 | -i 16384 \ 41 | image.ext4 42 | 43 | mkdir mnt 44 | sudo mount -v \ 45 | -t ext4 \ 46 | -o nobarrier,noatime \ 47 | image.ext4 mnt 48 | 49 | sudo chown ofm:ofm -R mnt 50 | 51 | /data/ofm/venv/bin/python ../../tile_gen/extract.py output.mbtiles mnt/extract \ 52 | > "extract_out.log" 2> "extract_err.log" 53 | 54 | sudo umount mnt 55 | 56 | e2fsck -vf image.ext4 && \ 57 | resize2fs -M image.ext4 && \ 58 | e2fsck -vf image.ext4 59 | 60 | # default to read-only mode 61 | tune2fs -E mount_opts=ro image.ext4 62 | 63 | 64 | 65 | 66 | 67 | 68 | -------------------------------------------------------------------------------- /docs/_not_used/fs_stats/README.md: -------------------------------------------------------------------------------- 1 | # Comparing filesystem stats after extraction 2 | 3 | Run: planet/20231208_091355_pt 4 | 5 | dedupl-fix.log contains the files which were created for the btrfs 64k limit workaround. 
6 | -------------------------------------------------------------------------------- /docs/_not_used/fs_stats/btrfs_after_delete.txt: -------------------------------------------------------------------------------- 1 | df -h 2 | Filesystem Size Used Avail Use% Mounted on 3 | /dev/loop1 200G 130G 70G 66% /data/ofm/tile_gen/runs/planet/20231221_134737_pt/mnt_rw2 4 | 5 | 6 | btrfs filesystem df 7 | Data, single: total=48.01GiB, used=47.54GiB 8 | System, single: total=4.00MiB, used=16.00KiB 9 | Metadata, single: total=83.01GiB, used=81.42GiB 10 | GlobalReserve, single: total=512.00MiB, used=0.00B 11 | 12 | 13 | btrfs filesystem show 14 | Label: none uuid: b9e8ae3a-aedb-4221-ab08-e1ad6095f188 15 | Total devices 1 FS bytes used 128.96GiB 16 | devid 1 size 200.00GiB used 131.02GiB path /dev/loop1 17 | 18 | 19 | 20 | btrfs filesystem usage 21 | Overall: 22 | Device size: 200.00GiB 23 | Device allocated: 131.02GiB 24 | Device unallocated: 68.98GiB 25 | Device missing: 0.00B 26 | Used: 128.96GiB 27 | Free (estimated): 69.45GiB (min: 69.45GiB) 28 | Free (statfs, df): 69.44GiB 29 | Data ratio: 1.00 30 | Metadata ratio: 1.00 31 | Global reserve: 512.00MiB (used: 0.00B) 32 | Multiple profiles: no 33 | 34 | Data,single: Size:48.01GiB, Used:47.54GiB (99.03%) 35 | /dev/loop1 48.01GiB 36 | 37 | Metadata,single: Size:83.01GiB, Used:81.42GiB (98.09%) 38 | /dev/loop1 83.01GiB 39 | 40 | System,single: Size:4.00MiB, Used:16.00KiB (0.39%) 41 | /dev/loop1 4.00MiB 42 | 43 | Unallocated: 44 | /dev/loop1 68.98GiB 45 | -------------------------------------------------------------------------------- /docs/_not_used/fs_stats/btrfs_before_delete.txt: -------------------------------------------------------------------------------- 1 | df -h 2 | Filesystem Size Used Avail Use% Mounted on 3 | /dev/loop0 200G 139G 61G 70% /data/ofm/tile_gen/runs/planet/20231221_134737_pt/mnt_rw 4 | 5 | 6 | btrfs filesystem df 7 | Data, single: total=48.01GiB, used=47.59GiB 8 | System, single: total=4.00MiB, 
used=16.00KiB 9 | Metadata, single: total=92.01GiB, used=90.52GiB 10 | GlobalReserve, single: total=512.00MiB, used=0.00B 11 | 12 | 13 | btrfs filesystem show 14 | Label: none uuid: 9c5cd306-96c8-4ee5-bfbb-b8216698f955 15 | Total devices 1 FS bytes used 138.11GiB 16 | devid 1 size 200.00GiB used 140.02GiB path /dev/loop0 17 | 18 | 19 | 20 | btrfs filesystem usage 21 | Overall: 22 | Device size: 200.00GiB 23 | Device allocated: 140.02GiB 24 | Device unallocated: 59.98GiB 25 | Device missing: 0.00B 26 | Used: 138.11GiB 27 | Free (estimated): 60.40GiB (min: 60.40GiB) 28 | Free (statfs, df): 60.40GiB 29 | Data ratio: 1.00 30 | Metadata ratio: 1.00 31 | Global reserve: 512.00MiB (used: 0.00B) 32 | Multiple profiles: no 33 | 34 | Data,single: Size:48.01GiB, Used:47.59GiB (99.12%) 35 | /dev/loop0 48.01GiB 36 | 37 | Metadata,single: Size:92.01GiB, Used:90.52GiB (98.38%) 38 | /dev/loop0 92.01GiB 39 | 40 | System,single: Size:4.00MiB, Used:16.00KiB (0.39%) 41 | /dev/loop0 4.00MiB 42 | 43 | Unallocated: 44 | /dev/loop0 59.98GiB 45 | -------------------------------------------------------------------------------- /docs/_not_used/fs_stats/btrfs_not_used.txt: -------------------------------------------------------------------------------- 1 | # takes a lot of time, should only be used when debugging 2 | 3 | echo -e "\n\nbtrfs filesystem du -s" 4 | sudo btrfs filesystem du -s mnt_rw 5 | 6 | echo -e "\n\ncompsize -x" 7 | sudo compsize -x mnt_rw 2> /dev/null || true -------------------------------------------------------------------------------- /docs/_not_used/fs_stats/ext4.md: -------------------------------------------------------------------------------- 1 | ## deduplicated on ext4 2 | 3 | ```df -h mnt_rw 4 | df -h mnt_rw 5 | Filesystem Size Used Avail Use% Mounted on 6 | /dev/loop0 1.4T 187G 1.2T 14% 7 | ``` 8 | 9 | ``` 10 | df -i mnt_rw 11 | Filesystem Inodes IUsed IFree IUse% Mounted on 12 | /dev/loop0 393216000 39614466 353601534 11% 13 | ``` 14 | 
-------------------------------------------------------------------------------- /docs/_not_used/fs_stats/naive_mbutil_ext4.md: -------------------------------------------------------------------------------- 1 | ## native mapbox/mbutil 2 | 3 | // pip install git+https://github.com/mapbox/mbutil.git@544c76e 4 | 5 | ``` 6 | Filesystem 1K-blocks Used Available Use% Mounted on 7 | /dev/loop0 1,474,386,100 1,119,622,516 354,763,584 76% 8 | ``` 9 | 10 | ``` 11 | Filesystem Inodes IUsed IFree IUse% Mounted on 12 | /dev/loop0 393,216,000 269,252,174 123,963,826 69% 13 | ``` 14 | -------------------------------------------------------------------------------- /docs/_not_used/fs_stats/planet_version: -------------------------------------------------------------------------------- 1 | planetiler: 2023-12-11 2 | -------------------------------------------------------------------------------- /docs/_not_used/kernel-ideas.txt: -------------------------------------------------------------------------------- 1 | # not using 2 | 3 | fs.file-max # not needed, recent Ubuntu has it on high by default 4 | 5 | 6 | # not tested 7 | 8 | tcp_fin_timeout 9 | TCP max buffer size 10 | 11 | net.core.netdev_max_backlog = 262144 12 | net.core.rmem_max = 16777216 13 | net.core.wmem_max = 16777216 14 | net.ipv4.tcp_max_syn_backlog = 262144 15 | net.ipv4.tcp_max_tw_buckets = 6000000 16 | net.ipv4.tcp_no_metrics_save = 1 17 | net.ipv4.tcp_rmem = 4096 87380 16777216 18 | net.ipv4.tcp_syn_retries = 2 19 | net.ipv4.tcp_synack_retries = 2 20 | net.ipv4.tcp_tw_reuse = 1 21 | net.ipv4.tcp_wmem = 4096 65536 16777216 22 | 23 | 24 | -------------------------------------------------------------------------------- /docs/_not_used/loop_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Define the source folder 4 | source_folder="20231228_201550_pt" 5 | 6 | # Define the number of copies you want to make 7 | number_of_copies=40 8 | 9 | # Loop and 
copy the folder into c1, c2, c3, c4, ... 10 | for i in $(seq 1 $number_of_copies); do 11 | cp -r "$source_folder" "c$i" 12 | btrfstune -m "c$i/tiles.btrfs" 13 | done -------------------------------------------------------------------------------- /docs/_not_used/nginx-ideas.txt: -------------------------------------------------------------------------------- 1 | # ideas https://calomel.org/nginx.html 2 | # https://www.nginx.com/blog/tuning-nginx/ 3 | # https://github.com/denji/nginx-tuning 4 | 5 | 6 | # not using 7 | 8 | sendfile_max_chunk 512k; # no need for small files 9 | keepalive_requests # default seems reasonable 10 | keepalive_timeout # default seems reasonable 11 | 12 | gzip_vary on; # no need probably 13 | gzip_min_length 10240; # only one file 14 | gzip_proxied any; # no proxying 15 | 16 | types_hash_max_size 2048; # default should be good for the default set 17 | 18 | 19 | 20 | 21 | 22 | # not tested 23 | 24 | client_body_buffer_size 128k; 25 | client_max_body_size 128k; 26 | client_header_buffer_size 1k; 27 | large_client_header_buffers 2 1k; 28 | 29 | client_header_timeout 12; # default is 60 30 | 31 | -------------------------------------------------------------------------------- /docs/assets/name-maputnik-details.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperknot/openfreemap/dd97e1fdcbd2f7d9c90eee9b52c96bc2ac9009b0/docs/assets/name-maputnik-details.png -------------------------------------------------------------------------------- /docs/assets/name-maputnik-view.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperknot/openfreemap/dd97e1fdcbd2f7d9c90eee9b52c96bc2ac9009b0/docs/assets/name-maputnik-view.png -------------------------------------------------------------------------------- /docs/assets/name-osm-search.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hyperknot/openfreemap/dd97e1fdcbd2f7d9c90eee9b52c96bc2ac9009b0/docs/assets/name-osm-search.png -------------------------------------------------------------------------------- /docs/benchmark/README.md: -------------------------------------------------------------------------------- 1 | # HTTP Hosts Benchmarking 2 | 3 | This repository contains tools and scripts for benchmarking HTTP hosts performance. 4 | 5 | ## Prerequisites 6 | 7 | Before running the benchmarks, you need to create a path list (`path_list_500k.txt`). You have two options: 8 | 9 | 1. Generate from real-world server logs using `nginx_to_path_list.py` 10 | 2. Generate randomly (Note: real-world usage patterns are typically non-random, e.g., ocean tiles are rarely accessed) 11 | 12 | ## Important Notes 13 | 14 | - Run the benchmarks on `localhost`, and not over the internet! Otherwise you'd be just testing your internet speed. 15 | - The benchmark uses [wrk](https://github.com/wg/wrk) HTTP benchmarking tool 16 | 17 | ## Usage 18 | 19 | Basic command: 20 | 21 | ```bash 22 | wrk -c10 -t4 -d10s -s /data/ofm/benchmark/wrk_custom_list.lua http://localhost 23 | ``` 24 | 25 | ### Parameters Explained 26 | 27 | - `-c10`: Number of connections to keep open 28 | - `-t4`: Number of threads to use 29 | - `-d10s`: Duration of the test (10 seconds) 30 | - `-s`: Script file to use 31 | 32 | ### Thread Count Considerations 33 | 34 | - `-t1`: More accurate results as the URL list is loaded exactly in sequence 35 | - `-t4`: Better reflects real-world usage patterns 36 | 37 | ## Results 38 | 39 | Benchmark results can be found in [results.md](results.md) 40 | 41 | ## Contributing 42 | 43 | Feel free to submit your results including which hosts were used. 
44 | -------------------------------------------------------------------------------- /docs/benchmark/nginx_to_path_list.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | 4 | # This script parses a nginx server log and creates a text file 5 | # which can be used in the Lua script. 6 | # The path file is not supplied in this repo. 7 | 8 | with open('access.jsonl') as fp: 9 | json_lines = fp.readlines() 10 | 11 | paths = [] 12 | for i, line in enumerate(json_lines): 13 | log_data = json.loads(line) 14 | if log_data['status'] != 200: 15 | continue 16 | 17 | if log_data['request_method'] != 'GET': 18 | continue 19 | 20 | uri = log_data['uri'] 21 | 22 | if 'tiles/' not in uri or not uri.endswith('.pbf'): 23 | continue 24 | 25 | path = log_data['uri'].split('tiles/')[1] 26 | paths.append(path + '\n') 27 | 28 | print(f'{i / len(json_lines) * 100:.1f}%') 29 | 30 | with open('path_list.txt', 'w') as fp: 31 | fp.writelines(paths) 32 | -------------------------------------------------------------------------------- /docs/benchmark/results.md: -------------------------------------------------------------------------------- 1 | # wrk benchmarks 2 | 3 | Real world usage, 500k requests replayed from server log. 4 | 5 | ### Hetnzer dedicated server with NVME ssd 6 | 7 | #### localhost 8 | 9 | clean cache after nginx restart. 10 | 11 | ``` 12 | service nginx restart 13 | wrk -c10 -t4 -d60s -s /data/ofm/benchmark/wrk_custom_list.lua http://localhost 14 | Running 1m test @ http://localhost 15 | 4 threads and 10 connections 16 | Thread Stats Avg Stdev Max +/- Stdev 17 | Latency 2.02ms 7.04ms 50.43ms 93.23% 18 | Req/Sec 8.42k 2.01k 18.52k 69.79% 19 | 2871265 requests in 1.00m, 230.65GB read 20 | Requests/sec: 47811.00 21 | Transfer/sec: 3.84GB 22 | ``` 23 | 24 | Super much overkill, we'd only need 125 MB/s for Gigabit connection and this is 3840 MB/s. 25 | Also max request time is super nice + no errors. 
26 | 27 | #### over network 28 | 29 | ``` 30 | wrk -c10 -t4 -d60s -s /data/ofm/benchmark/wrk_custom_list.lua http://x.x.x.x 31 | Running 1m te st @ http://144.76.168.195 32 | 4 threads and 10 connections 33 | Thread Stats Avg Stdev Max +/- Stdev 34 | Latency 7.57ms 6.61ms 45.34ms 84.32% 35 | Req/Sec 293.85 141.33 1.18k 73.07% 36 | 71628 requests in 1.00m, 6.05GB read 37 | Requests/sec: 1191.88 38 | Transfer/sec: 103.01MB 39 | ``` 40 | 41 | Realistically this is the max over Gigabit connection. 42 | 43 | --- 44 | 45 | ### BuyVM KVM machine with 1 TB BuyVM Block Storage Slab 46 | 47 | Advertisement: 40Gbit+ InfiniBand RDMA Storage Fabric giving near local storage performance. 48 | 49 | Reality: 50 | 51 | ``` 52 | wrk -c10 -t4 -d60s -s /data/ofm/benchmark/wrk_custom_list.lua http://localhost 53 | Running 1m test @ http://localhost 54 | 4 threads and 10 connections 55 | Thread Stats Avg Stdev Max +/- Stdev 56 | Latency 226.10ms 343.52ms 1.99s 87.75% 57 | Req/Sec 29.77 38.06 272.00 89.72% 58 | 3655 requests in 1.00m, 232.76MB read 59 | Socket errors: connect 0, read 0, write 0, timeout 8 60 | Requests/sec: 60.87 61 | Transfer/sec: 3.88MB 62 | ``` 63 | 64 | Wow, this is 60 request per second compared to Hetzner's 47000, just wow! Repeated tests with hot cache resulted in a bit better performance, but still not Gigabit. 65 | 66 | ``` 67 | Requests/sec: 266.99 68 | Transfer/sec: 23.07MB 69 | ``` 70 | 71 | Abandoned the idea of using BuyVM, even though their unlimited bandwidth is quite unique in this price range in USA. 72 | -------------------------------------------------------------------------------- /docs/benchmark/wrk_custom_list.lua: -------------------------------------------------------------------------------- 1 | local counter = 1 2 | local lines = {} 3 | local url_base = "/planet/fake_version/" -- trailing slash 4 | local path_list_txt = "/data/ofm/benchmark/path_list_500k.txt" 5 | 6 | for line in io.lines(path_list_txt) do 7 | table.insert(lines, url_base .. 
line) 8 | end 9 | 10 | local function getNextUrl() 11 | -- Get the next URL from the list 12 | local url_path = lines[counter] 13 | counter = counter + 1 14 | 15 | -- If we've gone past the end of the list, wrap around to the start 16 | if counter > #lines then 17 | counter = 1 18 | end 19 | 20 | return url_path 21 | end 22 | 23 | request = function() 24 | -- Return the request object with the current URL path 25 | path = getNextUrl() 26 | local headers = {} 27 | headers["Host"] = "ofm" 28 | return wrk.format('GET', path, headers, nil) 29 | end 30 | 31 | response = function(status) 32 | if status ~= 200 then 33 | print("Non-200 response") 34 | print("Status: ", status) 35 | -- this only works in single threaded mode (-t1) 36 | print("Request path: ", path) 37 | end 38 | end 39 | 40 | -------------------------------------------------------------------------------- /docs/debugging_names.md: -------------------------------------------------------------------------------- 1 | # Debugging international names 2 | 3 | If there is an issue about international names not being displayed correctly, first, we need to find **one specific example** and check at which stage does the problem appear. 4 | 5 | OpenFreeMap map data is created by the following stack: 6 | 7 | **OpenStreetMap data ➔ OpenMapTiles specification ➔ Planetiler** 8 | 9 | 1. To debug OpenStreetMap data, go to OpenStreetMap.org and search for the query string. For example "Iwate Prefecture" gives these results: [nominatim](https://nominatim.openstreetmap.org/ui/details.html?osmtype=R&osmid=3792412&class=boundary) and [openstreetmap](https://www.openstreetmap.org/relation/3792412) 10 | 11 | 12 | 13 | 2. Then we need to check what the data is in the generated vector tiles. The best way to do this is to go to [Maputnik editor](https://maputnik.github.io/editor?style=https://tiles.openfreemap.org/styles/bright) and select View / Inspect. 14 | 15 | 16 | 17 | 3. 
Then you can search for the little red dot matching your label and make a screenshot. 18 | 19 | 20 | 21 | Now we can compare where the naming problem is coming from. 22 | 23 | In conclusion: for the **one specific example**, please link the OSM pages and add the inspector screenshot, then we can start with the debugging. 24 | 25 | ## Next steps 26 | 27 | It'd be nice to compare with other OpenMapTiles implementations like [tilemaker](https://github.com/systemed/tilemaker) or the [OpenMapTiles reference](https://github.com/openmaptiles/openmaptiles). I don't have full planet datasets from these implementations currently, so if someone is willing to run one it'd be a great contribution. 28 | -------------------------------------------------------------------------------- /docs/dev_setup.md: -------------------------------------------------------------------------------- 1 | # dev setup 2 | 3 | ### macOS 4 | 5 | On macOS, I recommend [OrbStack](https://orbstack.dev/). 6 | 7 | I saved this function into my bash_profile. It sets up a clean x64-based Ubuntu 22 VM in a few seconds. 8 | 9 | ``` 10 | orb_reset() { 11 | orbctl delete -f ubuntu-test 12 | orbctl create -a amd64 ubuntu:jammy ubuntu-test 13 | } 14 | ``` 15 | 16 | I saved the following in `.ssh/config`: 17 | 18 | ``` 19 | Host orb_my 20 | Hostname 127.0.0.1 21 | Port 32222 22 | IdentityFile ~/.orbstack/ssh/id_ed25519 23 | ``` 24 | 25 | Then I run commands like the following: 26 | 27 | ``` 28 | ./init-server.py http-host-static orb_my 29 | ./init-server.py debug orb_my 30 | ``` 31 | -------------------------------------------------------------------------------- /docs/self_hosting.md: -------------------------------------------------------------------------------- 1 | # Self-hosting Howto 2 | 3 | You can either self-host or use our public instance. Everything is **open-source**, including the full production setup — there’s no 'open-core' model here. 
4 | 5 | When self-hosting, there are two modules you can set up on a server (see details in the repo README). 6 | 7 | - **http-host** 8 | 9 | - **tile-gen** 10 | 11 | There is a 99.9% chance you only need **http-host**. Tile-gen is slow, needs a huge machine and is totally pointless, since we upload the processed files every week. 12 | 13 | ### System requirements 14 | 15 | **http-host**: 300 GB disk space for hosting a single run. SSD is recommended, but not required. 16 | 17 | **tile-gen**: 500 GB SSD and at least 64 GB RAM 18 | 19 | **Ubuntu 22** or newer 20 | 21 | ### Provider recommendation 22 | 23 | One amazing deal, which is tested and known to work well for http-host, is the €4.5 / month [Contabo Storage VPS](https://contabo.com/en/storage-vps/) 24 | 25 | --- 26 | 27 | ### Warning 28 | 29 | This project is made to run on **clean servers** or virtual machines dedicated for this project. The scripts need sudo permissions as they mount/unmount disk images. Do not run this on your dev machine without using virtual machines. If you do, please make sure you understand exactly what each script is doing. 30 | 31 | If you run it on a non-clean server, please understand that this will modify your nginx config! 32 | 33 | --- 34 | 35 | ## Instructions 36 | 37 | I recommend running things quickly first, with `SKIP_PLANET=true` and then once it works, running it with `SKIP_PLANET=false`. 38 | 39 | #### 1. DNS setup 40 | 41 | Set up a server with at least 300 GB SSD space and configure the DNS for the subdomain of your choice. 42 | For example, make an A record for "maps.example.com" -> 185.199.110.153 43 | 44 | #### 2. Clone and prepare `config` folder 45 | 46 | ``` 47 | git clone https://github.com/hyperknot/openfreemap 48 | ``` 49 | 50 | In the config folder, copy `.env.sample` to `.env` and set the values. 51 | 52 | `DOMAIN_DIRECT` - Your subdomain \ 53 | `LETSENCRYPT_EMAIL` - Your email for Let's Encrypt 54 | 55 | Set `SKIP_PLANET=true` first. 56 | 57 | #### 3. 
Set up Python if you don't have it yet 58 | 59 | On Ubuntu you can get it by `sudo apt install python3-pip` 60 | 61 | On macOS you can do `brew install python` 62 | 63 | #### 4. Prepare the Python environment 64 | 65 | You run the deploy script locally, and it deploys to a remote server over SSH. You can use a virtualenv if you are used to working with them, but it's not necessary. 66 | 67 | ``` 68 | cd openfreemap 69 | pip install -e . 70 | ``` 71 | 72 | #### 5. Deploy quick version with `SKIP_PLANET=true` 73 | 74 | Run the actual deploy command and wait a few minutes 75 | 76 | ``` 77 | ./init-server.py http-host-static HOSTNAME 78 | ``` 79 | 80 | #### 6. Check 81 | 82 | If everything is OK, you'll have some curl lines printed. Run the first one locally and make sure it's showing HTTP/2 200. For example, this is an OK response: 83 | 84 | ``` 85 | curl -sI https://test.openfreemap.org/monaco | sort 86 | 87 | HTTP/2 200 88 | access-control-allow-origin: * 89 | cache-control: max-age=86400 90 | cache-control: public 91 | content-length: 5776 92 | content-type: application/json 93 | date: Fri, 11 Oct 2024 21:01:23 GMT 94 | etag: "670991d1-1690" 95 | expires: Sat, 12 Oct 2024 21:01:23 GMT 96 | last-modified: Fri, 11 Oct 2024 21:00:01 GMT 97 | server: nginx 98 | x-ofm-debug: latest JSON monaco 99 | ``` 100 | 101 | #### 7. Deploy and check with `SKIP_PLANET=false` 102 | 103 | Update your `.env` file and re-run the same `./init-server.py http-host-static HOSTNAME` as before. 104 | 105 | Go for a walk, and by the time you come back it should be up and running with the latest planet tiles deployed. Don't worry about the "Download aborted" lines in the meantime; it's a bug in Cloudflare. 106 | 107 | If your server doesn't have an SSD, the download + uncompressing process can take hours. 
108 | 109 | --- 110 | 111 | #### Deploy tile-gen server (optional) 112 | 113 | If you have a really beefy machine (see above) and you really want to generate tiles yourself, you can run `./init-server.py tile-gen HOSTNAME`. 114 | 115 | Trigger a run manually, by running 116 | 117 | ``` 118 | sudo /data/ofm/venv/bin/python -u /data/ofm/tile_gen/bin/tile_gen.py make-tiles planet 119 | ``` 120 | 121 | It's recommended to use tmux or similar, as it can take days to complete. 122 | -------------------------------------------------------------------------------- /init-server.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import click 4 | from fabric import Config, Connection 5 | 6 | from ssh_lib import MODULES_DIR, dotenv_val 7 | from ssh_lib.tasks import ( 8 | prepare_http_host, 9 | prepare_shared, 10 | prepare_tile_gen, 11 | run_http_host_sync, 12 | setup_loadbalancer, 13 | setup_roundrobin_writer, 14 | ) 15 | from ssh_lib.utils import ( 16 | put, 17 | ) 18 | 19 | 20 | def get_connection(hostname, user, port): 21 | ssh_passwd = dotenv_val('SSH_PASSWD') 22 | 23 | if ssh_passwd: 24 | print('Using SSH password') 25 | 26 | c = Connection( 27 | host=hostname, 28 | user=user, 29 | port=port, 30 | connect_kwargs={'password': ssh_passwd}, 31 | config=Config(overrides={'sudo': {'password': ssh_passwd}}), 32 | ) 33 | else: 34 | c = Connection( 35 | host=hostname, 36 | user=user, 37 | port=port, 38 | ) 39 | 40 | return c 41 | 42 | 43 | def common_options(func): 44 | """Decorator to define common options.""" 45 | func = click.argument('hostname')(func) 46 | func = click.option('--port', type=int, help='SSH port (if not in .ssh/config)')(func) 47 | func = click.option('--user', help='SSH user (if not in .ssh/config)')(func) 48 | func = click.option('-y', '--noninteractive', is_flag=True, help='Skip confirmation questions')( 49 | func 50 | ) 51 | return func 52 | 53 | 54 | @click.group() 55 | def cli(): 56 | 
pass 57 | 58 | 59 | @cli.command() 60 | @common_options 61 | def http_host_static(hostname, user, port, noninteractive): 62 | if not noninteractive and not click.confirm(f'Run script on {hostname}?'): 63 | return 64 | 65 | c = get_connection(hostname, user, port) 66 | 67 | prepare_shared(c) 68 | prepare_http_host(c) 69 | 70 | run_http_host_sync(c) 71 | 72 | 73 | @cli.command() 74 | @common_options 75 | def http_host_autoupdate(hostname, user, port, noninteractive): 76 | if not noninteractive and not click.confirm(f'Run script on {hostname}?'): 77 | return 78 | 79 | c = get_connection(hostname, user, port) 80 | 81 | c.sudo('rm -f /etc/cron.d/ofm_http_host') 82 | 83 | prepare_shared(c) 84 | prepare_http_host(c) 85 | 86 | run_http_host_sync(c) # disable for first install if you don't want to wait 87 | 88 | put(c, MODULES_DIR / 'http_host' / 'cron.d' / 'ofm_http_host', '/etc/cron.d/') 89 | 90 | 91 | @cli.command() 92 | @common_options 93 | @click.option('--cron', is_flag=True, help='Enable cron task') 94 | def tile_gen(hostname, user, port, cron, noninteractive): 95 | if not noninteractive and not click.confirm(f'Run script on {hostname}?'): 96 | return 97 | 98 | c = get_connection(hostname, user, port) 99 | 100 | prepare_shared(c) 101 | 102 | prepare_tile_gen(c, enable_cron=cron) 103 | 104 | 105 | @cli.command() 106 | @common_options 107 | def roundrobin_dns_writer(hostname, user, port, noninteractive): 108 | if not noninteractive and not click.confirm(f'Run script on {hostname}?'): 109 | return 110 | 111 | c = get_connection(hostname, user, port) 112 | 113 | setup_roundrobin_writer(c) 114 | 115 | 116 | @cli.command() 117 | @common_options 118 | def loadbalancer(hostname, user, port, noninteractive): 119 | if not noninteractive and not click.confirm(f'Run script on {hostname}?'): 120 | return 121 | 122 | c = get_connection(hostname, user, port) 123 | prepare_shared(c) 124 | 125 | setup_loadbalancer(c) 126 | 127 | 128 | @cli.command() 129 | @common_options 130 | def 
http_host_sync(hostname, user, port, noninteractive): 131 | if not noninteractive and not click.confirm(f'Run script on {hostname}?'): 132 | return 133 | 134 | c = get_connection(hostname, user, port) 135 | run_http_host_sync(c) 136 | 137 | 138 | @cli.command() 139 | @common_options 140 | def debug(hostname, user, port, noninteractive): 141 | c = get_connection(hostname, user, port) 142 | run_http_host_sync(c) 143 | 144 | 145 | if __name__ == '__main__': 146 | cli() 147 | -------------------------------------------------------------------------------- /lint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | node_modules/.bin/prettier -w "**/*.md" 5 | 6 | # biome 7 | #pnpm biome check --write --unsafe --colors=off --log-level=info --log-kind=pretty . | grep path | sort 8 | pnpm biome check --write --unsafe . 9 | 10 | ruff check --fix . 11 | ruff format . 12 | 13 | find . -type f -name '*.conf' -path '*/nginx*' -exec nginxfmt -v {} +; 14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /modules/debug_proxy/.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | 3 | logs 4 | _.log 5 | npm-debug.log_ 6 | yarn-debug.log* 7 | yarn-error.log* 8 | lerna-debug.log* 9 | .pnpm-debug.log* 10 | 11 | # Diagnostic reports (https://nodejs.org/api/report.html) 12 | 13 | report.[0-9]_.[0-9]_.[0-9]_.[0-9]_.json 14 | 15 | # Runtime data 16 | 17 | pids 18 | _.pid 19 | _.seed 20 | \*.pid.lock 21 | 22 | # Directory for instrumented libs generated by jscoverage/JSCover 23 | 24 | lib-cov 25 | 26 | # Coverage directory used by tools like istanbul 27 | 28 | coverage 29 | \*.lcov 30 | 31 | # nyc test coverage 32 | 33 | .nyc_output 34 | 35 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 36 | 37 | .grunt 38 | 39 | # Bower dependency directory (https://bower.io/) 40 | 41 | 
bower_components 42 | 43 | # node-waf configuration 44 | 45 | .lock-wscript 46 | 47 | # Compiled binary addons (https://nodejs.org/api/addons.html) 48 | 49 | build/Release 50 | 51 | # Dependency directories 52 | 53 | node_modules/ 54 | jspm_packages/ 55 | 56 | # Snowpack dependency directory (https://snowpack.dev/) 57 | 58 | web_modules/ 59 | 60 | # TypeScript cache 61 | 62 | \*.tsbuildinfo 63 | 64 | # Optional npm cache directory 65 | 66 | .npm 67 | 68 | # Optional eslint cache 69 | 70 | .eslintcache 71 | 72 | # Optional stylelint cache 73 | 74 | .stylelintcache 75 | 76 | # Microbundle cache 77 | 78 | .rpt2_cache/ 79 | .rts2_cache_cjs/ 80 | .rts2_cache_es/ 81 | .rts2_cache_umd/ 82 | 83 | # Optional REPL history 84 | 85 | .node_repl_history 86 | 87 | # Output of 'npm pack' 88 | 89 | \*.tgz 90 | 91 | # Yarn Integrity file 92 | 93 | .yarn-integrity 94 | 95 | # dotenv environment variable files 96 | 97 | .env 98 | .env.development.local 99 | .env.test.local 100 | .env.production.local 101 | .env.local 102 | 103 | # parcel-bundler cache (https://parceljs.org/) 104 | 105 | .cache 106 | .parcel-cache 107 | 108 | # Next.js build output 109 | 110 | .next 111 | out 112 | 113 | # Nuxt.js build / generate output 114 | 115 | .nuxt 116 | dist 117 | 118 | # Gatsby files 119 | 120 | .cache/ 121 | 122 | # Comment in the public line in if your project uses Gatsby and not Next.js 123 | 124 | # https://nextjs.org/blog/next-9-1#public-directory-support 125 | 126 | # public 127 | 128 | # vuepress build output 129 | 130 | .vuepress/dist 131 | 132 | # vuepress v2.x temp and cache directory 133 | 134 | .temp 135 | .cache 136 | 137 | # Docusaurus cache and generated files 138 | 139 | .docusaurus 140 | 141 | # Serverless directories 142 | 143 | .serverless/ 144 | 145 | # FuseBox cache 146 | 147 | .fusebox/ 148 | 149 | # DynamoDB Local files 150 | 151 | .dynamodb/ 152 | 153 | # TernJS port file 154 | 155 | .tern-port 156 | 157 | # Stores VSCode versions used for testing VSCode extensions 158 
| 159 | .vscode-test 160 | 161 | # yarn v2 162 | 163 | .yarn/cache 164 | .yarn/unplugged 165 | .yarn/build-state.yml 166 | .yarn/install-state.gz 167 | .pnp.\* 168 | 169 | # wrangler project 170 | 171 | .dev.vars 172 | .wrangler/ 173 | -------------------------------------------------------------------------------- /modules/debug_proxy/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "cf-debug-proxy", 3 | "version": "0.0.0", 4 | "private": true, 5 | "scripts": { 6 | "deploy": "wrangler deploy", 7 | "dev": "wrangler dev", 8 | "start": "wrangler dev" 9 | }, 10 | "devDependencies": { 11 | "itty-router": "^3.0.12", 12 | "wrangler": "^3.60.3" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /modules/debug_proxy/src/index.js: -------------------------------------------------------------------------------- 1 | async function sendTelegramMessage(message, botToken, chatId) { 2 | const url = `https://api.telegram.org/bot${botToken}/sendMessage` 3 | const payload = { 4 | chat_id: chatId, 5 | text: message, 6 | } 7 | 8 | try { 9 | const response = await fetch(url, { 10 | method: 'POST', 11 | headers: { 12 | 'Content-Type': 'application/json', 13 | }, 14 | body: JSON.stringify(payload), 15 | }) 16 | 17 | if (!response.ok) { 18 | console.error('Failed to send message:', await response.text()) 19 | } 20 | } catch (error) { 21 | console.error('Error sending Telegram message:', error) 22 | } 23 | } 24 | 25 | export default { 26 | async fetch(request, env, ctx) { 27 | const url = new URL(request.url) 28 | const userIP = request.headers.get('CF-Connecting-IP') 29 | 30 | if (url.pathname === '/b') { 31 | url.pathname = '/styles/bright' 32 | } 33 | 34 | // // no failure, just warning 35 | // if (request.method !== 'GET') { 36 | // const warningMessage = `Non-GET request ${request.method} ${url.pathname} ${userIP}` 37 | // console.error(warningMessage) 38 | // await 
sendTelegramMessage(warningMessage, env.TELEGRAM_TOKEN, env.TELEGRAM_CHAT_ID) 39 | // } 40 | 41 | if (!url.pathname.startsWith('/styles')) { 42 | const errorMessage = 'Bad path' 43 | return new Response(errorMessage, { status: 500 }) 44 | } 45 | 46 | const proxyUrl = new URL(url.pathname, 'https://tiles.openfreemap.org') 47 | 48 | try { 49 | const response = await fetch(proxyUrl) 50 | 51 | if (response.status !== 200) { 52 | const errorMessage = `Proxy error: Bad status ${response.status} ${url.pathname} ${userIP}` 53 | console.error(errorMessage) 54 | await sendTelegramMessage(errorMessage, env.TELEGRAM_TOKEN, env.TELEGRAM_CHAT_ID) 55 | return new Response('Proxy error: Bad status', { status: 500 }) 56 | } 57 | 58 | return response 59 | } catch (error) { 60 | const errorMessage = `Proxy error: ${error.message} ${url.pathname} ${userIP}` 61 | console.error(errorMessage) 62 | await sendTelegramMessage(errorMessage, env.TELEGRAM_TOKEN, env.TELEGRAM_CHAT_ID) 63 | return new Response('Proxy error: Fetch failed', { status: 500 }) 64 | } 65 | }, 66 | } 67 | -------------------------------------------------------------------------------- /modules/debug_proxy/wrangler.toml: -------------------------------------------------------------------------------- 1 | #:schema node_modules/wrangler/config-schema.json 2 | name = "cf-debug-proxy" 3 | main = "src/index.js" 4 | compatibility_date = "2024-06-20" 5 | 6 | # Automatically place your workloads in an optimal location to minimize latency. 7 | # If you are running back-end logic in a Worker, running it closer to your back-end infrastructure 8 | # rather than the end user may result in better performance. 9 | # Docs: https://developers.cloudflare.com/workers/configuration/smart-placement/#smart-placement 10 | # [placement] 11 | # mode = "smart" 12 | 13 | # Variable bindings. 
These are arbitrary, plaintext strings (similar to environment variables) 14 | # Docs: 15 | # - https://developers.cloudflare.com/workers/wrangler/configuration/#environment-variables 16 | # Note: Use secrets to store sensitive data. 17 | # - https://developers.cloudflare.com/workers/configuration/secrets/ 18 | # [vars] 19 | # MY_VARIABLE = "production_value" 20 | 21 | # Bind the Workers AI model catalog. Run machine learning models, powered by serverless GPUs, on Cloudflare’s global network 22 | # Docs: https://developers.cloudflare.com/workers/wrangler/configuration/#workers-ai 23 | # [ai] 24 | # binding = "AI" 25 | 26 | # Bind an Analytics Engine dataset. Use Analytics Engine to write analytics within your Pages Function. 27 | # Docs: https://developers.cloudflare.com/workers/wrangler/configuration/#analytics-engine-datasets 28 | # [[analytics_engine_datasets]] 29 | # binding = "MY_DATASET" 30 | 31 | # Bind a headless browser instance running on Cloudflare's global network. 32 | # Docs: https://developers.cloudflare.com/workers/wrangler/configuration/#browser-rendering 33 | # [browser] 34 | # binding = "MY_BROWSER" 35 | 36 | # Bind a D1 database. D1 is Cloudflare’s native serverless SQL database. 37 | # Docs: https://developers.cloudflare.com/workers/wrangler/configuration/#d1-databases 38 | # [[d1_databases]] 39 | # binding = "MY_DB" 40 | # database_name = "my-database" 41 | # database_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" 42 | 43 | # Bind a dispatch namespace. Use Workers for Platforms to deploy serverless functions programmatically on behalf of your customers. 44 | # Docs: https://developers.cloudflare.com/workers/wrangler/configuration/#dispatch-namespace-bindings-workers-for-platforms 45 | # [[dispatch_namespaces]] 46 | # binding = "MY_DISPATCHER" 47 | # namespace = "my-namespace" 48 | 49 | # Bind a Durable Object. Durable objects are a scale-to-zero compute primitive based on the actor model. 50 | # Durable Objects can live for as long as needed. 
Use these when you need a long-running "server", such as in realtime apps. 51 | # Docs: https://developers.cloudflare.com/workers/wrangler/configuration/#durable-objects 52 | # [[durable_objects.bindings]] 53 | # name = "MY_DURABLE_OBJECT" 54 | # class_name = "MyDurableObject" 55 | 56 | # Durable Object migrations. 57 | # Docs: https://developers.cloudflare.com/workers/wrangler/configuration/#migrations 58 | # [[migrations]] 59 | # tag = "v1" 60 | # new_classes = ["MyDurableObject"] 61 | 62 | # Bind a Hyperdrive configuration. Use to accelerate access to your existing databases from Cloudflare Workers. 63 | # Docs: https://developers.cloudflare.com/workers/wrangler/configuration/#hyperdrive 64 | # [[hyperdrive]] 65 | # binding = "MY_HYPERDRIVE" 66 | # id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" 67 | 68 | # Bind a KV Namespace. Use KV as persistent storage for small key-value pairs. 69 | # Docs: https://developers.cloudflare.com/workers/wrangler/configuration/#kv-namespaces 70 | # [[kv_namespaces]] 71 | # binding = "MY_KV_NAMESPACE" 72 | # id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" 73 | 74 | # Bind an mTLS certificate. Use to present a client certificate when communicating with another service. 75 | # Docs: https://developers.cloudflare.com/workers/wrangler/configuration/#mtls-certificates 76 | # [[mtls_certificates]] 77 | # binding = "MY_CERTIFICATE" 78 | # certificate_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" 79 | 80 | # Bind a Queue producer. Use this binding to schedule an arbitrary task that may be processed later by a Queue consumer. 81 | # Docs: https://developers.cloudflare.com/workers/wrangler/configuration/#queues 82 | # [[queues.producers]] 83 | # binding = "MY_QUEUE" 84 | # queue = "my-queue" 85 | 86 | # Bind a Queue consumer. Queue Consumers can retrieve tasks scheduled by Producers to act on them. 
87 | # Docs: https://developers.cloudflare.com/workers/wrangler/configuration/#queues 88 | # [[queues.consumers]] 89 | # queue = "my-queue" 90 | 91 | # Bind an R2 Bucket. Use R2 to store arbitrarily large blobs of data, such as files. 92 | # Docs: https://developers.cloudflare.com/workers/wrangler/configuration/#r2-buckets 93 | # [[r2_buckets]] 94 | # binding = "MY_BUCKET" 95 | # bucket_name = "my-bucket" 96 | 97 | # Bind another Worker service. Use this binding to call another Worker without network overhead. 98 | # Docs: https://developers.cloudflare.com/workers/wrangler/configuration/#service-bindings 99 | # [[services]] 100 | # binding = "MY_SERVICE" 101 | # service = "my-service" 102 | 103 | # Bind a Vectorize index. Use to store and query vector embeddings for semantic search, classification and other vector search use-cases. 104 | # Docs: https://developers.cloudflare.com/workers/wrangler/configuration/#vectorize-indexes 105 | # [[vectorize]] 106 | # binding = "MY_INDEX" 107 | # index_name = "my-index" 108 | -------------------------------------------------------------------------------- /modules/http_host/cron.d/ofm_http_host: -------------------------------------------------------------------------------- 1 | # every minute sync, locking so that only one process can run at a time 2 | * * * * * ofm /usr/bin/flock -n /tmp/http_host.lockfile -c 'sudo /data/ofm/venv/bin/python -u /data/ofm/http_host/bin/http_host.py sync >> /data/ofm/http_host/logs/http_host_sync.log 2>&1' 3 | 4 | 5 | -------------------------------------------------------------------------------- /modules/http_host/cron.d/ofm_roundrobin_reader: -------------------------------------------------------------------------------- 1 | # once per day 2 | 2 34 * * * ofm sudo /usr/bin/bash /data/ofm/http_host/bin/roundrobin_reader.sh >> /data/ofm/http_host/logs/roundrobin_reader.log 2>&1 3 | -------------------------------------------------------------------------------- 
/modules/http_host/http_host.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | from datetime import datetime, timezone 3 | 4 | import click 5 | from http_host_lib.assets import ( 6 | download_assets, 7 | ) 8 | from http_host_lib.btrfs import ( 9 | download_area_version, 10 | get_versions_for_area, 11 | ) 12 | from http_host_lib.mount import auto_mount 13 | from http_host_lib.nginx import write_nginx_config 14 | from http_host_lib.sync import auto_clean_btrfs, full_sync 15 | from http_host_lib.versions import fetch_version_files 16 | 17 | 18 | now = datetime.now(timezone.utc) 19 | 20 | 21 | @click.group() 22 | def cli(): 23 | """ 24 | Manages OpenFreeMap HTTP hosts, including:\n 25 | - Downloading btrfs images\n 26 | - Downloading assets\n 27 | - Mounting downloaded btrfs images\n 28 | - Fetches version files\n 29 | - Running the sync cron task (called every minute with http-host-autoupdate) 30 | """ 31 | 32 | 33 | @cli.command() 34 | @click.argument('area', required=False) 35 | @click.option( 36 | '--version', default='latest', help='Optional version string, like "20231227_043106_pt"' 37 | ) 38 | def download_btrfs(area: str, version: str): 39 | """ 40 | Downloads and uncompresses tiles.btrfs files from the btrfs bucket 41 | Version can be "latest" (default) or specified, like "20231227_043106_pt" 42 | Use --version=1 to list all available versions 43 | """ 44 | 45 | download_area_version(area, version) 46 | 47 | 48 | @cli.command(name='download-assets') 49 | def download_assets_(): 50 | """ 51 | Downloads and extracts assets 52 | """ 53 | 54 | download_assets() 55 | 56 | 57 | @cli.command() 58 | def mount(): 59 | """ 60 | Mounts/unmounts the btrfs images from /data/ofm/http_host/runs automatically. 61 | When finished, /mnt/ofm dir will have all the present tiles.btrfs files mounted in a read-only way. 
62 | """ 63 | 64 | auto_mount() 65 | 66 | 67 | @cli.command(name='fetch-versions') 68 | def fetch_version_files_(): 69 | """ 70 | Fetches the version files from remote to local. 71 | Remote versions are specified by https://assets.openfreemap.com/versions/deployed_{area}.txt 72 | """ 73 | 74 | fetch_version_files() 75 | 76 | 77 | @cli.command() 78 | def auto_clean(): 79 | """ 80 | Cleans the old btrfs images 81 | """ 82 | 83 | auto_clean_btrfs() 84 | 85 | 86 | @cli.command() 87 | def nginx_config(): 88 | """ 89 | Writes the nginx config files and reloads nginx 90 | """ 91 | 92 | write_nginx_config() 93 | 94 | 95 | @cli.command() 96 | @click.option('--force', is_flag=True, help='Force nginx sync run') 97 | def sync(force): 98 | """ 99 | Runs the sync task, normally called by cron every minute 100 | On a new server this also takes care of everything, no need to run anything manually. 101 | """ 102 | 103 | print(f'---\n{now}\nStarting sync') 104 | 105 | full_sync(force) 106 | 107 | 108 | @cli.command() 109 | def debug(): 110 | versions = get_versions_for_area('monaco') 111 | print(versions) 112 | 113 | 114 | if __name__ == '__main__': 115 | cli() 116 | -------------------------------------------------------------------------------- /modules/http_host/http_host_lib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperknot/openfreemap/dd97e1fdcbd2f7d9c90eee9b52c96bc2ac9009b0/modules/http_host/http_host_lib/__init__.py -------------------------------------------------------------------------------- /modules/http_host/http_host_lib/assets.py: -------------------------------------------------------------------------------- 1 | import shutil 2 | import subprocess 3 | 4 | import requests 5 | 6 | from http_host_lib.config import config 7 | from http_host_lib.utils import download_file_aria2, download_if_size_differs 8 | 9 | 10 | def download_assets() -> bool: 11 | """ 12 | Downloads and extracts 
assets 13 | """ 14 | 15 | changed = False 16 | 17 | changed += download_and_extract_asset_tar_gz('fonts') 18 | changed += download_and_extract_asset_tar_gz('styles') 19 | changed += download_and_extract_asset_tar_gz('natural_earth') 20 | 21 | changed += download_sprites() 22 | 23 | return changed 24 | 25 | 26 | def download_and_extract_asset_tar_gz(asset_kind): 27 | """ 28 | Download and extract asset.tgz if the file size differ or not available locally 29 | Returns True if modified 30 | """ 31 | 32 | print(f'Downloading asset {asset_kind}') 33 | 34 | asset_dir = config.assets_dir / asset_kind 35 | asset_dir.mkdir(exist_ok=True, parents=True) 36 | 37 | url = f'https://assets.openfreemap.com/{asset_kind}/ofm.tar.gz' 38 | local_file = asset_dir / 'ofm.tar.gz' 39 | if not download_if_size_differs(url, local_file): 40 | print(f' skipping asset: {asset_kind}') 41 | return False 42 | 43 | ofm_dir = asset_dir / 'ofm' 44 | ofm_dir_bak = asset_dir / 'ofm.bak' 45 | shutil.rmtree(ofm_dir_bak, ignore_errors=True) 46 | if ofm_dir.exists(): 47 | ofm_dir.rename(ofm_dir_bak) 48 | 49 | subprocess.run( 50 | ['tar', '-xzf', local_file, '-C', asset_dir], 51 | check=True, 52 | ) 53 | 54 | print(f' downloaded asset: {asset_kind}') 55 | return True 56 | 57 | 58 | def download_sprites() -> bool: 59 | """ 60 | Sprites are special assets, as we have to keep the old versions indefinitely 61 | """ 62 | 63 | print('Downloading sprites') 64 | 65 | sprites_dir = config.assets_dir / 'sprites' 66 | sprites_dir.mkdir(exist_ok=True, parents=True) 67 | 68 | r = requests.get('https://assets.openfreemap.com/files.txt', timeout=30) 69 | r.raise_for_status() 70 | 71 | sprites_remote = [l for l in r.text.splitlines() if l.startswith('sprites/')] 72 | 73 | changed = False 74 | 75 | for sprite in sprites_remote: 76 | sprite_name = sprite.split('/')[1].replace('.tar.gz', '') 77 | 78 | if (sprites_dir / sprite_name).is_dir(): 79 | print(f' skipping sprite version: {sprite_name}') 80 | continue 81 | 82 | url = 
f'https://assets.openfreemap.com/sprites/{sprite_name}.tar.gz' 83 | local_file = sprites_dir / 'temp.tar.gz' 84 | download_file_aria2(url, local_file) 85 | 86 | subprocess.run( 87 | ['tar', '-xzf', local_file, '-C', sprites_dir], 88 | check=True, 89 | ) 90 | local_file.unlink() 91 | print(f' downloaded sprite version: {sprite_name}') 92 | changed = True 93 | 94 | return changed 95 | -------------------------------------------------------------------------------- /modules/http_host/http_host_lib/btrfs.py: -------------------------------------------------------------------------------- 1 | import shutil 2 | import subprocess 3 | import sys 4 | 5 | from http_host_lib.config import config 6 | from http_host_lib.shared import get_versions_for_area 7 | from http_host_lib.utils import download_file_aria2, get_remote_file_size 8 | 9 | 10 | def download_area_version(area: str, version: str) -> bool: 11 | """ 12 | Downloads and uncompresses tiles.btrfs files from the btrfs bucket 13 | 14 | "latest" version means the latest in the remote bucket 15 | "deployed" version means to read the currently deployed version string from the config dir 16 | """ 17 | 18 | if area not in config.areas: 19 | sys.exit(f' Please specify area: {config.areas}') 20 | 21 | versions = get_versions_for_area(area) 22 | if not versions: 23 | print(f' No versions found for {area}') 24 | return False 25 | 26 | # latest version 27 | if version == 'latest': 28 | selected_version = versions[-1] 29 | 30 | # deployed version 31 | elif version == 'deployed': 32 | try: 33 | selected_version = (config.deployed_versions_dir / f'{area}.txt').read_text().strip() 34 | except Exception: 35 | return False 36 | 37 | # specific version 38 | else: 39 | if version not in versions: 40 | available_versions_str = '\n'.join(versions) 41 | print( 42 | f' Requested version is not available.\nAvailable versions for {area}:\n{available_versions_str}' 43 | ) 44 | return False 45 | selected_version = version 46 | 47 | return 
download_and_extract_btrfs(area, selected_version) 48 | 49 | 50 | def download_and_extract_btrfs(area: str, version: str) -> bool: 51 | """ 52 | returns True if download successful, False if skipped 53 | """ 54 | 55 | print(f'Downloading btrfs: {area} {version}') 56 | 57 | version_dir = config.runs_dir / area / version 58 | btrfs_file = version_dir / 'tiles.btrfs' 59 | if btrfs_file.exists(): 60 | print(' file exists, skipping download') 61 | return False 62 | 63 | temp_dir = config.runs_dir / '_tmp' 64 | shutil.rmtree(temp_dir, ignore_errors=True) 65 | temp_dir.mkdir(parents=True) 66 | 67 | url = f'https://btrfs.openfreemap.com/areas/{area}/{version}/tiles.btrfs.gz' 68 | 69 | # check disk space 70 | disk_free = shutil.disk_usage(temp_dir).free 71 | file_size = get_remote_file_size(url) 72 | if not file_size: 73 | print(f' cannot get remote file size for {url}') 74 | return False 75 | 76 | needed_space = file_size * 3 77 | if disk_free < needed_space: 78 | print(f' not enough disk space. 
Needed: {needed_space}, free space: {disk_free}') 79 | return False 80 | 81 | target_file = temp_dir / 'tiles.btrfs.gz' 82 | download_file_aria2(url, target_file) 83 | 84 | print(' uncompressing...') 85 | subprocess.run(['unpigz', temp_dir / 'tiles.btrfs.gz'], check=True) 86 | btrfs_src = temp_dir / 'tiles.btrfs' 87 | 88 | shutil.rmtree(version_dir, ignore_errors=True) 89 | version_dir.mkdir(parents=True) 90 | 91 | btrfs_src.rename(btrfs_file) 92 | 93 | shutil.rmtree(temp_dir) 94 | return True 95 | -------------------------------------------------------------------------------- /modules/http_host/http_host_lib/config.py: -------------------------------------------------------------------------------- 1 | import json 2 | import subprocess 3 | from pathlib import Path 4 | 5 | 6 | class Configuration: 7 | areas = ['planet', 'monaco'] 8 | 9 | http_host_dir = Path('/data/ofm/http_host') 10 | 11 | http_host_bin = http_host_dir / 'bin' 12 | http_host_scripts_dir = http_host_bin / 'scripts' 13 | 14 | runs_dir = http_host_dir / 'runs' 15 | assets_dir = http_host_dir / 'assets' 16 | 17 | mnt_dir = Path('/mnt/ofm') 18 | 19 | certs_dir = Path('/data/nginx/certs') 20 | nginx_confs = Path(__file__).parent / 'nginx_confs' 21 | 22 | if Path('/data/ofm').exists(): 23 | ofm_config_dir = Path('/data/ofm/config') 24 | else: 25 | repo_root = Path(__file__).parent.parent.parent.parent 26 | ofm_config_dir = repo_root / 'config' 27 | 28 | ofm_config = json.loads((ofm_config_dir / 'config.json').read_text()) 29 | 30 | deployed_versions_dir = ofm_config_dir / 'deployed_versions' 31 | 32 | rclone_config = ofm_config_dir / 'rclone.conf' 33 | rclone_bin = subprocess.run(['which', 'rclone'], capture_output=True, text=True).stdout.strip() 34 | 35 | 36 | config = Configuration() 37 | -------------------------------------------------------------------------------- /modules/http_host/http_host_lib/mount.py: -------------------------------------------------------------------------------- 1 | import 
def auto_mount():
    """
    Mounts/unmounts the btrfs images from /data/ofm/http_host/runs automatically.
    When finished, /mnt/ofm dir will have all the present tiles.btrfs files mounted in a read-only way.
    """

    print('Running auto mount')

    assert_linux()
    assert_sudo()

    if not config.runs_dir.exists():
        sys.exit(' download-btrfs needs to be run first')

    # clean_up_mounts(config.mnt_dir) # disabling, as it can be in use before the nginx sync works
    create_fstab()

    print(' running mount -a')
    subprocess.run(['mount', '-a'], check=True)


def create_fstab():
    """
    Rewrites the OFM-managed entries of /etc/fstab.

    Adds one read-only loop-mount line per available tiles.btrfs image and keeps
    every non-OFM line of the existing fstab intact.
    """

    print(' creating fstab')
    fstab_new = []

    # fix: use the shared config.areas list instead of a duplicated
    # hard-coded ['planet', 'monaco'], keeping this consistent with
    # sync.py and versions.py
    for area in config.areas:
        area_dir = (config.runs_dir / area).resolve()
        if not area_dir.exists():
            continue

        versions = sorted(area_dir.iterdir())
        for version in versions:
            version_str = version.name
            btrfs_file = area_dir / version_str / 'tiles.btrfs'
            if not btrfs_file.is_file():
                print(f" {btrfs_file} doesn't exist, skipping")
                continue

            mnt_folder = config.mnt_dir / f'{area}-{version_str}'
            mnt_folder.mkdir(exist_ok=True, parents=True)

            fstab_new.append(f'{btrfs_file} {mnt_folder} btrfs loop,ro 0 0\n')
            print(f' created fstab entry for {mnt_folder}')

    # keep every fstab line that is not an OFM mount, then append ours
    with open('/etc/fstab') as fp:
        fstab_orig = [l for l in fp.readlines() if f'{config.mnt_dir}/' not in l]

    with open('/etc/fstab', 'w') as fp:
        fp.writelines(fstab_orig + fstab_new)
def clean_up_mounts(mnt_dir):
    """
    Unmounts and removes mount folders that are no longer wanted.

    Two passes:
      1. mounts whose backing disk image was deleted (marked '(deleted)' in `mount` output)
      2. mounts no longer referenced by the current /etc/fstab
    """

    if not mnt_dir.exists():
        return

    print('Cleaning up mounts')

    # handle deleted files
    p = subprocess.run(['mount'], capture_output=True, text=True, check=True)
    lines = [l for l in p.stdout.splitlines() if f'{mnt_dir}/' in l and '(deleted)' in l]

    for l in lines:
        # mount output shape: '<image> (deleted) on <path> type btrfs (...)'
        mnt_path = Path(l.split('(deleted) on ')[1].split(' type btrfs')[0])
        print(f' removing deleted mount {mnt_path}')
        assert mnt_path.exists()
        subprocess.run(['umount', mnt_path], check=True)
        mnt_path.rmdir()

    # clean all mounts not in current fstab
    with open('/etc/fstab') as fp:
        fstab_str = fp.read()

    for subdir in mnt_dir.iterdir():
        # trailing space avoids matching '/mnt/ofm/x' against '/mnt/ofm/x-longer'
        if f'{subdir} ' in fstab_str:
            continue

        print(f' removing old mount {subdir}')
        subprocess.run(['umount', subdir], check=True)
        subdir.rmdir()
def write_nginx_config():
    """
    Regenerates all OFM nginx site configs and certs, reloads nginx,
    and prints curl commands for smoke-testing the endpoints.

    Handles two server blocks:
      - round-robin domain: shared cert pulled from the private bucket via rclone
      - direct domain: Let's Encrypt via certbot (unless self_signed_certs is set)
    """

    print('Writing nginx config')

    if not config.mnt_dir.exists():
        sys.exit(' mount needs to be run first')

    curl_text_mix = ''

    domain_direct = config.ofm_config['domain_direct']
    domain_roundrobin = config.ofm_config['domain_roundrobin']
    self_signed_certs = config.ofm_config['self_signed_certs']

    # remove old configs and certs
    for file in Path('/data/nginx/sites').glob('ofm_*.conf'):
        file.unlink()

    for file in Path('/data/nginx/certs').glob('ofm_*'):
        file.unlink()

    # processing Round Robin DNS config
    if domain_roundrobin:
        if not config.rclone_config.is_file():
            sys.exit('rclone.conf missing')

        # download the roundrobin certificate from bucket using rclone
        write_roundrobin_reader_script(domain_roundrobin)
        subprocess.run(['bash', config.http_host_bin / 'roundrobin_reader.sh'], check=True)

        curl_text_mix += create_nginx_conf(
            template_path=config.nginx_confs / 'roundrobin.conf',
            local='ofm_roundrobin',
            domain=domain_roundrobin,
        )

    # processing Let's Encrypt config
    if domain_direct:
        direct_cert = config.certs_dir / 'ofm_direct.cert'
        direct_key = config.certs_dir / 'ofm_direct.key'

        # bootstrap with dummy certs so nginx can start before certbot runs
        if not direct_cert.is_file() or not direct_key.is_file():
            shutil.copyfile(Path('/etc/nginx/ssl/dummy.cert'), direct_cert)
            shutil.copyfile(Path('/etc/nginx/ssl/dummy.key'), direct_key)

        curl_text_mix += create_nginx_conf(
            template_path=config.nginx_confs / 'le.conf',
            local='ofm_direct',
            domain=domain_direct,
        )

        # nginx must be serving the acme-challenge location before certbot runs
        subprocess.run(['nginx', '-t'], check=True)
        subprocess.run(['systemctl', 'reload', 'nginx'], check=True)

        if not self_signed_certs:
            subprocess.run(
                [
                    'certbot',
                    'certonly',
                    '--webroot',
                    '--webroot-path=/data/nginx/acme-challenges',
                    '--noninteractive',
                    '-m',
                    config.ofm_config['letsencrypt_email'],
                    '--agree-tos',
                    '--cert-name=ofm_direct',
                    # '--staging',
                    '--deploy-hook',
                    'nginx -t && service nginx reload',
                    '-d',
                    domain_direct,
                ],
                check=True,
            )

            # link certs to nginx dir
            # replace the dummy certs with symlinks to the issued LE certs
            direct_cert.unlink()
            direct_key.unlink()

            etc_cert = Path('/etc/letsencrypt/live/ofm_direct/fullchain.pem')
            etc_key = Path('/etc/letsencrypt/live/ofm_direct/privkey.pem')
            assert etc_cert.is_file()
            assert etc_key.is_file()
            direct_cert.symlink_to(etc_cert)
            direct_key.symlink_to(etc_key)

    subprocess.run(['nginx', '-t'], check=True)
    subprocess.run(['systemctl', 'reload', 'nginx'], check=True)

    # keep only the curl commands relevant for this host's served areas
    curl_text_lines = sorted(curl_text_mix.splitlines())
    if config.ofm_config.get('skip_planet'):
        curl_text_lines = [l for l in curl_text_lines if '/planet' not in l]
    else:
        curl_text_lines = [l for l in curl_text_lines if '/monaco' not in l]

    curl_text_mix = '\n'.join(curl_text_lines)
    print(f'test with:\n{curl_text_mix}')
def create_nginx_conf(*, template_path, local, domain):
    """
    Renders one nginx server config from a template and writes it to
    /data/nginx/sites/{local}.conf.

    Returns the curl smoke-test commands for the generated locations,
    with the placeholders substituted.
    """

    location_str, curl_text = create_location_blocks(local=local, domain=domain)

    template = Path(template_path).read_text()

    # substitute placeholders in the template, locations first
    replacements = {
        '__LOCATION_BLOCKS__': location_str,
        '__LOCAL__': local,
        '__DOMAIN__': domain,
    }
    for placeholder, value in replacements.items():
        template = template.replace(placeholder, value)

    curl_text = curl_text.replace('__LOCAL__', local).replace('__DOMAIN__', domain)

    Path(f'/data/nginx/sites/{local}.conf').write_text(template)
    print(f' nginx config written: {domain} {local}')

    return curl_text
def create_location_blocks(*, local, domain):
    """
    Builds the nginx `location` blocks for every mounted area-version,
    plus the 'latest' locations and the shared static locations.

    Returns (location_str, curl_text): the nginx config fragment and the
    curl commands for smoke-testing the generated endpoints.
    """

    location_str = ''
    curl_text = ''

    for subdir in config.mnt_dir.iterdir():
        if not subdir.is_dir():
            continue
        # mount dirs are named '{area}-{version}'
        area, version = subdir.name.split('-')

        location_str += create_version_location(
            area=area, version=version, mnt_dir=subdir, local=local, domain=domain
        )

        for path in [
            f'/{area}/{version}',
            f'/{area}/{version}/14/8529/5975.pbf',
            f'/{area}/{version}/9999/9999/9999.pbf',  # empty_tile test
        ]:
            curl_text += (
                # f'curl -H "Host: __LOCAL__" -I http://localhost/{path}\n'
                f'curl -sI https://__DOMAIN__{path} | sort\n'
            )

    location_str += create_latest_locations(local=local, domain=domain)

    for area in config.areas:
        for path in [
            f'/{area}',
            f'/{area}/19700101_old_version_test',
            f'/{area}/19700101_old_version_test/14/8529/5975.pbf',
            f'/{area}/19700101_old_version_test/9999/9999/9999.pbf',  # empty_tile test
        ]:
            curl_text += (
                # f'curl -H "Host: __LOCAL__" -I http://localhost/{path}\n'
                f'curl -sI https://__DOMAIN__{path} | sort\n'
            )

    with open(config.nginx_confs / 'location_static.conf') as fp:
        location_str += '\n' + fp.read()

    return location_str, curl_text


def create_version_location(
    *, area: str, version: str, mnt_dir: Path, local: str, domain: str
) -> str:
    """
    Generates the nginx location blocks for one specific area+version:
    a TileJSON location and a PBF tiles location.

    Also (re)generates the version's TileJSON file from the mounted
    metadata.json via the helper script.
    Returns '' when the run dir or the metadata file is missing.
    """

    run_dir = config.runs_dir / area / version
    if not run_dir.is_dir():
        print(f" {run_dir} doesn't exist, skipping")
        return ''

    tilejson_path = run_dir / f'tilejson-{local}.json'

    metadata_path = mnt_dir / 'metadata.json'
    if not metadata_path.is_file():
        print(f" {metadata_path} doesn't exist, skipping")
        return ''

    url_prefix = f'https://{domain}/{area}/{version}'

    # run the converter with the venv's python so its deps are available
    subprocess.run(
        [
            python_venv_executable(),
            config.http_host_scripts_dir / 'metadata_to_tilejson.py',
            '--minify',
            metadata_path,
            tilejson_path,
            url_prefix,
        ],
        check=True,
    )

    return f"""
    # specific JSON {area} {version}
    location = /{area}/{version} {{ # no trailing slash
        alias {tilejson_path}; # no trailing slash

        expires 1w;
        default_type application/json;

        add_header 'Access-Control-Allow-Origin' '*' always;
        add_header Cache-Control public;
        add_header X-Robots-Tag "noindex, nofollow" always;

        add_header x-ofm-debug 'specific JSON {area} {version}';
    }}

    # specific PBF {area} {version}
    location ^~ /{area}/{version}/ {{ # trailing slash
        alias {mnt_dir}/tiles/; # trailing slash
        try_files $uri @empty_tile;
        add_header Content-Encoding gzip;

        expires 10y;

        types {{
            application/vnd.mapbox-vector-tile pbf;
        }}

        add_header 'Access-Control-Allow-Origin' '*' always;
        add_header Cache-Control public;
        add_header X-Robots-Tag "noindex, nofollow" always;

        add_header x-ofm-debug 'specific PBF {area} {version}';
    }}
    """
def create_latest_locations(*, local: str, domain: str) -> str:
    """
    Generates the nginx locations serving the 'latest' deployed version of each area:
      - /{area}               -> TileJSON of the currently deployed version
      - /{area}/<anything>    -> wildcard fallback (JSON + PBF) for old version strings

    Driven by the deployed_versions/*.txt files; areas with missing run or
    mount dirs are skipped with an error message.
    """

    location_str = ''

    local_version_files = config.deployed_versions_dir.glob('*.txt')

    for file in local_version_files:
        area = file.stem
        with open(file) as fp:
            version = fp.read().strip()

        print(f' linking latest version for {area}: {version}')

        # checking runs dir
        run_dir = config.runs_dir / area / version
        tilejson_path = run_dir / f'tilejson-{local}.json'
        if not tilejson_path.is_file():
            print(f' error with latest: {tilejson_path} does not exist')
            continue

        # checking mnt dir
        mnt_dir = Path(f'/mnt/ofm/{area}-{version}')
        mnt_file = mnt_dir / 'metadata.json'
        if not mnt_file.is_file():
            print(f' error with latest: {mnt_file} does not exist')
            continue

        # latest
        location_str += f"""

    # latest JSON {area}
    location = /{area} {{ # no trailing slash
        alias {tilejson_path}; # no trailing slash

        expires 1d;
        default_type application/json;

        add_header 'Access-Control-Allow-Origin' '*' always;
        add_header Cache-Control public;
        add_header X-Robots-Tag "noindex, nofollow" always;

        add_header x-ofm-debug 'latest JSON {area}';
    }}
    """

        # wildcard
        # identical to create_version_location
        location_str += f"""

    # wildcard JSON {area}
    location ~ ^/{area}/([^/]+)$ {{
        # regex location is unreliable with alias, only root is reliable

        root {run_dir}; # no trailing slash
        try_files /tilejson-{local}.json =404;

        expires 1w;
        default_type application/json;

        add_header 'Access-Control-Allow-Origin' '*' always;
        add_header Cache-Control public;
        add_header X-Robots-Tag "noindex, nofollow" always;

        add_header x-ofm-debug 'wildcard JSON {area}';
    }}

    # wildcard PBF {area}
    location ~ ^/{area}/([^/]+)/(.+)$ {{
        # regex location is unreliable with alias, only root is reliable

        root {mnt_dir}/tiles/; # trailing slash
        try_files /$2 @empty_tile;
        add_header Content-Encoding gzip;

        expires 10y;

        types {{
            application/vnd.mapbox-vector-tile pbf;
        }}

        add_header 'Access-Control-Allow-Origin' '*' always;
        add_header Cache-Control public;
        add_header X-Robots-Tag "noindex, nofollow" always;

        add_header x-ofm-debug 'wildcard PBF {area}';
    }}
    """

    return location_str
def write_roundrobin_reader_script(domain_roundrobin):
    """
    Writes the bash helper that pulls the shared round-robin TLS cert and key
    for this domain from the private bucket via rclone.
    """

    bucket_base = f'remote:ofm-private/roundrobin/{domain_roundrobin}'

    script_lines = [
        '#!/usr/bin/env bash',
        'export RCLONE_CONFIG=/data/ofm/config/rclone.conf',
        f'rclone copyto -v "{bucket_base}/ofm_roundrobin.cert" /data/nginx/certs/ofm_roundrobin.cert',
        f'rclone copyto -v "{bucket_base}/ofm_roundrobin.key" /data/nginx/certs/ofm_roundrobin.key',
    ]

    (config.http_host_bin / 'roundrobin_reader.sh').write_text('\n'.join(script_lines))
/etc/nginx/ffdhe2048.txt; 19 | 20 | # intermediate configuration 21 | ssl_protocols TLSv1.2 TLSv1.3; 22 | ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305; 23 | ssl_prefer_server_ciphers off; 24 | 25 | 26 | # access log doesn't contain IP address 27 | access_log off; 28 | #access_log /data/ofm/http_host/logs_nginx/le-access.jsonl access_json buffer=128k; 29 | 30 | error_log /data/ofm/http_host/logs_nginx/le-error.log; 31 | 32 | add_header X-Robots-Tag "noindex, nofollow" always; 33 | 34 | 35 | location ^~ /.well-known/acme-challenge/ { 36 | # trailing slash 37 | root /data/nginx/acme-challenges; 38 | try_files $uri =404; 39 | } 40 | 41 | __LOCATION_BLOCKS__ 42 | 43 | location /styles/ { 44 | # trailing slash 45 | 46 | alias /data/ofm/http_host/assets/styles/ofm/; # trailing slash 47 | try_files $uri.json =404; 48 | 49 | expires 1d; 50 | default_type application/json; 51 | 52 | # substitute the domain in the TileJSON 53 | sub_filter '__TILEJSON_DOMAIN__' '__DOMAIN__'; 54 | sub_filter_once off; 55 | sub_filter_types '*'; 56 | 57 | add_header 'Access-Control-Allow-Origin' '*' always; 58 | add_header Cache-Control public; 59 | add_header X-Robots-Tag "noindex, nofollow" always; 60 | } 61 | 62 | # catch-all block to deny all other requests 63 | location / { 64 | deny all; 65 | error_log /data/ofm/http_host/logs_nginx/le-deny.log error; 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /modules/http_host/http_host_lib/nginx_confs/location_static.conf: -------------------------------------------------------------------------------- 1 | # Serve robots.txt that blocks all crawlers 2 | #location = /robots.txt { 3 | # add_header Content-Type text/plain; 4 | #return 200 "User-agent: *\nDisallow: /\n"; 5 | #} 6 | 7 | 
location /fonts/ { 8 | # trailing slash 9 | 10 | alias /data/ofm/http_host/assets/fonts/ofm/; # trailing slash 11 | try_files $uri =404; 12 | 13 | expires 1w; 14 | 15 | add_header 'Access-Control-Allow-Origin' '*' always; 16 | add_header Cache-Control public; 17 | add_header X-Robots-Tag "noindex, nofollow" always; 18 | 19 | } 20 | 21 | location /natural_earth/ { 22 | # trailing slash 23 | 24 | alias /data/ofm/http_host/assets/natural_earth/ofm/; # trailing slash 25 | try_files $uri =404; 26 | 27 | expires 10y; 28 | 29 | add_header 'Access-Control-Allow-Origin' '*' always; 30 | add_header Cache-Control public; 31 | add_header X-Robots-Tag "noindex, nofollow" always; 32 | 33 | } 34 | 35 | location /sprites/ { 36 | # trailing slash 37 | 38 | alias /data/ofm/http_host/assets/sprites/; # trailing slash 39 | try_files $uri =404; 40 | 41 | expires 10y; 42 | 43 | add_header 'Access-Control-Allow-Origin' '*' always; 44 | add_header Cache-Control public; 45 | add_header X-Robots-Tag "noindex, nofollow" always; 46 | 47 | } 48 | 49 | 50 | # we need to handle missing tiles as valid request returning empty string 51 | location @empty_tile { 52 | return 200 ''; 53 | 54 | expires 10y; 55 | 56 | types { 57 | application/vnd.mapbox-vector-tile pbf; 58 | } 59 | 60 | add_header 'Access-Control-Allow-Origin' '*' always; 61 | add_header Cache-Control public; 62 | add_header X-Robots-Tag "noindex, nofollow" always; 63 | 64 | add_header x-ofm-debug 'empty tile'; 65 | } 66 | 67 | location = / { 68 | return 302 https://openfreemap.org; 69 | } 70 | -------------------------------------------------------------------------------- /modules/http_host/http_host_lib/nginx_confs/roundrobin.conf: -------------------------------------------------------------------------------- 1 | server { 2 | server_name __LOCAL__ __DOMAIN__; 3 | 4 | # ssl: https://ssl-config.mozilla.org / intermediate config 5 | 6 | listen 80; 7 | listen 443 ssl; 8 | listen [::]:443 ssl; 9 | http2 on; 10 | 11 | ssl_certificate 
/data/nginx/certs/ofm_roundrobin.cert; 12 | ssl_certificate_key /data/nginx/certs/ofm_roundrobin.key; 13 | 14 | ssl_session_timeout 1d; 15 | ssl_session_cache shared:MozSSL:10m; # about 40000 sessions 16 | ssl_session_tickets off; 17 | 18 | ssl_dhparam /etc/nginx/ffdhe2048.txt; 19 | 20 | # intermediate configuration 21 | ssl_protocols TLSv1.2 TLSv1.3; 22 | ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305; 23 | ssl_prefer_server_ciphers off; 24 | 25 | # access log doesn't contain IP address 26 | access_log off; 27 | #access_log /data/ofm/http_host/logs_nginx/roundrobin-access.jsonl access_json buffer=128k; 28 | 29 | error_log /data/ofm/http_host/logs_nginx/roundrobin-error.log; 30 | 31 | add_header X-Robots-Tag "noindex, nofollow" always; 32 | 33 | 34 | __LOCATION_BLOCKS__ 35 | 36 | location /styles/ { 37 | # trailing slash 38 | 39 | alias /data/ofm/http_host/assets/styles/ofm/; # trailing slash 40 | try_files $uri.json =404; 41 | 42 | expires 1d; 43 | default_type application/json; 44 | 45 | # substitute the domain in the TileJSON 46 | sub_filter '__TILEJSON_DOMAIN__' '__DOMAIN__'; 47 | sub_filter_once off; 48 | sub_filter_types '*'; 49 | 50 | add_header 'Access-Control-Allow-Origin' '*' always; 51 | add_header Cache-Control public; 52 | add_header X-Robots-Tag "noindex, nofollow" always; 53 | 54 | } 55 | 56 | # catch-all block to deny all other requests 57 | location / { 58 | deny all; 59 | error_log /data/ofm/http_host/logs_nginx/roundrobin-deny.log error; 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /modules/http_host/http_host_lib/shared.py: -------------------------------------------------------------------------------- 1 | ../../tile_gen/tile_gen_lib/shared.py 
def full_sync(force=False):
    """
    Runs the sync task, normally called by cron every minute
    On a new server this also takes care of everything, no need to run anything manually.

    force: re-run the mount/nginx steps even when nothing changed.
    """

    assert_linux()
    assert_sudo()

    # start
    versions_changed = fetch_version_files()

    assets_changed = download_assets()

    # fix: accumulate the download flags with boolean OR instead of
    # integer `+=`, keeping btrfs_downloaded an actual bool
    btrfs_downloaded = False

    # download latest and deployed monaco
    btrfs_downloaded |= download_area_version(area='monaco', version='latest')
    btrfs_downloaded |= download_area_version(area='monaco', version='deployed')

    # download latest and deployed planet
    if not config.ofm_config.get('skip_planet'):
        btrfs_downloaded |= download_area_version(area='planet', version='latest')
        btrfs_downloaded |= download_area_version(area='planet', version='deployed')

    if btrfs_downloaded or versions_changed or assets_changed or force:
        auto_clean_btrfs()
        auto_mount()

        write_nginx_config()

        # unmount only after nginx points at the new versions
        clean_up_mounts(config.mnt_dir)
def auto_clean_btrfs():
    """
    Clean old btrfs runs

    For each area we keep max two versions:
    1. The newest one available locally
    2. The one currently deployed, specified in /data/ofm/config/deployed_versions
    3. If there is no deployed version, then we include the second newest one
    """

    print('Running auto clean btrfs')

    for area in config.areas:
        area_dir = config.runs_dir / area
        if not area_dir.is_dir():
            continue

        local_versions = sorted(item.name for item in area_dir.iterdir())

        keep = set()

        # the newest local version is always kept
        if local_versions:
            keep.add(local_versions[-1])

        # keep the deployed version too, when its run dir still exists
        try:
            deployed = (config.deployed_versions_dir / f'{area}.txt').read_text().strip()
            if (config.runs_dir / area / deployed).exists():
                keep.add(deployed)
        except Exception:
            pass

        # fall back to the second newest when only one version is kept so far
        if len(keep) == 1 and len(local_versions) >= 2:
            keep.add(local_versions[-2])

        print(f' keeping runs for {area}: {sorted(keep)}')

        for version in set(local_versions) - keep:
            # Interesting bit: linux allows us to remove the disk image file for a mount
            # while the mount is still being used.
            # We delete the disk image, update nginx config and only then unmount the /mnt dir.
            print(f' removing runs for {area}: {version}')
            shutil.rmtree(config.runs_dir / area / version)
92 | print(f' removing runs for {area}: {version}') 93 | version_dir = config.runs_dir / area / version 94 | shutil.rmtree(version_dir) 95 | -------------------------------------------------------------------------------- /modules/http_host/http_host_lib/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | import sys 4 | from pathlib import Path 5 | 6 | import requests 7 | 8 | 9 | def assert_sudo(): 10 | if os.geteuid() != 0: 11 | sys.exit(' needs sudo') 12 | 13 | 14 | def assert_linux(): 15 | if not Path('/etc/fstab').exists(): 16 | sys.exit(' needs to be run on Linux') 17 | 18 | 19 | def assert_single_process(): 20 | p = subprocess.run(['pgrep', '-fl', sys.argv[0]], capture_output=True, text=True) 21 | lines = [l for l in p.stdout.splitlines() if 'python' in l] 22 | if len(lines) >= 2: 23 | sys.exit(' detected multiple processes, terminating') 24 | 25 | 26 | def download_if_size_differs(url: str, local_file: Path) -> bool: 27 | if not local_file.exists() or local_file.stat().st_size != get_remote_file_size(url): 28 | download_file_aria2(url, local_file) 29 | return True 30 | 31 | return False 32 | 33 | 34 | def get_remote_file_size(url: str) -> int | None: 35 | r = requests.head(url, timeout=30) 36 | size = r.headers.get('Content-Length') 37 | return int(size) if size else None 38 | 39 | 40 | def download_file_aria2(url: str, local_file: Path): 41 | print(f' downloading {url} into {local_file}') 42 | local_file.unlink(missing_ok=True) 43 | 44 | subprocess.run( 45 | [ 46 | 'aria2c', 47 | '--split=8', 48 | '--max-connection-per-server=8', 49 | '--file-allocation=none', 50 | '--min-split-size=1M', 51 | '-d', 52 | local_file.parent, 53 | '-o', 54 | local_file.name, 55 | url, 56 | ], 57 | check=True, 58 | ) 59 | 60 | 61 | def python_venv_executable() -> Path: 62 | venv_path = os.environ.get('VIRTUAL_ENV') 63 | 64 | if venv_path: 65 | return Path(venv_path) / 'bin' / 'python' 66 | elif 
def fetch_version_files() -> bool:
    """
    Syncs the version files from remote to local.
    Remote versions are specified by https://assets.openfreemap.com/versions/deployed_{area}.txt

    Returns True when any local version file changed, meaning nginx needs a re-sync.
    """

    print('Syncing local version files')

    assert_linux()
    assert_sudo()

    need_nginx_sync = False

    for area in config.areas:
        deployed_version = get_deployed_version(area)['version']
        if not deployed_version:
            print(f' deployed version not found: {area}')
            continue
        print(f' deployed version {area}: {deployed_version}')

        local_version_file = config.deployed_versions_dir / f'{area}.txt'

        try:
            previous_version = local_version_file.read_text()
        except Exception:
            previous_version = None

        if deployed_version == previous_version:
            continue

        config.deployed_versions_dir.mkdir(exist_ok=True, parents=True)
        local_version_file.write_text(deployed_version)
        need_nginx_sync = True

    return need_nginx_sync
@click.command()
@click.argument(
    'metadata_path', type=click.Path(exists=True, dir_okay=False, file_okay=True, path_type=Path)
)
@click.argument('tilejson_path', type=click.Path(path_type=Path))
@click.argument('url_prefix')
@click.option('--minify', is_flag=True, help='Minify the generated JSON')
def cli(metadata_path: Path, tilejson_path: Path, url_prefix: str, minify: bool):
    """
    Takes a MBTiles metadata.json and generates a TileJSON 3.0.0 file

    URL_PREFIX: Base URL to use as a prefix for tiles in the generated TileJSON.

    Reference: https://github.com/mapbox/tilejson-spec/tree/master/3.0.0
    """

    tilejson = dict(tilejson='3.0.0')

    with open(metadata_path) as fp:
        metadata = json.load(fp)

    # the 'json' metadata key holds a nested JSON string with the vector layers
    metadata_json_key = json.loads(metadata.pop('json'))

    tilejson['tiles'] = [url_prefix.rstrip('/') + '/{z}/{x}/{y}.pbf']

    # fix: removed a stray '' expression statement that was dead code here
    tilejson['vector_layers'] = metadata_json_key.pop('vector_layers')
    assert not metadata_json_key  # check that no more keys are left

    tilejson['attribution'] = metadata.pop('attribution')

    # overwriting new style OSM license, until fixed in tile_gen
    tilejson['attribution'] = (
        'OpenFreeMap '
        '© OpenMapTiles '
        'Data from OpenStreetMap'
    )

    tilejson['bounds'] = [float(n) for n in metadata.pop('bounds').split(',')]
    tilejson['center'] = [float(n) for n in metadata.pop('center').split(',')]
    # force the center zoom to 1 — presumably a sane default for map init; TODO confirm
    tilejson['center'][2] = 1

    tilejson['description'] = metadata.pop('description')

    tilejson['maxzoom'] = int(metadata.pop('maxzoom'))
    tilejson['minzoom'] = int(metadata.pop('minzoom'))

    tilejson['name'] = metadata.pop('name')
    tilejson['version'] = metadata.pop('version')

    with open(tilejson_path, 'w') as fp:
        if minify:
            json.dump(tilejson, fp, ensure_ascii=False, separators=(',', ':'))
        else:
            json.dump(tilejson, fp, ensure_ascii=False, indent=2)
#!/usr/bin/env python3

from datetime import datetime, timezone

import click
from loadbalancer_lib.loadbalance import check_or_fix


# captured once at import time, so every log line of one cron invocation
# shares the same timestamp
now = datetime.now(timezone.utc)


@click.group()
def cli():
    """
    Manages load-balancing of Round-Robin DNS records
    """


@cli.command()
def check():
    """
    Runs load-balancing check
    """

    print(f'---\n{now}\nStarting check')
    check_or_fix(fix=False)


@cli.command()
def fix():
    """
    Runs check and fixes records based on check results
    """

    print(f'---\n{now}\nStarting fix')
    check_or_fix(fix=True)


if __name__ == '__main__':
    cli()
def cloudflare_get(path: str, params: dict, cloudflare_api_token: str):
    """GETs a Cloudflare v4 API path and returns the parsed JSON payload."""
    res = requests.get(
        f'https://api.cloudflare.com/client/v4{path}',
        headers={'Authorization': f'Bearer {cloudflare_api_token}'},
        params=params,
    )
    res.raise_for_status()
    payload = res.json()
    assert payload['success'] is True
    return payload


def get_zone_id(domain, cloudflare_api_token: str):
    """Looks up the Cloudflare zone id for a domain; expects exactly one match."""
    data = cloudflare_get(
        '/zones', params=dict(name=domain), cloudflare_api_token=cloudflare_api_token
    )
    assert len(data['result']) == 1
    return data['result'][0]['id']


def get_dns_records_round_robin(zone_id, cloudflare_api_token: str) -> dict:
    """Fetches the zone's A records, grouped by record name.

    Returns {name: [{'content': ip, 'id': record_id}, ...]}.
    """
    data = cloudflare_get(
        f'/zones/{zone_id}/dns_records',
        params=dict(per_page=5000),
        cloudflare_api_token=cloudflare_api_token,
    )

    grouped = {}
    for record in data['result']:
        if record['type'] != 'A':
            continue
        grouped.setdefault(record['name'], []).append(
            dict(content=record['content'], id=record['id'])
        )

    return grouped
) -> bool: 57 | headers = {'Authorization': f'Bearer {cloudflare_api_token}'} 58 | 59 | dns_records = get_dns_records_round_robin(zone_id, cloudflare_api_token=cloudflare_api_token) 60 | current_records = dns_records.get(name, []) 61 | 62 | current_ips = {r['content'] for r in current_records} 63 | 64 | if current_ips == host_ip_set: 65 | print(f'No need to update records: {name} currently set: {sorted(current_ips)}') 66 | return False 67 | 68 | # changing records 69 | 70 | # delete all current records first 71 | for r in current_records: 72 | delete_record(zone_id, id_=r['id'], cloudflare_api_token=cloudflare_api_token) 73 | 74 | # create new records 75 | for ip in host_ip_set: 76 | print(f'Creating record: {name} {ip}') 77 | json_data = dict( 78 | type='A', 79 | name=name, 80 | content=ip, 81 | ttl=ttl, 82 | proxied=proxied, 83 | comment=comment, 84 | ) 85 | res = requests.post( 86 | f'https://api.cloudflare.com/client/v4/zones/{zone_id}/dns_records', 87 | headers=headers, 88 | json=json_data, 89 | ) 90 | res.raise_for_status() 91 | data = res.json() 92 | assert data['success'] is True 93 | 94 | return True 95 | 96 | 97 | def delete_record(zone_id, *, id_: str, cloudflare_api_token: str): 98 | headers = {'Authorization': f'Bearer {cloudflare_api_token}'} 99 | 100 | print(f'Deleting record: {id_}') 101 | res = requests.delete( 102 | f'https://api.cloudflare.com/client/v4/zones/{zone_id}/dns_records/{id_}', 103 | headers=headers, 104 | json={}, 105 | ) 106 | res.raise_for_status() 107 | data = res.json() 108 | assert data['success'] is True 109 | -------------------------------------------------------------------------------- /modules/loadbalancer/loadbalancer_lib/config.py: -------------------------------------------------------------------------------- 1 | import json 2 | from pathlib import Path 3 | 4 | from dotenv import dotenv_values 5 | 6 | 7 | class Configuration: 8 | areas = ['planet', 'monaco'] 9 | 10 | if Path('/data/ofm').exists(): 11 | ofm_config_dir = 
# --- modules/loadbalancer/loadbalancer_lib/config.py ---
import json
from pathlib import Path

from dotenv import dotenv_values


class Configuration:
    """Static loadbalancer configuration, loaded once at import time."""

    areas = ['planet', 'monaco']

    # deployed servers keep config under /data/ofm, dev checkouts under repo/config
    if Path('/data/ofm').exists():
        ofm_config_dir = Path('/data/ofm/config')
    else:
        repo_root = Path(__file__).parent.parent.parent.parent
        ofm_config_dir = repo_root / 'config'

    ofm_config = json.loads((ofm_config_dir / 'config.json').read_text())

    http_host_list = ofm_config['http_host_list']
    telegram_token = ofm_config['telegram_token']
    telegram_chat_id = ofm_config['telegram_chat_id']

    domain_roundrobin = ofm_config['domain_roundrobin']
    # root zone = last two labels of the round-robin domain
    domain_root = '.'.join(domain_roundrobin.split('.')[-2:])

    cloudflare_ini = dotenv_values(ofm_config_dir / 'cloudflare.ini')
    cloudflare_api_token = cloudflare_ini['dns_cloudflare_api_token']


config = Configuration()


# --- modules/loadbalancer/loadbalancer_lib/loadbalance.py ---
from datetime import datetime, timedelta, timezone

from loadbalancer_lib.cloudflare import get_zone_id, set_records_round_robin
from loadbalancer_lib.config import config
from loadbalancer_lib.shared import check_host_latest, check_host_version, get_deployed_version
from loadbalancer_lib.telegram_ import telegram_send_message


def check_or_fix(fix=False):
    """Health-check every HTTP host for every area; optionally fix DNS.

    With fix=True, the round-robin A records are rewritten to contain only
    the hosts that passed every per-area check.
    """
    if not config.http_host_list:
        telegram_quick(
            'OFM loadbalancer no hosts found on list, terminating',
        )
        return

    try:
        results_by_ip = {}
        working_hosts = set()

        # a host counts as working only if it passes in EVERY area
        for area in config.areas:
            results = run_area(area)
            for host_ip, host_is_ok in results.items():
                results_by_ip.setdefault(host_ip, True)
                results_by_ip[host_ip] &= host_is_ok

        for host_ip, host_is_ok in results_by_ip.items():
            if not host_is_ok:
                telegram_quick(f'OFM loadbalancer ERROR with host: {host_ip}')
            else:
                working_hosts.add(host_ip)

    except Exception as e:
        telegram_quick(f'OFM loadbalancer ERROR with loadbalancer: {e}')
        return

    print(f'working hosts: {sorted(working_hosts)}')

    if fix:
        # if no hosts are detected working, probably a bug in this script
        # fail-safe to include all hosts
        if not working_hosts:
            working_hosts = set(config.http_host_list)
            telegram_quick('OFM loadbalancer FIX found no working hosts, reverting to full list!')

        updated = update_records(working_hosts)
        if updated:
            telegram_quick(f'OFM loadbalancer FIX modified records, new records: {working_hosts}')


def run_area(area) -> dict:
    """Check every host for one area; return {host_ip: is_ok}.

    Returns an empty dict when no deployed version is published for the
    area. (Previously returned None, which made the caller's .items()
    raise AttributeError that was swallowed into a generic telegram error.)
    """
    deployed_data = get_deployed_version(area)
    version = deployed_data['version']
    last_modified = deployed_data['last_modified']

    if not version:
        print(f' deployed version not found: {area}')
        return {}

    print(f' deployed version {area}: {version}')

    # using relaxed mode for while the servers are still deploying
    now = datetime.now(timezone.utc)
    delta = now - last_modified
    relaxed_mode = delta < timedelta(minutes=3)

    if relaxed_mode:
        print(' using relaxed mode')

    results = {}

    for host_ip in config.http_host_list:
        try:
            # don't check latest
            if relaxed_mode:
                check_host_version(config.domain_roundrobin, host_ip, area, version)
            else:
                check_host_latest(config.domain_roundrobin, host_ip, area, version)

            results[host_ip] = True
        except Exception as e:
            results[host_ip] = False
            print(e)

    return results


def update_records(working_hosts) -> bool:
    """Point the round-robin DNS name at working_hosts; True if changed."""
    zone_id = get_zone_id(config.domain_root, cloudflare_api_token=config.cloudflare_api_token)

    updated = False

    updated |= set_records_round_robin(
        zone_id=zone_id,
        name=config.domain_roundrobin,
        host_ip_set=working_hosts,
        proxied=False,
        ttl=300,
        comment='domain_roundrobin',
        cloudflare_api_token=config.cloudflare_api_token,
    )

    return updated


def telegram_quick(message):
    """Send a message using the configured bot token and chat id."""
    telegram_send_message(message, config.telegram_token, config.telegram_chat_id)


# --- modules/loadbalancer/loadbalancer_lib/shared.py ---
# (symlink to ../../tile_gen/tile_gen_lib/shared.py)


# --- modules/loadbalancer/loadbalancer_lib/telegram_.py ---
import requests


def telegram_send_message(message, bot_token, chat_id):
    """Best-effort Telegram notification; also echoes the message to stdout.

    Never raises on HTTP failure, only prints — callers rely on this being
    fire-and-forget. A timeout prevents a hung request from stalling cron.
    """
    print(message)

    url = f'https://api.telegram.org/bot{bot_token}/sendMessage'

    payload = {'chat_id': chat_id, 'text': message}

    response = requests.post(url, data=payload, timeout=30)

    if response.status_code == 200:
        print(' Message sent successfully!')
    else:
        print(' Failed to send message:', response.text)


# --- modules/loadbalancer/setup.py ---
from setuptools import find_packages, setup


requirements = [
    'click',
    'requests',
    'pycurl',
    'python-dotenv',
]


setup(
    python_requires='>=3.10',
    install_requires=requirements,
    packages=find_packages(),
)
# --- modules/prepare-virtualenv.sh ---
#!/usr/bin/env bash

# remove build residue, then recreate the virtualenv from scratch
find . -name "*.egg-info" -exec rm -rf {} +
find . -name __pycache__ -exec rm -rf {} +

# deactivate
rm -rf venv
python3 -m venv venv

venv/bin/pip -V

venv/bin/pip install -U pip wheel setuptools



# --- modules/roundrobin/rclone_write.sh ---
#!/usr/bin/env bash

# certbot deploy hook: copies the renewed round-robin TLS cert and key
# into the private rclone bucket so the HTTP hosts can fetch them.
# RENEWED_LINEAGE / RENEWED_DOMAINS are set by certbot.

# these are not needed as certbot generates these
#env > /data/ofm/roundrobin/env.txt
#RENEWED_DOMAINS=tiles.openfreemap.org
#RENEWED_LINEAGE=/etc/letsencrypt/live/ofm_roundrobin

export RCLONE_CONFIG=/data/ofm/config/rclone.conf

rclone copyto -v --copy-links "$RENEWED_LINEAGE/fullchain.pem" "remote:ofm-private/roundrobin/$RENEWED_DOMAINS/ofm_roundrobin.cert"
rclone copyto -v --copy-links "$RENEWED_LINEAGE/privkey.pem" "remote:ofm-private/roundrobin/$RENEWED_DOMAINS/ofm_roundrobin.key"


# --- modules/tile_gen/cron.d/ofm_tile_gen ---
# Define common variables
CMD="sudo /data/ofm/venv/bin/python -u /data/ofm/tile_gen/bin/tile_gen.py"
LOG_DIR=/data/ofm/tile_gen/logs

# every day at 23:10, make a monaco run
10 23 * * * ofm $CMD make-tiles monaco --upload >> $LOG_DIR/monaco-make-tiles.log 2>&1

# debug every 15 minutes
#*/15 * * * * ofm $CMD make-tiles monaco --upload >> $LOG_DIR/monaco-make-tiles.log 2>&1

# every minute, set monaco to latest
* * * * * ofm $CMD set-version monaco >> $LOG_DIR/monaco-set-version.log 2>&1

# every Wednesday, make a planet run
10 0 * * 3 ofm $CMD make-tiles planet --upload >> $LOG_DIR/planet-make-tiles.log 2>&1

# every Saturday, set planet to latest
0 11 * * 6 ofm $CMD set-version planet >> $LOG_DIR/planet-set-version.log 2>&1

# once per minute, create indexes
* * * * * ofm $CMD make-indexes >> $LOG_DIR/make-indexes-cron.log 2>&1
minute, create indexes 21 | * * * * * ofm $CMD make-indexes >> $LOG_DIR/make-indexes-cron.log 2>&1 22 | -------------------------------------------------------------------------------- /modules/tile_gen/scripts/README.md: -------------------------------------------------------------------------------- 1 | These are self contained Python scripts, they can be run outside of this project's environment. 2 | -------------------------------------------------------------------------------- /modules/tile_gen/scripts/extract_mbtiles.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import json 3 | import shutil 4 | import sqlite3 5 | import sys 6 | from pathlib import Path 7 | 8 | import click 9 | 10 | 11 | @click.command() 12 | @click.argument( 13 | 'mbtiles_path', 14 | type=click.Path(exists=True, dir_okay=False, file_okay=True, path_type=Path), 15 | ) 16 | @click.argument('dir_path', type=click.Path(dir_okay=True, file_okay=False, path_type=Path)) 17 | def cli(mbtiles_path: Path, dir_path: Path): 18 | """ 19 | Extracts a mbtiles sqlite to a folder 20 | Deduplicating identical tiles as hard-links 21 | 22 | used for reference: https://github.com/mapbox/mbutil 23 | """ 24 | 25 | if dir_path.exists() and any(dir_path.iterdir()): 26 | sys.exit(' dir not empty') 27 | 28 | dir_path.mkdir(exist_ok=True) 29 | 30 | conn = sqlite3.connect(mbtiles_path) 31 | c = conn.cursor() 32 | 33 | write_dedupl_files(c, dir_path=dir_path) 34 | write_tile_files(c, dir_path=dir_path) 35 | 36 | write_metadata(c, dir_path=dir_path) 37 | conn.commit() 38 | 39 | print('extract_mbtiles.py DONE') 40 | 41 | 42 | def write_metadata(c, *, dir_path): 43 | metadata = dict(c.execute('select name, value from metadata').fetchall()) 44 | c.execute("update metadata set value='OpenFreeMap' where name='name'") 45 | c.execute("update metadata set value='https://openfreemap.org' where name='description'") 46 | 47 | if 'openfreemap' not in 
metadata['attribution']: 48 | attr_str = ( 49 | 'OpenFreeMap ' 50 | + metadata['attribution'] 51 | ) 52 | c.execute("UPDATE metadata SET value = ? WHERE name = 'attribution'", (attr_str,)) 53 | 54 | if 'osm_date' not in metadata: 55 | if 'planetiler:osm:osmosisreplicationtime' in metadata: 56 | osm_date = metadata['planetiler:osm:osmosisreplicationtime'][:10] 57 | c.execute('INSERT INTO metadata (name, value) VALUES (?, ?)', ('osm_date', osm_date)) 58 | 59 | metadata = dict(c.execute('select name, value from metadata').fetchall()) 60 | with open(dir_path / 'metadata.json', 'w') as fp: 61 | json.dump(metadata, fp, indent=2) 62 | 63 | with open(dir_path / 'osm_date', 'w') as fp: 64 | fp.write(metadata['osm_date']) 65 | 66 | 67 | def write_dedupl_files(c, *, dir_path): 68 | """ 69 | dedupl files 70 | write out the tiles_data files into a multi-level folder 71 | """ 72 | 73 | total = c.execute('select count(*) from tiles_data').fetchone()[0] 74 | 75 | c.execute('select tile_data_id, tile_data from tiles_data') 76 | for i, row in enumerate(c, start=1): 77 | dedupl_id = row[0] 78 | dedupl_path = dir_path / 'dedupl' / dedupl_helper_path(dedupl_id) 79 | dedupl_path.parent.mkdir(parents=True, exist_ok=True) 80 | with open(dedupl_path, 'wb') as fp: 81 | fp.write(row[1]) 82 | print(f'written dedupl file {i}/{total}') 83 | 84 | 85 | def write_tile_files(c, *, dir_path): 86 | total = c.execute('select count(*) from tiles_shallow').fetchone()[0] 87 | 88 | bug_fix_dict = {} 89 | 90 | c.execute('select zoom_level, tile_column, tile_row, tile_data_id from tiles_shallow') 91 | for i, row in enumerate(c, start=1): 92 | z = row[0] 93 | x = row[1] 94 | y = flip_y(z, row[2]) 95 | dedupl_id = row[3] 96 | 97 | dedupl_path = dir_path / 'dedupl' / dedupl_helper_path(dedupl_id) 98 | dedupl_path_fixed = get_fixed_dedupl_name(bug_fix_dict, dedupl_path) 99 | 100 | tile_path = dir_path / 'tiles' / str(z) / str(x) / f'{y}.pbf' 101 | tile_path.parent.mkdir(parents=True, exist_ok=True) 102 | 103 | 
if tile_path.is_file(): 104 | continue 105 | 106 | # create the hard link 107 | try: 108 | tile_path.hardlink_to(dedupl_path_fixed) 109 | print(f'hard link created {i}/{total} {i / total * 100:.1f}%: {tile_path}') 110 | except OSError as e: 111 | # fixing Btrfs's 64k max link limit 112 | if e.errno == 31: 113 | bug_fix_dict.setdefault(dedupl_path, 0) 114 | bug_fix_dict[dedupl_path] += 1 115 | dedupl_path_fixed = get_fixed_dedupl_name(bug_fix_dict, dedupl_path) 116 | shutil.copyfile(dedupl_path, dedupl_path_fixed) 117 | print(f'Created fixed dedupl file: {dedupl_path_fixed}') 118 | tile_path.hardlink_to(dedupl_path_fixed) 119 | print(f'hard link created {i}/{total} {i / total * 100:.1f}%: {tile_path}') 120 | else: 121 | raise 122 | 123 | 124 | def get_fixed_dedupl_name(bug_fix_dict, dedupl_path): 125 | if dedupl_path in bug_fix_dict: 126 | return dedupl_path.with_name(f'{dedupl_path.name}-{bug_fix_dict[dedupl_path]}') 127 | else: 128 | return dedupl_path 129 | 130 | 131 | def dedupl_helper_path(dedupl_id: int) -> Path: 132 | """ 133 | Naming 200 million files such that each subdir has max 1000 children 134 | """ 135 | 136 | str_num = f'{dedupl_id:09}' 137 | l1 = str_num[:3] 138 | l2 = str_num[3:6] 139 | l3 = str_num[6:] 140 | return Path(l1) / l2 / f'{l3}.pbf' 141 | 142 | 143 | def flip_y(zoom, y): 144 | return (2**zoom - 1) - y 145 | 146 | 147 | if __name__ == '__main__': 148 | cli() 149 | -------------------------------------------------------------------------------- /modules/tile_gen/scripts/shrink_btrfs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import subprocess 4 | import sys 5 | import tempfile 6 | from pathlib import Path 7 | 8 | import click 9 | 10 | 11 | # btrfs cannot shrink smaller than 256 MiB 12 | SMALLEST_SIZE = 256 * 1024 * 1024 13 | 14 | 15 | @click.command() 16 | @click.argument( 17 | 'btrfs_img', 18 | type=click.Path(exists=True, dir_okay=False, file_okay=True, 
# --- modules/tile_gen/scripts/shrink_btrfs.py ---
#!/usr/bin/env python3
import os
import subprocess
import sys
import tempfile
from pathlib import Path

import click


# btrfs cannot shrink smaller than 256 MiB
SMALLEST_SIZE = 256 * 1024 * 1024


@click.command()
@click.argument(
    'btrfs_img',
    type=click.Path(exists=True, dir_okay=False, file_okay=True, path_type=Path),
)
def cli(btrfs_img: Path):
    """
    Shrinks a Btrfs image
    // I cannot believe that Btrfs is over 15 years old,
    // yet there is no resize2fs tool which can shrink a disk image
    // to minimum size.
    // It cannot even tell you how much should be the right size,
    // it just randomly fails after which you have to umount and mount again.
    // So we have to make a loop which tries to shrink it until it fails.

    // Also, WONTFIX bugs like how instead of telling you that
    // minimum fs size is 256 MB, it says "ERROR: unable to resize - Invalid argument"
    // https://bugzilla.kernel.org/show_bug.cgi?id=118111
    """

    if os.geteuid() != 0:
        sys.exit(' needs sudo')

    current_dir = Path.cwd()

    mnt_dir = Path(tempfile.mkdtemp(dir=current_dir, prefix='tmp_shrink_'))
    subprocess.run(['mount', '-t', 'btrfs', btrfs_img, mnt_dir], check=True)

    # shrink until max. 10 MB left or reached SMALLEST_SIZE or failure
    while True:
        # needs to start with a balancing
        # https://btrfs.readthedocs.io/en/latest/Balance.html
        # https://marc.merlins.org/perso/btrfs/post_2014-05-04_Fixing-Btrfs-Filesystem-Full-Problems.html
        do_balancing(mnt_dir)

        free_bytes = get_usage(mnt_dir, 'Device unallocated')
        device_size = get_usage(mnt_dir, 'Device size')
        shrink_idea = free_bytes * 0.7

        # workaround for the SMALLEST_SIZE limit
        if device_size - free_bytes < SMALLEST_SIZE:
            shrink_idea = (device_size - SMALLEST_SIZE) * 0.7

        # stop if 10 MB left
        if shrink_idea < 10_000_000:
            break

        # stop if process error
        if not do_shrink(mnt_dir, shrink_idea):
            break

    total_size = get_usage(mnt_dir, 'Device size')

    # must abort if the unmount fails: truncating a still-mounted
    # image would corrupt the filesystem
    subprocess.run(['umount', mnt_dir], check=True)
    mnt_dir.rmdir()

    subprocess.run(['truncate', '-s', str(total_size), btrfs_img])
    print(f'Truncated {btrfs_img} to {total_size // 1_000_000} MB size')
    print('shrink_btrfs.py DONE')


def get_usage(mnt: Path, key: str) -> int:
    """Parse `btrfs filesystem usage -b` for a byte value, e.g.
    'Device size' or 'Device unallocated'.

    Raises ValueError when the key is absent (previously returned None,
    which caused an obscure TypeError later in the shrink arithmetic).
    """
    p = subprocess.run(
        ['btrfs', 'filesystem', 'usage', '-b', mnt], text=True, capture_output=True, check=True
    )
    for line in p.stdout.splitlines():
        if f'{key}:' not in line:
            continue
        free = int(line.split(':')[1])
        return free
    raise ValueError(f'key not found in btrfs usage output: {key}')


def do_shrink(mnt: Path, delta_size: float) -> bool:
    """Try to shrink the mounted fs by delta_size bytes; False on failure."""
    delta_size = int(delta_size)
    print(f'Trying to shrink by {delta_size // 1_000_000} MB')
    p = subprocess.run(['btrfs', 'filesystem', 'resize', str(-delta_size), mnt])
    return p.returncode == 0


def do_balancing(mnt: Path):
    """Run a full data balance; errors are reported but not fatal."""
    print('Starting btrfs balancing')
    p = subprocess.run(
        ['btrfs', 'balance', 'start', '-dusage=100', mnt], capture_output=True, text=True
    )
    if p.returncode:
        print(f'Balance error: {p.stdout} {p.stderr}')
    print('Balancing done')


if __name__ == '__main__':
    cli()


# --- modules/tile_gen/setup.py ---
from setuptools import find_packages, setup


requirements = [
    'click',
    'pycurl',
    'requests',
]


setup(
    python_requires='>=3.10',
    install_requires=requirements,
    packages=find_packages(),
)
# --- modules/tile_gen/tile_gen.py ---
#!/usr/bin/env python3
from datetime import datetime, timezone

import click
from tile_gen_lib.btrfs import make_btrfs
from tile_gen_lib.planetiler import run_planetiler
from tile_gen_lib.rclone import make_indexes_for_bucket, upload_area
from tile_gen_lib.set_version import check_and_set_version


# captured once at import time; the CLI runs one command per process
# (cron), so this is the start timestamp printed in every log banner
now = datetime.now(timezone.utc)


@click.group()
def cli():
    """
    Generates tiles and uploads to CloudFlare
    """


@cli.command()
@click.argument('area', required=True)
@click.option('--upload', is_flag=True, help='Upload after generation is complete')
def make_tiles(area, upload):
    """
    Generate tiles for a given area, optionally upload it to the btrfs bucket
    """

    print(f'---\n{now}\nStarting make-tiles {area} upload: {upload}')

    # planetiler writes tiles.mbtiles; make_btrfs converts it to a btrfs image
    run_folder = run_planetiler(area)
    make_btrfs(run_folder)

    if upload:
        upload_area(area)


@cli.command(name='upload-area')
@click.argument('area', required=True)
def upload_area_(area):
    """
    Upload all runs from a given area to the btrfs bucket
    """

    print(f'---\n{now}\nStarting upload-area {area}')

    upload_area(area)


@cli.command()
def make_indexes():
    """
    Make indexes for all buckets
    """

    print(f'---\n{now}\nStarting make-indexes')

    for bucket in ['ofm-btrfs', 'ofm-assets']:
        make_indexes_for_bucket(bucket)


@cli.command()
@click.argument('area', required=True)
@click.option(
    '--version', default='latest', help='Optional version string, like "20231227_043106_pt"'
)
def set_version(area, version):
    """
    Set versions for a given area
    """

    print(f'---\n{now}\nStarting set-version {area}')

    check_and_set_version(area, version)


if __name__ == '__main__':
    cli()
# --- modules/tile_gen/tile_gen_lib/btrfs.py ---
import os
import shutil
import subprocess
from pathlib import Path

from tile_gen_lib.config import config
from tile_gen_lib.utils import python_venv_executable


# pre-allocated image size; must exceed any extracted planet output
IMAGE_SIZE = '200G'


def make_btrfs(run_folder: Path):
    # Converts run_folder/tiles.mbtiles into a shrunken, gzipped btrfs image
    # (tiles.btrfs.gz) plus logs/ and a SHA256SUMS file. Runs entirely inside
    # run_folder; steps are strictly ordered (extract -> rsync -> shrink).
    os.chdir(run_folder)

    cleanup_folder(run_folder)

    # make an empty file that's definitely bigger then the current OSM output
    for image in ['image.btrfs', 'image2.btrfs']:
        subprocess.run(['fallocate', '-l', IMAGE_SIZE, image], check=True)
        subprocess.run(['mkfs.btrfs', '-m', 'single', image], check=True, capture_output=True)

    for image, mount in [('image.btrfs', 'mnt_rw'), ('image2.btrfs', 'mnt_rw2')]:
        Path(mount).mkdir()

        # https://btrfs.readthedocs.io/en/latest/btrfs-man5.html#mount-options
        # compression doesn't make sense, data is already gzip compressed
        subprocess.run(
            [
                'sudo',
                'mount',
                '-t',
                'btrfs',
                '-o',
                'noacl,nobarrier,noatime,max_inline=4096',
                image,
                mount,
            ],
            check=True,
        )

        subprocess.run(['sudo', 'chown', 'ofm:ofm', '-R', mount], check=True)

    # extract mbtiles
    extract_script = config.tile_gen_scripts_dir / 'extract_mbtiles.py'
    with open('extract_out.log', 'w') as out, open('extract_err.log', 'w') as err:
        subprocess.run(
            [
                python_venv_executable(),
                extract_script,
                'tiles.mbtiles',
                'mnt_rw/extract',
            ],
            check=True,
            stdout=out,
            stderr=err,
        )

    shutil.copy('mnt_rw/extract/osm_date', '.')

    # process logs (best-effort: grep exits non-zero when nothing matched)
    subprocess.run('grep fixed extract_out.log > dedupl_fixed.log', shell=True)

    # unfortunately, by deleting files from the btrfs partition, the partition size grows
    # so we need to rsync onto a new partition instead of deleting
    with open('rsync_out.log', 'w') as out, open('rsync_err.log', 'w') as err:
        subprocess.run(
            [
                'rsync',
                '-avH',
                '--max-alloc=4294967296',
                '--exclude',
                'dedupl',
                'mnt_rw/extract/',
                'mnt_rw2/',
            ],
            check=True,
            stdout=out,
            stderr=err,
        )

    # collect stats
    for i, mount in enumerate(['mnt_rw', 'mnt_rw2'], 1):
        with open(f'stats{i}.txt', 'w') as f:
            for cmd in [
                ['df', '-h', mount],
                ['btrfs', 'filesystem', 'df', mount],
                ['btrfs', 'filesystem', 'show', mount],
                ['btrfs', 'filesystem', 'usage', mount],
            ]:
                f.write(f'\n\n{" ".join(cmd)}\n')
                result = subprocess.run(['sudo'] + cmd, check=True, capture_output=True, text=True)
                f.write(result.stdout)

    # unmount and cleanup
    for mount in ['mnt_rw', 'mnt_rw2']:
        subprocess.run(['sudo', 'umount', mount], check=True)

    shutil.rmtree('mnt_rw')
    shutil.rmtree('mnt_rw2')

    # shrink btrfs (needs sudo; script refuses to run otherwise)
    shrink_script = config.tile_gen_scripts_dir / 'shrink_btrfs.py'
    with open('shrink_out.log', 'w') as out, open('shrink_err.log', 'w') as err:
        subprocess.run(
            ['sudo', python_venv_executable(), shrink_script, 'image2.btrfs'],
            check=True,
            stdout=out,
            stderr=err,
        )

    os.unlink('image.btrfs')
    shutil.move('image2.btrfs', 'tiles.btrfs')

    # parallel gzip (pigz)
    subprocess.run(['pigz', 'tiles.btrfs', '--fast'], check=True)

    # move logs
    Path('logs').mkdir()
    for pattern in ['*.log', '*.txt']:
        for file in Path().glob(pattern):
            shutil.move(file, 'logs')

    # create a checksum file, Ubuntu style naming convention
    with open('SHA256SUMS', 'w') as out:
        subprocess.run(
            ['sha256sum', 'tiles.btrfs.gz', 'tiles.mbtiles'],
            check=True,
            stdout=out,
        )

    print('extract_btrfs.py DONE')


def cleanup_folder(run_folder: Path):
    # Best-effort teardown of anything a previous (possibly crashed) run
    # left behind: unmounts both mount points, then deletes generated files.
    print(f'cleaning up {run_folder}')

    for mount in ['mnt_rw', 'mnt_rw2']:
        # capture_output swallows the expected "not mounted" error
        subprocess.run(['sudo', 'umount', run_folder / mount], capture_output=True)

    for pattern in ['mnt_rw*', 'tmp_*', '*.btrfs', '*.gz', '*.log', '*.txt', 'logs', 'osm_date']:
        for item in run_folder.glob(pattern):
            if item.is_dir():
                shutil.rmtree(item)
            else:
                item.unlink()


# --- modules/tile_gen/tile_gen_lib/config.py ---
import json
import subprocess
from pathlib import Path
from pprint import pprint


class Configuration:
    # static paths and settings for tile generation; evaluated at import time
    areas = ['planet', 'monaco']

    tile_gen_dir = Path('/data/ofm/tile_gen')

    tile_gen_bin = tile_gen_dir / 'bin'
    tile_gen_scripts_dir = tile_gen_bin / 'scripts'

    planetiler_bin = tile_gen_dir / 'planetiler'
    planetiler_path = planetiler_bin / 'planetiler.jar'

    runs_dir = tile_gen_dir / 'runs'

    # deployed servers keep config under /data/ofm, dev checkouts under repo/config
    if Path('/data/ofm').exists():
        ofm_config_dir = Path('/data/ofm/config')
    else:
        repo_root = Path(__file__).parent.parent.parent.parent
        ofm_config_dir = repo_root / 'config'

    ofm_config = json.loads((ofm_config_dir / 'config.json').read_text())

    rclone_config = ofm_config_dir / 'rclone.conf'
    # resolved once so subprocesses can call rclone by absolute path
    rclone_bin = subprocess.run(['which', 'rclone'], capture_output=True, text=True).stdout.strip()


config = Configuration()
def run_planetiler(area: str) -> Path:
    """Run planetiler for the given area and return the new run folder.

    Deletes every previous run for the area, creates a fresh timestamped
    run folder under config.runs_dir/<area>, then launches planetiler.jar
    with area-specific memory and node-map settings, logging stdout/stderr
    to planetiler.out / planetiler.err inside the run folder.

    Raises subprocess.CalledProcessError if planetiler exits non-zero.
    """

    assert area in config.areas

    date = datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')

    area_dir = config.runs_dir / area

    # delete all previous runs for the given area
    # (bugfix: on the very first run for an area, area_dir does not exist
    # yet and iterdir() would raise FileNotFoundError)
    if area_dir.exists():
        for subdir in area_dir.iterdir():
            cleanup_folder(subdir)

    print('running rmtree')
    shutil.rmtree(area_dir, ignore_errors=True)
    print('rmtree done')

    run_folder = area_dir / f'{date}_pt'
    run_folder.mkdir(parents=True, exist_ok=True)

    os.chdir(run_folder)

    # link to discussion about why exactly 30 GB
    # https://github.com/onthegomap/planetiler/discussions/690#discussioncomment-7756397
    java_memory_gb = 30 if area == 'planet' else 1

    command = [
        'java',
        f'-Xmx{java_memory_gb}g',
        '-jar',
        config.planetiler_path,
        f'--area={area}',
        '--download',
        '--download-threads=10',
        '--download-chunk-size-mb=1000',
        '--fetch-wikidata',
        '--output=tiles.mbtiles',
        '--storage=mmap',
        '--force',
        '--languages=default,tok',
    ]

    if area == 'planet':
        command.append('--nodemap-type=array')
        command.append('--bounds=planet')

    if area == 'monaco':
        command.append('--nodemap-type=sortedtable')

    print(command)

    out_path = run_folder / 'planetiler.out'
    err_path = run_folder / 'planetiler.err'

    with out_path.open('w') as out_file, err_path.open('w') as err_file:
        subprocess.run(command, stdout=out_file, stderr=err_file, check=True, cwd=run_folder)

    # the downloaded source data is no longer needed once tiles are built
    shutil.rmtree(run_folder / 'data', ignore_errors=True)
    print('planetiler.jar DONE')

    return run_folder
def upload_area_run(area, run):
    """Sync one run directory to the btrfs bucket, then mark it done.

    The remote layout is ofm-btrfs/areas/<area>/<run>; a 'done' marker
    file is created last so readers only pick up complete uploads.
    """

    print(f'Uploading {area} {run} to btrfs bucket')

    run_dir = config.runs_dir / area / run
    assert run_dir.is_dir()

    rclone_env = dict(RCLONE_CONFIG=config.rclone_config)
    remote_path = f'remote:ofm-btrfs/areas/{area}/{run}'

    sync_command = [
        'rclone',
        'sync',
        '--verbose=1',
        '--transfers=8',
        '--multi-thread-streams=8',
        '--fast-list',
        '--stats-file-name-length=0',
        '--stats-one-line',
        '--log-file',
        run_dir / 'logs' / 'rclone.log',
        '--exclude',
        'logs/**',
        run_dir,
        remote_path,
    ]
    subprocess.run(sync_command, env=rclone_env, check=True)

    # create the "done" marker file on the remote
    subprocess.run(
        ['rclone', 'touch', f'{remote_path}/done'],
        env=rclone_env,
        check=True,
    )
def check_and_set_version(area, version):
    """Resolve 'latest' to a concrete version, verify it on every host,
    and publish it as the deployed version if it differs from the
    currently deployed one."""

    if version == 'latest':
        available = get_versions_for_area(area)
        if not available:
            print(f' No versions found for {area}')
            return
        version = available[-1]
        print(f' Latest version on bucket: {area} {version}')

    # every http host must already serve this version before we switch
    if not check_all_hosts(area, version):
        return

    # skip the write when the deployed version is already current; any
    # failure reading it (missing file, network) just means we proceed
    try:
        if get_deployed_version(area)['version'] == version:
            return
    except Exception:
        pass

    set_version(area, version)
def parse_http_last_modified(date_string) -> datetime:
    """Parse an HTTP Last-Modified header value into an aware UTC datetime.

    Expects the IMF-fixdate format mandated for HTTP headers, e.g.
    'Mon, 06 Nov 2023 10:20:30 GMT'.
    """

    naive = datetime.strptime(date_string, '%a, %d %b %Y %H:%M:%S GMT')
    # the 'GMT' suffix is parsed as a literal, so attach UTC explicitly
    return naive.replace(tzinfo=timezone.utc)
def python_venv_executable() -> Path:
    """Locate the Python interpreter of the active virtualenv.

    Preference order: the $VIRTUAL_ENV environment variable, then a venv
    detected via sys.prefix differing from sys.base_prefix, finally the
    currently running interpreter as a fallback.
    """

    env_venv = os.environ.get('VIRTUAL_ENV')
    if env_venv:
        return Path(env_venv) / 'bin' / 'python'

    # inside a venv, sys.prefix points at the venv while base_prefix
    # points at the base installation
    running_in_venv = sys.prefix != sys.base_prefix
    if running_in_venv:
        return Path(sys.prefix) / 'bin' / 'python'

    return Path(sys.executable)
-------------------------------------------------------------------------------- /prepare-virtualenv.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | find . -name '*.egg-info' -type d -prune -exec rm -rf {} + 4 | find . -name '*.pyc' -delete 5 | find . -name __pycache__ -type d -prune -exec rm -rf {} + 6 | find . -name .DS_Store -delete 7 | find . -name .ipynb_checkpoints -exec rm -rf {} + 8 | find . -name .pytest_cache -exec rm -rf {} + 9 | find . -name .ruff_cache -exec rm -rf {} + 10 | find . -name .venv -type d -prune -exec rm -rf {} + 11 | find . -name venv -type d -prune -exec rm -rf {} + 12 | 13 | 14 | uv venv --python=3.12 15 | source .venv/bin/activate 16 | 17 | 18 | uv pip install -e . 19 | uv pip install -e modules/http_host 20 | uv pip install -e modules/tile_gen 21 | uv pip install -e modules/loadbalancer 22 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import find_packages, setup 2 | 3 | 4 | requirements = [ 5 | 'click', 6 | 'fabric', 7 | 'nginxfmt', 8 | 'python-dotenv', 9 | 'ruff', 10 | 'marko', 11 | 'requests', 12 | ] 13 | 14 | 15 | setup( 16 | python_requires='>=3.10', 17 | install_requires=requirements, 18 | packages=find_packages(), 19 | ) 20 | -------------------------------------------------------------------------------- /ssh_lib/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from pathlib import Path 4 | 5 | from dotenv import dotenv_values 6 | 7 | 8 | ASSETS_DIR = Path(__file__).parent / 'assets' 9 | CONFIG_DIR = Path(__file__).parent.parent / 'config' 10 | MODULES_DIR = Path(__file__).parent.parent / 'modules' 11 | 12 | OFM_DIR = '/data/ofm' 13 | REMOTE_CONFIG = f'{OFM_DIR}/config' 14 | VENV_BIN = f'{OFM_DIR}/venv/bin' 15 | 16 | TILE_GEN_DIR = 
f'{OFM_DIR}/tile_gen' 17 | TILE_GEN_BIN = f'{TILE_GEN_DIR}/bin' 18 | 19 | PLANETILER_SRC = f'{TILE_GEN_DIR}/planetiler_src' 20 | PLANETILER_BIN = f'{TILE_GEN_DIR}/planetiler' 21 | 22 | HTTP_HOST_BIN = f'{OFM_DIR}/http_host/bin' 23 | 24 | 25 | # Handling multiple .env files is supported 26 | # or example ENV=test would use .env.test 27 | 28 | ENV = os.getenv('ENV') 29 | if ENV: 30 | env_file_name = f'.env.{ENV}' 31 | else: 32 | env_file_name = '.env' 33 | 34 | env_file_path = CONFIG_DIR / env_file_name 35 | if not env_file_path.exists(): 36 | sys.exit(f'config/{env_file_name} does not exist') 37 | 38 | DOTENV_VALUES = dotenv_values(CONFIG_DIR / env_file_name) 39 | 40 | 41 | def dotenv_val(key): 42 | return DOTENV_VALUES.get(key, '').strip() 43 | -------------------------------------------------------------------------------- /ssh_lib/assets/nginx/cloudflare.conf: -------------------------------------------------------------------------------- 1 | # https://www.cloudflare.com/ips/ 2 | 3 | 4 | set_real_ip_from 173.245.48.0/20; 5 | set_real_ip_from 103.21.244.0/22; 6 | set_real_ip_from 103.22.200.0/22; 7 | set_real_ip_from 103.31.4.0/22; 8 | set_real_ip_from 141.101.64.0/18; 9 | set_real_ip_from 108.162.192.0/18; 10 | set_real_ip_from 190.93.240.0/20; 11 | set_real_ip_from 188.114.96.0/20; 12 | set_real_ip_from 197.234.240.0/22; 13 | set_real_ip_from 198.41.128.0/17; 14 | set_real_ip_from 162.158.0.0/15; 15 | set_real_ip_from 104.16.0.0/13; 16 | set_real_ip_from 104.24.0.0/14; 17 | set_real_ip_from 172.64.0.0/13; 18 | set_real_ip_from 131.0.72.0/22; 19 | 20 | set_real_ip_from 2400:cb00::/32; 21 | set_real_ip_from 2606:4700::/32; 22 | set_real_ip_from 2803:f800::/32; 23 | set_real_ip_from 2405:b500::/32; 24 | set_real_ip_from 2405:8100::/32; 25 | set_real_ip_from 2a06:98c0::/29; 26 | set_real_ip_from 2c0f:f248::/32; 27 | 28 | 29 | # use any of the following two 30 | real_ip_header CF-Connecting-IP; 31 | #real_ip_header X-Forwarded-For; 32 | 
-------------------------------------------------------------------------------- /ssh_lib/assets/nginx/default_disable.conf: -------------------------------------------------------------------------------- 1 | map "" $empty { 2 | default ""; 3 | } 4 | 5 | server { 6 | listen 80 default_server; 7 | listen [::]:80 default_server; 8 | 9 | listen 443 ssl default_server; 10 | listen [::]:443 ssl default_server; 11 | http2 on; 12 | 13 | server_name _; 14 | 15 | ssl_ciphers aNULL; 16 | ssl_certificate /etc/nginx/ssl/dummy.cert; 17 | ssl_certificate_key /etc/nginx/ssl/dummy.key; 18 | 19 | return 444; 20 | } 21 | -------------------------------------------------------------------------------- /ssh_lib/assets/nginx/mime.types: -------------------------------------------------------------------------------- 1 | types { 2 | 3 | # Data interchange 4 | 5 | application/atom+xml atom; 6 | application/json json map topojson; 7 | application/ld+json jsonld; 8 | application/rss+xml rss; 9 | # Normalize to standard type. 10 | # https://tools.ietf.org/html/rfc7946#section-12 11 | application/geo+json geojson; 12 | application/xml xml; 13 | # Normalize to standard type. 14 | # https://tools.ietf.org/html/rfc3870#section-2 15 | application/rdf+xml rdf; 16 | 17 | 18 | # JavaScript 19 | 20 | # Servers should use text/javascript for JavaScript resources. 
21 | # https://html.spec.whatwg.org/multipage/scripting.html#scriptingLanguages 22 | text/javascript js mjs; 23 | application/wasm wasm; 24 | 25 | 26 | # Manifest files 27 | 28 | application/manifest+json webmanifest; 29 | application/x-web-app-manifest+json webapp; 30 | text/cache-manifest appcache; 31 | 32 | 33 | # Media files 34 | 35 | audio/midi mid midi kar; 36 | audio/mp4 aac f4a f4b m4a; 37 | audio/mpeg mp3; 38 | audio/ogg oga ogg opus; 39 | audio/x-realaudio ra; 40 | audio/x-wav wav; 41 | image/apng apng; 42 | image/avif avif avifs; 43 | image/bmp bmp; 44 | image/gif gif; 45 | image/jpeg jpeg jpg; 46 | image/jxl jxl; 47 | image/jxr jxr hdp wdp; 48 | image/png png; 49 | image/svg+xml svg svgz; 50 | image/tiff tif tiff; 51 | image/vnd.wap.wbmp wbmp; 52 | image/webp webp; 53 | image/x-jng jng; 54 | video/3gpp 3gp 3gpp; 55 | video/mp4 f4p f4v m4v mp4; 56 | video/mpeg mpeg mpg; 57 | video/ogg ogv; 58 | video/quicktime mov; 59 | video/webm webm; 60 | video/x-flv flv; 61 | video/x-mng mng; 62 | video/x-ms-asf asf asx; 63 | video/x-msvideo avi; 64 | 65 | # Serving `.ico` image files with a different media type 66 | # prevents Internet Explorer from displaying then as images: 67 | # https://github.com/h5bp/html5-boilerplate/commit/37b5fec090d00f38de64b591bcddcb205aadf8ee 68 | 69 | image/x-icon cur ico; 70 | 71 | 72 | # Microsoft Office 73 | 74 | application/msword doc; 75 | application/vnd.ms-excel xls; 76 | application/vnd.ms-powerpoint ppt; 77 | application/vnd.openxmlformats-officedocument.wordprocessingml.document docx; 78 | application/vnd.openxmlformats-officedocument.spreadsheetml.sheet xlsx; 79 | application/vnd.openxmlformats-officedocument.presentationml.presentation pptx; 80 | 81 | 82 | # Web fonts 83 | 84 | font/woff woff; 85 | font/woff2 woff2; 86 | application/vnd.ms-fontobject eot; 87 | font/ttf ttf; 88 | font/collection ttc; 89 | font/otf otf; 90 | 91 | 92 | # Other 93 | 94 | application/java-archive ear jar war; 95 | application/mac-binhex40 hqx; 96 
| application/octet-stream bin deb dll dmg exe img iso msi msm msp safariextz; 97 | application/pdf pdf; 98 | application/postscript ai eps ps; 99 | application/rtf rtf; 100 | application/vnd.google-earth.kml+xml kml; 101 | application/vnd.google-earth.kmz kmz; 102 | application/vnd.wap.wmlc wmlc; 103 | application/x-7z-compressed 7z; 104 | application/x-bb-appworld bbaw; 105 | application/x-bittorrent torrent; 106 | application/x-chrome-extension crx; 107 | application/x-cocoa cco; 108 | application/x-java-archive-diff jardiff; 109 | application/x-java-jnlp-file jnlp; 110 | application/x-makeself run; 111 | application/x-opera-extension oex; 112 | application/x-perl pl pm; 113 | application/x-pilot pdb prc; 114 | application/x-rar-compressed rar; 115 | application/x-redhat-package-manager rpm; 116 | application/x-sea sea; 117 | application/x-shockwave-flash swf; 118 | application/x-stuffit sit; 119 | application/x-tcl tcl tk; 120 | application/x-x509-ca-cert crt der pem; 121 | application/x-xpinstall xpi; 122 | application/xhtml+xml xhtml; 123 | application/xslt+xml xsl; 124 | application/zip zip; 125 | text/calendar ics; 126 | text/css css; 127 | text/csv csv; 128 | text/html htm html shtml; 129 | text/markdown md markdown; 130 | text/mathml mml; 131 | text/plain txt; 132 | text/vcard vcard vcf; 133 | text/vnd.rim.location.xloc xloc; 134 | text/vnd.sun.j2me.app-descriptor jad; 135 | text/vnd.wap.wml wml; 136 | text/vtt vtt; 137 | text/x-component htc; 138 | 139 | } 140 | -------------------------------------------------------------------------------- /ssh_lib/assets/nginx/nginx.conf: -------------------------------------------------------------------------------- 1 | user nginx; 2 | pid /var/run/nginx.pid; 3 | 4 | 5 | worker_processes auto; 6 | worker_rlimit_nofile 300000; # needs to be < ulimit -n 7 | 8 | error_log /data/nginx/logs/nginx-error.log warn; 9 | 10 | events { 11 | worker_connections 40000; 12 | multi_accept on; 13 | } 14 | 15 | http { 16 | # 
aggressive caching for read-only sources 17 | open_file_cache max=1000000 inactive=60m; 18 | open_file_cache_valid 60m; 19 | open_file_cache_min_uses 1; 20 | open_file_cache_errors on; 21 | 22 | server_tokens off; 23 | 24 | include /etc/nginx/mime.types; 25 | types { 26 | application/x-protobuf pbf; 27 | } 28 | default_type application/octet-stream; 29 | 30 | charset utf-8; 31 | 32 | sendfile on; 33 | tcp_nopush on; 34 | tcp_nodelay on; 35 | 36 | reset_timedout_connection on; 37 | send_timeout 20; 38 | 39 | max_ranges 0; 40 | 41 | gzip on; 42 | gzip_comp_level 1; 43 | gzip_types application/json application/x-protobuf; 44 | 45 | log_format access_json '{' 46 | 47 | # general 48 | '"time": "$time_iso8601", ' 49 | '"status": $status, ' 50 | #'"request_method": "$request_method", ' 51 | #'"uri": "$uri", ' 52 | #'"request": "$request", ' 53 | #'"request_time": $request_time, ' 54 | '"body_bytes_sent": $body_bytes_sent, ' 55 | '"http_referrer": "$http_referer", ' 56 | '"http_user_agent": "$http_user_agent", ' 57 | #'"scheme": "$scheme", ' 58 | #'"host": "$host", ' 59 | #'"http_host": "$http_host", ' 60 | 61 | # IP address related 62 | # IP address logging is disabled 63 | #'"remote_addr": "$remote_addr", ' 64 | #'"http_x_forwarded_for": "$http_x_forwarded_for", ' 65 | 66 | # CF related 67 | #'"http_cf_ray": "$http_cf_ray", ' 68 | #'"http_cf_ipcountry": "$http_cf_ipcountry", ' 69 | #'"http_cf_connecting_ip": "$http_cf_connecting_ip", ' 70 | 71 | '"_": "_"' # helper for no trailing comma 72 | '}'; 73 | 74 | access_log off; 75 | #access_log /data/nginx/logs/nginx-access.log access_json buffer=128k; 76 | 77 | include /data/nginx/config/*; 78 | include /data/nginx/sites/*; 79 | } 80 | -------------------------------------------------------------------------------- /ssh_lib/benchmark.py: -------------------------------------------------------------------------------- 1 | from ssh_lib import MODULES_DIR 2 | from ssh_lib.utils import apt_get_install, exists, put 3 | 4 | 5 | def 
def wrk(c):
    # Install the wrk HTTP benchmarking tool and stage the custom Lua
    # script (URL-list driver) into /data/ofm/benchmark on the server.
    apt_get_install(c, 'wrk')
    c.sudo('mkdir -p /data/ofm/benchmark')
    # NOTE(review): the repo tree shows wrk_custom_list.lua under
    # docs/benchmark/, not modules/http_host/benchmark/ -- confirm this
    # source path still resolves before relying on install_benchmark().
    put(c, f'{MODULES_DIR}/http_host/benchmark/wrk_custom_list.lua', '/data/ofm/benchmark')
def certbot(c):
    """Install certbot via snap (replacing any apt-packaged version)
    together with the Cloudflare DNS plugin."""

    apt_get_install(c, 'snapd')

    # installing the snap core is flaky right after snapd is installed;
    # running it twice makes it reliable
    for _ in range(2):
        c.sudo('snap install core', warn=True, echo=True)

    c.sudo('snap refresh core', warn=True)

    # the apt-packaged certbot conflicts with the snap one
    apt_get_purge(c, 'certbot')

    c.sudo('snap install --classic certbot', warn=True)
    c.sudo('snap set certbot trust-plugin-with-root=ok', warn=True)
    c.sudo('snap install certbot-dns-cloudflare', warn=True)
def pkg_base(c):
    """Install the baseline apt packages every OFM server needs."""

    package_groups = [
        # general tooling
        ['aria2', 'build-essential', 'curl', 'dnsutils', 'git', 'htop',
         'lsb-release', 'pigz', 'rsync', 'unzip', 'wget', 'psmisc', 'util-linux'],
        # filesystem
        ['btrfs-progs'],
        # keys / certificates
        ['ca-certificates', 'gnupg-agent', 'gnupg2', 'ubuntu-keyring'],
        # network monitoring
        ['iftop', 'nload', 'vnstat'],
        # python
        ['python3', 'python3-venv'],
    ]

    # single apt-get invocation with all packages, same order as above
    all_packages = [pkg for group in package_groups for pkg in group]
    apt_get_install(c, ' '.join(all_packages))
def rclone(c):
    """Install rclone via the official install script, skipping when a
    binary is already present."""

    already_installed = exists(c, '/usr/bin/rclone')
    if already_installed:
        return

    apt_get_update(c)
    # NOTE(review): piping a remote script into bash trusts rclone.org
    # plus TLS; acceptable for this deploy tooling but worth knowing
    c.sudo('curl https://rclone.org/install.sh | sudo bash')
def prepare_tile_gen(c, *, enable_cron):
    """Deploy the tile_gen module to the server.

    Installs planetiler, re-uploads the tile_gen code and the rclone
    config (when present locally), pip-installs the module into the
    shared venv, resets the log directory, fixes ownership, and
    optionally enables the cron job.
    """

    # disable cron while we replace the code underneath it
    c.sudo('rm -f /etc/cron.d/ofm_tile_gen')

    install_planetiler(c)

    # fresh deploy: wipe and re-upload the whole bin dir
    c.sudo(f'rm -rf {TILE_GEN_BIN}')
    put_dir(c, MODULES_DIR / 'tile_gen', TILE_GEN_BIN, file_permissions='755')
    for subdir in ('tile_gen_lib', 'scripts'):
        put_dir(c, MODULES_DIR / 'tile_gen' / subdir, f'{TILE_GEN_BIN}/{subdir}')

    local_rclone_conf = CONFIG_DIR / 'rclone.conf'
    if local_rclone_conf.exists():
        put(
            c,
            local_rclone_conf,
            f'{REMOTE_CONFIG}/rclone.conf',
            permissions='600',
            user='ofm',
        )

    c.sudo(f'{VENV_BIN}/pip install -e {TILE_GEN_BIN} --use-pep517')

    # reset the log directory
    c.sudo('rm -rf /data/ofm/tile_gen/logs')
    c.sudo('mkdir -p /data/ofm/tile_gen/logs')

    c.sudo('chown ofm:ofm /data/ofm/tile_gen/{,*}')
    c.sudo(f'chown ofm:ofm -R {TILE_GEN_BIN}')

    if enable_cron:
        put(c, MODULES_DIR / 'tile_gen' / 'cron.d' / 'ofm_tile_gen', '/etc/cron.d/')
c.sudo('chown nginx:nginx /data/ofm/http_host/logs_nginx') 98 | 99 | upload_http_host_files(c) 100 | 101 | if dotenv_val('DOMAIN_ROUNDROBIN'): 102 | assert (CONFIG_DIR / 'rclone.conf').exists() 103 | put( 104 | c, 105 | CONFIG_DIR / 'rclone.conf', 106 | f'{REMOTE_CONFIG}/rclone.conf', 107 | permissions=400, 108 | ) 109 | put(c, MODULES_DIR / 'http_host' / 'cron.d' / 'ofm_roundrobin_reader', '/etc/cron.d/') 110 | 111 | c.sudo(f'{VENV_BIN}/pip install -e {HTTP_HOST_BIN} --use-pep517') 112 | 113 | 114 | def run_http_host_sync(c): 115 | print('Running http_host.py sync --force') 116 | sudo_cmd(c, f'{VENV_BIN}/python -u {HTTP_HOST_BIN}/http_host.py sync --force') 117 | 118 | 119 | def upload_http_host_files(c): 120 | c.sudo(f'rm -rf {HTTP_HOST_BIN}') 121 | c.sudo(f'mkdir -p {HTTP_HOST_BIN}') 122 | 123 | put_dir(c, MODULES_DIR / 'http_host', HTTP_HOST_BIN, file_permissions='755') 124 | 125 | for dirname in ['http_host_lib', 'scripts']: 126 | put_dir(c, MODULES_DIR / 'http_host' / dirname, f'{HTTP_HOST_BIN}/{dirname}') 127 | 128 | put_dir( 129 | c, 130 | MODULES_DIR / 'http_host' / 'http_host_lib' / 'nginx_confs', 131 | f'{HTTP_HOST_BIN}/http_host_lib/nginx_confs', 132 | ) 133 | 134 | c.sudo('chown -R ofm:ofm /data/ofm/http_host') 135 | 136 | 137 | def install_benchmark(c): 138 | """ 139 | Read docs/quick_notes/http_benchmark.md 140 | """ 141 | c1000k(c) 142 | wrk(c) 143 | 144 | 145 | def setup_roundrobin_writer(c): 146 | letsencrypt_email = dotenv_val('LETSENCRYPT_EMAIL').lower() 147 | domain_roundrobin = dotenv_val('DOMAIN_ROUNDROBIN').lower() 148 | assert letsencrypt_email 149 | assert domain_roundrobin 150 | assert (CONFIG_DIR / 'rclone.conf').exists() 151 | assert (CONFIG_DIR / 'cloudflare.ini').exists() 152 | 153 | rclone(c) 154 | certbot(c) 155 | 156 | c.sudo(f'mkdir -p {REMOTE_CONFIG}') 157 | 158 | put( 159 | c, 160 | CONFIG_DIR / 'rclone.conf', 161 | f'{REMOTE_CONFIG}/rclone.conf', 162 | permissions=400, 163 | ) 164 | 165 | put( 166 | c, 167 | CONFIG_DIR / 
'cloudflare.ini', 168 | f'{REMOTE_CONFIG}/cloudflare.ini', 169 | permissions=400, 170 | ) 171 | 172 | c.sudo('rm -rf /data/ofm/roundrobin') 173 | 174 | put( 175 | c, 176 | MODULES_DIR / 'roundrobin' / 'rclone_write.sh', 177 | '/data/ofm/roundrobin/rclone_write.sh', 178 | create_parent_dir=True, 179 | permissions=500, 180 | ) 181 | 182 | # only use with --staging 183 | # c.sudo('certbot delete --noninteractive --cert-name ofm_roundrobin', warn=True) 184 | 185 | sudo_cmd( 186 | c, 187 | 'certbot certonly ' 188 | '--dns-cloudflare ' 189 | f'--dns-cloudflare-credentials {REMOTE_CONFIG}/cloudflare.ini ' 190 | '--dns-cloudflare-propagation-seconds 20 ' 191 | f'--noninteractive ' 192 | f'-m {letsencrypt_email} ' 193 | f'--agree-tos ' 194 | f'--cert-name=ofm_roundrobin ' 195 | f'--deploy-hook /data/ofm/roundrobin/rclone_write.sh ' 196 | f'-d {domain_roundrobin}', 197 | # f'-d {domain2_roundrobin}', 198 | ) 199 | 200 | 201 | def upload_config_json(c): 202 | domain_direct = dotenv_val('DOMAIN_DIRECT').lower() 203 | domain_roundrobin = dotenv_val('DOMAIN_ROUNDROBIN').lower() 204 | skip_planet = dotenv_val('SKIP_PLANET').lower() == 'true' 205 | self_signed_certs = dotenv_val('SELF_SIGNED_CERTS').lower() == 'true' 206 | letsencrypt_email = dotenv_val('LETSENCRYPT_EMAIL').lower() 207 | 208 | if not (domain_direct or domain_roundrobin): 209 | sys.exit('Please specify DOMAIN_DIRECT or DOMAIN_ROUNDROBIN in config/.env') 210 | 211 | if domain_direct and not letsencrypt_email and not self_signed_certs: 212 | sys.exit('Please add your email to LETSENCRYPT_EMAIL when using DOMAIN_DIRECT') 213 | 214 | http_host_list = [h.strip() for h in dotenv_val('HTTP_HOST_LIST').split(',') if h.strip()] 215 | 216 | config = { 217 | 'domain_direct': domain_direct, 218 | 'domain_roundrobin': domain_roundrobin, 219 | 'letsencrypt_email': letsencrypt_email, 220 | 'skip_planet': skip_planet, 221 | 'self_signed_certs': self_signed_certs, 222 | 'http_host_list': http_host_list, 223 | 'telegram_token': 
def upload_config_json(c):
    """Assemble config.json from config/.env values and upload it remotely.

    Exits with a user-facing message when required .env values are missing.
    """
    domain_direct = dotenv_val('DOMAIN_DIRECT').lower()
    domain_roundrobin = dotenv_val('DOMAIN_ROUNDROBIN').lower()
    skip_planet = dotenv_val('SKIP_PLANET').lower() == 'true'
    self_signed_certs = dotenv_val('SELF_SIGNED_CERTS').lower() == 'true'
    letsencrypt_email = dotenv_val('LETSENCRYPT_EMAIL').lower()

    # at least one serving domain must be configured
    if not domain_direct and not domain_roundrobin:
        sys.exit('Please specify DOMAIN_DIRECT or DOMAIN_ROUNDROBIN in config/.env')

    # direct domains need an email for Let's Encrypt, unless self-signed
    if domain_direct and not letsencrypt_email and not self_signed_certs:
        sys.exit('Please add your email to LETSENCRYPT_EMAIL when using DOMAIN_DIRECT')

    # comma-separated host list; drop empty entries and surrounding whitespace
    raw_hosts = dotenv_val('HTTP_HOST_LIST').split(',')
    http_host_list = [host.strip() for host in raw_hosts if host.strip()]

    config = {
        'domain_direct': domain_direct,
        'domain_roundrobin': domain_roundrobin,
        'letsencrypt_email': letsencrypt_email,
        'skip_planet': skip_planet,
        'self_signed_certs': self_signed_certs,
        'http_host_list': http_host_list,
        'telegram_token': dotenv_val('TELEGRAM_TOKEN'),
        'telegram_chat_id': dotenv_val('TELEGRAM_CHAT_ID'),
    }

    config_str = json.dumps(config, indent=2, ensure_ascii=False)
    print(config_str)
    put_str(c, f'{REMOTE_CONFIG}/config.json', config_str)


def setup_loadbalancer(c):
    """Install the loadbalancer module and its cron job from a clean slate."""
    lb_dir = '/data/ofm/loadbalancer'

    # remove the cron job first, so a half-finished setup cannot be triggered
    c.sudo('rm -f /etc/cron.d/ofm_loadbalancer')

    put(
        c,
        CONFIG_DIR / 'cloudflare.ini',
        f'{REMOTE_CONFIG}/cloudflare.ini',
        permissions=400,
    )

    c.sudo(f'rm -rf {lb_dir}')
    put_dir(c, MODULES_DIR / 'loadbalancer', lb_dir)
    put_dir(
        c,
        MODULES_DIR / 'loadbalancer' / 'loadbalancer_lib',
        f'{lb_dir}/loadbalancer_lib',
    )

    c.sudo(f'{VENV_BIN}/pip install -e {lb_dir} --use-pep517')

    c.sudo(f'mkdir -p {lb_dir}/logs')
    c.sudo(f'chown -R ofm:ofm {lb_dir}')

    put(c, MODULES_DIR / 'loadbalancer' / 'cron.d' / 'ofm_loadbalancer', '/etc/cron.d/')
def put_dir(
    c,
    local_dir: Path,
    remote_dir,
    dir_permissions=None,
    file_permissions=None,
    user='root',
    group=None,
    exclude_set=None,
):
    """
    Copy all regular files from local_dir to remote_dir (not recursive).

    Creates remote_dir first and applies dir_permissions/user/group to it;
    file_permissions/user/group are applied to each uploaded file.
    Files whose names are in exclude_set are skipped.
    """

    files = [file for file in local_dir.iterdir() if file.is_file()]

    if exclude_set:
        files = [f for f in files if f.name not in exclude_set]

    c.sudo(f'mkdir -p "{remote_dir}"')
    set_permission(c, remote_dir, permissions=dir_permissions, user=user, group=group)

    for file in files:
        print(f'uploading {remote_dir}/{file.name}')
        put(c, file, f'{remote_dir}/{file.name}', file_permissions, user, group)


def put_str(c, remote_path, str_):
    """Upload a string to remote_path as a file (a trailing newline is appended).

    Uses a private temp directory instead of a fixed ./tmp.txt, so concurrent
    runs cannot clobber each other and nothing is left behind in the CWD on a
    crash. The local basename stays 'tmp.txt' to preserve the uploaded filename
    when remote_path is a directory.
    """
    import shutil
    import tempfile

    tmp_dir = tempfile.mkdtemp(prefix='fabtmp_')
    tmp_file = os.path.join(tmp_dir, 'tmp.txt')
    try:
        with open(tmp_file, 'w') as outfile:
            outfile.write(str_ + '\n')
        put(c, tmp_file, remote_path)
    finally:
        shutil.rmtree(tmp_dir, ignore_errors=True)


def append_str(c, remote_path, str_):
    """Append a string (plus newline) to a remote file via a temp upload."""
    tmp_path = f'/tmp/fabtmp_{random_string(8)}'
    put_str(c, tmp_path, str_)

    sudo_cmd(c, f"cat '{tmp_path}' >> '{remote_path}'")
    c.sudo(f'rm -f {tmp_path}')


def sudo_cmd(c, cmd, *, user=None):
    """Run cmd through `bash -c` with sudo; exit the program on failure.

    NOTE(review): only double quotes are escaped — backslashes, `$` and
    backticks still get shell-expanded, which callers appear to rely on
    (e.g. `cd X && source Y`); confirm before tightening.
    """
    cmd = cmd.replace('"', '\\"')

    try:
        c.sudo(f'bash -c "{cmd}"', user=user)
    except UnexpectedExit as e:
        print(f'Command failed: {e.result.command}')
        print(f'Error: {e.result.stderr}')
        sys.exit(1)


def run_nice(c, cmd):
    """Run cmd without sudo; print a readable error and exit on failure."""
    try:
        c.run(cmd)
    except UnexpectedExit as e:
        print(f'Command failed: {e.result.command}')
        print(f'Error: {e.result.stderr}')
        sys.exit(1)


def set_permission(c, path, *, permissions=None, user=None, group=None):
    """Apply chown (user[:group], group defaults to user) and chmod to path."""
    if user:
        if not group:
            group = user
        c.sudo(f"chown {user}:{group} '{path}'")

    if permissions:
        c.sudo(f"chmod {permissions} '{path}'")


def reboot(c):
    """Reboot the remote machine; the dropped connection error is expected."""
    print('Rebooting')
    try:
        c.sudo('reboot')
    except Exception:
        # the ssh connection dies when the machine goes down — that's fine
        pass


def exists(c, path):
    """Return True if path exists on the remote machine (any file type)."""
    return c.sudo(f"test -e '{path}'", hide=True, warn=True).ok


def is_dir(c, path):
    """Return True if path is a directory on the remote machine."""
    return c.sudo(f"test -d '{path}'", hide=True, warn=True).ok


def random_string(length):
    """Return a random string of uppercase letters and digits (CSPRNG-backed)."""
    return ''.join(secrets.choice(string.ascii_uppercase + string.digits) for _ in range(length))


def ubuntu_release(c):
    """Return the first two chars of the Ubuntu release, e.g. '22' for 22.04."""
    return c.run('lsb_release -rs').stdout.strip()[:2]


def ubuntu_codename(c):
    """Return the Ubuntu codename, e.g. 'jammy'."""
    return c.run('lsb_release -cs').stdout.strip()


def apt_get_update(c):
    """Refresh the apt package index."""
    c.sudo('apt-get update')


def apt_get_install(c, pkgs, warn=False):
    """Install packages non-interactively, without recommended extras.

    Args:
        pkgs: space-separated package names.
        warn: when True, a failed install does not raise.
    """
    c.sudo(
        f'DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends {pkgs}',
        warn=warn,
        echo=True,
    )


def apt_get_purge(c, pkgs):
    """Purge packages (remove including config files) non-interactively."""
    c.sudo(f'DEBIAN_FRONTEND=noninteractive apt-get purge -y {pkgs}')


def apt_get_autoremove(c):
    """Remove packages that were installed as dependencies and are now unused."""
    c.sudo('DEBIAN_FRONTEND=noninteractive apt-get autoremove -y')


def get_username(c):
    """Return the remote login username."""
    return c.run('whoami').stdout.strip()


def add_user(c, username, passwd=None, uid=None):
    """Create a user account; idempotent (existing user is a warning, not an error).

    NOTE(review): when passwd is given it is passed on a shell command line,
    so it is briefly visible in the remote process list — acceptable for
    provisioning, but confirm before using with sensitive passwords.
    """
    uid_str = f'--uid={uid}' if uid else ''

    # --disabled-password -> ssh-key login only
    c.sudo(f'adduser --disabled-password --gecos "" {uid_str} {username}', warn=True)
    if passwd:
        sudo_cmd(c, f'echo "{username}:{passwd}" | chpasswd')


def remove_user(c, username):
    """Delete a user and make sure its home directory is gone."""
    c.sudo(f'userdel -r {username}', warn=True)
    c.sudo(f'rm -rf /home/{username}')
def get_latest_release_github(user, repo):
    """Return the tag name of the latest GitHub release of user/repo.

    Raises:
        requests.HTTPError: on a non-2xx API response.
        requests.Timeout: if the API does not answer within 30 seconds
            (previously this call could hang forever with no timeout).
    """
    url = f'https://api.github.com/repos/{user}/{repo}/releases/latest'
    r = requests.get(url, timeout=30)
    r.raise_for_status()

    data = r.json()
    # sanity check: this project expects the release name to equal its git tag
    assert data['tag_name'] == data['name']

    return data['tag_name']
'@astrojs/sitemap' 5 | 6 | // https://astro.build/config 7 | 8 | export default defineConfig({ 9 | site: 'https://openfreemap.org', 10 | trailingSlash: 'always', 11 | vite: { 12 | css: { 13 | transformer: 'lightningcss', 14 | }, 15 | }, 16 | integrations: [sitemap()], 17 | }) 18 | -------------------------------------------------------------------------------- /website/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "website_astro", 3 | "type": "module", 4 | "version": "0.0.1", 5 | "scripts": { 6 | "dev": "astro dev", 7 | "start": "astro dev", 8 | "build": "astro build", 9 | "preview": "astro preview", 10 | "astro": "astro" 11 | }, 12 | "dependencies": { 13 | "@astrojs/sitemap": "^3.2.1", 14 | "astro": "^5.4.0", 15 | "lightningcss": "^1.29.1" 16 | }, 17 | "pnpm": { 18 | "onlyBuiltDependencies": ["esbuild", "sharp"] 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /website/public/berlin.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperknot/openfreemap/dd97e1fdcbd2f7d9c90eee9b52c96bc2ac9009b0/website/public/berlin.webp -------------------------------------------------------------------------------- /website/public/bsky.svg: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /website/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperknot/openfreemap/dd97e1fdcbd2f7d9c90eee9b52c96bc2ac9009b0/website/public/favicon.ico -------------------------------------------------------------------------------- /website/public/github.svg: -------------------------------------------------------------------------------- 1 | 2 | 
-------------------------------------------------------------------------------- /website/public/logo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperknot/openfreemap/dd97e1fdcbd2f7d9c90eee9b52c96bc2ac9009b0/website/public/logo.jpg -------------------------------------------------------------------------------- /website/public/robots.txt: -------------------------------------------------------------------------------- 1 | User-agent: * 2 | Allow: / 3 | 4 | Sitemap: https://openfreemap.org/sitemap-index.xml 5 | -------------------------------------------------------------------------------- /website/public/scripts/map.js: -------------------------------------------------------------------------------- 1 | const london3d = { 2 | center: [-0.114, 51.506], 3 | zoom: 14.2, 4 | bearing: 55.2, 5 | pitch: 60, 6 | } 7 | 8 | const berlin = { 9 | center: [13.388, 52.517], 10 | zoom: 9.5, 11 | bearing: 0, 12 | pitch: 0, 13 | } 14 | 15 | function initMap() { 16 | if (window.map) return 17 | 18 | document.getElementById('mapbg-image').style.opacity = '0.5' 19 | 20 | // RTL support, optional 21 | maplibregl.setRTLTextPlugin( 22 | 'https://unpkg.com/@mapbox/mapbox-gl-rtl-text@0.3.0/dist/mapbox-gl-rtl-text.js', 23 | true, // Lazy load the plugin 24 | ) 25 | 26 | const map = new maplibregl.Map({ 27 | style: 'https://tiles.openfreemap.org/styles/liberty', 28 | center: berlin.center, 29 | zoom: berlin.zoom, 30 | bearing: berlin.bearing, 31 | pitch: berlin.pitch, 32 | container: mapDiv, 33 | boxZoom: false, 34 | // doubleClickZoom: false, 35 | // scrollZoom: false, 36 | attributionControl: false, 37 | cooperativeGestures: true, 38 | dragRotate: false, 39 | }) 40 | window.map = map 41 | 42 | map.once('idle', () => { 43 | document.getElementById('mapbg-image').remove() 44 | }) 45 | 46 | const nav = new maplibregl.NavigationControl({ showCompass: false }) 47 | map.addControl(nav, 'top-right') 48 | 49 | // let 
// Switch the map to the style behind a button value (e.g. 'liberty', 'liberty-3d').
function selectStyle(style) {
  const baseName = style.split('-')[0]
  const styleUrl = `https://tiles.openfreemap.org/styles/${baseName}`
  map.setStyle(styleUrl)

  if (style === 'liberty-3d') {
    // jump to the tilted London view and allow rotation
    map.setCenter(london3d.center)
    map.setPitch(london3d.pitch)
    map.setBearing(london3d.bearing)
    map.setZoom(london3d.zoom)
    map.dragRotate.enable()
  } else if (map.getBearing() !== 0) {
    // coming back from the rotated 3D view: reset to the flat Berlin view
    map.setCenter(berlin.center)
    map.setPitch(berlin.pitch)
    map.setBearing(berlin.bearing)
    map.setZoom(berlin.zoom)
    map.dragRotate.disable()
  }

  document.getElementById('style-url-code').innerText = styleUrl
}

// --- start

const mapDiv = document.getElementById('map-container')
initMap()

const buttons = document.querySelectorAll('.button-container .btn')

for (const button of buttons) {
  button.addEventListener('click', (event) => {
    // single-selection behavior: clear every button, mark the clicked one
    for (const other of buttons) other.classList.remove('selected')
    button.classList.add('selected')

    selectStyle(event.target.getAttribute('data-style'))
  })
}
Sponsor me 10 | 11 | -------------------------------------------------------------------------------- /website/src/components/Logo.astro: -------------------------------------------------------------------------------- 1 | --- 2 | const { title } = Astro.props 3 | --- 4 | 5 | 6 | 7 |

8 | 9 |
10 | github 13 | x 14 | bsky 15 |
16 | -------------------------------------------------------------------------------- /website/src/components/Map.astro: -------------------------------------------------------------------------------- 1 | --- 2 | import StyleUrlBug from './StyleUrlBug.astro' 3 | const { showStyleURL } = Astro.props 4 | --- 5 | 6 |
7 |
8 |
9 | OpenFreeMap 10 | © OpenMapTiles Data from 11 | OpenStreetMap 12 |
13 |
14 |
15 | 16 |
17 | 18 | 19 | 20 | 21 |
22 | 23 | {showStyleURL && } 24 | 25 | 26 | 27 | -------------------------------------------------------------------------------- /website/src/components/StyleUrlBug.astro: -------------------------------------------------------------------------------- 1 |
2 |

Use the following style in a MapLibre map:

3 |
https://tiles.openfreemap.org/styles/liberty
5 |
6 | -------------------------------------------------------------------------------- /website/src/content/donate/pro.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperknot/openfreemap/dd97e1fdcbd2f7d9c90eee9b52c96bc2ac9009b0/website/src/content/donate/pro.md -------------------------------------------------------------------------------- /website/src/content/donate/sponsor.md: -------------------------------------------------------------------------------- 1 | ## How can I sponsor the project? 2 | 3 | If this project helps you save on your map hosting costs, please consider sponsoring me on [GitHub Sponsors](https://github.com/sponsors/hyperknot). 4 | 5 | If possible, please choose a monthly donation, even if it's a smaller amount. The nature of this project needs recurring donations to cover the server costs. 6 | -------------------------------------------------------------------------------- /website/src/content/how_to_use/custom_styles.md: -------------------------------------------------------------------------------- 1 | ## Custom styles 2 | 3 | You can customize the styles using the [Maputnik](https://maputnik.github.io/) editor. For example, you can remove labels, POIs, or change colors. 4 | 5 | When you use a customized style, you need to host the style JSON yourself and use its URL in MapLibre. 6 | 7 | Customize Bright 8 | 9 | Customize Liberty 10 | 11 | Customize Positron 12 | -------------------------------------------------------------------------------- /website/src/content/how_to_use/leaflet.md: -------------------------------------------------------------------------------- 1 | ## Using Leaflet 2 | 3 | [MapLibre GL Leaflet](https://github.com/maplibre/maplibre-gl-leaflet) provides a binding for Leaflet that allows you to add vector tile sources to the Leaflet map. 
4 | 5 | Include the following links and scripts in your page: 6 | 7 | ```html 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | ``` 19 | 20 | Initialize it to a div like this: 21 | 22 | ```html 23 |
24 | 31 | ``` 32 | -------------------------------------------------------------------------------- /website/src/content/how_to_use/mapbox.md: -------------------------------------------------------------------------------- 1 | ## Using Mapbox? 2 | 3 | If you are currently using Mapbox, please change your libraries to [MapLibre GL JS](https://maplibre.org/maplibre-gl-js/docs/). MapLibre is based on the last open-source version of Mapbox GL JS before it went closed-source. Migrating should be as simple as changing the libraries, as long as you are not using any features specific to the 2.x or later releases. 4 | -------------------------------------------------------------------------------- /website/src/content/how_to_use/maplibre.md: -------------------------------------------------------------------------------- 1 | ## How to load MapLibre? 2 | 3 | Include MapLibre GL JS in the ``. If you are using npm, you can install the `maplibre-gl` package. Make sure to import the CSS as well. 4 | 5 | ```html 6 | 7 | 8 | ``` 9 | 10 | Initialize it to a div like this: 11 | 12 | ```html 13 |
14 | 22 | ``` 23 | -------------------------------------------------------------------------------- /website/src/content/how_to_use/mobile.md: -------------------------------------------------------------------------------- 1 | ## Mobile Apps 2 | 3 | For mobile apps, you can use the same styles with [MapLibre Native](https://maplibre.org/). 4 | -------------------------------------------------------------------------------- /website/src/content/how_to_use/openlayers.md: -------------------------------------------------------------------------------- 1 | ## Using OpenLayers 2 | 3 | [ol-mapbox-style](https://openlayers.org/ol-mapbox-style/) is an OpenLayers add-on that creates a layer group from a Mapbox/MapLibre style. 4 | 5 | Include the following links and scripts in your page: 6 | 7 | ```html 8 | 9 | 10 | 11 | 12 | 13 | 14 | ``` 15 | 16 | Initialize it to a div like this: 17 | 18 | ```html 19 |
20 | 29 | ``` -------------------------------------------------------------------------------- /website/src/content/how_to_use/self_hosting.md: -------------------------------------------------------------------------------- 1 | ## Self-hosting 2 | 3 | You can also download our processed full planet Btrfs images if you want to self-host yourself. Details can be found on [GitHub](https://github.com/hyperknot/openfreemap). 4 | -------------------------------------------------------------------------------- /website/src/content/index/after_donate.md: -------------------------------------------------------------------------------- 1 | ## Is commercial usage allowed? 2 | 3 | Yes. 4 | 5 | ## Do you offer support and SLA guarantees? 6 | 7 | At the moment, I don't offer SLA guarantees or personalized support. However, if there's enough interest, I may introduce a Pro plan in the future. If you're interested, please let me know by sending an [email](mailto:zsolt@openfreemap.org). 8 | 9 | ## What is the tech stack? 10 | 11 | There is no tile server running; only nginx serving a Btrfs image with 300 million hard-linked files. This was my idea; I haven't read about anyone else doing this in production, but it works really well. (You can read more about it on [GitHub](https://github.com/hyperknot/openfreemap).) 12 | 13 | There is no cloud, just dedicated servers. 14 | 15 | Special thanks go to [Michael Barry](https://github.com/msbarry) for developing [Planetiler](https://github.com/onthegomap/planetiler). It made it possible to generate the tiles in 5 hours instead of 5 weeks. 16 | 17 | The [styles](https://github.com/hyperknot/openfreemap-styles) are forked and heavily modified. The map schema is unmodified [OpenMapTiles](https://github.com/openmaptiles/openmaptiles). 18 | 19 | ## Attribution 20 | 21 | Attribution is required. If you are using MapLibre, they are automatically added, you have nothing to do. 
22 | 23 | If you are using alternative clients, or if you are using this in printed media or video, you must add the following attribution: 24 | 25 | OpenFreeMap © OpenMapTiles Data from OpenStreetMap 26 | 27 | You do not need to display the OpenFreeMap part, but it is nice if you do. 28 | 29 | ## License 30 | 31 | The license of this project is [MIT](https://www.tldrlegal.com/license/mit-license). Map data is from [OpenStreetMap](https://www.openstreetmap.org/copyright). The licenses for included projects are listed in [LICENSE.md](https://github.com/hyperknot/openfreemap/blob/main/LICENSE.md). 32 | -------------------------------------------------------------------------------- /website/src/content/index/before_donate.md: -------------------------------------------------------------------------------- 1 | ## Who is behind this project and how can I follow it? 2 | 3 | I'm Zsolt Ero ([blog](https://blog.hyperknot.com/), [email](mailto:zsolt@openfreemap.org)). 4 | 5 | After 9 years of running my own map tile infrastructure for [MapHub](https://maphub.net/), I've open-sourced it and launched OpenFreeMap. 6 | 7 | X: [@hyperknot](https://x.com/hyperknot) (details) \ 8 | X: [@OpenFreeMapOrg](https://x.com/OpenFreeMapOrg) (announcements) \ 9 | bsky: [@hyperknot.com](https://bsky.app/profile/hyperknot.com) 10 | 11 | GitHub: [openfreemap](https://github.com/hyperknot/openfreemap) and [openfreemap-styles](https://github.com/hyperknot/openfreemap-styles) 12 | 13 | ## Why did you build this project? 14 | 15 | OpenStreetMap is one of the most important collective projects in history. It began 20 years ago, and today, 3 million edits are made each day! 16 | 17 | For a long time, when you wanted to use the map on your website or app, you had to look for a commercial map tile provider and hope your site didn't become too popular. Otherwise, you might end up with a $10,000 bill in a single day, as Hoodmaps [did](https://x.com/levelsio/status/1730659933232730443). 
18 | 19 | Self-hosting was an option, but it required a big server and a lot of time to get it right. 20 | 21 | Since I've spent many years developing the map tile infrastructure for [MapHub](https://maphub.net/), I decided to open-source it so anyone can use it. With OpenFreeMap, you now have the option to either set up your own server or use our public instance. 22 | 23 | ## How can you offer a free public instance? 24 | 25 | There is no technical reason why map hosting costs as much as it does today. Vector tiles are just static files. It's true that serving hundreds of millions of files is not easy, but at the end of the day, they are just files. 26 | 27 | Financially, the plan is to keep renting servers until they cover the bandwidth. I believe it can be self-sustainable if enough people subscribe to the support plans. 28 | 29 | If this project helps you save on your map hosting costs, please consider subscribing to a support plan. 30 | -------------------------------------------------------------------------------- /website/src/content/index/whatis.md: -------------------------------------------------------------------------------- 1 | ## What is OpenFreeMap? 2 | 3 | OpenFreeMap lets you display custom maps on your website and apps for free. 4 | 5 | You can either self-host or use our public instance. Everything is **open-source**, including the full production setup — there’s no 'open-core' model here. Check out our [GitHub](https://github.com/hyperknot/openfreemap). The map data comes from OpenStreetMap. 6 | 7 | Using our **public instance** is completely free: there are no limits on the number of map views or requests. There’s no registration, no user database, no API keys, and no cookies. We aim to cover the running costs of our public instance through donations. 8 | 9 | We also provide **weekly** full planet downloads both in Btrfs and MBTiles formats. 
10 | -------------------------------------------------------------------------------- /website/src/env.d.ts: -------------------------------------------------------------------------------- 1 | /// 2 | /// 3 | -------------------------------------------------------------------------------- /website/src/examples/cluster.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Create and style clusters 5 | 6 | 7 | 8 | 9 | 10 | 14 | 15 | 16 |
17 | 18 | 148 | 149 | 150 | -------------------------------------------------------------------------------- /website/src/layouts/Layout.astro: -------------------------------------------------------------------------------- 1 | --- 2 | const { frontmatter } = Astro.props || {} 3 | const { title } = frontmatter || Astro.props 4 | const canonicalURL = new URL(Astro.url.pathname, Astro.site) 5 | 6 | import '../styles/_style.css' 7 | --- 8 | 9 | 10 | 11 | 12 | 13 | 14 | {title} 15 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | { 35 | frontmatter && ( 36 |
37 | 38 |
39 | ) 40 | } 41 | {!frontmatter && } 42 | 43 | 47 | 48 | 49 | -------------------------------------------------------------------------------- /website/src/pages/404.astro: -------------------------------------------------------------------------------- 1 | --- 2 | import Layout from '../layouts/Layout.astro' 3 | --- 4 | 5 | 6 |

404 Page Not Found

7 |
8 | -------------------------------------------------------------------------------- /website/src/pages/index.astro: -------------------------------------------------------------------------------- 1 | --- 2 | import Donate from '../components/Donate.astro' 3 | import Logo from '../components/Logo.astro' 4 | import Map_ from '../components/Map.astro' 5 | import Layout from '../layouts/Layout.astro' 6 | 7 | import { Content as AfterDonate } from '../content/index/after_donate.md' 8 | import { Content as BeforeDonate } from '../content/index/before_donate.md' 9 | import { Content as WhatisText } from '../content/index/whatis.md' 10 | --- 11 | 12 | 13 | 14 | 15 |
16 | 17 |

How can I use it?

18 |
19 | 20 | 21 | 22 |
23 |

24 | Have a look at the default styles and read more about how to integrate it to your website or 25 | app: 26 |

27 | 28 | Quick Start Guide 29 | 30 | 31 | 32 | 33 |
34 |
35 | -------------------------------------------------------------------------------- /website/src/pages/privacy.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: '../layouts/Layout.astro' 3 | title: 'Privacy Policy' 4 | --- 5 | 6 | # OpenFreeMap Privacy Policy 7 | 8 | **Last Updated:** February 26, 2025 9 | 10 | ## Summary 11 | 12 | OpenFreeMap is completely free and collects minimal data. We believe your privacy is essential. 13 | 14 | **What We Do NOT Collect:** 15 | 16 | - We do **not** have user accounts 17 | - We do **not** use cookies or tracking technologies 18 | - We do **not** collect email addresses or send newsletters 19 | - We do **not** process any payments 20 | - We do **not** store IP addresses in our regular server logs 21 | 22 | **What We DO Collect:** 23 | 24 | - **Basic Server Logs:** We collect anonymized logs that include browser type, referring pages, date/time stamps, and operating system—but no IP addresses by default 25 | - **Temporary IP Logs (Only During Security Incidents):** If we detect abuse or a security threat, we may temporarily enable IP logging for up to 30 days to investigate. These logs are deleted after the investigation or 30 days, whichever comes first 26 | 27 | **Updates & Communication:** 28 | All announcements are shared exclusively through our X and Bluesky accounts. 29 | 30 | --- 31 | 32 | ## Full Privacy Policy 33 | 34 | Your privacy matters to us. Hyperknot Software Kft., based in Hungary, has created this Privacy Policy to explain what information we collect when you visit our Site https://openfreemap.org, why we collect it, and how we use it. 35 | 36 | The terms "You" and "User" refer to anyone using our Site. "We," "Us," and "Our" refer to Hyperknot Software Kft. 37 | 38 | This Privacy Policy is governed by our [Terms of Service](/tos). 39 | 40 | For questions about this policy or data processing, contact us at privacy@openfreemap.org. 41 | 42 | ### 1. 
INFORMATION WE COLLECT 43 | 44 | We collect minimal data to maintain functionality and security: 45 | 46 | - **Server Logs:** We collect anonymized logs that include browser type, referring/exit pages, date/time stamps, and operating system. These logs **do not** contain IP addresses. 47 | 48 | - **IP Addresses (Temporary & Limited):** By default, we do not log IP addresses. However, if we detect a security incident (such as an attack on our service) or misuse, we may temporarily enable IP logging for a maximum of 30 days to investigate and resolve the issue. After this period or once the issue is resolved (whichever comes first), any logs containing IP addresses are permanently deleted. 49 | 50 | - **Cloudflare:** We may use Cloudflare as a CDN to improve service delivery. Cloudflare may collect certain information as described in their [privacy policy](https://www.cloudflare.com/privacypolicy/). 51 | 52 | ### 2. HOW WE USE YOUR INFORMATION 53 | 54 | We use the collected information to: 55 | 56 | - Maintain and improve our services 57 | - Protect against security threats and misuse 58 | - Comply with legal obligations 59 | 60 | We process your information based on our legitimate interest in providing a secure, functional service in accordance with GDPR principles. 61 | 62 | ### 3. INFORMATION WE DO NOT COLLECT 63 | 64 | We do **not** collect or process: 65 | 66 | - User accounts or authentication data 67 | - Cookies or other tracking technologies 68 | - Payment information 69 | - Email addresses (except when you contact us directly) 70 | - Sensitive personal information (political opinions, religious beliefs, health data, etc.) 71 | 72 | ### 4. INFORMATION FROM MINORS 73 | 74 | Our Site is not intended for persons under 18. We do not knowingly collect information from minors. 75 | 76 | ### 5. RETENTION 77 | 78 | We retain anonymized server logs (without IP addresses) indefinitely to help improve our service. 
Any logs containing IP addresses collected during security incidents are deleted after a maximum of 30 days. 79 | 80 | ### 6. SECURITY 81 | 82 | We implement reasonable security measures to protect your information. For security-related questions, contact security@openfreemap.org. 83 | 84 | ### 7. YOUR RIGHTS UNDER GDPR 85 | 86 | You have the right to: 87 | 88 | - Access any personal data we hold about you 89 | - Request correction of inaccurate data 90 | - Request deletion of your data 91 | - Object to processing 92 | - File a complaint with your local Data Protection Authority 93 | 94 | To exercise these rights, contact us at privacy@openfreemap.org. 95 | 96 | ### 8. CHANGES TO THIS POLICY 97 | 98 | We may update this Policy periodically. Changes will be posted here, and we encourage users to follow our X and Bluesky accounts for updates. 99 | 100 | ### 9. ACCEPTANCE 101 | 102 | By using our Site, you agree to this Policy. If you disagree, please refrain from using the Site. 103 | -------------------------------------------------------------------------------- /website/src/pages/quick_start.astro: -------------------------------------------------------------------------------- 1 | --- 2 | import Map_ from '../components/Map.astro' 3 | import Layout from '../layouts/Layout.astro' 4 | 5 | import Donate from '../components/Donate.astro' 6 | import Logo from '../components/Logo.astro' 7 | import { Content as CustomStylesText } from '../content/how_to_use/custom_styles.md' 8 | import { Content as LeafletText } from '../content/how_to_use/leaflet.md' 9 | import { Content as MapboxText } from '../content/how_to_use/mapbox.md' 10 | import { Content as MaplibreText } from '../content/how_to_use/maplibre.md' 11 | import { Content as MobileText } from '../content/how_to_use/mobile.md' 12 | import { Content as OpenLayersText } from '../content/how_to_use/openlayers.md' 13 | import { Content as SelfHostingText } from '../content/how_to_use/self_hosting.md' 14 | --- 15 | 16 | 
17 | 18 | 19 |
20 |

21 | This guide provides step-by-step instructions for integrating OpenFreeMap into your website or 22 | mobile application. 23 |

24 | To get started, choose one of the default styles provided below. Later, there will also be 25 | options to use custom styles. 26 |

27 |

(You can navigate the map — it's interactive!)

28 |
29 | 30 | 31 | 32 |
33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 |
43 |
44 | -------------------------------------------------------------------------------- /website/src/pages/tos.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: '../layouts/Layout.astro' 3 | title: 'Terms of Service' 4 | --- 5 | 6 | # OpenFreeMap Terms of Service 7 | 8 | **Last Updated:** February 26, 2025 9 | 10 | ## Overview 11 | 12 | This Terms of Service agreement ("Agreement") is between Hyperknot Software Kft., located at Petofi Sandor utca 48., Ujlengyel, 2724, Hungary ("we," "us," "our"), and you ("you," "user"). It becomes effective when you use https://openfreemap.org ("Site"). 13 | 14 | OpenFreeMap is a completely free service provided "as-is," with no warranties. By using this Site, you accept these terms. If you disagree with them, please do not use the Site. 15 | 16 | ## Eligibility 17 | 18 | You must be at least 18 years old and legally able to enter into this Agreement. If representing an organization, you must have authority to bind it. 19 | 20 | ## User Conduct 21 | 22 | You agree to use the Site legally and responsibly. You will not: 23 | 24 | - Use it for illegal activities 25 | - Disrupt the service or introduce harmful code 26 | - Violate intellectual property rights 27 | - Attempt to collect data from the service in automated ways without permission 28 | 29 | ## Intellectual Property 30 | 31 | Our content is owned or licensed by us and protected by law. 32 | 33 | ## Third-Party Links 34 | 35 | We are not responsible for any third-party websites linked from our Site. 36 | 37 | ## Disclaimer of Warranties 38 | 39 | THE SITE IS PROVIDED "AS-IS," "AS AVAILABLE," AND "WITH ALL FAULTS." WE MAKE NO WARRANTIES REGARDING ACCURACY, AVAILABILITY, OR FITNESS FOR ANY PURPOSE. YOUR USE IS AT YOUR OWN RISK. 
40 | 41 | ## Limitation of Liability 42 | 43 | TO THE FULLEST EXTENT PERMITTED BY LAW, WE ARE NOT LIABLE FOR ANY DAMAGES ARISING FROM YOUR USE OF THE SITE, INCLUDING DIRECT, INDIRECT, INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES. 44 | 45 | ## Site Availability and Changes 46 | 47 | We aim to maintain the Site's availability but may discontinue it at any time without notice. When possible, we will announce changes through our X and Bluesky accounts. 48 | 49 | ## Cloudflare CDN 50 | 51 | We may use Cloudflare as a CDN to deliver content more efficiently. By using the Site, you consent to Cloudflare's processing of your requests. 52 | 53 | ## Governing Law 54 | 55 | This Agreement is governed by Hungarian law. 56 | 57 | ## Dispute Resolution 58 | 59 | Any dispute will be resolved by binding arbitration in Budapest, Hungary, on an individual basis. 60 | 61 | ## Changes to Terms 62 | 63 | We may update these Terms at any time. Updates will be announced on X and Bluesky. Your continued use after changes constitutes acceptance. 64 | 65 | ## Contact Information 66 | 67 | For questions about these Terms, email info@openfreemap.org. 68 | 69 | By using this Site, you also accept our [Privacy Policy](/privacy). 
70 | -------------------------------------------------------------------------------- /website/src/styles/_style.css: -------------------------------------------------------------------------------- 1 | @import "reset.css"; 2 | @import "global.css"; 3 | /* */ 4 | @import "map.css"; 5 | -------------------------------------------------------------------------------- /website/src/styles/global.css: -------------------------------------------------------------------------------- 1 | body { 2 | line-height: 1.5; 3 | font-size: 17px; 4 | -webkit-font-smoothing: antialiased; 5 | font-family: Avenir, Montserrat, Corbel, "URW Gothic", source-sans-pro, sans-serif; 6 | font-weight: normal; 7 | color: #000; 8 | } 9 | 10 | .container { 11 | max-width: 700px; 12 | margin-left: auto; 13 | margin-right: auto; 14 | padding: 0 20px; 15 | } 16 | 17 | h1, 18 | h2, 19 | h3, 20 | h4, 21 | h5, 22 | h6 { 23 | color: #333; 24 | margin-top: 3em; 25 | margin-bottom: 0.5em; 26 | line-height: 1.2; 27 | font-family: Seravek, "Gill Sans Nova", Ubuntu, Calibri, "DejaVu Sans", source-sans-pro, 28 | sans-serif; 29 | font-weight: bold; 30 | } 31 | 32 | p, 33 | ul, 34 | pre { 35 | margin-bottom: 0.6em; 36 | } 37 | 38 | h1 { 39 | text-align: center; 40 | margin-top: 1em; 41 | } 42 | 43 | p { 44 | a { 45 | color: #333; 46 | font-weight: bold; 47 | text-decoration: none; 48 | box-shadow: 0 1px 0 #adc2ee; 49 | transition: box-shadow 200ms ease 0s; 50 | 51 | &:hover { 52 | box-shadow: 0 2px 0 #adc2ee; 53 | } 54 | } 55 | } 56 | 57 | strong { 58 | color: #333; 59 | } 60 | 61 | pre { 62 | font-size: 14px; 63 | padding: 7px 14px; 64 | } 65 | 66 | code { 67 | font-size: 14px; 68 | } 69 | 70 | hr { 71 | border: 0; 72 | height: 1px; 73 | background-color: #555; 74 | margin: 2em 0; 75 | } 76 | 77 | .footer { 78 | margin-top: 6em; 79 | margin-bottom: 4em; 80 | display: flex; 81 | justify-content: space-evenly; 82 | 83 | a { 84 | color: #777; 85 | text-decoration: none; 86 | } 87 | } 88 | 89 | .logo { 90 | margin: 
0 auto; 91 | } 92 | 93 | .icons { 94 | margin: 0 auto; 95 | width: 155px; 96 | display: flex; 97 | justify-content: space-between; 98 | } 99 | 100 | #style-url-pre { 101 | background: #efefef; 102 | padding: 14px; 103 | font-weight: bold; 104 | overflow-x: auto; 105 | } 106 | 107 | .quick-start-button { 108 | display: block; 109 | text-decoration: none; 110 | color: white; 111 | font-weight: bold; 112 | letter-spacing: 0.05rem; 113 | font-size: 18px; 114 | border-radius: 20px; 115 | padding: 15px 0; 116 | margin: 2em auto 5em; 117 | width: 230px; 118 | text-align: center; 119 | background: linear-gradient(32deg, #0070a2, transparent) #59c15a; 120 | transition: background-color 1s; 121 | 122 | &:hover, 123 | &:focus { 124 | background-color: #dea31d; 125 | } 126 | 127 | &.pink { 128 | background: linear-gradient(32deg, #0070a2, transparent) #ed36a7; 129 | 130 | &:hover, 131 | &:focus { 132 | background-color: #dea31d; 133 | } 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /website/src/styles/map.css: -------------------------------------------------------------------------------- 1 | #map-container { 2 | width: 100%; 3 | height: 500px; 4 | margin-bottom: 24px; 5 | position: relative; 6 | } 7 | 8 | @media (max-width: 550px) { 9 | #map-container { 10 | height: 300px; 11 | } 12 | } 13 | 14 | #mapbg-image { 15 | width: 100%; 16 | height: 100%; 17 | position: absolute; 18 | z-index: 1; 19 | background-image: url("/berlin.webp"); 20 | background-size: cover; 21 | background-position: center; 22 | background-repeat: no-repeat; 23 | cursor: pointer; 24 | transition: opacity 3s; 25 | } 26 | 27 | .mapbg-attrib { 28 | font: 12px / 20px "Helvetica Neue", Arial, Helvetica, sans-serif; 29 | background-color: hsla(0, 0%, 100%, 0.5); 30 | padding: 0 5px; 31 | bottom: 0; 32 | right: 0; 33 | position: absolute; 34 | 35 | a { 36 | color: rgba(0, 0, 0, 0.75); 37 | text-decoration: none; 38 | 39 | &:hover { 40 | text-decoration: 
underline; 41 | } 42 | } 43 | } 44 | 45 | .button-container { 46 | margin-bottom: 20px; 47 | display: flex; 48 | flex-wrap: nowrap; 49 | } 50 | 51 | .btn { 52 | border: 0; 53 | border-radius: 0.25rem; 54 | background: #2f5f8b; 55 | color: white; 56 | font-size: 1rem; 57 | white-space: nowrap; 58 | text-decoration: none; 59 | padding: 0.25rem 0.5rem; 60 | margin-right: 0.5rem; 61 | cursor: pointer; 62 | display: inline-flex; 63 | align-items: center; 64 | 65 | &.selected { 66 | background: #4892d9; 67 | } 68 | 69 | &:hover { 70 | background: #4892d9; 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /website/src/styles/reset.css: -------------------------------------------------------------------------------- 1 | /* 2 | Josh's Custom CSS Reset 3 | https://www.joshwcomeau.com/css/custom-css-reset/ 4 | */ 5 | 6 | *, 7 | *::before, 8 | *::after { 9 | box-sizing: border-box; 10 | } 11 | 12 | * { 13 | margin: 0; 14 | } 15 | 16 | img, 17 | picture, 18 | video, 19 | canvas, 20 | svg { 21 | display: block; 22 | max-width: 100%; 23 | } 24 | 25 | input, 26 | button, 27 | textarea, 28 | select { 29 | font: inherit; 30 | } 31 | 32 | /* normalize.css */ 33 | 34 | button { 35 | font-family: inherit; 36 | font-size: 100%; 37 | line-height: 1.15; 38 | overflow: visible; 39 | text-transform: none; 40 | -webkit-appearance: button; 41 | } 42 | 43 | button::-moz-focus-inner { 44 | border-style: none; 45 | padding: 0; 46 | } 47 | 48 | button:-moz-focusring { 49 | outline: 1px dotted ButtonText; 50 | } 51 | -------------------------------------------------------------------------------- /website/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "astro/tsconfigs/base" 3 | } 4 | --------------------------------------------------------------------------------