├── .gitignore ├── .vscode └── settings.json ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── sample_request.json ├── shell.nix ├── solari-export-graph ├── Cargo.toml └── src │ └── main.rs ├── solari-geomath ├── Cargo.toml └── src │ └── lib.rs ├── solari-spatial ├── Cargo.toml └── src │ └── lib.rs ├── solari-transfers ├── Cargo.toml └── src │ ├── lib.rs │ └── valinor │ ├── NOTICE │ ├── VALINOR_LICENSE │ ├── edge_export.rs │ ├── edge_models.rs │ └── mod.rs └── solari ├── Cargo.toml └── src ├── api ├── mod.rs ├── request.rs └── response.rs ├── bin ├── build_timetable.rs ├── download_feeds.rs └── serve.rs ├── dmfr.rs ├── lib.rs ├── raptor ├── mod.rs └── timetable │ ├── in_memory.rs │ ├── mmap.rs │ └── mod.rs ├── route.rs └── spatial.rs /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | *.fdata 3 | *.zip 4 | log 5 | profile.json 6 | *.tt 7 | /timetable 8 | /feeds -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "editor.defaultFormatter": "rust-lang.rust-analyzer", 3 | "editor.formatOnSave": true 4 | } -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | resolver = "2" 3 | members = ["solari", "solari-export-graph", "solari-geomath", "solari-spatial", "solari-transfers"] 4 | 5 | [profile.release-debug] 6 | inherits = "release" 7 | debug = true 8 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 
9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. 
For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Solari 2 | 3 | Solari is a high-performance transit routing engine built using the [RAPTOR algorithm](https://www.microsoft.com/en-us/research/wp-content/uploads/2012/01/raptor_alenex.pdf), optimized for lightweight, global-scale public transit routing. Designed to serve developers building applications requiring fast and resource-efficient transit planning (e.g., maps apps, trip-planning APIs), it avoids heavy preprocessing steps while supporting planet-scale coverage through memory-mapped timetables. 4 | 5 | ## Key Features 6 | - **Lightweight & Fast**: 7 | - Outperforms MOTIS/Transitous by anecdotally observed 5-10x in query speed. 8 | - Single-core routing across the continental U.S. completes in seconds; local routes usually resolve in <250ms. 9 | 10 | - **Planet-Scale Coverage**: 11 | - Memory-mapped timetable data allows a single instance to handle global networks with minimal RAM usage (via `memmap2`). 
12 | 13 | - **Multi-Agency Support**: 14 | - Load multiple GTFS feeds from a directory for seamless cross-agency routing. 15 | 16 | - **Timezone Awareness**: 17 | - Automatically handles timezone conversions based on GTFS feed data. Developers are responsible for converting epoch timestamps to local time in their app layer. 18 | 19 | - **HTTP API Endpoint**: 20 | ```http 21 | POST /v1/plan 22 | ``` 23 | Example request: 24 | ```bash 25 | curl -d '{"from":{"lat":47.679591,"lon":-122.356388},"to":{"lat":47.616440,"lon":-122.320440},"start_at":1742845000000}' \ 26 | https://transit.maps.earth/v1/plan 27 | ``` 28 | 29 | - **GTFS Compatibility**: 30 | - Supports modern GTFS feeds via the `gtfs-structures` crate. 31 | - No real-time (GTFS-RT) support yet; prioritized roadmap features include alerts, delays and vehicle position updates. 32 | 33 | ## Getting Started 34 | 35 | ### Prerequisites 36 | 1. **Rust** (`rustc >= 1.85` tested). 37 | 2. **OpenSSL development package**: Install via your OS's package manager (e.g., `libssl-dev` on Ubuntu). 38 | 39 | ### Quickstart Commands 40 | #### Step 1: Build the Timetable Database 41 | ```bash 42 | # For a single GTFS feed: 43 | cargo run --release --bin build_timetable -- \ 44 | --base-path /path/to/timetable \ 45 | --gtfs-path gtfs.zip 46 | 47 | # For multi-agency feeds (directory of .zip files): 48 | cargo run --release --bin build_timetable -- \ 49 | --base-path /path/to/timetable \ 50 | --gtfs-path ./gtfs_feeds/ 51 | ``` 52 | 53 | #### Step 2: Run the API Server 54 | ```bash 55 | cargo run --release --bin serve -- --base-path /path/to/timetable 56 | ``` 57 | 58 | ## Architecture 59 | - **RAPTOR Algorithm**: Implements all pruning rules from the original paper for optimal performance. 60 | - **Memory Mapping**: Uses `memmap2` to load timetable data directly from disk, enabling fast access without RAM overhead. 
61 | - **Designed for Modularity**: Decouples routing logic from geocoding/external services (e.g., uses Valhalla for transfer routes, but otherwise allows easy integration into an existing stack). 62 | 63 | ## Performance Benchmarks 64 | | Metric | Solari | OpenTripPlanner/MOTIS | 65 | |-----------------------|------------------|-----------------| 66 | | Query Latency | 150ms - 2.5s | ~150ms-1 minute | 67 | | Memory Usage | ~1.5GB (global) | Higher? (OTP: ~4GB+, MOTIS=?) | 68 | 69 | *Note: Benchmarks are informal and based on limited testing. Systematic comparisons pending.* 70 | 71 | --- 72 | 73 | ## Roadmap 74 | - **GTFS-RT Support** (priority order): 75 | 1. Service alerts and closures 76 | 2. Real-time delays 77 | 3. Vehicle positions 78 | - **Performance Quantification**: Come up with better benchmarks against MOTIS and OpenTripPlanner. 79 | - **rRAPTOR Implementation**: Long-term goal for multi-departure-time routing. 80 | - **Documentation**: Ongoing work to finalize API response formats and provide detailed guides. 81 | 82 | ## Contributing 83 | - Solari is in active development; contributions (documentation, testing, or features) are welcome. 84 | - Check the repository's issue tracker for tasks, but note there are no formal contribution guidelines yet. 85 | 86 | ## Known Limitations 87 | - **No Real-Time Updates**: Only static GTFS feeds supported currently. 88 | - **API Stability**: The `/v1/plan` response format may evolve as documentation finalizes, but no compatibility breaking changes to the v1 endpoint after the initial release. 89 | 90 | ## When to Use Solari? 91 | You may want to use this project if you need: 92 | - Fast, lightweight routing for global-scale transit networks on modest hardware. 93 | - A minimal API layer that integrates easily with modern web stacks (geocoding, map rendering handled externally). 
94 | 95 | Avoid if you require: 96 | - Full-featured trip-planning like OpenTripPlanner's extensive customization or real-time capabilities. 97 | 98 | ## License 99 | [Apache-2.0](LICENSE) 100 | -------------------------------------------------------------------------------- /sample_request.json: -------------------------------------------------------------------------------- 1 | { 2 | "from": { 3 | "lat": 47.67959109544867, 4 | "lon": -122.35638830541808 5 | }, 6 | "to": { 7 | "lat": 47.61644052092532, 8 | "lon": -122.32044047417187 9 | }, 10 | "start_at": 1743635155000 11 | } 12 | -------------------------------------------------------------------------------- /shell.nix: -------------------------------------------------------------------------------- 1 | { pkgs ? import <nixpkgs> {} }: 2 | pkgs.mkShell { 3 | nativeBuildInputs = with pkgs; [ rustc cargo cargo-typify gcc rustfmt clippy openssl pkg-config ]; 4 | 5 | # Certain Rust tools won't work without this 6 | # This can also be fixed by using oxalica/rust-overlay and specifying the rust-src extension 7 | # See https://discourse.nixos.org/t/rust-src-not-found-and-other-misadventures-of-developing-rust-on-nixos/11570/3?u=samuela. for more details. 
8 | RUST_SRC_PATH = "${pkgs.rust.packages.stable.rustPlatform.rustLibSrc}"; 9 | } 10 | -------------------------------------------------------------------------------- /solari-export-graph/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "solari-export-graph" 3 | version = "0.1.0" 4 | edition = "2024" 5 | 6 | [dependencies] 7 | solari-transfers = { path = "../solari-transfers" } 8 | solari-spatial = { path = "../solari-spatial" } 9 | clap = { version = "4.3.0", features = ["derive"] } 10 | env_logger = "0.10.0" 11 | log = "0.4" 12 | anyhow = "1.0" 13 | rkyv = "0.8.10" 14 | redb = "2.3.0" 15 | -------------------------------------------------------------------------------- /solari-export-graph/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::{path::PathBuf, sync::Arc}; 2 | 3 | use clap::Parser; 4 | use solari_spatial::SphereIndexVec; 5 | use solari_transfers::{TransferGraph, fast_paths::FastGraphVec}; 6 | 7 | #[derive(Parser)] 8 | struct Args { 9 | #[arg(long)] 10 | valhalla_tiles: PathBuf, 11 | #[arg(long)] 12 | output: PathBuf, 13 | } 14 | 15 | fn main() -> Result<(), anyhow::Error> { 16 | env_logger::init(); 17 | let args = Args::parse(); 18 | let database = Arc::new(redb::Database::create( 19 | args.output.join("graph_metadata.db"), 20 | )?); 21 | let transfer_graph = 22 | TransferGraph::>::new(&args.valhalla_tiles, database)?; 23 | transfer_graph.save_to_dir(args.output)?; 24 | Ok(()) 25 | } 26 | -------------------------------------------------------------------------------- /solari-geomath/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "solari-geomath" 3 | version = "0.1.0" 4 | edition = "2024" 5 | 6 | [dependencies] 7 | 8 | [dev-dependencies] 9 | approx = "0.5.1" 10 | -------------------------------------------------------------------------------- 
/solari-geomath/src/lib.rs: -------------------------------------------------------------------------------- 1 | /* Approximate mean Earth radius, in meters, for the spherical Earth model used throughout this crate. */ pub static EARTH_RADIUS_APPROX: f64 = 6_371_000f64; 2 | 3 | /* Converts a geodetic position (degrees) to Cartesian coordinates (meters) on a sphere of radius EARTH_RADIUS_APPROX. Axis convention used here: x = R*cos(lat)*sin(lng), y = R*cos(lat)*cos(lng), z = R*sin(lat), so (lat=0, lng=0) maps to (0, R, 0). Non-finite inputs collapse to the origin rather than propagating NaN/inf. */ pub fn lat_lng_to_cartesian(lat: f64, lng: f64) -> [f64; 3] { 4 | if !lat.is_finite() || !lng.is_finite() { 5 | return [0.0; 3]; 6 | } 7 | /* Shadowing for the unit change (degrees -> radians) rather than new names. */ let lat = lat.to_radians(); 8 | let lng = lng.to_radians(); 9 | [ 10 | EARTH_RADIUS_APPROX * lat.cos() * lng.sin(), 11 | EARTH_RADIUS_APPROX * lat.cos() * lng.cos(), 12 | EARTH_RADIUS_APPROX * lat.sin(), 13 | ] 14 | } 15 | 16 | /* Inverse of lat_lng_to_cartesian: recovers (lat, lng) in degrees. atan2(x, y) — note the argument order — matches the sin/cos axis convention above. Points with |z| > EARTH_RADIUS_APPROX (off the sphere) yield NaN latitude via asin's domain. */ pub fn cartesian_to_lat_lng(coords: [f64; 3]) -> (f64, f64) { 17 | let lng = f64::atan2(coords[0], coords[1]); 18 | let lat = (coords[2] / EARTH_RADIUS_APPROX).asin(); 19 | let lat = lat.to_degrees(); 20 | let lng = lng.to_degrees(); 21 | (lat, lng) 22 | } 23 | 24 | /* Axis-convention and round-trip sanity checks for the two conversions above. */ #[cfg(test)] 25 | mod test { 26 | use approx::assert_abs_diff_eq; 27 | 28 | use crate::{cartesian_to_lat_lng, lat_lng_to_cartesian}; 29 | 30 | #[test] 31 | fn test_zeros() { 32 | /* (0, 0) must land on the +y axis per the convention documented on lat_lng_to_cartesian. */ let coords = lat_lng_to_cartesian(0f64, 0f64); 33 | assert_abs_diff_eq!(coords[0], 0f64, epsilon = 0.001); 34 | assert_abs_diff_eq!(coords[1], super::EARTH_RADIUS_APPROX, epsilon = 0.001); 35 | assert_abs_diff_eq!(coords[2], 0f64, epsilon = 0.001); 36 | } 37 | 38 | #[test] 39 | fn test_poles() { 40 | /* Poles map to +/-R on the z axis regardless of longitude. */ let coords = lat_lng_to_cartesian(90f64, 0f64); 41 | assert_abs_diff_eq!(coords[0], 0f64, epsilon = 0.001); 42 | assert_abs_diff_eq!(coords[1], 0f64, epsilon = 0.001); 43 | assert_abs_diff_eq!(coords[2], super::EARTH_RADIUS_APPROX, epsilon = 0.001); 44 | 45 | let coords = lat_lng_to_cartesian(-90f64, 0f64); 46 | assert_abs_diff_eq!(coords[0], 0f64, epsilon = 0.001); 47 | assert_abs_diff_eq!(coords[1], 0f64, epsilon = 0.001); 48 | assert_abs_diff_eq!(coords[2], -super::EARTH_RADIUS_APPROX, epsilon = 0.001); 49 | } 50 | 51 | #[test] 52 | fn test_inverse() { 53 | /* Round trip: to_cartesian then back should reproduce the inputs. */ let coords = lat_lng_to_cartesian(45f64, 60f64); 54 | let (lat, lng) = cartesian_to_lat_lng(coords); 55 | assert_abs_diff_eq!(lat, 45f64, 
epsilon = 0.001); 56 | assert_abs_diff_eq!(lng, 60f64, epsilon = 0.001); 57 | } 58 | 59 | #[test] 60 | fn test_far_west() { 61 | let coords = lat_lng_to_cartesian(45f64, -150f64); 62 | let (lat, lng) = cartesian_to_lat_lng(coords); 63 | assert_abs_diff_eq!(lat, 45f64, epsilon = 0.001); 64 | assert_abs_diff_eq!(lng, -150f64, epsilon = 0.001); 65 | } 66 | 67 | #[test] 68 | fn test_far_east() { 69 | let coords = lat_lng_to_cartesian(45f64, 150f64); 70 | let (lat, lng) = cartesian_to_lat_lng(coords); 71 | assert_abs_diff_eq!(lat, 45f64, epsilon = 0.001); 72 | assert_abs_diff_eq!(lng, 150f64, epsilon = 0.001); 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /solari-spatial/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "solari-spatial" 3 | version = "0.1.0" 4 | edition = "2024" 5 | 6 | [dependencies] 7 | solari-geomath = { path = "../solari-geomath" } 8 | 9 | geo-types = "0.7" 10 | geo = "0.29" 11 | rkyv = "0.8.10" 12 | s2 = "0.0.12" 13 | anyhow = "1.0" 14 | bytemuck = { version = "1.13.1", features = ["derive"] } 15 | memmap2 = "0.9.5" 16 | log = "0.4" 17 | -------------------------------------------------------------------------------- /solari-spatial/src/lib.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fs::File, 3 | io::{BufWriter, Write}, 4 | path::PathBuf, 5 | pin::Pin, 6 | slice, 7 | }; 8 | 9 | use bytemuck::{Pod, Zeroable, cast_slice}; 10 | use geo::Coord; 11 | use log::debug; 12 | use memmap2::Mmap; 13 | use s2::{cell::Cell, cellid::CellID, latlng::LatLng, rect::Rect, region::RegionCoverer, s1::Deg}; 14 | use solari_geomath::EARTH_RADIUS_APPROX; 15 | 16 | pub struct NearestNeighborResult<'a, D: Sized + Pod + Zeroable> { 17 | pub approx_distance_meters: f64, 18 | pub data: &'a D, 19 | } 20 | 21 | pub trait SphereIndex { 22 | fn cells(&self) -> &[u64]; 23 | fn data(&self) -> &[D]; 24 | 
25 | /// Query the tree for the nearest neighbors to a given point. /// Distances in the results are approximate: they are measured to the center of each stored S2 cell (IndexedPoint::new stores leaf-level cell ids, so the error is tiny). /// NOTE(review): results are NOT filtered against `max_radius_meters` here, and overlapping covering-cell index ranges may produce duplicate entries — callers appear responsible for filtering; confirm. /* NOTE(review): angle-bracketed generics in this file (e.g. the return type's parameters) and "®ion" below look like text-extraction artifacts of the original source ("&region"), not author intent. */ 26 | fn nearest_neighbors<'a>( 27 | &'a self, 28 | coord: &Coord, 29 | max_radius_meters: f64, 30 | ) -> Vec> { 31 | /* Approximate the circular search area with at most 10 S2 cells between levels 18 and 30. */ let region_coverer = RegionCoverer { 32 | min_level: 18, 33 | max_level: 30, 34 | level_mod: 1, 35 | max_cells: 10, 36 | }; 37 | // Prevent division by zero by clamping the cosine calculated later to this minimum value. 38 | let cos_epsilon = 0.0000001; 39 | /* Half-extent of the search rectangle: 111000 ~= meters per degree of latitude; the longitude span is widened by 1/cos(lat) so the rect stays ~max_radius_meters wide away from the equator (clamped near the poles by cos_epsilon). */ let size = LatLng { 40 | lat: Deg(max_radius_meters / 111000.0).into(), 41 | lng: Deg(max_radius_meters 42 | / 111000.0 43 | / f64::cos(coord.y.to_radians()).max(cos_epsilon)) 44 | .into(), 45 | }; 46 | 47 | /* geo Coord convention: x = longitude, y = latitude (degrees). */ let target_lat_lng = LatLng::from_degrees(coord.y, coord.x); 48 | let region = Rect::from_center_size(target_lat_lng, size); 49 | let covering = region_coverer.fast_covering(&region); 50 | let mut covering = covering.0; 51 | /* Visit covering cells roughly nearest-first, keyed by whole-meter distance to each cell's center. */ covering.sort_unstable_by_key(|cell_id| { 52 | let cell: Cell = cell_id.into(); 53 | let angle = target_lat_lng.distance(&cell.center().into()).deg(); 54 | let meters = 111000.0 * angle; 55 | meters as u32 56 | }); 57 | let mut neighbors = Vec::new(); 58 | for cell_id in &covering { 59 | /* cells() is sorted by cell id (SphereIndexVec::build sorts), so [child_begin, child_end) of the covering cell brackets every stored leaf cell it contains. NOTE(review): mapping Err -> insertion_point - 1 on BOTH bounds looks like it can pull in one entry just before/at the edge of the cell's id range — confirm the boundary handling. */ let child_begin_index = match self.cells().binary_search(&cell_id.child_begin().0) { 60 | Ok(found) => found, 61 | Err(not_found) => not_found.saturating_sub(1), 62 | }; 63 | let child_end_index = match self.cells().binary_search(&cell_id.child_end().0) { 64 | Ok(found) => found, 65 | Err(not_found) => not_found.saturating_sub(1), 66 | }; 67 | for neighbor_index in child_begin_index..=child_end_index { 68 | /* Distance is great-circle angle to the stored cell's center, scaled by the spherical Earth radius. */ let neighbor_cell: Cell = CellID(self.cells()[neighbor_index]).into(); 69 | let neighbor_lat_lng: LatLng = neighbor_cell.center().into(); 70 | let approx_distance_meters = 71 | neighbor_lat_lng.distance(&target_lat_lng).rad() * EARTH_RADIUS_APPROX; 72 | neighbors.push(NearestNeighborResult { 73 | approx_distance_meters, 74 | data: &self.data()[neighbor_index], 75 | }); 76 | } 77 | } 78 | neighbors 79 | } 80 | 81 | fn 
write_to_file(&self, path: PathBuf) -> Result<(), anyhow::Error> { /* Serializes the index in the on-disk layout consumed by the mmap reader: [len: u64, little-endian][cells: len x u64][data: len x D], arrays in cell-sorted order. */ 82 | assert_eq!(self.cells().len(), self.data().len()); 83 | /* NOTE(review): .unwrap() panics on I/O failure even though this function returns Result — could be `File::create(path)?`. */ let mut file = File::create(path).unwrap(); 84 | let mut writer = BufWriter::new(&mut file); 85 | writer.write_all(&(self.cells().len() as u64).to_le_bytes())?; 86 | writer.write_all(cast_slice(self.cells()))?; 87 | /* NOTE(review): BufWriter is flushed implicitly on drop, which swallows flush errors — an explicit writer.flush()? would surface them. */ writer.write_all(cast_slice(self.data()))?; 88 | Ok(()) 89 | } 90 | } 91 | 92 | /* In-memory index: parallel arrays where data[i] belongs to cells[i], kept sorted by S2 cell id (see build) so the trait's binary searches work. (Generic parameters appear stripped by text extraction here.) */ pub struct SphereIndexVec { 93 | cells: Vec, 94 | data: Vec, 95 | } 96 | 97 | impl SphereIndexVec { 98 | /* Builds the index by sorting the points once by cell id, then splitting them into the two parallel arrays. */ pub fn build(mut points: Vec>) -> Self { 99 | points.sort_unstable_by_key(|point| point.cell); 100 | Self { 101 | cells: points.iter().map(|point| point.cell).collect(), 102 | data: points.into_iter().map(|point| point.data).collect(), 103 | } 104 | } 105 | } 106 | 107 | impl SphereIndex for SphereIndexVec { 108 | fn cells(&self) -> &[u64] { 109 | &self.cells 110 | } 111 | 112 | fn data(&self) -> &[D] { 113 | &self.data 114 | } 115 | } 116 | 117 | /* Zero-copy view of an index file produced by write_to_file: the slices borrow directly from the pinned memory mapping held in _mmap. */ pub struct SphereIndexMmap<'a, D: Sized + Pod + Zeroable> { 118 | _mmap: Pin, 119 | // Use associated arrays because bytemuck "Pod" trait doesn't play nice with generics. 
120 | /* Keeping the Mmap in the same struct as the slices is what keeps the backing bytes alive for 'a (see assemble's SAFETY notes). */ cells: &'a [u64], 121 | data: &'a [D], 122 | } 123 | 124 | impl<'a, D: Sized + Pod + Zeroable> SphereIndex for SphereIndexMmap<'a, D> { 125 | fn cells(&self) -> &[u64] { 126 | self.cells 127 | } 128 | 129 | fn data(&self) -> &[D] { 130 | self.data 131 | } 132 | } 133 | 134 | impl<'a, D: Sized + Pod + Zeroable> SphereIndexMmap<'a, D> { 135 | /* Reconstructs an index from a memory-mapped file with layout [len: u64 LE][cells: len x u64][data: ...]. Assumes the file was produced by write_to_file with a matching D layout — TODO confirm at call sites. */ pub fn assemble(mmap: Pin) -> Result { 136 | debug!("Opening sphere index from mmap of size {}", mmap.len()); 137 | let data = &mmap; 138 | /* NOTE(review): the slicing/try_into here panics (rather than erroring) if the file is shorter than the header claims. */ let size = u64::from_le_bytes(data[0..8].try_into().unwrap()) as usize; 139 | debug!("Sphere index has length {}", size); 140 | let data = &data[8..]; 141 | /* SAFETY: from_raw_parts launders the slice's lifetime from the local borrow of `mmap` to 'a. This is sound only because the pinned Mmap is moved into the returned struct (_mmap) below, so the mapping outlives the slices; bytemuck's cast_slice has already validated the size/alignment of the byte range at runtime. */ let cells = unsafe { 142 | let s = cast_slice::(&data[..(size * 8)]); 143 | slice::from_raw_parts(s.as_ptr(), s.len()) 144 | }; 145 | let data = &data[(size * 8)..]; 146 | /* SAFETY: same lifetime-laundering argument as above, applied to the remaining bytes reinterpreted as [D]; validity of the bit patterns is guaranteed by D: Pod + Zeroable. */ let data = unsafe { 147 | let s = cast_slice::(&data[..]); 148 | slice::from_raw_parts(s.as_ptr(), s.len()) 149 | }; 150 | debug!( 151 | "Built sphere index with {} cells and {} data elements", 152 | cells.len(), 153 | data.len() 154 | ); 155 | assert_eq!(cells.len(), data.len()); 156 | Ok(SphereIndexMmap { 157 | _mmap: mmap, 158 | cells, 159 | data, 160 | }) 161 | } 162 | } 163 | 164 | /* A payload D tagged with the S2 cell id of its location. repr(C) presumably pins the field layout for the serialized/mmap'd form — confirm it is actually persisted as a unit. */ #[repr(C)] 165 | pub struct IndexedPoint { 166 | cell: u64, 167 | data: D, 168 | } 169 | 170 | impl IndexedPoint { 171 | /* geo Coord convention: x = longitude, y = latitude (degrees). LatLng -> CellID yields a leaf-level S2 cell id. */ pub fn new(coord: &Coord, data: D) -> IndexedPoint { 172 | let lat_lng = LatLng::from_degrees(coord.y, coord.x); 173 | let cell_id: CellID = lat_lng.into(); 174 | IndexedPoint { 175 | cell: cell_id.0, 176 | data, 177 | } 178 | } 179 | } 180 | -------------------------------------------------------------------------------- /solari-transfers/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "solari-transfers" 3 | version = "0.1.0" 4 | edition = "2024" 5 | 6 | [dependencies] 7 | solari-geomath = { path = "../solari-geomath" } 8 | solari-spatial = { path = "../solari-spatial" } 9 | 10 | 
    /// Builds a fresh pedestrian transfer graph from a directory of Valhalla
    /// routing tiles.
    ///
    /// Walks every tile node/edge, assigns each Valhalla `GraphId` a dense
    /// node id, records edge shapes and lengths in `database`, and contracts
    /// the resulting input graph into a `FastGraph` for fast queries.
    ///
    /// NOTE(review): the `RwLock` wrappers below are only ever accessed via
    /// `get_mut()` (never `read()`/`write()`) inside a single-threaded `FnMut`
    /// closure, so they appear unnecessary — plain `mut` locals would do.
    pub fn new(
        valhalla_tile_dir: &PathBuf,
        database: Arc<Database>,
    ) -> Result<TransferGraph<FastGraphVec, SphereIndexVec<usize>>, anyhow::Error> {
        let mut geometry = RwLock::new(Vec::new());
        // Maps Valhalla GraphIds to dense node ids used by fast_paths.
        let mut node_map = RwLock::new(HashMap::<GraphId, usize>::new());
        let mut next_node = RwLock::new(0usize);
        let mut graph = RwLock::new(InputGraph::new());
        info!("Enumerating edges in valhalla tiles and constructing input graph.");
        // Single write transaction for all edge shapes/lengths; committed below.
        let txn = database.begin_write()?;
        enumerate_edges(valhalla_tile_dir, |node, edges| {
            // Only nodes reachable on foot participate in the transfer graph.
            if !node.node_info().access().contains(Access::Pedestrian) {
                return;
            }
            let start_node_id = Self::ensure_node(
                node.id(),
                node_map.get_mut().unwrap(),
                next_node.get_mut().unwrap(),
            );
            for edge in edges {
                // Index every shape point under the *start* node id so a
                // nearest-neighbor query can snap a coordinate to a graph node.
                for node in &edge.geometry().0 {
                    geometry
                        .get_mut()
                        .unwrap()
                        .push(IndexedPoint::new(node, start_node_id));
                }
                let end_node_id = Self::ensure_node(
                    &edge.directed_edge().end_node_id(),
                    node_map.get_mut().unwrap(),
                    next_node.get_mut().unwrap(),
                );
                // Skip self-loops.
                if end_node_id == start_node_id {
                    continue;
                }
                let length_meters = edge.geometry().length::<Geodesic>();
                // Persist the edge shape/length; a failure here is logged but
                // does not abort graph construction (best-effort).
                if let Err(err) = Self::push_edge(
                    &txn,
                    start_node_id as u64,
                    end_node_id as u64,
                    length_meters,
                    edge.geometry(),
                ) {
                    error!("Failed to insert edge into database: {:?}", err);
                };

                // Edge weights are integer millimeters.
                let weight_mm = length_meters * 1000.0;
                // Add directed edges only for the directions pedestrians may use.
                if edge
                    .directed_edge()
                    .forward_access()
                    .contains(Access::Pedestrian)
                {
                    graph.get_mut().unwrap().add_edge(
                        start_node_id,
                        end_node_id,
                        weight_mm as usize,
                    );
                }
                if edge
                    .directed_edge()
                    .reverse_access()
                    .contains(Access::Pedestrian)
                {
                    graph.get_mut().unwrap().add_edge(
                        end_node_id,
                        start_node_id,
                        weight_mm as usize,
                    );
                }
            }
        })?;
        txn.commit()?;
        let node_index = SphereIndexVec::build(geometry.into_inner().expect("Lock failed"));
        let mut graph = graph.into_inner().expect("Lock failed");
        info!("Freezing graph");
        graph.freeze();
        info!("Contracting");
        // Contraction-hierarchy preprocessing; the expensive step.
        let graph = FastGraphBuilder::build(&graph);

        Ok(TransferGraph {
            node_index,
            graph,
            database,
        })
    }
170 | .value() 171 | .to_vec(); 172 | let shape_string = String::from_utf8(shape_bytes)?; 173 | let shape_linestring = polyline::decode_polyline(&shape_string, 5)?; 174 | path_shape.extend(shape_linestring.0); 175 | } 176 | return Ok(TransferPath { 177 | length_mm: path.get_weight() as u64, 178 | shape: polyline::encode_coordinates(path_shape, 5)?, 179 | }); 180 | } else { 181 | bail!("No route") 182 | } 183 | } 184 | 185 | pub fn transfer_distance_mm( 186 | &self, 187 | search_context: &mut TransferGraphSearcher, 188 | from: &Coord, 189 | to: &Coord, 190 | ) -> Result { 191 | let from = self.get_nearest_nodes(from); 192 | let to = self.get_nearest_nodes(to); 193 | if let Some(path) = search_context 194 | .calculator 195 | .calc_path_multiple_sources_and_targets(&self.graph, from, to) 196 | { 197 | return Ok(path.get_weight() as u64); 198 | } else { 199 | bail!("No route") 200 | } 201 | } 202 | 203 | fn ensure_node( 204 | node: &GraphId, 205 | node_map: &mut HashMap, 206 | next_node_id: &mut usize, 207 | ) -> usize { 208 | if let Some(node) = node_map.get(&node) { 209 | *node 210 | } else { 211 | let this_node = *next_node_id; 212 | node_map.insert(*node, this_node); 213 | *next_node_id += 1; 214 | this_node 215 | } 216 | } 217 | 218 | fn push_edge( 219 | txn: &WriteTransaction, 220 | from: u64, 221 | to: u64, 222 | length: f64, 223 | shape: &LineString, 224 | ) -> Result { 225 | let key = (from, to); 226 | let lengths = txn.open_table(EDGE_LENGTH_TABLE)?; 227 | let should_insert_shape = if let Some(previous_len) = lengths.get(&key)? 
{ 228 | if length < previous_len.value() { 229 | true 230 | } else { 231 | false 232 | } 233 | } else { 234 | true 235 | }; 236 | if !should_insert_shape { 237 | return Ok(false); 238 | } 239 | let polyline = polyline::encode_coordinates(shape.0.clone(), 5)?; 240 | let mut shapes = txn.open_table(EDGE_SHAPE_TABLE)?; 241 | shapes.insert(&key, polyline.as_bytes())?; 242 | Ok(true) 243 | } 244 | 245 | fn get_nearest_nodes(&self, coord: &Coord) -> Vec<(usize, usize)> { 246 | let radius_meters = 50.0; 247 | let off_road_fudge_factor = 2.0; 248 | let neighbors = self.node_index.nearest_neighbors(coord, radius_meters); 249 | neighbors 250 | .iter() 251 | .map(|neighbor| { 252 | ( 253 | *neighbor.data, 254 | (neighbor.approx_distance_meters * 1000.0 * off_road_fudge_factor) as usize, 255 | ) 256 | }) 257 | .collect() 258 | } 259 | } 260 | 261 | pub struct TransferPath { 262 | pub length_mm: u64, 263 | pub shape: String, 264 | } 265 | 266 | pub struct TransferGraphSearcher> { 267 | calculator: PathCalculator, 268 | graph: Arc>, 269 | } 270 | 271 | impl> TransferGraphSearcher { 272 | pub fn new(graph: Arc>) -> TransferGraphSearcher { 273 | TransferGraphSearcher { 274 | calculator: create_calculator(&graph.graph), 275 | graph, 276 | } 277 | } 278 | } 279 | 280 | impl> Clone for TransferGraphSearcher { 281 | fn clone(&self) -> Self { 282 | Self { 283 | calculator: create_calculator(&self.graph.graph), 284 | graph: self.graph.clone(), 285 | } 286 | } 287 | } 288 | -------------------------------------------------------------------------------- /solari-transfers/src/valinor/NOTICE: -------------------------------------------------------------------------------- 1 | The files in this directory are derived from the Valinor project. See ./VALINOR_LICENSE for licensing details. 
2 | 3 | https://github.com/stadiamaps/valinor/ 4 | -------------------------------------------------------------------------------- /solari-transfers/src/valinor/VALINOR_LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2024 Stadia Maps. All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 4 | 5 | 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 6 | 7 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 8 | 9 | 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 10 | 11 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- /solari-transfers/src/valinor/edge_export.rs: -------------------------------------------------------------------------------- 1 | use crate::valinor::edge_models::EdgeRecord; 2 | use bit_set::BitSet; 3 | use log::warn; 4 | use std::collections::HashMap; 5 | use std::num::NonZeroUsize; 6 | use std::path::PathBuf; 7 | use valhalla_graphtile::graph_tile::DirectedEdge; 8 | use valhalla_graphtile::tile_hierarchy::STANDARD_LEVELS; 9 | use valhalla_graphtile::tile_provider::{ 10 | DirectoryTileProvider, GraphTileProvider, GraphTileProviderError, 11 | }; 12 | use valhalla_graphtile::{GraphId, RoadUse}; 13 | 14 | use super::edge_models::NodeRecord; 15 | 16 | fn should_skip_edge(edge: &DirectedEdge) -> bool { 17 | (edge.is_transit_line()) || edge.is_shortcut() || (edge.edge_use() == RoadUse::Ferry) 18 | } 19 | 20 | pub fn enumerate_edges)>( 21 | tile_path: &PathBuf, 22 | mut action: F, 23 | ) -> anyhow::Result<()> { 24 | let reader = DirectoryTileProvider::new(tile_path.clone(), NonZeroUsize::new(25).unwrap()); 25 | 26 | let mut tile_set = HashMap::new(); 27 | let mut node_count: usize = 0; 28 | for level in &*STANDARD_LEVELS { 29 | // For each tile in that level... 
30 | let n_tiles = level.tiling_system.n_rows * level.tiling_system.n_cols; 31 | 32 | for tile_id in 0..n_tiles { 33 | // Get the index pointer for each tile in the level 34 | let graph_id = GraphId::try_from_components(level.level, u64::from(tile_id), 0)?; 35 | match reader.get_tile_containing(&graph_id) { 36 | Ok(tile) => { 37 | let tile_node_count = tile.header.node_count() as usize; 38 | tile_set.insert(graph_id, node_count); 39 | node_count += tile_node_count; 40 | } 41 | Err(GraphTileProviderError::TileDoesNotExist) => { 42 | // Ignore; not all tiles will exist for extracts 43 | } 44 | Err(e) => return Err(e.into()), 45 | } 46 | } 47 | } 48 | 49 | // Drop mutability 50 | let tile_set = tile_set; 51 | 52 | // An efficient way of tracking whether we've seen an edge before 53 | // TODO: Does this crate actually work for 64-bit values? I also have some doubts about efficiency. 54 | let mut processed_nodes = BitSet::with_capacity(node_count); 55 | 56 | for (tile_id, node_index_offset) in &tile_set { 57 | let node_count = reader.get_tile_containing(&tile_id)?.header.node_count() as usize; 58 | 59 | let tile = reader.get_tile_containing(&tile_id)?; 60 | 61 | for index in 0..node_count { 62 | if processed_nodes.contains(*node_index_offset + index) { 63 | continue; 64 | } 65 | 66 | let node_id = tile_id.with_index(index as u64)?; 67 | let node = tile.get_node(&node_id)?; 68 | 69 | processed_nodes.insert(*node_index_offset + index); 70 | 71 | let mut edges = Vec::new(); 72 | for outbound_edge_index in 0..node.edge_count() { 73 | let outbound_edge_index = node.edge_index() + outbound_edge_index as u32; 74 | let edge_id = if let Ok(id) = tile_id.with_index(outbound_edge_index as u64) { 75 | id 76 | } else { 77 | warn!("Edge ID not constructed correctly"); 78 | continue; 79 | }; 80 | let edge = if let Ok(edge) = tile.get_directed_edge(&edge_id) { 81 | edge 82 | } else { 83 | warn!("Directed edge not found"); 84 | continue; 85 | }; 86 | 87 | let edge_info = if let 
Ok(edge_info) = tile.get_edge_info(edge) { 88 | edge_info 89 | } else { 90 | warn!("Edge info not found"); 91 | continue; 92 | }; 93 | 94 | // Skip certain edge types based on the config 95 | if should_skip_edge(edge) { 96 | continue; 97 | } 98 | 99 | edges.push(EdgeRecord::new(edge_info.shape()?.clone(), edge)); 100 | } 101 | action(NodeRecord::new(node_id, node), edges); 102 | } 103 | } 104 | Ok(()) 105 | } 106 | -------------------------------------------------------------------------------- /solari-transfers/src/valinor/edge_models.rs: -------------------------------------------------------------------------------- 1 | use geo::LineString; 2 | use valhalla_graphtile::{ 3 | GraphId, 4 | graph_tile::{DirectedEdge, NodeInfo}, 5 | }; 6 | 7 | pub struct EdgeRecord<'a> { 8 | geometry: LineString, 9 | directed_edge: &'a DirectedEdge, 10 | } 11 | 12 | impl<'a> EdgeRecord<'a> { 13 | pub fn new(geometry: LineString, directed_edge: &'a DirectedEdge) -> EdgeRecord<'a> { 14 | EdgeRecord { 15 | geometry, 16 | directed_edge, 17 | } 18 | } 19 | 20 | pub fn geometry(&self) -> &LineString { 21 | &self.geometry 22 | } 23 | 24 | pub fn directed_edge(&'a self) -> &'a DirectedEdge { 25 | self.directed_edge 26 | } 27 | } 28 | pub struct NodeRecord<'a> { 29 | id: GraphId, 30 | node_info: &'a NodeInfo, 31 | } 32 | 33 | impl<'a> NodeRecord<'a> { 34 | pub fn new(id: GraphId, node_info: &'a NodeInfo) -> NodeRecord<'a> { 35 | NodeRecord { id, node_info } 36 | } 37 | 38 | pub fn id(&'a self) -> &'a GraphId { 39 | &self.id 40 | } 41 | 42 | pub fn node_info(&'a self) -> &'a NodeInfo { 43 | self.node_info 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /solari-transfers/src/valinor/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod edge_export; 2 | mod edge_models; 3 | -------------------------------------------------------------------------------- /solari/Cargo.toml: 
use serde::{Deserialize, Serialize};
use time::OffsetDateTime;

pub mod request;
pub mod response;

/// A geographic coordinate in degrees, optionally tagged with a transit stop
/// name when the point corresponds to a stop.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct LatLng {
    pub lat: f64,
    pub lon: f64,
    // Omitted from the serialized JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stop: Option<String>,
}

/// One leg of an itinerary: either riding a transit vehicle or transferring
/// between stops. All timestamps (de)serialize as epoch milliseconds.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub enum SolariLeg {
    #[serde(rename = "transit")]
    Transit {
        #[serde(
            serialize_with = "time::serde::timestamp::milliseconds::serialize",
            deserialize_with = "time::serde::timestamp::milliseconds::deserialize"
        )]
        start_time: OffsetDateTime,
        #[serde(
            serialize_with = "time::serde::timestamp::milliseconds::serialize",
            deserialize_with = "time::serde::timestamp::milliseconds::deserialize"
        )]
        end_time: OffsetDateTime,
        start_location: LatLng,
        end_location: LatLng,
        // Encoded polyline of the vehicle's path, when known.
        #[serde(skip_serializing_if = "Option::is_none")]
        route_shape: Option<String>,
        transit_route: Option<String>,
        transit_agency: Option<String>,
    },
    #[serde(rename = "transfer")]
    Transfer {
        #[serde(
            serialize_with = "time::serde::timestamp::milliseconds::serialize",
            deserialize_with = "time::serde::timestamp::milliseconds::deserialize"
        )]
        start_time: OffsetDateTime,
        #[serde(
            serialize_with = "time::serde::timestamp::milliseconds::serialize",
            deserialize_with = "time::serde::timestamp::milliseconds::deserialize"
        )]
        end_time: OffsetDateTime,
        start_location: LatLng,
        end_location: LatLng,
        // Encoded polyline of the walking path, when known.
        route_shape: Option<String>,
    },
}

/// A complete journey from `start_location` to `end_location`, composed of
/// one or more legs. Timestamps (de)serialize as epoch milliseconds.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct SolariItinerary {
    pub start_location: LatLng,
    pub end_location: LatLng,
    #[serde(
        serialize_with = "time::serde::timestamp::milliseconds::serialize",
        deserialize_with = "time::serde::timestamp::milliseconds::deserialize"
    )]
    pub start_time: OffsetDateTime,
    #[serde(
        serialize_with = "time::serde::timestamp::milliseconds::serialize",
        deserialize_with = "time::serde::timestamp::milliseconds::deserialize"
    )]
    pub end_time: OffsetDateTime,
    pub legs: Vec<SolariLeg>,
}
-------------------------------------------------------------------------------- 1 | use std::usize; 2 | 3 | use serde::{Deserialize, Serialize}; 4 | use time::OffsetDateTime; 5 | 6 | use super::LatLng; 7 | 8 | #[derive(Debug, Clone, Deserialize, Serialize)] 9 | pub enum TransferMode { 10 | #[serde(rename = "walking")] 11 | Walking, 12 | #[serde(rename = "cycling")] 13 | Cycling, 14 | } 15 | 16 | impl Default for TransferMode { 17 | fn default() -> Self { 18 | Self::Walking 19 | } 20 | } 21 | 22 | #[derive(Debug, Clone, Deserialize, Serialize)] 23 | pub struct TransferQuantity(pub usize); 24 | 25 | impl Default for TransferQuantity { 26 | fn default() -> Self { 27 | Self(usize::MAX) 28 | } 29 | } 30 | 31 | #[derive(Debug, Clone, Deserialize, Serialize)] 32 | pub struct SolariRequest { 33 | pub from: LatLng, 34 | pub to: LatLng, 35 | #[serde( 36 | serialize_with = "time::serde::timestamp::milliseconds::serialize", 37 | deserialize_with = "time::serde::timestamp::milliseconds::deserialize" 38 | )] 39 | pub start_at: OffsetDateTime, 40 | 41 | #[serde(default)] 42 | pub transfer_mode: TransferMode, 43 | #[serde(default)] 44 | pub max_transfers: TransferQuantity, 45 | } 46 | -------------------------------------------------------------------------------- /solari/src/api/response.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | use super::SolariItinerary; 4 | 5 | #[derive(Debug, Clone, Deserialize, Serialize)] 6 | pub enum ResponseStatus { 7 | #[serde(rename = "ok")] 8 | Ok, 9 | #[serde(rename = "no_route_found")] 10 | NoRouteFound, 11 | #[serde(rename = "too_early")] 12 | TooEarly, 13 | #[serde(rename = "too_late")] 14 | TooLate, 15 | } 16 | 17 | #[derive(Debug, Clone, Deserialize, Serialize)] 18 | pub struct SolariResponse { 19 | pub status: ResponseStatus, 20 | pub itineraries: Vec, 21 | } 22 | -------------------------------------------------------------------------------- 
/solari/src/bin/build_timetable.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fs, 3 | hash::{DefaultHasher, Hasher}, 4 | path::PathBuf, 5 | }; 6 | 7 | use anyhow::bail; 8 | use clap::Parser; 9 | use gtfs_structures::GtfsReader; 10 | use log::debug; 11 | use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; 12 | use solari::raptor::timetable::{in_memory::InMemoryTimetableBuilder, mmap::MmapTimetable}; 13 | 14 | extern crate solari; 15 | 16 | #[derive(Parser)] 17 | struct BuildArgs { 18 | #[arg(long)] 19 | base_path: PathBuf, 20 | #[arg(long)] 21 | gtfs_path: PathBuf, 22 | #[arg(long)] 23 | valhalla_tiles: PathBuf, 24 | #[arg(short, long, default_value_t = 1)] 25 | num_threads: usize, 26 | #[arg(long, default_value_t = false)] 27 | concat_only: bool, 28 | } 29 | 30 | fn process_gtfs<'a>( 31 | path: &PathBuf, 32 | base_path: &PathBuf, 33 | ) -> Result, anyhow::Error> { 34 | let feed = if let Ok(feed) = GtfsReader::default().read_from_path(path.to_str().unwrap()) { 35 | feed 36 | } else { 37 | bail!(format!("Failed to load feed: {:?}", path)); 38 | }; 39 | debug!("Processing feed: {:?}", path); 40 | let in_memory_timetable_builder = InMemoryTimetableBuilder::new(&feed)?; 41 | let hash = { 42 | let mut hasher = DefaultHasher::new(); 43 | hasher.write(path.to_str().unwrap().as_bytes()); 44 | format!("{:x}", hasher.finish()) 45 | }; 46 | 47 | let timetable_dir = base_path.join(hash); 48 | fs::create_dir_all(&timetable_dir).unwrap(); 49 | Ok(MmapTimetable::from_in_memory( 50 | &in_memory_timetable_builder, 51 | &timetable_dir, 52 | )?) 53 | } 54 | 55 | async fn concat_timetables<'a>( 56 | paths: &[PathBuf], 57 | base_path: &PathBuf, 58 | valhalla_tile_path: &PathBuf, 59 | ) -> Result, anyhow::Error> { 60 | let paths = paths.to_vec(); 61 | 62 | let timetables: Vec> = paths 63 | .par_iter() 64 | .filter_map(|path| MmapTimetable::open(path).ok()) 65 | .collect(); 66 | 67 | // Combine all timetables into one. 
68 | let timetable = MmapTimetable::concatenate(&timetables, base_path, valhalla_tile_path).await; 69 | Ok(timetable) 70 | } 71 | 72 | async fn timetable_from_feeds<'a>( 73 | paths: &[PathBuf], 74 | base_path: &PathBuf, 75 | valhalla_tile_path: &PathBuf, 76 | ) -> Result, anyhow::Error> { 77 | let paths = paths.to_vec(); 78 | 79 | let timetables: Vec> = paths 80 | .par_iter() 81 | .filter(|path| path.extension().map(|ext| ext == "zip") == Some(true)) 82 | .filter_map(|path| { 83 | process_gtfs(&path, base_path) 84 | .map_err(|err| { 85 | log::error!("Failed to process GTFS feed: {}", err); 86 | err 87 | }) 88 | .ok() 89 | }) 90 | .collect(); 91 | 92 | // Combine all timetables into one. 93 | let timetable = MmapTimetable::concatenate(&timetables, base_path, valhalla_tile_path).await; 94 | Ok(timetable) 95 | } 96 | 97 | #[tokio::main(worker_threads = 64)] 98 | async fn main() { 99 | env_logger::init(); 100 | let args = BuildArgs::parse(); 101 | rayon::ThreadPoolBuilder::new() 102 | .num_threads(args.num_threads) 103 | .build_global() 104 | .unwrap(); 105 | if args.concat_only { 106 | let paths: Vec = fs::read_dir(&args.base_path) 107 | .unwrap() 108 | .map(|p| p.unwrap().path()) 109 | .collect(); 110 | 111 | let _timetable = concat_timetables(&paths, &args.base_path.into(), &args.valhalla_tiles) 112 | .await 113 | .unwrap(); 114 | } else if fs::metadata(&args.gtfs_path).unwrap().is_dir() { 115 | let paths: Vec = fs::read_dir(&args.gtfs_path) 116 | .unwrap() 117 | .map(|p| p.unwrap().path()) 118 | .collect(); 119 | 120 | let _timetable = timetable_from_feeds(&paths, &args.base_path.into(), &args.valhalla_tiles) 121 | .await 122 | .unwrap(); 123 | } else { 124 | let _timetable = timetable_from_feeds( 125 | &[args.gtfs_path.into()], 126 | &args.base_path.into(), 127 | &args.valhalla_tiles, 128 | ) 129 | .await 130 | .unwrap(); 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /solari/src/bin/download_feeds.rs: 
-------------------------------------------------------------------------------- 1 | use std::{ffi::OsString, path::PathBuf, str::FromStr, time::Duration}; 2 | 3 | use clap::Parser; 4 | use solari::dmfr::DistributedMobilityFeedRegistry; 5 | use log::{debug, info}; 6 | use reqwest::Client; 7 | use tokio::{fs, spawn}; 8 | 9 | #[derive(Parser)] 10 | struct Args { 11 | #[arg(long)] 12 | dmfr_dir: PathBuf, 13 | #[arg(long)] 14 | zip_dir: Option, 15 | } 16 | 17 | async fn download_dmfr(path: PathBuf, zip_dir: PathBuf) -> Result<(), anyhow::Error> { 18 | let client = Client::builder() 19 | .timeout(Duration::from_secs(5)) 20 | .build() 21 | .unwrap(); 22 | debug!("Enumerating feeds from {:?}", &path); 23 | let dmfr: DistributedMobilityFeedRegistry = 24 | serde_json::from_str(&fs::read_to_string(&path).await?)?; 25 | for (feed_idx, feed) in dmfr.feeds.iter().enumerate() { 26 | if let Some(url) = &feed.urls.static_current { 27 | debug!("Downloading feed from: {:?}", url); 28 | let response = client.get(url.as_str()).send().await?; 29 | let mut filename = path 30 | .file_name() 31 | .expect("GTFS feed not a file") 32 | .to_os_string(); 33 | filename.push(OsString::from_str(&format!(".{feed_idx}.zip"))?); 34 | let zip_path = zip_dir.join(filename); 35 | fs::write(&zip_path, response.bytes().await?).await?; 36 | info!("Wrote zip file to {:?}", zip_path) 37 | } 38 | } 39 | Ok(()) 40 | } 41 | 42 | #[tokio::main(worker_threads = 16)] 43 | async fn main() -> Result<(), anyhow::Error> { 44 | env_logger::init(); 45 | let args = Args::parse(); 46 | 47 | let mut read = fs::read_dir(&args.dmfr_dir).await?; 48 | let mut handles = Vec::new(); 49 | while let Some(dmfr) = read.next_entry().await? 
{ 50 | let path = dmfr.path(); 51 | debug!("Found path {:?}", &path); 52 | if path 53 | .extension() 54 | .map(|str| str.to_string_lossy().to_string()) 55 | != Some("json".to_string()) 56 | { 57 | debug!("Skipping path {:?}", &path); 58 | continue; 59 | } 60 | let zip_dir = args 61 | .zip_dir 62 | .clone() 63 | .unwrap_or(args.dmfr_dir.clone()) 64 | .clone(); 65 | let handle = spawn(async move { download_dmfr(path, zip_dir) }); 66 | handles.push(handle); 67 | } 68 | for handle in handles { 69 | match handle.await { 70 | Ok(future) => { 71 | if let Err(err) = future.await { 72 | log::warn!("Failed to download a feed: {err}") 73 | } 74 | } 75 | Err(err) => { 76 | log::warn!("Failed to join task: {err}") 77 | } 78 | } 79 | } 80 | println!("Done"); 81 | Ok(()) 82 | } 83 | -------------------------------------------------------------------------------- /solari/src/bin/serve.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use clap::Parser; 4 | use rocket::{serde::json::Json, State}; 5 | use s2::latlng::LatLng; 6 | use solari::{ 7 | api::{request::SolariRequest, response::SolariResponse}, 8 | raptor::timetable::{mmap::MmapTimetable, Time}, 9 | route::Router, 10 | }; 11 | 12 | #[macro_use] 13 | extern crate rocket; 14 | 15 | #[post("/v1/plan", data = "")] 16 | async fn plan( 17 | request: Json, 18 | router: &State>>, 19 | ) -> Json { 20 | let from = LatLng::from_degrees(request.0.from.lat, request.0.from.lon); 21 | let to = LatLng::from_degrees(request.0.to.lat, request.0.to.lon); 22 | 23 | let max_transfers = usize::min(5, request.0.max_transfers.0); 24 | 25 | return Json( 26 | router 27 | .route( 28 | Time::from_epoch_seconds(request.0.start_at.unix_timestamp() as u32), 29 | from, 30 | to, 31 | Some(1500f64), 32 | Some(1000), 33 | Some(max_transfers), 34 | Some(2), 35 | ) 36 | .await, 37 | ); 38 | } 39 | 40 | #[derive(Parser)] 41 | struct ServeArgs { 42 | #[arg(long)] 43 | base_path: PathBuf, 44 | 
#[arg(short, long)] 45 | port: Option, 46 | } 47 | 48 | #[launch] 49 | fn rocket() -> _ { 50 | env_logger::init(); 51 | let args = ServeArgs::parse(); 52 | let router = Router::new( 53 | MmapTimetable::open(&args.base_path).expect("Failed to open timetable"), 54 | args.base_path.clone(), 55 | ) 56 | .expect("Failed to build router"); 57 | 58 | rocket::build() 59 | .manage(router) 60 | .configure(rocket::Config::figment().merge(("port", args.port.unwrap_or(8000)))) 61 | .mount("/", routes![plan]) 62 | } 63 | -------------------------------------------------------------------------------- /solari/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod api; 2 | pub mod dmfr; 3 | pub mod raptor; 4 | pub mod route; 5 | mod spatial; 6 | -------------------------------------------------------------------------------- /solari/src/raptor/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod timetable; 2 | -------------------------------------------------------------------------------- /solari/src/raptor/timetable/in_memory.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::{BTreeMap, BTreeSet, HashMap}, 3 | f32, u32, 4 | }; 5 | 6 | use anyhow::bail; 7 | use chrono::{offset::LocalResult, DateTime, Days, Local, NaiveTime, TimeDelta, TimeZone}; 8 | use chrono_tz::Tz; 9 | use gtfs_structures::{Agency, Gtfs, StopTime}; 10 | use log::{debug, warn}; 11 | use rstar::RTree; 12 | use s2::{cellid::CellID, latlng::LatLng}; 13 | 14 | use crate::{ 15 | raptor::timetable::{Route, RouteStop, Stop, StopRoute, Transfer, Trip, TripStopTime}, 16 | spatial::IndexedStop, 17 | }; 18 | 19 | use super::{ShapeCoordinate, Timetable, TripMetadata}; 20 | 21 | #[derive(Debug, Clone)] 22 | #[repr(C)] 23 | pub(crate) struct InMemoryTimetable { 24 | routes: Vec, 25 | route_stops: Vec, 26 | route_trips: Vec, 27 | stops: Vec, 28 | stop_routes: Vec, 
/// Read-only `Timetable` view over the in-memory (builder-side) representation.
/// Used during construction; the serving path uses the mmap-backed timetable.
impl<'a> Timetable<'a> for InMemoryTimetable {
    /// Returns the route with the given dense id. Panics if out of range.
    #[inline]
    fn route(&'a self, route_id: usize) -> &'a Route {
        &self.routes[route_id as usize]
    }

    /// Returns the stop with the given dense id. Panics if out of range.
    #[inline]
    fn stop(&'a self, stop_id: usize) -> &'a Stop {
        &self.stops[stop_id as usize]
    }

    /// All outgoing transfers from `stop_id`, resolved via the transfer index.
    #[inline]
    fn transfers_from(&'a self, stop_id: usize) -> &'a [Transfer] {
        Transfer::all_transfers(self.stop(stop_id), self)
    }

    #[inline]
    fn stop_count(&self) -> usize {
        self.stops.len()
    }

    #[inline]
    fn stops(&'a self) -> &'a [Stop] {
        &self.stops
    }

    #[inline]
    fn stop_routes(&'a self) -> &'a [StopRoute] {
        &self.stop_routes
    }

    #[inline]
    fn routes(&'a self) -> &'a [Route] {
        &self.routes
    }

    #[inline]
    fn route_stops(&'a self) -> &'a [RouteStop] {
        &self.route_stops
    }

    #[inline]
    fn route_trips(&'a self) -> &'a [Trip] {
        &self.route_trips
    }

    #[inline]
    fn trip_stop_times(&'a self) -> &'a [TripStopTime] {
        &self.trip_stop_times
    }

    #[inline]
    fn transfers(&'a self) -> &'a [Transfer] {
        &self.transfers
    }

    #[inline]
    fn transfer_index(&'a self) -> &'a [usize] {
        &self.transfer_index
    }

    /// Returns a clone of the GTFS metadata for `stop`.
    /// NOTE(review): indexing panics if the stop is absent from the map —
    /// presumably every built stop has metadata; confirm.
    fn stop_metadata(&'a self, stop: &Stop) -> gtfs_structures::Stop {
        self.stop_metadata_map[stop].clone()
    }

    /// Returns a clone of the metadata for `trip`. Panics if absent (see note
    /// on `stop_metadata`).
    fn trip_metadata(&'a self, trip: &Trip) -> TripMetadata {
        self.trip_metadata_map[trip].clone()
    }

    /// The in-memory timetable keeps no spatial index; returns an empty tree.
    fn stop_index_copy(&'a self) -> RTree<IndexedStop> {
        RTree::new()
    }

    /// Not supported in the in-memory representation; always returns no stops.
    fn nearest_stops(&'a self, _lat: f64, _lng: f64, _n: usize) -> Vec<(&'a Stop, f64)> {
        Vec::new()
    }

    /// Returns the shape for `route`, if one was recorded.
    /// NOTE(review): the map indexing panics when `route` has no entry at all
    /// (as opposed to an entry of `None`) — confirm every route gets an entry.
    fn route_shape(&'a self, route: &Route) -> Option<Vec<ShapeCoordinate>> {
        self.route_shapes[route].clone()
    }
}
RouteKey { 190 | trip_stop_ids: Vec, 191 | route_id: String, 192 | } 193 | 194 | #[derive(Debug, Clone)] 195 | struct RouteData { 196 | id: RouteId, 197 | gtfs_route_id: String, 198 | shape: Option>, 199 | trip_list: Vec, 200 | stops: Vec, 201 | shape_distances: Vec, 202 | agency_name: Option, 203 | } 204 | 205 | #[derive(Debug, Clone, PartialOrd, PartialEq, Eq, Ord)] 206 | struct StopKey { 207 | gtfs_id: String, 208 | } 209 | 210 | #[derive(Debug, Clone)] 211 | struct StopData { 212 | id: StopId, 213 | gtfs_id: String, 214 | stop_routes: BTreeSet, 215 | } 216 | 217 | impl<'a> InMemoryTimetableBuilder { 218 | pub fn new(gtfs: &Gtfs) -> Result { 219 | let mut builder = InMemoryTimetableBuilder { 220 | next_stop_id: 0, 221 | next_stop_route_id: 0, 222 | next_route_id: 0, 223 | next_route_trip_id: 0, 224 | next_route_stop_id: 0, 225 | next_trip_stop_time_id: 0, 226 | timetable: InMemoryTimetable::new(), 227 | stop_table: BTreeMap::new(), 228 | route_index: BTreeMap::new(), 229 | route_table: BTreeMap::new(), 230 | }; 231 | builder.preprocess_gtfs(gtfs)?; 232 | Ok(builder) 233 | } 234 | 235 | fn lookup_stop_data(&'a mut self, gtfs_id: &String) -> &'a mut StopData { 236 | let key = StopKey { 237 | gtfs_id: gtfs_id.clone(), 238 | }; 239 | // If the stop isn't already in our table, add it. 240 | if !self.stop_table.contains_key(&key) { 241 | let stop_id = StopId(self.next_stop_id); 242 | self.next_stop_id += 1; 243 | self.stop_table.insert( 244 | key.clone(), 245 | StopData { 246 | id: stop_id, 247 | gtfs_id: gtfs_id.clone(), 248 | stop_routes: BTreeSet::new(), 249 | }, 250 | ); 251 | } 252 | // Return a mutable reference. 
253 | return self.stop_table.get_mut(&key).unwrap(); 254 | } 255 | 256 | fn lookup_route_data( 257 | &'a mut self, 258 | gtfs: &Gtfs, 259 | trip: >fs_structures::Trip, 260 | ) -> &'a mut RouteData { 261 | // Solari defines a "route" as something distinct from a GTFS route, because in GTFS there's no guarantee that a route always has the same stops in the same order. In fact, for bidirectional lines, the same route is usually used for trips in both directions, which violates RAPTOR's assumptions. To deal with this, we define a "route" as a set of trips that all visit the same stops in the same order and have the same GTFS route ID in the same GTFS feed. 262 | let trip_stop_ids: Vec = trip 263 | .stop_times 264 | .iter() 265 | .map(|stop_time| stop_time.stop.id.clone()) 266 | .collect(); 267 | let gtfs_route_id = trip.route_id.clone(); 268 | // If the `route_key` is shared between two trips, they belong to the same route. 269 | let route_key = RouteKey { 270 | trip_stop_ids, 271 | route_id: gtfs_route_id.clone(), 272 | }; 273 | if !self.route_index.contains_key(&route_key) { 274 | let route_id = RouteId(self.next_route_id); 275 | self.next_route_id += 1; 276 | 277 | // Determine the path that the route travels. 278 | let shape: Option> = if let Some(shape_id) = &trip.shape_id { 279 | if let Ok(coords) = gtfs.get_shape(&shape_id) { 280 | Some( 281 | coords 282 | .iter() 283 | .map(|coord| ShapeCoordinate { 284 | lat: coord.latitude, 285 | lon: coord.longitude, 286 | distance_along_shape: coord.dist_traveled, 287 | }) 288 | .collect(), 289 | ) 290 | } else { 291 | warn!("Could not look up shape {shape_id}"); 292 | None 293 | } 294 | } else { 295 | None 296 | }; 297 | 298 | // Determine the distance along the travel path of each stop. 
299 | let shape_distances: Vec = trip 300 | .stop_times 301 | .iter() 302 | .map(|time| time.shape_dist_traveled.unwrap_or(f32::NAN)) 303 | .collect(); 304 | 305 | let stops = trip 306 | .stop_times 307 | .iter() 308 | .map(|stop_time| self.lookup_stop_data(&stop_time.stop.id).id.clone()) 309 | .collect(); 310 | 311 | // Determine the human-readable agency name. 312 | let agency_name = gtfs 313 | .agencies 314 | .iter() 315 | .find(|agency| { 316 | agency.id 317 | == gtfs 318 | .get_route(>fs_route_id) 319 | .expect("Trip's route ID not found in route table.") 320 | .agency_id 321 | }) 322 | .map(|agency| agency.name.clone()); 323 | 324 | self.route_index.insert(route_key.clone(), route_id); 325 | self.route_table.insert( 326 | route_id, 327 | RouteData { 328 | id: route_id, 329 | gtfs_route_id, 330 | shape, 331 | trip_list: vec![], 332 | stops, 333 | shape_distances, 334 | agency_name, 335 | }, 336 | ); 337 | 338 | self.route_table.get_mut(&route_id).unwrap() 339 | } else { 340 | // We already know of this route. Return a mutable reference. 341 | return self 342 | .route_table 343 | .get_mut(&self.route_index[&route_key]) 344 | .unwrap(); 345 | } 346 | } 347 | 348 | fn preprocess_gtfs(&mut self, gtfs: &Gtfs) -> Result<(), anyhow::Error> { 349 | let agencies: HashMap = gtfs 350 | .agencies 351 | .iter() 352 | .map(|agency| (agency.id.clone().unwrap_or(String::new()), agency)) 353 | .collect(); 354 | let start_date = Local::now().date_naive().pred_opt().unwrap(); 355 | 356 | // First things first, go through every trip in the feed. 
357 | for (gtfs_trip_id, trip) in >fs.trips { 358 | { 359 | let agency_id = if let Some(agency_id) = gtfs 360 | .routes 361 | .get(&trip.route_id) 362 | .map(|route| route.agency_id.clone()) 363 | .flatten() 364 | { 365 | agency_id 366 | } else { 367 | continue; 368 | }; 369 | let tz = self 370 | .trip_agency_timezone(&agencies, &agency_id) 371 | .expect("Failed to parse timezone"); 372 | let route_data = self.lookup_route_data(gtfs, trip); 373 | let trip_days = gtfs.trip_days(&trip.service_id, start_date.clone()); 374 | for day in trip_days { 375 | if day <= 14 { 376 | let date_time_offset = start_date 377 | .checked_add_days(Days::new(day as u64)) 378 | .expect(&format!( 379 | "Failed to add {day} days to date {:?}", 380 | start_date 381 | )); 382 | // The start of a service day is defined as noon minus 12 hours. 383 | let noon_service_day = match tz.from_local_datetime( 384 | &date_time_offset.and_time( 385 | NaiveTime::from_hms_opt(12, 0, 0) 386 | .expect("Failed to add 12 hours to service day"), 387 | ), 388 | ) { 389 | LocalResult::Single(date_time) => date_time, 390 | LocalResult::Ambiguous(a, _b) => { 391 | // Pick one and call it good. 392 | a 393 | } 394 | LocalResult::None => { 395 | bail!("Gap in time (at noon? shouldn't be possible), can't determine service day start") 396 | } 397 | }; 398 | let service_day_start = noon_service_day 399 | .checked_sub_signed(TimeDelta::hours(12)) 400 | .expect( 401 | "Failed to subtract 12 hours from noon on the given service day.", 402 | ); 403 | 404 | // Once we've assembled all the necessary data, push a trip to the route_data's trip_list for use later in `process_routes_trips`. 
405 | route_data.trip_list.push(TripInternal { 406 | service_day_start, 407 | stop_times: trip.stop_times.clone(), 408 | gtfs_trip_id: gtfs_trip_id.clone(), 409 | }); 410 | } 411 | } 412 | } 413 | let route_id = self.lookup_route_data(gtfs, trip).id; 414 | for stop_time in &trip.stop_times { 415 | self.lookup_stop_data(&stop_time.stop.id) 416 | .stop_routes 417 | .insert(route_id); 418 | } 419 | } 420 | 421 | debug!("Done sorting"); 422 | 423 | self.process_routes_trips(gtfs)?; 424 | 425 | self.process_stops(gtfs)?; 426 | 427 | Result::Ok(()) 428 | } 429 | 430 | fn process_routes_trips(&mut self, gtfs: &Gtfs) -> Result<(), anyhow::Error> { 431 | // TODO: How to deal with this route_table.clone()? It indicates an architectural problem IMO. 432 | for (_, route_data) in self.route_table.clone().iter() { 433 | let route = Route { 434 | route_index: route_data.id.0, 435 | first_route_stop: self.next_route_stop_id, 436 | first_route_trip: self.next_route_trip_id, 437 | }; 438 | self.timetable 439 | .route_shapes 440 | .insert(route, route_data.shape.clone()); 441 | self.timetable.routes.push(route); 442 | 443 | for (stop_seq, stop_id) in route_data.stops.iter().enumerate() { 444 | self.timetable.route_stops.push(RouteStop { 445 | route_index: route_data.id.0, 446 | stop_index: stop_id.0, 447 | stop_seq: stop_seq as u32, 448 | distance_along_route: route_data.shape_distances[stop_seq], 449 | }); 450 | self.next_route_stop_id += 1; 451 | } 452 | let mut trips = route_data.trip_list.clone(); 453 | trips.sort_by_cached_key(|trip| trip.get_departure()); 454 | for trip in &trips { 455 | self.process_trip(gtfs, route_data, trip)?; 456 | } 457 | } 458 | Ok(()) 459 | } 460 | 461 | fn trip_agency_timezone( 462 | &self, 463 | agencies: &HashMap, 464 | trip_agency_id: &String, 465 | ) -> Result { 466 | let trip_agency = if let Some(agency) = agencies.get(trip_agency_id) { 467 | agency 468 | } else { 469 | if agencies.len() == 1 { 470 | agencies.values().next().unwrap() 471 | } else 
{ 472 | bail!("No matching agency: {}, {:?}", trip_agency_id, agencies); 473 | } 474 | }; 475 | Ok(trip_agency.timezone.parse().map_err(|_err| { 476 | anyhow::anyhow!( 477 | "Failed to parse tz for agency {}: {}", 478 | trip_agency, 479 | trip_agency.timezone 480 | ) 481 | })?) 482 | } 483 | 484 | fn process_trip( 485 | &mut self, 486 | gtfs: &Gtfs, 487 | route_data: &RouteData, 488 | trip: &TripInternal, 489 | ) -> Result<(), anyhow::Error> { 490 | let first_trip_stop_time = self.next_trip_stop_time_id; 491 | 492 | #[cfg(feature = "enforce_invariants")] 493 | let mut prev_time = 0u32; 494 | for (stop_seq, stop_time) in trip.stop_times.iter().enumerate() { 495 | #[cfg(feature = "enforce_invariants")] 496 | if let Some(arrival_time) = stop_time.arrival_time { 497 | assert!(arrival_time >= prev_time); 498 | prev_time = arrival_time; 499 | } 500 | let arrival_time = trip 501 | .service_day_start 502 | .checked_add_signed(TimeDelta::seconds( 503 | stop_time.arrival_time.unwrap_or(u32::MAX) as i64, 504 | )) 505 | .unwrap(); 506 | let departure_time = trip 507 | .service_day_start 508 | .checked_add_signed(TimeDelta::seconds( 509 | stop_time.departure_time.unwrap_or(u32::MAX) as i64, 510 | )) 511 | .unwrap(); 512 | self.timetable.trip_stop_times.push(TripStopTime::new( 513 | self.next_route_trip_id, 514 | stop_seq, 515 | arrival_time, 516 | departure_time, 517 | )); 518 | self.next_trip_stop_time_id += 1; 519 | } 520 | let gtfs_trip = gtfs 521 | .get_trip(&trip.gtfs_trip_id) 522 | .expect("Trip not found in trip table."); 523 | let trip = Trip { 524 | trip_index: self.next_route_trip_id, 525 | route_index: route_data.id.0, 526 | first_trip_stop_time, 527 | last_trip_stop_time: self.next_trip_stop_time_id, 528 | }; 529 | self.timetable.route_trips.push(trip); 530 | let metadata = TripMetadata { 531 | agency_name: route_data.agency_name.clone(), 532 | headsign: gtfs_trip.trip_headsign.clone(), 533 | route_name: gtfs.routes[&route_data.gtfs_route_id].short_name.clone(), 534 
| }; 535 | self.timetable.trip_metadata_map.insert(trip, metadata); 536 | 537 | self.next_route_trip_id += 1; 538 | 539 | Ok(()) 540 | } 541 | 542 | fn process_stops(&mut self, gtfs: &Gtfs) -> Result<(), anyhow::Error> { 543 | let mut sorted_stops: Vec<&StopData> = self.stop_table.values().collect(); 544 | sorted_stops.sort_by_cached_key(|stop_data| stop_data.id); 545 | for stop_data in sorted_stops { 546 | let gtfs_stop = gtfs.get_stop(&stop_data.gtfs_id).unwrap(); 547 | let lat = if let Some(lat) = gtfs_stop.latitude { 548 | lat 549 | } else { 550 | bail!("Can't process feeds containing stop IDs without lat/lng") 551 | }; 552 | let lng = if let Some(lng) = gtfs_stop.longitude { 553 | lng 554 | } else { 555 | bail!("Can't process feeds containing stop IDs without lat/lng") 556 | }; 557 | let s2cell: CellID = LatLng::from_degrees(lat, lng).into(); 558 | let stop = Stop { 559 | stop_index: stop_data.id.0, 560 | s2cell: s2cell.0, 561 | first_stop_route_index: self.next_stop_route_id, 562 | }; 563 | self.timetable.stops.push(stop); 564 | self.timetable 565 | .stop_metadata_map 566 | .insert(stop, gtfs_stop.clone()); 567 | for route in &stop_data.stop_routes { 568 | let mut seq = 0usize; 569 | let mut found_seq = false; 570 | for route_stop_seq_candidate in &self.route_table[&route].stops { 571 | if &stop_data.id == route_stop_seq_candidate { 572 | found_seq = true; 573 | break; 574 | } 575 | seq += 1; 576 | } 577 | assert!(found_seq); 578 | self.timetable.stop_routes.push(StopRoute { 579 | route_index: route.0, 580 | stop_seq: seq, 581 | }); 582 | self.next_stop_route_id += 1; 583 | } 584 | } 585 | Ok(()) 586 | } 587 | } 588 | -------------------------------------------------------------------------------- /solari/src/raptor/timetable/mmap.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fs::{self, File}, 3 | marker::PhantomData, 4 | mem::size_of, 5 | path::PathBuf, 6 | pin::Pin, 7 | slice, 8 | sync::Arc, 9 | }; 
10 | 11 | use anyhow::{Error, Ok}; 12 | use bytemuck::{cast_slice_mut, checked::cast_slice}; 13 | use geo::Coord; 14 | use log::{debug, info}; 15 | use memmap2::{Mmap, MmapMut, MmapOptions}; 16 | use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; 17 | use redb::Database; 18 | use rstar::RTree; 19 | use s2::latlng::LatLng; 20 | use solari_geomath::lat_lng_to_cartesian; 21 | use solari_spatial::{SphereIndex, SphereIndexMmap}; 22 | use solari_transfers::{ 23 | fast_paths::{FastGraph, FastGraphStatic}, 24 | {TransferGraph, TransferGraphSearcher}, 25 | }; 26 | 27 | use crate::spatial::{IndexedStop, WALK_SPEED_MM_PER_SECOND}; 28 | 29 | use super::{ 30 | in_memory::InMemoryTimetableBuilder, Route, RouteStop, ShapeCoordinate, Stop, StopRoute, 31 | Timetable, Transfer, Trip, TripMetadata, TripStopTime, ROUTE_SHAPE_TABLE, STOP_METADATA_TABLE, 32 | TRIP_METADATA_TABLE, 33 | }; 34 | 35 | #[allow(unused)] 36 | pub struct MmapTimetable<'a> { 37 | base_path: PathBuf, 38 | 39 | backing_routes: Pin, 40 | backing_route_stops: Pin, 41 | backing_route_trips: Pin, 42 | backing_stops: Pin, 43 | backing_stop_routes: Pin, 44 | backing_trip_stop_times: Pin, 45 | backing_transfer_index: Pin, 46 | backing_transfers: Pin, 47 | 48 | routes_slice: &'a [Route], 49 | route_stops_slice: &'a [RouteStop], 50 | route_trips_slice: &'a [Trip], 51 | stops_slice: &'a [Stop], 52 | stop_routes_slice: &'a [StopRoute], 53 | trip_stop_times_slice: &'a [TripStopTime], 54 | transfer_index_slice: &'a [usize], 55 | transfers_slice: &'a [Transfer], 56 | rtree: RTree, 57 | 58 | metadata_db: redb::Database, 59 | 60 | phantom: &'a PhantomData<()>, 61 | } 62 | 63 | impl<'a> Timetable<'a> for MmapTimetable<'a> { 64 | #[inline] 65 | fn route(&'a self, route_id: usize) -> &'a Route { 66 | &self.routes()[route_id as usize] 67 | } 68 | 69 | #[inline] 70 | fn stop(&'a self, stop_id: usize) -> &'a Stop { 71 | &self.stops()[stop_id as usize] 72 | } 73 | 74 | #[inline] 75 | fn transfers_from(&'a self, stop_id: 
usize) -> &'a [Transfer] { 76 | Transfer::all_transfers(self.stop(stop_id), self) 77 | } 78 | 79 | #[inline] 80 | fn stop_count(&self) -> usize { 81 | self.stops().len() 82 | } 83 | 84 | #[inline] 85 | fn stops(&'a self) -> &'a [Stop] { 86 | self.stops_slice 87 | } 88 | 89 | #[inline] 90 | fn stop_routes(&'a self) -> &'a [StopRoute] { 91 | self.stop_routes_slice 92 | } 93 | 94 | #[inline] 95 | fn routes(&'a self) -> &'a [Route] { 96 | self.routes_slice 97 | } 98 | 99 | #[inline] 100 | fn route_stops(&'a self) -> &'a [RouteStop] { 101 | self.route_stops_slice 102 | } 103 | 104 | #[inline] 105 | fn route_trips(&'a self) -> &'a [Trip] { 106 | self.route_trips_slice 107 | } 108 | 109 | #[inline] 110 | fn trip_stop_times(&'a self) -> &'a [TripStopTime] { 111 | self.trip_stop_times_slice 112 | } 113 | 114 | #[inline] 115 | fn transfers(&'a self) -> &'a [Transfer] { 116 | self.transfers_slice 117 | } 118 | 119 | #[inline] 120 | fn transfer_index(&'a self) -> &'a [usize] { 121 | self.transfer_index_slice 122 | } 123 | 124 | #[inline] 125 | fn stop_index_copy(&'a self) -> RTree { 126 | self.rtree.clone() 127 | } 128 | 129 | fn nearest_stops(&'a self, lat: f64, lng: f64, n: usize) -> Vec<(&'a Stop, f64)> { 130 | self.rtree 131 | .nearest_neighbor_iter_with_distance_2(&lat_lng_to_cartesian(lat, lng)) 132 | .take(n) 133 | .map(|(stop, dist_sq)| (self.stop(stop.id), dist_sq.sqrt())) 134 | .collect() 135 | } 136 | 137 | fn stop_metadata(&'a self, stop: &Stop) -> gtfs_structures::Stop { 138 | let table = self 139 | .metadata_db 140 | .begin_read() 141 | .expect("Read failed") 142 | .open_table(STOP_METADATA_TABLE) 143 | .expect("Failed to open table"); 144 | 145 | let bytes = table 146 | .get(stop.id() as u64) 147 | .expect("DB error") 148 | .expect("Missing metadata for stop"); 149 | rmp_serde::from_slice(bytes.value()).expect("Deserialization failed") 150 | } 151 | 152 | fn trip_metadata(&'a self, trip: &Trip) -> TripMetadata { 153 | let table = self 154 | .metadata_db 155 | 
.begin_read() 156 | .expect("Read failed") 157 | .open_table(TRIP_METADATA_TABLE) 158 | .expect("Failed to open table"); 159 | 160 | let bytes = table 161 | .get(trip.trip_index as u64) 162 | .expect("DB error") 163 | .expect("Missing metadata for trip"); 164 | rmp_serde::from_slice(bytes.value()).expect("Deserialization failed") 165 | } 166 | 167 | fn route_shape(&'a self, route: &Route) -> Option> { 168 | let table = self 169 | .metadata_db 170 | .begin_read() 171 | .expect("Read failed") 172 | .open_table(ROUTE_SHAPE_TABLE) 173 | .expect("Failed to open table"); 174 | 175 | if let Some(bytes) = table.get(route.route_index as u64).expect("DB error") { 176 | rmp_serde::from_slice(bytes.value()).expect("Deserialization failed") 177 | } else { 178 | None 179 | } 180 | } 181 | } 182 | 183 | impl<'a> MmapTimetable<'a> { 184 | fn assemble( 185 | base_path: PathBuf, 186 | backing_routes: Pin, 187 | backing_route_stops: Pin, 188 | backing_route_trips: Pin, 189 | backing_stops: Pin, 190 | backing_stop_routes: Pin, 191 | backing_trip_stop_times: Pin, 192 | backing_transfer_index: Pin, 193 | backing_transfers: Pin, 194 | metadata_db: Database, 195 | ) -> Result, anyhow::Error> { 196 | let routes = unsafe { 197 | let s = cast_slice::(&backing_routes); 198 | slice::from_raw_parts(s.as_ptr(), s.len()) 199 | }; 200 | let route_stops = unsafe { 201 | let s = cast_slice::(&backing_route_stops); 202 | slice::from_raw_parts(s.as_ptr(), s.len()) 203 | }; 204 | let route_trips = unsafe { 205 | let s = cast_slice::(&backing_route_trips); 206 | slice::from_raw_parts(s.as_ptr(), s.len()) 207 | }; 208 | let stops = unsafe { 209 | let s = cast_slice::(&backing_stops); 210 | slice::from_raw_parts(s.as_ptr(), s.len()) 211 | }; 212 | let stop_routes = unsafe { 213 | let s = cast_slice::(&backing_stop_routes); 214 | slice::from_raw_parts(s.as_ptr(), s.len()) 215 | }; 216 | let trip_stop_times = unsafe { 217 | let s = cast_slice::(&backing_trip_stop_times); 218 | 
slice::from_raw_parts(s.as_ptr(), s.len()) 219 | }; 220 | let transfer_index = unsafe { 221 | let s = cast_slice::(&backing_transfer_index); 222 | slice::from_raw_parts(s.as_ptr(), s.len()) 223 | }; 224 | let transfers = unsafe { 225 | let s = cast_slice::(&backing_transfers); 226 | slice::from_raw_parts(s.as_ptr(), s.len()) 227 | }; 228 | let rtree = { 229 | RTree::bulk_load( 230 | stops 231 | .iter() 232 | .map(|stop| { 233 | let latlng: LatLng = s2::cellid::CellID(stop.s2cell).into(); 234 | let location_cartesian = 235 | lat_lng_to_cartesian(latlng.lat.deg(), latlng.lng.deg()); 236 | IndexedStop { 237 | coords: location_cartesian, 238 | id: stop.id(), 239 | } 240 | }) 241 | .collect(), 242 | ) 243 | }; 244 | 245 | let table = MmapTimetable { 246 | base_path, 247 | backing_routes, 248 | backing_route_stops, 249 | backing_route_trips, 250 | backing_stops, 251 | backing_stop_routes, 252 | backing_trip_stop_times, 253 | backing_transfer_index, 254 | backing_transfers, 255 | phantom: &PhantomData, 256 | 257 | rtree, 258 | routes_slice: routes, 259 | route_stops_slice: route_stops, 260 | route_trips_slice: route_trips, 261 | stops_slice: stops, 262 | stop_routes_slice: stop_routes, 263 | trip_stop_times_slice: trip_stop_times, 264 | transfer_index_slice: transfer_index, 265 | transfers_slice: transfers, 266 | 267 | metadata_db, 268 | }; 269 | Ok(table) 270 | } 271 | 272 | pub fn open(base_path: &PathBuf) -> Result, anyhow::Error> { 273 | debug!("Creating a new memory-mapped timetable. 
Opening files"); 274 | debug!("Opening routes."); 275 | let routes = File::open(base_path.join("routes"))?; 276 | debug!("Opening route stops."); 277 | let route_stops = File::open(base_path.join("route_stops"))?; 278 | debug!("Opening route trips."); 279 | let route_trips = File::open(base_path.join("route_trips"))?; 280 | debug!("Opening stops."); 281 | let stops = File::open(base_path.join("stops"))?; 282 | debug!("Opening stop routes."); 283 | let stop_routes = File::open(base_path.join("stop_routes"))?; 284 | debug!("Opening stop times."); 285 | let trip_stop_times = File::open(base_path.join("trip_stop_times"))?; 286 | debug!("Opening transfer index."); 287 | let transfer_index = File::open(base_path.join("transfer_index"))?; 288 | debug!("Opening transfers."); 289 | let transfers = File::open(base_path.join("transfers"))?; 290 | 291 | debug!("Opening metadata database"); 292 | let metadata_db = Database::open(base_path.join("metadata.db"))?; 293 | 294 | let page_bits = Some(21); 295 | 296 | debug!("mmapping"); 297 | let backing_routes = unsafe { MmapOptions::new().huge(page_bits).map(&routes)? }; 298 | let backing_route_stops = unsafe { MmapOptions::new().huge(page_bits).map(&route_stops)? }; 299 | let backing_route_trips = unsafe { MmapOptions::new().huge(page_bits).map(&route_trips)? }; 300 | let backing_stops = unsafe { MmapOptions::new().huge(page_bits).map(&stops)? }; 301 | let backing_stop_routes = unsafe { MmapOptions::new().huge(page_bits).map(&stop_routes)? }; 302 | let backing_trip_stop_times = 303 | unsafe { MmapOptions::new().huge(page_bits).map(&trip_stop_times)? }; 304 | let backing_transfer_index = 305 | unsafe { MmapOptions::new().huge(page_bits).map(&transfer_index)? }; 306 | let backing_transfers = unsafe { MmapOptions::new().huge(page_bits).map(&transfers)? 
}; 307 | 308 | MmapTimetable::assemble( 309 | base_path.clone(), 310 | Pin::new(backing_routes), 311 | Pin::new(backing_route_stops), 312 | Pin::new(backing_route_trips), 313 | Pin::new(backing_stops), 314 | Pin::new(backing_stop_routes), 315 | Pin::new(backing_trip_stop_times), 316 | Pin::new(backing_transfer_index), 317 | Pin::new(backing_transfers), 318 | metadata_db, 319 | ) 320 | } 321 | 322 | pub fn from_in_memory( 323 | in_memory_timetable: &InMemoryTimetableBuilder, 324 | base_path: &PathBuf, 325 | ) -> Result, anyhow::Error> { 326 | fs::create_dir_all(base_path)?; 327 | 328 | let in_memory_timetable = &in_memory_timetable.timetable; 329 | 330 | { 331 | // Drop all of these values before attempting to create a timetable from the raw data. 332 | { 333 | let routes = File::create(base_path.join("routes"))?; 334 | let route_stops = File::create(base_path.join("route_stops"))?; 335 | let route_trips = File::create(base_path.join("route_trips"))?; 336 | let stops = File::create(base_path.join("stops"))?; 337 | let stop_routes = File::create(base_path.join("stop_routes"))?; 338 | let trip_stop_times = File::create(base_path.join("trip_stop_times"))?; 339 | let _ = File::create(base_path.join("transfer_index"))?; 340 | let _ = File::create(base_path.join("transfers"))?; 341 | 342 | routes.set_len((size_of::() * in_memory_timetable.routes().len()) as u64)?; 343 | route_stops.set_len( 344 | (size_of::() * in_memory_timetable.route_stops().len()) as u64, 345 | )?; 346 | route_trips.set_len( 347 | (size_of::() * in_memory_timetable.route_trips().len()) as u64, 348 | )?; 349 | stops.set_len((size_of::() * in_memory_timetable.stops().len()) as u64)?; 350 | stop_routes.set_len( 351 | (size_of::() * in_memory_timetable.stop_routes().len()) as u64, 352 | )?; 353 | trip_stop_times.set_len( 354 | (size_of::() * in_memory_timetable.trip_stop_times().len()) 355 | as u64, 356 | )?; 357 | } 358 | 359 | let routes = File::options() 360 | .write(true) 361 | .read(true) 362 | 
.open(base_path.join("routes"))?; 363 | let route_stops = File::options() 364 | .write(true) 365 | .read(true) 366 | .open(base_path.join("route_stops"))?; 367 | let route_trips = File::options() 368 | .write(true) 369 | .read(true) 370 | .open(base_path.join("route_trips"))?; 371 | let stops = File::options() 372 | .write(true) 373 | .read(true) 374 | .open(base_path.join("stops"))?; 375 | let stop_routes = File::options() 376 | .write(true) 377 | .read(true) 378 | .open(base_path.join("stop_routes"))?; 379 | let trip_stop_times = File::options() 380 | .write(true) 381 | .read(true) 382 | .open(base_path.join("trip_stop_times"))?; 383 | 384 | let mut backing_routes = unsafe { MmapOptions::new().map_mut(&routes)? }; 385 | let mut backing_route_stops = unsafe { MmapOptions::new().map_mut(&route_stops)? }; 386 | let mut backing_route_trips = unsafe { MmapOptions::new().map_mut(&route_trips)? }; 387 | let mut backing_stops = unsafe { MmapOptions::new().map_mut(&stops)? }; 388 | let mut backing_stop_routes = unsafe { MmapOptions::new().map_mut(&stop_routes)? }; 389 | let mut backing_trip_stop_times = 390 | unsafe { MmapOptions::new().map_mut(&trip_stop_times)? 
}; 391 | 392 | backing_routes.copy_from_slice(cast_slice(in_memory_timetable.routes())); 393 | backing_route_stops.copy_from_slice(cast_slice(in_memory_timetable.route_stops())); 394 | backing_route_trips.copy_from_slice(cast_slice(in_memory_timetable.route_trips())); 395 | backing_stops.copy_from_slice(cast_slice(in_memory_timetable.stops())); 396 | backing_stop_routes.copy_from_slice(cast_slice(in_memory_timetable.stop_routes())); 397 | backing_trip_stop_times 398 | .copy_from_slice(cast_slice(in_memory_timetable.trip_stop_times())); 399 | 400 | let metadata_db = Database::create(base_path.join("metadata.db"))?; 401 | { 402 | let write = metadata_db.begin_write()?; 403 | { 404 | let mut table = write.open_table(STOP_METADATA_TABLE)?; 405 | for stop in in_memory_timetable.stops() { 406 | let bytes = rmp_serde::to_vec(&in_memory_timetable.stop_metadata(stop))?; 407 | table.insert(stop.id() as u64, bytes.as_slice())?; 408 | } 409 | } 410 | write.commit()?; 411 | } 412 | { 413 | let write = metadata_db.begin_write()?; 414 | { 415 | let mut table = write.open_table(TRIP_METADATA_TABLE)?; 416 | for trip in in_memory_timetable.route_trips() { 417 | let bytes = rmp_serde::to_vec(&in_memory_timetable.trip_metadata(trip))?; 418 | table.insert(trip.trip_index as u64, bytes.as_slice())?; 419 | } 420 | } 421 | write.commit()?; 422 | } 423 | { 424 | let write = metadata_db.begin_write()?; 425 | { 426 | let mut table = write.open_table(ROUTE_SHAPE_TABLE)?; 427 | for route in in_memory_timetable.routes() { 428 | let bytes = rmp_serde::to_vec(&in_memory_timetable.route_shape(route))?; 429 | table.insert(route.route_index as u64, bytes.as_slice())?; 430 | } 431 | } 432 | write.commit()?; 433 | } 434 | info!("Done writing timetable"); 435 | } 436 | MmapTimetable::open(base_path) 437 | } 438 | 439 | pub async fn concatenate<'b>( 440 | timetables: &[MmapTimetable<'b>], 441 | base_path: &PathBuf, 442 | valhalla_tile_path: &PathBuf, 443 | ) -> MmapTimetable<'b> { 444 | { 445 | let 
total_routes: usize = timetables.iter().map(|tt| tt.routes().len()).sum(); 446 | let total_route_stops: usize = timetables.iter().map(|tt| tt.route_stops().len()).sum(); 447 | let total_route_trips: usize = timetables.iter().map(|tt| tt.route_trips().len()).sum(); 448 | let total_stops: usize = timetables.iter().map(|tt| tt.stops().len()).sum(); 449 | let total_stop_routes: usize = timetables.iter().map(|tt| tt.stop_routes().len()).sum(); 450 | let total_trip_stop_times: usize = 451 | timetables.iter().map(|tt| tt.trip_stop_times().len()).sum(); 452 | { 453 | let routes = File::create(base_path.join("routes")).unwrap(); 454 | let route_stops = File::create(base_path.join("route_stops")).unwrap(); 455 | let route_trips = File::create(base_path.join("route_trips")).unwrap(); 456 | let stops = File::create(base_path.join("stops")).unwrap(); 457 | let stop_routes = File::create(base_path.join("stop_routes")).unwrap(); 458 | let trip_stop_times = File::create(base_path.join("trip_stop_times")).unwrap(); 459 | 460 | routes 461 | .set_len((size_of::() * total_routes) as u64) 462 | .unwrap(); 463 | route_stops 464 | .set_len((size_of::() * total_route_stops) as u64) 465 | .unwrap(); 466 | route_trips 467 | .set_len((size_of::() * total_route_trips) as u64) 468 | .unwrap(); 469 | stops 470 | .set_len((size_of::() * total_stops) as u64) 471 | .unwrap(); 472 | stop_routes 473 | .set_len((size_of::() * total_stop_routes) as u64) 474 | .unwrap(); 475 | trip_stop_times 476 | .set_len((size_of::() * total_trip_stop_times) as u64) 477 | .unwrap(); 478 | } 479 | 480 | let routes = File::options() 481 | .write(true) 482 | .read(true) 483 | .open(base_path.join("routes")) 484 | .unwrap(); 485 | let route_stops = File::options() 486 | .write(true) 487 | .read(true) 488 | .open(base_path.join("route_stops")) 489 | .unwrap(); 490 | let route_trips = File::options() 491 | .write(true) 492 | .read(true) 493 | .open(base_path.join("route_trips")) 494 | .unwrap(); 495 | let stops = 
File::options() 496 | .write(true) 497 | .read(true) 498 | .open(base_path.join("stops")) 499 | .unwrap(); 500 | let stop_routes = File::options() 501 | .write(true) 502 | .read(true) 503 | .open(base_path.join("stop_routes")) 504 | .unwrap(); 505 | let trip_stop_times = File::options() 506 | .write(true) 507 | .read(true) 508 | .open(base_path.join("trip_stop_times")) 509 | .unwrap(); 510 | let _ = File::create(base_path.join("transfer_index")).unwrap(); 511 | let _ = File::create(base_path.join("transfers")).unwrap(); 512 | 513 | let mut backing_routes = unsafe { MmapOptions::new().map_mut(&routes).unwrap() }; 514 | let mut backing_route_stops = 515 | unsafe { MmapOptions::new().map_mut(&route_stops).unwrap() }; 516 | let mut backing_route_trips = 517 | unsafe { MmapOptions::new().map_mut(&route_trips).unwrap() }; 518 | let mut backing_stops = unsafe { MmapOptions::new().map_mut(&stops).unwrap() }; 519 | let mut backing_stop_routes = 520 | unsafe { MmapOptions::new().map_mut(&stop_routes).unwrap() }; 521 | let mut backing_trip_stop_times = 522 | unsafe { MmapOptions::new().map_mut(&trip_stop_times).unwrap() }; 523 | 524 | let mut route_cursor = 0usize; 525 | let mut route_stop_cursor = 0usize; 526 | let mut route_trip_cursor = 0usize; 527 | let mut stop_cursor = 0usize; 528 | let mut stop_route_cursor = 0usize; 529 | let mut trip_stop_time_cursor = 0usize; 530 | 531 | let route_slice: &mut [Route] = cast_slice_mut(&mut backing_routes); 532 | let route_stop_slice: &mut [RouteStop] = cast_slice_mut(&mut backing_route_stops); 533 | let route_trip_slice: &mut [Trip] = cast_slice_mut(&mut backing_route_trips); 534 | let stop_slice: &mut [Stop] = cast_slice_mut(&mut backing_stops); 535 | let stop_route_slice: &mut [StopRoute] = cast_slice_mut(&mut backing_stop_routes); 536 | let trip_stop_time_slice: &mut [TripStopTime] = 537 | cast_slice_mut(&mut backing_trip_stop_times); 538 | 539 | { 540 | // Make mutable copies of the slices. 
541 | for tt in timetables { 542 | let route_slice = 543 | &mut route_slice[route_cursor..route_cursor + tt.routes().len()]; 544 | let route_stop_slice = &mut route_stop_slice 545 | [route_stop_cursor..route_stop_cursor + tt.route_stops().len()]; 546 | let route_trip_slice = &mut route_trip_slice 547 | [route_trip_cursor..route_trip_cursor + tt.route_trips().len()]; 548 | let stop_slice = &mut stop_slice[stop_cursor..stop_cursor + tt.stops().len()]; 549 | let stop_route_slice = &mut stop_route_slice 550 | [stop_route_cursor..stop_route_cursor + tt.stop_routes().len()]; 551 | let trip_stop_time_slice = &mut trip_stop_time_slice 552 | [trip_stop_time_cursor..trip_stop_time_cursor + tt.trip_stop_times().len()]; 553 | 554 | route_slice.copy_from_slice(tt.routes()); 555 | route_stop_slice.copy_from_slice(tt.route_stops()); 556 | route_trip_slice.copy_from_slice(tt.route_trips()); 557 | stop_slice.copy_from_slice(tt.stops()); 558 | stop_route_slice.copy_from_slice(tt.stop_routes()); 559 | trip_stop_time_slice.copy_from_slice(tt.trip_stop_times()); 560 | 561 | for route in route_slice { 562 | route.first_route_stop += route_stop_cursor; 563 | route.first_route_trip += route_trip_cursor; 564 | route.route_index += route_cursor; 565 | } 566 | for route_stop in route_stop_slice { 567 | route_stop.route_index += route_cursor; 568 | route_stop.stop_index += stop_cursor; 569 | } 570 | for route_trip in route_trip_slice { 571 | route_trip.trip_index += route_trip_cursor; 572 | route_trip.route_index += route_cursor; 573 | route_trip.first_trip_stop_time += trip_stop_time_cursor; 574 | route_trip.last_trip_stop_time += trip_stop_time_cursor; 575 | } 576 | for stop in stop_slice { 577 | stop.stop_index += stop_cursor; 578 | stop.first_stop_route_index += stop_route_cursor; 579 | } 580 | for stop_route in stop_route_slice { 581 | stop_route.route_index += route_cursor; 582 | } 583 | for trip_stop_time in trip_stop_time_slice { 584 | trip_stop_time.trip_index += route_trip_cursor; 
585 | } 586 | 587 | route_cursor += tt.routes().len(); 588 | route_stop_cursor += tt.route_stops().len(); 589 | route_trip_cursor += tt.route_trips().len(); 590 | stop_cursor += tt.stops().len(); 591 | stop_route_cursor += tt.stop_routes().len(); 592 | trip_stop_time_cursor += tt.trip_stop_times().len(); 593 | } 594 | } 595 | let metadata_db = Database::create(base_path.join("metadata.db")).unwrap(); 596 | { 597 | let write = metadata_db.begin_write().unwrap(); 598 | { 599 | let mut table = write.open_table(STOP_METADATA_TABLE).unwrap(); 600 | let mut cursor = 0usize; 601 | for tt in timetables { 602 | for stop in tt.stops() { 603 | let bytes = rmp_serde::to_vec(&tt.stop_metadata(stop)).unwrap(); 604 | table 605 | .insert((cursor + stop.id()) as u64, bytes.as_slice()) 606 | .unwrap(); 607 | } 608 | cursor += tt.stops().len(); 609 | } 610 | } 611 | write.commit().unwrap(); 612 | } 613 | { 614 | let write = metadata_db.begin_write().unwrap(); 615 | { 616 | let mut table = write.open_table(TRIP_METADATA_TABLE).unwrap(); 617 | let mut cursor = 0usize; 618 | for tt in timetables { 619 | for trip in tt.route_trips() { 620 | let bytes = rmp_serde::to_vec(&tt.trip_metadata(trip)).unwrap(); 621 | table 622 | .insert((cursor + trip.trip_index) as u64, bytes.as_slice()) 623 | .unwrap(); 624 | } 625 | cursor += tt.route_trips().len(); 626 | } 627 | } 628 | write.commit().unwrap(); 629 | } 630 | { 631 | let write = metadata_db.begin_write().unwrap(); 632 | { 633 | let mut table = write.open_table(ROUTE_SHAPE_TABLE).unwrap(); 634 | let mut cursor = 0usize; 635 | for tt in timetables { 636 | for route in tt.routes() { 637 | let bytes = rmp_serde::to_vec(&tt.route_shape(route)).unwrap(); 638 | table 639 | .insert((cursor + route.route_index) as u64, bytes.as_slice()) 640 | .unwrap(); 641 | } 642 | cursor += tt.routes().len(); 643 | } 644 | } 645 | write.commit().unwrap(); 646 | } 647 | } 648 | let mut tt = MmapTimetable::open(base_path).unwrap(); 649 | 
tt.calculate_transfers(valhalla_tile_path).await.unwrap(); 650 | tt 651 | } 652 | 653 | pub(crate) async fn calculate_transfers( 654 | &mut self, 655 | valhalla_tile_path: &PathBuf, 656 | ) -> Result<(), Error> { 657 | { 658 | let mut rtree = RTree::::new(); 659 | 660 | for (stop_id, stop) in self.stops().iter().enumerate() { 661 | let latlng: LatLng = s2::cellid::CellID(stop.s2cell).into(); 662 | let location_cartesian = lat_lng_to_cartesian(latlng.lat.deg(), latlng.lng.deg()); 663 | rtree.insert(IndexedStop { 664 | coords: location_cartesian, 665 | id: stop_id, 666 | }); 667 | } 668 | self.rtree = rtree; 669 | } 670 | assert_eq!(self.stops().len(), self.rtree.size()); 671 | 672 | info!("Opening transfer graph"); 673 | let transfer_graph = Arc::new( 674 | TransferGraph::>::read_from_dir( 675 | valhalla_tile_path.clone(), 676 | Arc::new(redb::Database::open( 677 | valhalla_tile_path.join("graph_metadata.db"), 678 | )?), 679 | )?, 680 | ); 681 | 682 | info!("Calculating transfer times"); 683 | let transfers: Vec> = self 684 | .stops() 685 | .par_iter() 686 | .map_with( 687 | TransferGraphSearcher::new(transfer_graph.clone()), 688 | |searcher, from_stop| { 689 | self.calculate_transfer_matrix(&transfer_graph, searcher, from_stop) 690 | }, 691 | ) 692 | .collect(); 693 | 694 | let transfer_index_file = File::options() 695 | .write(true) 696 | .read(true) 697 | .create(true) 698 | .open(&self.base_path.join("transfer_index"))?; 699 | transfer_index_file 700 | .set_len((size_of::() * transfers.len()) as u64) 701 | .unwrap(); 702 | let transfer_file = File::options() 703 | .write(true) 704 | .read(true) 705 | .create(true) 706 | .open(&self.base_path.join("transfers"))?; 707 | transfer_file 708 | .set_len( 709 | transfers 710 | .iter() 711 | .map(|t| size_of::() * t.len()) 712 | .sum::() as u64, 713 | ) 714 | .unwrap(); 715 | 716 | let mut backing_transfer_index_mut = 717 | unsafe { MmapMut::map_mut(&transfer_index_file).unwrap() }; 718 | let mut backing_transfers_mut = 
unsafe { MmapMut::map_mut(&transfer_file).unwrap() }; 719 | 720 | let out_transfer_index = unsafe { 721 | let s = cast_slice_mut::(&mut backing_transfer_index_mut); 722 | slice::from_raw_parts_mut(s.as_mut_ptr(), s.len()) 723 | }; 724 | let out_transfers = unsafe { 725 | let s = cast_slice_mut::(&mut backing_transfers_mut); 726 | slice::from_raw_parts_mut(s.as_mut_ptr(), s.len()) 727 | }; 728 | 729 | let mut total_transfers_processed = 0; 730 | for (transfer_chunk_idx, transfers) in transfers.iter().enumerate() { 731 | out_transfer_index[transfer_chunk_idx] = total_transfers_processed; 732 | for transfer in transfers { 733 | out_transfers[total_transfers_processed] = *transfer; 734 | total_transfers_processed += 1; 735 | } 736 | } 737 | Ok(()) 738 | } 739 | 740 | fn generate_transfer_candidates(&self, stop: &Stop) -> Vec<&Stop> { 741 | let latlng = stop.location(); 742 | let mut transfer_candidates = vec![]; 743 | for (count, (to_stop, dist_sq)) in self 744 | .rtree 745 | .nearest_neighbor_iter_with_distance_2(&lat_lng_to_cartesian( 746 | latlng.lat.deg(), 747 | latlng.lng.deg(), 748 | )) 749 | .enumerate() 750 | { 751 | let dist = dist_sq.sqrt(); 752 | if dist > 1000f64 || count > 20 { 753 | break; 754 | } 755 | transfer_candidates.push(self.stop(to_stop.id)); 756 | } 757 | transfer_candidates 758 | } 759 | 760 | fn calculate_transfer_matrix>( 761 | &self, 762 | graph: &TransferGraph, 763 | search_context: &mut TransferGraphSearcher, 764 | stop: &Stop, 765 | ) -> Vec { 766 | let transfer_candidates = self.generate_transfer_candidates(stop); 767 | transfer_candidates 768 | .iter() 769 | .filter_map(|to_stop| { 770 | let transfer_time = graph 771 | .transfer_distance_mm( 772 | search_context, 773 | &Self::location_to_coords(&stop.location()), 774 | &Self::location_to_coords(&to_stop.location()), 775 | ) 776 | .ok()? 
777 | / WALK_SPEED_MM_PER_SECOND; 778 | Some(Transfer { 779 | to: to_stop.id(), 780 | from: stop.id(), 781 | time: transfer_time, 782 | }) 783 | }) 784 | .collect() 785 | } 786 | 787 | fn location_to_coords(location: &LatLng) -> Coord { 788 | Coord { 789 | x: location.lng.deg(), 790 | y: location.lat.deg(), 791 | } 792 | } 793 | } 794 | -------------------------------------------------------------------------------- /solari/src/raptor/timetable/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod in_memory; 2 | pub mod mmap; 3 | 4 | use std::{time::UNIX_EPOCH, u32}; 5 | 6 | use bytemuck::{Pod, Zeroable}; 7 | use chrono::{DateTime, Days, NaiveDate, NaiveDateTime, NaiveTime}; 8 | 9 | use chrono_tz::Tz; 10 | use redb::TableDefinition; 11 | use rstar::RTree; 12 | use s2::latlng::LatLng; 13 | use serde::{Deserialize, Serialize}; 14 | 15 | use crate::spatial::IndexedStop; 16 | 17 | static DAY_SECONDS: u32 = 86_400; 18 | const STOP_METADATA_TABLE: TableDefinition = TableDefinition::new("stop_metadata"); 19 | const TRIP_METADATA_TABLE: TableDefinition = TableDefinition::new("trip_metadata"); 20 | const ROUTE_SHAPE_TABLE: TableDefinition = TableDefinition::new("route_shapes"); 21 | 22 | pub trait Timetable<'a> { 23 | fn route(&'a self, route_id: usize) -> &'a Route; 24 | fn stop(&'a self, stop_id: usize) -> &'a Stop; 25 | fn stop_count(&self) -> usize; 26 | 27 | fn stops(&'a self) -> &'a [Stop]; 28 | fn routes(&'a self) -> &'a [Route]; 29 | fn stop_routes(&'a self) -> &'a [StopRoute]; 30 | fn route_stops(&'a self) -> &'a [RouteStop]; 31 | fn route_trips(&'a self) -> &'a [Trip]; 32 | fn trip_stop_times(&'a self) -> &'a [TripStopTime]; 33 | fn transfers(&'a self) -> &'a [Transfer]; 34 | fn transfer_index(&'a self) -> &'a [usize]; 35 | fn transfers_from(&'a self, stop_id: usize) -> &'a [Transfer]; 36 | fn stop_index_copy(&'a self) -> RTree; 37 | fn nearest_stops(&'a self, lat: f64, lng: f64, n: usize) -> Vec<(&'a Stop, f64)>; 38 
| 39 | fn stop_metadata(&'a self, stop: &Stop) -> gtfs_structures::Stop; 40 | fn trip_metadata(&'a self, trip: &Trip) -> TripMetadata; 41 | 42 | fn route_shape(&'a self, route: &Route) -> Option>; 43 | } 44 | 45 | #[derive( 46 | Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Copy, Pod, Zeroable, Serialize, Deserialize, 47 | )] 48 | #[repr(C)] 49 | pub struct Stop { 50 | stop_index: usize, 51 | s2cell: u64, 52 | first_stop_route_index: usize, 53 | } 54 | 55 | impl<'a> Stop { 56 | pub fn stop_routes(&self, timetable: &'a dyn Timetable<'a>) -> &'a [StopRoute] { 57 | let range_end = if self.stop_index == timetable.stops().len() - 1 { 58 | timetable.stop_routes().len() 59 | } else { 60 | timetable.stops()[self.stop_index + 1].first_stop_route_index 61 | }; 62 | &timetable.stop_routes()[self.first_stop_route_index..range_end] 63 | } 64 | 65 | #[inline] 66 | pub fn id(&self) -> usize { 67 | self.stop_index 68 | } 69 | 70 | #[inline] 71 | pub fn location(&self) -> LatLng { 72 | s2::cellid::CellID(self.s2cell).into() 73 | } 74 | 75 | pub fn metadata(&self, timetable: &'a dyn Timetable<'a>) -> gtfs_structures::Stop { 76 | timetable.stop_metadata(self).clone() 77 | } 78 | } 79 | 80 | #[derive(Debug, Clone, PartialEq, PartialOrd, Copy, Pod, Zeroable)] 81 | #[repr(C)] 82 | pub struct RouteStop { 83 | route_index: usize, 84 | stop_index: usize, 85 | stop_seq: u32, 86 | distance_along_route: f32, 87 | } 88 | 89 | impl<'a> RouteStop { 90 | #[inline] 91 | pub fn route(&self, timetable: &'a dyn Timetable<'a>) -> &'a Route { 92 | &timetable.routes()[self.route_index] 93 | } 94 | 95 | #[inline] 96 | pub fn stop_seq(&self) -> usize { 97 | self.stop_seq as usize 98 | } 99 | 100 | #[inline] 101 | pub fn id(&self) -> usize { 102 | self.stop_index 103 | } 104 | 105 | #[inline] 106 | pub fn stop(&self, timetable: &'a dyn Timetable<'a>) -> &'a Stop { 107 | &timetable.stops()[self.stop_index] 108 | } 109 | 110 | #[inline] 111 | pub fn distance_along_route(&self) -> f32 { 112 | 
self.distance_along_route 113 | } 114 | } 115 | 116 | #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Copy, Pod, Zeroable)] 117 | #[repr(C)] 118 | pub struct StopId(pub usize); 119 | 120 | #[derive( 121 | Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Copy, Pod, Zeroable, Serialize, Deserialize, 122 | )] 123 | #[repr(C)] 124 | pub struct Trip { 125 | trip_index: usize, 126 | route_index: usize, 127 | first_trip_stop_time: usize, 128 | last_trip_stop_time: usize, 129 | } 130 | 131 | impl<'a> Trip { 132 | pub fn stop_times(&self, timetable: &'a dyn Timetable<'a>) -> &'a [TripStopTime] { 133 | &timetable.trip_stop_times()[self.first_trip_stop_time..self.last_trip_stop_time] 134 | } 135 | 136 | #[inline] 137 | pub fn route(&self, timetable: &'a dyn Timetable<'a>) -> Route { 138 | timetable.routes()[self.route_index].clone() 139 | } 140 | 141 | pub fn metadata(&self, timetable: &'a dyn Timetable<'a>) -> TripMetadata { 142 | timetable.trip_metadata(self).clone() 143 | } 144 | } 145 | 146 | #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] 147 | pub struct TripMetadata { 148 | pub headsign: Option, 149 | pub route_name: Option, 150 | pub agency_name: Option, 151 | } 152 | 153 | #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Copy, Pod, Zeroable)] 154 | #[repr(C)] 155 | pub struct Route { 156 | route_index: usize, 157 | first_route_stop: usize, 158 | pub(crate) first_route_trip: usize, 159 | } 160 | 161 | impl<'a> Route { 162 | pub fn route_stops(&self, timetable: &'a dyn Timetable<'a>) -> &'a [RouteStop] { 163 | let range_end = if self.route_index == timetable.routes().len() - 1 { 164 | timetable.route_stops().len() 165 | } else { 166 | timetable.routes()[self.route_index + 1].first_route_stop 167 | }; 168 | &timetable.route_stops()[self.first_route_stop..range_end] 169 | } 170 | 171 | pub fn route_trips(&self, timetable: &'a dyn Timetable<'a>) -> &'a [Trip] { 172 | let range_end = if self.route_index == 
timetable.routes().len() - 1 { 173 | timetable.route_trips().len() 174 | } else { 175 | timetable.routes()[self.route_index + 1].first_route_trip 176 | }; 177 | &timetable.route_trips()[self.first_route_trip..range_end] 178 | } 179 | 180 | #[inline] 181 | pub fn id(&self) -> usize { 182 | self.route_index 183 | } 184 | } 185 | 186 | #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Copy, Pod, Zeroable)] 187 | #[repr(C)] 188 | pub struct StopRoute { 189 | route_index: usize, 190 | stop_seq: usize, 191 | } 192 | 193 | impl<'a> StopRoute { 194 | #[inline] 195 | pub fn route(&'a self, timetable: &'a dyn Timetable<'a>) -> &'a Route { 196 | &timetable.routes()[self.route_index] 197 | } 198 | 199 | #[inline] 200 | pub fn route_id(&self) -> usize { 201 | self.route_index 202 | } 203 | 204 | #[inline] 205 | pub fn stop_seq(&self) -> usize { 206 | self.stop_seq 207 | } 208 | } 209 | 210 | #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Copy, Pod, Zeroable)] 211 | #[repr(C)] 212 | pub struct TripStopTime { 213 | pub(crate) trip_index: usize, 214 | pub(crate) route_stop_seq: usize, 215 | arrival_time: u32, 216 | departure_time: u32, 217 | } 218 | 219 | impl<'a> TripStopTime { 220 | #[inline] 221 | pub fn arrival(&self) -> Time { 222 | Time { 223 | epoch_seconds: self.arrival_time, 224 | } 225 | } 226 | 227 | #[inline] 228 | pub fn departure(&self) -> Time { 229 | Time { 230 | epoch_seconds: self.departure_time, 231 | } 232 | } 233 | 234 | pub(crate) fn new( 235 | trip_index: usize, 236 | route_stop_seq: usize, 237 | arrival_time: DateTime, 238 | departure_time: DateTime, 239 | ) -> TripStopTime { 240 | TripStopTime { 241 | trip_index, 242 | route_stop_seq, 243 | arrival_time: arrival_time.timestamp() as u32, 244 | departure_time: departure_time.timestamp() as u32, 245 | } 246 | } 247 | 248 | pub(crate) fn marked() -> TripStopTime { 249 | TripStopTime { 250 | trip_index: usize::MAX, 251 | route_stop_seq: usize::MAX, 252 | arrival_time: u32::MAX, 253 | 
departure_time: u32::MAX, 254 | } 255 | } 256 | 257 | #[inline] 258 | pub fn route_stop(&self, timetable: &'a dyn Timetable<'a>) -> &'a RouteStop { 259 | let route = &timetable.route_trips()[self.trip_index].route(timetable); 260 | &timetable.route_stops()[route.first_route_stop + self.route_stop_seq] 261 | } 262 | } 263 | 264 | #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Pod, Zeroable)] 265 | #[repr(C)] 266 | pub struct Transfer { 267 | to: usize, 268 | from: usize, 269 | time: u64, 270 | } 271 | 272 | impl<'a> Transfer { 273 | pub fn all_transfers(from: &Stop, timetable: &'a dyn Timetable<'a>) -> &'a [Transfer] { 274 | let from = from.stop_index; 275 | let range_end = if from == timetable.transfer_index().len() - 1 { 276 | timetable.transfers().len() 277 | } else { 278 | timetable.transfer_index()[from + 1] 279 | }; 280 | &timetable.transfers()[timetable.transfer_index()[from]..range_end] 281 | } 282 | 283 | #[inline] 284 | pub fn to(&self, timetable: &'a dyn Timetable<'a>) -> &'a Stop { 285 | timetable.stop(self.to) 286 | } 287 | 288 | #[inline] 289 | pub fn time_seconds(&self) -> u32 { 290 | self.time as u32 291 | } 292 | } 293 | 294 | #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Copy, Pod, Zeroable)] 295 | #[repr(C)] 296 | pub struct Time { 297 | epoch_seconds: u32, 298 | } 299 | 300 | impl Time { 301 | pub fn naive_date_time(&self, service_day: NaiveDate) -> NaiveDateTime { 302 | NaiveDate::and_time( 303 | &service_day 304 | .checked_add_days(Days::new((self.epoch_seconds / DAY_SECONDS) as u64)) 305 | .unwrap(), 306 | NaiveTime::from_num_seconds_from_midnight_opt(self.epoch_seconds % DAY_SECONDS, 0) 307 | .unwrap(), 308 | ) 309 | } 310 | 311 | pub fn plus_seconds(&self, seconds: u32) -> Time { 312 | Time { 313 | epoch_seconds: self 314 | .epoch_seconds 315 | .checked_add(seconds) 316 | .unwrap_or(self.epoch_seconds), 317 | } 318 | } 319 | 320 | pub fn epoch_seconds(&self) -> u32 { 321 | return self.epoch_seconds; 322 | } 
323 | 324 | pub fn from_epoch_seconds(seconds: u32) -> Time { 325 | Time { 326 | epoch_seconds: seconds, 327 | } 328 | } 329 | 330 | pub fn epoch() -> Time { 331 | Time { epoch_seconds: 0 } 332 | } 333 | 334 | pub fn now() -> Time { 335 | Time { 336 | epoch_seconds: UNIX_EPOCH.elapsed().unwrap().as_secs() as u32, 337 | } 338 | } 339 | } 340 | 341 | #[derive(Debug, Clone, Serialize, Deserialize)] 342 | pub struct ShapeCoordinate { 343 | lat: f64, 344 | lon: f64, 345 | distance_along_shape: Option, 346 | } 347 | 348 | impl ShapeCoordinate { 349 | pub fn lat(&self) -> f64 { 350 | self.lat 351 | } 352 | 353 | pub fn lon(&self) -> f64 { 354 | self.lon 355 | } 356 | 357 | pub fn distance_along_shape(&self) -> Option { 358 | self.distance_along_shape 359 | } 360 | } 361 | 362 | #[cfg(test)] 363 | mod test { 364 | use chrono::{NaiveDate, NaiveTime}; 365 | 366 | use super::Time; 367 | 368 | #[test] 369 | fn time_with_24hr_service_day() { 370 | let time = Time { 371 | epoch_seconds: 12 * 60 * 60, 372 | }; 373 | let date_time = time.naive_date_time(NaiveDate::from_ymd_opt(2020, 1, 1).unwrap()); 374 | assert_eq!( 375 | date_time, 376 | NaiveDate::from_ymd_opt(2020, 1, 1) 377 | .unwrap() 378 | .and_time(NaiveTime::from_hms_opt(12, 0, 0).unwrap()) 379 | ); 380 | } 381 | #[test] 382 | fn time_with_25hr_service_day() { 383 | let time = Time { 384 | epoch_seconds: 25 * 60 * 60, 385 | }; 386 | let date_time = time.naive_date_time(NaiveDate::from_ymd_opt(2020, 1, 1).unwrap()); 387 | assert_eq!( 388 | date_time, 389 | NaiveDate::from_ymd_opt(2020, 1, 2) 390 | .unwrap() 391 | .and_time(NaiveTime::from_hms_opt(1, 0, 0).unwrap()) 392 | ); 393 | } 394 | } 395 | -------------------------------------------------------------------------------- /solari/src/route.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | cell::RefCell, 3 | collections::{HashMap, HashSet}, 4 | path::PathBuf, 5 | sync::Arc, 6 | }; 7 | 8 | use geo::ClosestPoint; 9 | 
use geo_types::{Coord, Line, LineString, Point}; 10 | use log::{debug, info}; 11 | use s2::latlng::LatLng; 12 | use serde::Serialize; 13 | use solari_geomath::EARTH_RADIUS_APPROX; 14 | use solari_spatial::SphereIndexMmap; 15 | use solari_transfers::{fast_paths::FastGraphStatic, TransferGraph, TransferGraphSearcher}; 16 | use time::OffsetDateTime; 17 | 18 | use crate::{ 19 | api::{ 20 | response::{ResponseStatus, SolariResponse}, 21 | SolariItinerary, SolariLeg, 22 | }, 23 | raptor::timetable::TripStopTime, 24 | spatial::FAKE_WALK_SPEED_SECONDS_PER_METER, 25 | }; 26 | 27 | use crate::raptor::timetable::{Route, RouteStop, Stop, Time, Timetable, Trip}; 28 | 29 | pub struct Router<'a, T: Timetable<'a>> { 30 | timetable: T, 31 | transfer_graph: Arc, SphereIndexMmap<'a, usize>>>, 32 | } 33 | 34 | impl<'a, T: Timetable<'a>> Router<'a, T> { 35 | pub fn new(timetable: T, transfer_graph_path: PathBuf) -> Result, anyhow::Error> { 36 | info!("Opening transfer graph metadata db."); 37 | let database = Arc::new(redb::Database::open( 38 | transfer_graph_path.join("graph_metadata.db"), 39 | )?); 40 | info!("Opening transfer graph."); 41 | let transfer_graph = Arc::new( 42 | TransferGraph::>::read_from_dir( 43 | transfer_graph_path.clone(), 44 | database, 45 | )?, 46 | ); 47 | info!("Built router"); 48 | Ok(Router { 49 | timetable, 50 | transfer_graph, 51 | }) 52 | } 53 | 54 | pub fn nearest_stops( 55 | &'a self, 56 | location: LatLng, 57 | max_stops: Option, 58 | max_distance: Option, 59 | ) -> Vec<&'a Stop> { 60 | let mut stops: Vec<&'a Stop> = vec![]; 61 | assert!(max_stops.is_some() || max_distance.is_some()); 62 | for (count, (stop, dist_sq)) in self 63 | .timetable 64 | .nearest_stops(location.lat.deg(), location.lng.deg(), 100) 65 | .iter() 66 | .enumerate() 67 | { 68 | if let Some(max_stops) = max_stops { 69 | if count >= max_stops { 70 | break; 71 | } 72 | } 73 | if let Some(max_distance) = max_distance { 74 | if *dist_sq > max_distance { 75 | break; 76 | } 77 | } 78 | 
stops.push(self.timetable.stop(stop.id())); 79 | } 80 | stops 81 | } 82 | 83 | pub async fn route( 84 | &'a self, 85 | route_start_time: Time, 86 | start_location: LatLng, 87 | target_location: LatLng, 88 | max_distance_meters: Option, 89 | max_candidate_stops_each_side: Option, 90 | max_transfers: Option, 91 | max_transfer_delta: Option, 92 | ) -> SolariResponse { 93 | let start_stops = self.nearest_stops( 94 | start_location, 95 | max_candidate_stops_each_side, 96 | max_distance_meters, 97 | ); 98 | let target_stops = self.nearest_stops( 99 | target_location, 100 | max_candidate_stops_each_side, 101 | max_distance_meters, 102 | ); 103 | 104 | let target_costs: Vec<(usize, u32)> = target_stops 105 | .iter() 106 | .map(|stop| { 107 | ( 108 | stop.id(), 109 | (FAKE_WALK_SPEED_SECONDS_PER_METER 110 | * stop.location().distance(&target_location).rad() 111 | * EARTH_RADIUS_APPROX) as u32, 112 | ) 113 | }) 114 | .collect(); 115 | 116 | let mut context = RouterContext { 117 | best_times_global: vec![None; self.timetable.stop_count()], 118 | best_times_per_round: Vec::new(), 119 | marked_stops: vec![false; self.timetable.stop_count()], 120 | marked_routes: RefCell::new(vec![ 121 | TripStopTime::marked(); 122 | self.timetable.routes().len() 123 | ]), 124 | timetable: &self.timetable, 125 | round: 0, 126 | targets: target_costs.clone(), 127 | max_transfers, 128 | max_transfer_delta, 129 | step_log: vec![InternalStep { 130 | previous_step: 0usize, 131 | from: InternalStepLocation::Location(LatLng::from_degrees(0.0, 0.0)), 132 | to: InternalStepLocation::Location(LatLng::from_degrees(0.0, 0.0)), 133 | route: None, 134 | departure: Time::epoch(), 135 | arrival: Time::epoch(), 136 | trip: None, 137 | }], 138 | }; 139 | context 140 | .init(route_start_time, start_location, &start_stops) 141 | .await; 142 | context.route().await; 143 | 144 | let best_itineraries = self 145 | .pick_best_itineraries(&context, &target_costs) 146 | .iter() 147 | .map(|itinerary| { 148 | 
self.unwind_itinerary( 149 | &context, 150 | itinerary, 151 | route_start_time, 152 | &target_costs, 153 | start_location, 154 | target_location, 155 | ) 156 | }) 157 | .collect(); 158 | 159 | SolariResponse { 160 | status: ResponseStatus::Ok, 161 | itineraries: best_itineraries, 162 | } 163 | } 164 | 165 | fn unwind_itinerary( 166 | &'a self, 167 | context: &RouterContext<'a, T>, 168 | itinerary: &InternalItinerary, 169 | route_start_time: Time, 170 | target_costs: &[(usize, u32)], 171 | start_location: LatLng, 172 | target_location: LatLng, 173 | ) -> SolariItinerary { 174 | let mut steps = vec![]; 175 | let mut step_cursor = itinerary.last_step; 176 | { 177 | let step = &context.step_log[step_cursor]; 178 | let from = if let InternalStepLocation::Stop(stop) = step.to { 179 | stop 180 | } else { 181 | panic!(); 182 | }; 183 | let to = if let InternalStepLocation::Stop(stop) = step.to { 184 | stop 185 | } else { 186 | panic!(); 187 | }; 188 | let from_location = from.location(); 189 | let last_leg_cost = target_costs 190 | .iter() 191 | .find(|(target, _cost)| target == &to.id()) 192 | .map(|(_target, cost)| *cost) 193 | .expect("Target cost not found"); 194 | steps.push(( 195 | Step::End(EndStep { 196 | last_stop: from.metadata(&self.timetable).name.clone(), 197 | last_stop_latlng: [from_location.lat.deg(), from_location.lng.deg()], 198 | last_stop_departure_epoch_seconds: step.arrival.epoch_seconds() as u64, 199 | end_latlng: [target_location.lat.deg(), target_location.lng.deg()], 200 | end_epoch_seconds: (step.arrival.epoch_seconds() + last_leg_cost) as u64, 201 | }), 202 | step_cursor, 203 | )); 204 | } 205 | while context.step_log[step_cursor].previous_step != 0 { 206 | let step = &context.step_log[step_cursor]; 207 | let to = if let InternalStepLocation::Stop(stop) = step.to { 208 | stop 209 | } else { 210 | panic!(); 211 | }; 212 | let from = if let InternalStepLocation::Stop(stop) = step.from { 213 | stop 214 | } else { 215 | panic!(); 216 | }; 217 | let 
to_location = to.location(); 218 | let from_location = from.location(); 219 | 220 | steps.push(( 221 | if step.route.is_none() { 222 | Step::Transfer(TransferStep { 223 | from_stop: from.metadata(&self.timetable).name.clone(), 224 | from_stop_latlng: [from_location.lat.deg(), from_location.lng.deg()], 225 | to_stop: to.metadata(&self.timetable).name.clone(), 226 | to_stop_latlng: [to_location.lat.deg(), to_location.lng.deg()], 227 | departure_epoch_seconds: step.departure.epoch_seconds() as u64, 228 | arrival_epoch_seconds: step.arrival.epoch_seconds() as u64, 229 | }) 230 | } else { 231 | let to_location = to.location(); 232 | let from_location = from.location(); 233 | 234 | let shape = self.clip_shape(step); 235 | 236 | Step::Trip(TripStep { 237 | on_route: step 238 | .trip 239 | .unwrap() 240 | .metadata(&self.timetable) 241 | .route_name 242 | .clone(), 243 | agency: step 244 | .trip 245 | .unwrap() 246 | .metadata(&self.timetable) 247 | .agency_name 248 | .clone(), 249 | departure_stop: from.metadata(&self.timetable).name.clone(), 250 | departure_stop_latlng: [from_location.lat.deg(), from_location.lng.deg()], 251 | departure_epoch_seconds: step.departure.epoch_seconds() as u64, 252 | arrival_stop: to.metadata(&self.timetable).name.clone(), 253 | arrival_stop_latlng: [to_location.lat.deg(), to_location.lng.deg()], 254 | arrival_epoch_seconds: step.arrival.epoch_seconds() as u64, 255 | shape, 256 | }) 257 | }, 258 | step_cursor, 259 | )); 260 | step_cursor = step.previous_step; 261 | } 262 | let end_time = if let Some((Step::End(end), _)) = steps.first() { 263 | end.end_epoch_seconds 264 | } else { 265 | panic!("First step is not a Begin step."); 266 | }; 267 | let transfer_graph = self.transfer_graph.clone(); 268 | let mut search_context = TransferGraphSearcher::new(transfer_graph); 269 | let legs = steps 270 | .iter() 271 | .rev() 272 | .filter_map(|(step, _)| match step { 273 | Step::Trip(trip) => Some(SolariLeg::Transit { 274 | start_time: 
OffsetDateTime::from_unix_timestamp( 275 | trip.departure_epoch_seconds as i64, 276 | ) 277 | .expect("Invalid Unix timestamp"), 278 | end_time: OffsetDateTime::from_unix_timestamp( 279 | trip.arrival_epoch_seconds as i64, 280 | ) 281 | .expect("Invalid Unix timestamp"), 282 | start_location: crate::api::LatLng { 283 | lat: trip.departure_stop_latlng[0], 284 | lon: trip.departure_stop_latlng[1], 285 | stop: trip.departure_stop.clone(), 286 | }, 287 | end_location: crate::api::LatLng { 288 | lat: trip.arrival_stop_latlng[0], 289 | lon: trip.arrival_stop_latlng[1], 290 | stop: trip.arrival_stop.clone(), 291 | }, 292 | transit_route: trip.on_route.clone(), 293 | transit_agency: trip.agency.clone(), 294 | route_shape: trip.shape.clone(), 295 | }), 296 | Step::Transfer(transfer) => { 297 | let from_coord = Coord { 298 | y: transfer.from_stop_latlng[0], 299 | x: transfer.from_stop_latlng[1], 300 | }; 301 | let to_coord = Coord { 302 | y: transfer.to_stop_latlng[0], 303 | x: transfer.to_stop_latlng[1], 304 | }; 305 | let transfer_shape = match self.transfer_graph.transfer_path( 306 | &mut search_context, 307 | &from_coord, 308 | &to_coord, 309 | ) { 310 | Ok(transfer_path) => Some(transfer_path.shape), 311 | Err(err) => { 312 | log::error!( 313 | "Failed to calculate transfer path: {}, step: {:?}", 314 | err, 315 | transfer 316 | ); 317 | None 318 | } 319 | }; 320 | Some(SolariLeg::Transfer { 321 | start_time: OffsetDateTime::from_unix_timestamp( 322 | transfer.departure_epoch_seconds as i64, 323 | ) 324 | .expect("Invalid Unix timestamp"), 325 | end_time: OffsetDateTime::from_unix_timestamp( 326 | transfer.arrival_epoch_seconds as i64, 327 | ) 328 | .expect("Invalid Unix timestamp"), 329 | start_location: crate::api::LatLng { 330 | lat: transfer.from_stop_latlng[0], 331 | lon: transfer.from_stop_latlng[1], 332 | stop: transfer.from_stop.clone(), 333 | }, 334 | end_location: crate::api::LatLng { 335 | lat: transfer.to_stop_latlng[0], 336 | lon: transfer.to_stop_latlng[1], 
337 | stop: transfer.to_stop.clone(), 338 | }, 339 | route_shape: transfer_shape, 340 | }) 341 | } 342 | _ => None, 343 | }) 344 | .collect::>(); 345 | SolariItinerary { 346 | start_location: crate::api::LatLng { 347 | lat: start_location.lat.deg(), 348 | lon: start_location.lng.deg(), 349 | stop: None, 350 | }, 351 | end_location: crate::api::LatLng { 352 | lat: target_location.lat.deg(), 353 | lon: target_location.lng.deg(), 354 | stop: None, 355 | }, 356 | start_time: 357 | OffsetDateTime::from_unix_timestamp(route_start_time.epoch_seconds() as i64) 358 | .expect("Invalid Unix timestamp"), 359 | end_time: OffsetDateTime::from_unix_timestamp(end_time as i64) 360 | .expect("Invalid Unix timestamp"), 361 | legs, 362 | } 363 | } 364 | 365 | fn cost_scaling_final_transfer( 366 | &self, 367 | context: &RouterContext<'a, T>, 368 | itinerary: &InternalItinerary, 369 | scalar: f64, 370 | ) -> u32 { 371 | let last_step = &context.step_log[itinerary.last_step]; 372 | if last_step.trip.is_none() { 373 | let last_step_duration = 374 | last_step.arrival.epoch_seconds() - last_step.departure.epoch_seconds(); 375 | let scaled = last_step_duration as f64 * scalar; 376 | return last_step.departure.epoch_seconds() + scaled as u32; 377 | } else { 378 | return last_step.arrival.epoch_seconds(); 379 | } 380 | } 381 | 382 | fn pick_best_itineraries( 383 | &self, 384 | context: &RouterContext<'a, T>, 385 | target_costs: &[(usize, u32)], 386 | ) -> Vec { 387 | let mut itineraries = HashSet::new(); 388 | 389 | let walking_scalars = [0.5, 1.0, 2.0]; 390 | for round in 0..=context.round { 391 | for walking_scalar in walking_scalars { 392 | if let Some((itinerary, _)) = target_costs 393 | .iter() 394 | .filter_map(|(target_id, cost)| { 395 | context.best_times_per_round[round as usize][*target_id] 396 | .as_ref() 397 | .map(|it| (it, *cost as f64 * walking_scalar)) 398 | }) 399 | .min_by_key(|(it, cost)| { 400 | self.cost_scaling_final_transfer(context, *it, walking_scalar) 401 | + *cost as 
u32 402 | }) 403 | { 404 | itineraries.insert(itinerary.clone()); 405 | } 406 | } 407 | } 408 | 409 | itineraries.into_iter().collect() 410 | } 411 | 412 | fn clip_shape(&'a self, step: &InternalStep) -> Option { 413 | if let Some(route) = &step.route { 414 | if let Some(shape) = self.timetable.route_shape(route) { 415 | let departure_stop_distance = if let InternalStepLocation::Stop(stop) = step.from { 416 | route 417 | .route_stops(&self.timetable) 418 | .iter() 419 | .filter(|route_stop| route_stop.stop(&self.timetable).id() == stop.id()) 420 | .next() 421 | .map(|route_stop| route_stop.distance_along_route())? 422 | } else { 423 | return None; 424 | }; 425 | let arrival_stop_distance = if let InternalStepLocation::Stop(stop) = step.to { 426 | route 427 | .route_stops(&self.timetable) 428 | .iter() 429 | .filter(|route_stop| route_stop.stop(&self.timetable).id() == stop.id()) 430 | .next() 431 | .map(|route_stop| route_stop.distance_along_route())? 432 | } else { 433 | return None; 434 | }; 435 | let mut coords = shape 436 | .iter() 437 | .skip_while(|coord| { 438 | coord 439 | .distance_along_shape() 440 | .map(|dist| dist.is_nan() || dist < departure_stop_distance) 441 | .unwrap_or(true) 442 | }) 443 | .take_while(|coord| { 444 | coord 445 | .distance_along_shape() 446 | .map(|dist| dist.is_nan() || dist < arrival_stop_distance) 447 | .unwrap_or(true) 448 | }) 449 | .map(|coord| Coord { 450 | x: coord.lon(), 451 | y: coord.lat(), 452 | }) 453 | .collect::>(); 454 | 455 | if coords.is_empty() { 456 | let points: Vec = shape 457 | .iter() 458 | .map(|coord| Coord { 459 | x: coord.lon(), 460 | y: coord.lat(), 461 | }) 462 | .collect(); 463 | let start = 464 | Point::new(step.from.latlng().lng.deg(), step.from.latlng().lat.deg()); 465 | let end = Point::new(step.to.latlng().lng.deg(), step.to.latlng().lat.deg()); 466 | if let (Some((start_idx, start_point)), Some((end_idx, end_point))) = ( 467 | Self::closest_point(&start, &points), 468 | Self::closest_point(&end, 
&points), 469 | ) { 470 | coords = shape 471 | .iter() 472 | .skip(start_idx + 1) 473 | .take(end_idx - start_idx) 474 | .map(|coord| Coord { 475 | x: coord.lon(), 476 | y: coord.lat(), 477 | }) 478 | .collect(); 479 | coords.insert(0, start_point.0); 480 | coords.push(end_point.0); 481 | } 482 | } 483 | 484 | let line_string = LineString::new(coords); 485 | return polyline::encode_coordinates(line_string, 5).ok(); 486 | } 487 | }; 488 | None 489 | } 490 | 491 | fn closest_point(target: &Point, points: &Vec) -> Option<(usize, Point)> { 492 | let (idx, closest) = points 493 | .windows(2) 494 | .map(|window| Line::new(window[0], window[1])) 495 | .map(|line| line.closest_point(target)) 496 | .enumerate() 497 | .reduce(|a, b| { 498 | let closest = a.1.best_of_two(&b.1, *target); 499 | if a.1 == closest { 500 | a 501 | } else { 502 | b 503 | } 504 | })?; 505 | let closest = match closest { 506 | geo::Closest::Intersection(point) => point, 507 | geo::Closest::SinglePoint(point) => point, 508 | geo::Closest::Indeterminate => return None, 509 | }; 510 | return Some((idx, closest)); 511 | } 512 | } 513 | 514 | #[derive(Debug, Clone)] 515 | struct InternalStep<'a> { 516 | previous_step: usize, 517 | from: InternalStepLocation<'a>, 518 | to: InternalStepLocation<'a>, 519 | route: Option, 520 | departure: Time, 521 | arrival: Time, 522 | trip: Option, 523 | } 524 | 525 | #[derive(Debug, Clone, Hash, PartialEq, Eq)] 526 | struct InternalItinerary { 527 | last_step: usize, 528 | final_time: Time, 529 | } 530 | 531 | #[derive(Debug, Clone, Serialize)] 532 | pub struct BeginStep { 533 | pub begin_latlng: [f64; 2], 534 | pub begin_epoch_seconds: u64, 535 | pub first_stop: String, 536 | pub first_stop_latlng: [f64; 2], 537 | pub first_stop_arrival_epoch_seconds: u64, 538 | } 539 | 540 | #[derive(Debug, Clone, Serialize)] 541 | pub struct TripStep { 542 | pub on_route: Option, 543 | pub agency: Option, 544 | pub departure_stop: Option, 545 | pub departure_stop_latlng: [f64; 2], 546 
| pub departure_epoch_seconds: u64, 547 | pub arrival_stop: Option, 548 | pub arrival_stop_latlng: [f64; 2], 549 | pub arrival_epoch_seconds: u64, 550 | pub shape: Option, 551 | } 552 | 553 | #[derive(Debug, Clone, Serialize)] 554 | pub struct TransferStep { 555 | pub from_stop: Option, 556 | pub from_stop_latlng: [f64; 2], 557 | pub to_stop: Option, 558 | pub to_stop_latlng: [f64; 2], 559 | pub departure_epoch_seconds: u64, 560 | pub arrival_epoch_seconds: u64, 561 | } 562 | 563 | #[derive(Debug, Clone, Serialize)] 564 | pub struct EndStep { 565 | pub last_stop: Option, 566 | pub last_stop_latlng: [f64; 2], 567 | pub last_stop_departure_epoch_seconds: u64, 568 | pub end_latlng: [f64; 2], 569 | pub end_epoch_seconds: u64, 570 | } 571 | 572 | #[derive(Debug, Clone, Serialize)] 573 | pub enum Step { 574 | Begin(BeginStep), 575 | Trip(TripStep), 576 | Transfer(TransferStep), 577 | End(EndStep), 578 | } 579 | 580 | pub struct RouterContext<'a, T: Timetable<'a>> { 581 | best_times_global: Vec>, 582 | best_times_per_round: Vec>>, 583 | marked_stops: Vec, 584 | marked_routes: RefCell>, 585 | timetable: &'a T, 586 | round: u32, 587 | targets: Vec<(usize, u32)>, 588 | max_transfers: Option, 589 | max_transfer_delta: Option, 590 | step_log: Vec>, 591 | } 592 | 593 | #[derive(Debug, Clone, PartialEq)] 594 | pub enum InternalStepLocation<'a> { 595 | Stop(&'a Stop), 596 | Location(LatLng), 597 | } 598 | 599 | impl<'a> InternalStepLocation<'a> { 600 | pub fn latlng(&'a self) -> LatLng { 601 | match self { 602 | InternalStepLocation::Stop(stop) => stop.location(), 603 | InternalStepLocation::Location(latlng) => latlng.clone(), 604 | } 605 | } 606 | } 607 | 608 | impl<'a, 'b, T: Timetable<'a>> RouterContext<'a, T> 609 | where 610 | 'b: 'a, 611 | { 612 | fn best_time_to_target(&self) -> Option