├── requirements.txt ├── test ├── __init__.py ├── gtfs_shizuoka.data.2016-10-13.zip ├── gtfs_shizuoka.test.J22209730-J22209790.yaml ├── gtfs_shizuoka.test.J22209843-J222093345.yaml ├── gtfs_shizuoka.test.J22209723-J2220952426.yaml ├── gtfs_shizuoka.data.2016-10-13.zip.txt ├── all.py ├── gtfs_shizuoka.py ├── simple.py ├── simple.test.journey-planner-csa.yaml └── _common.py ├── tb_routing ├── types │ ├── __init__.py │ ├── pareto.py │ ├── tp.py │ ├── base.py │ └── public.py ├── __init__.py ├── vis.py ├── utils.py ├── gtfs.py └── engine.py ├── doc ├── example-images │ ├── json-dgc.jpg │ ├── dot-for-lines.jpg │ └── dot-for-tp-subtree.jpg ├── OpenTripPlanner-otp-1.0.0-patch-and-run.sh └── OpenTripPlanner-otp-1.0.0-stopid-display.patch ├── .gitignore ├── COPYING ├── timetable-from-json-dgc.example.json ├── timetable-from-json-dgc.py ├── gtfs-tb-routing.py └── README.rst /requirements.txt: -------------------------------------------------------------------------------- 1 | attrs==16.2.0 2 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- 1 | from . import _common 2 | c = _common 3 | -------------------------------------------------------------------------------- /tb_routing/types/__init__.py: -------------------------------------------------------------------------------- 1 | from . 
import public, base, tp, pareto 2 | -------------------------------------------------------------------------------- /doc/example-images/json-dgc.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mk-fg/trip-based-public-transit-routing-algo/HEAD/doc/example-images/json-dgc.jpg -------------------------------------------------------------------------------- /doc/example-images/dot-for-lines.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mk-fg/trip-based-public-transit-routing-algo/HEAD/doc/example-images/dot-for-lines.jpg -------------------------------------------------------------------------------- /test/gtfs_shizuoka.data.2016-10-13.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mk-fg/trip-based-public-transit-routing-algo/HEAD/test/gtfs_shizuoka.data.2016-10-13.zip -------------------------------------------------------------------------------- /doc/example-images/dot-for-tp-subtree.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mk-fg/trip-based-public-transit-routing-algo/HEAD/doc/example-images/dot-for-tp-subtree.jpg -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # These likely can't be distributed freely 2 | /doc/papers/* 3 | /doc/samples/* 4 | 5 | # Implementation job contract info, might have private info 6 | /doc/contract-info.txt 7 | 8 | # Misc clutter 9 | /cache 10 | *.pyc 11 | 12 | # Test caches 13 | /test/*.cache.* 14 | /test/*.data.unzip/ 15 | -------------------------------------------------------------------------------- /test/gtfs_shizuoka.test.J22209730-J22209790.yaml: 
-------------------------------------------------------------------------------- 1 | goal: 2 | dts_start: 06:00 3 | src: J22209730_0 4 | dst: J22209790_1 5 | 6 | journey_set: 7 | 8 | journey-A: 9 | stats: ['06:23', '07:08'] 10 | segments: 11 | trip-M: [trip, J22209730_0, J222093340_2] 12 | trip-N: [trip, J222093340_2, J22209790_1] 13 | -------------------------------------------------------------------------------- /test/gtfs_shizuoka.test.J22209843-J222093345.yaml: -------------------------------------------------------------------------------- 1 | goal: 2 | dts_start: 06:00 3 | src: J22209843_0 4 | dst: J222093345_0 5 | 6 | journey_set: 7 | 8 | journey-A: 9 | stats: ['07:02', '07:41'] 10 | segments: 11 | trip-X: [trip, J22209843_0, J222093340_1] 12 | walk: [fp, J222093340_1, J222093340_2] 13 | trip-Y: [trip, J222093340_2, J222093345_0] 14 | -------------------------------------------------------------------------------- /test/gtfs_shizuoka.test.J22209723-J2220952426.yaml: -------------------------------------------------------------------------------- 1 | goal: 2 | dts_start: 06:00 3 | src: J22209723_0 4 | dst: J2220952426_0 5 | 6 | 7 | journey_set: 8 | 9 | journey-A: 10 | stats: ['06:10', '08:43'] 11 | # start: 07:20 # much better option!!! 
12 | segments: 13 | trip-M: [trip, J22209723_0, J222093340_2] 14 | trip-N: [trip, J222093340_2, J2220952426_0] 15 | -------------------------------------------------------------------------------- /test/gtfs_shizuoka.data.2016-10-13.zip.txt: -------------------------------------------------------------------------------- 1 | Title: Shimada City 2 | Downloaded from: https://transit.land/feed-registry/operators/o-xn4n-島田市 3 | Date: 2016-10-13 4 | 5 | Onestop ID: o-xn4n-島田市 6 | Region:: Shimada 7 | State: Shizuoka 8 | Country: Japan 9 | Website: https://www.city.shimada.shizuoka.jp/bouhan/komibus.html 10 | Timezone: Asia/Tokyo 11 | 12 | Feed: 13 | Onestop ID: f-xn4n-島田市 14 | Data location: http://opentrans.it/feed/gtfs/5724160613416960/gtfs.zip 15 | License: CC BY 4.0 - https://creativecommons.org/licenses/by/4.0/ 16 | -------------------------------------------------------------------------------- /COPYING: -------------------------------------------------------------------------------- 1 | DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE 2 | Version 2, December 2004 3 | 4 | Copyright (C) 2016 Mike Kazantsev 5 | 6 | Everyone is permitted to copy and distribute verbatim or modified 7 | copies of this license document, and changing it is allowed as long 8 | as the name is changed. 9 | 10 | DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE 11 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 12 | 13 | 0. You just DO WHAT THE FUCK YOU WANT TO. 
14 | -------------------------------------------------------------------------------- /doc/OpenTripPlanner-otp-1.0.0-patch-and-run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | otp_jar=otp-1.0.0-shaded.jar 5 | otp_src_dir=src # src/ dir in otp sources/repo, or unpacked jar dir 6 | data=data/shizuoka 7 | mem=4G 8 | 9 | otp_dst=$(readlink -f "$otp_jar") 10 | otp_src="$otp_dst".orig 11 | [[ -e "$otp_src" ]] || cp -a "$otp_dst" "$otp_src" 12 | 13 | pushd "$otp_src_dir" >/dev/null 14 | files=( $(git status -uno --porcelain | awk '{print $NF}') ) 15 | rsync -t "$otp_src" "$otp_jar" 16 | zip "$otp_jar" "${files[@]}" 17 | rsync -t "$otp_jar" "$otp_dst" 18 | popd >/dev/null 19 | 20 | exec java -Xmx"$mem" -jar "$otp_jar" --build "$data" --inMemory 21 | -------------------------------------------------------------------------------- /test/all.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from . import gtfs_shizuoka 4 | from . 
import simple 5 | 6 | 7 | # XXX: because unittest in pypy3/3.3 doesn't have subTest ctx yet 8 | 9 | def load_tests(loader=None, tests=None, pattern=None): 10 | if not tests: tests = unittest.TestSuite() 11 | for mod in gtfs_shizuoka, simple: 12 | tests.addTests(mod.load_tests(loader, tests, pattern)) 13 | return tests 14 | 15 | class SpecificTestCasePicker: 16 | def __init__(self): self.suite = load_tests() 17 | def __getattr__(self, k): 18 | for test in self.suite: 19 | if test._testMethodName == k: return lambda: test 20 | raise AttributeError('No such test case: {}'.format(k)) 21 | case = SpecificTestCasePicker() 22 | -------------------------------------------------------------------------------- /test/gtfs_shizuoka.py: -------------------------------------------------------------------------------- 1 | import itertools as it, operator as op, functools as ft 2 | from pathlib import Path 3 | from pprint import pprint 4 | import os, sys, unittest 5 | 6 | from . import _common as c 7 | 8 | 9 | class GTFS_Shizuoka_20161013(unittest.TestCase): 10 | 11 | @classmethod 12 | def setUpClass(cls): 13 | path_file = Path(__file__) 14 | path_gtfs_zip = path_file.parent / (path_file.stem + '.data.2016-10-13.zip') 15 | cls.fx = c.GTFSTestFixture(path_gtfs_zip, path_file) 16 | 17 | tt_path, tt_path_dump = cls.fx.path_timetable, None 18 | if not tt_path.exists(): tt_path, tt_path_dump = cls.fx.path_unzip, tt_path 19 | 20 | cls.timetable, cls.router = c.tb.init_gtfs_router( 21 | tt_path, cls.fx.path_cache, tt_path_dump, timer_func=c.tb.calc_timer ) 22 | cls.checks = c.GraphAssertions(cls.router.graph) 23 | 24 | @classmethod 25 | def tearDownClass(cls): pass 26 | 27 | def _test_journeys_base(self, data_name): 28 | test = self.fx.load_test_data(data_name) 29 | self.checks.assert_journey_components(test) 30 | 31 | goal = c.struct_from_val(test.goal, c.TestGoal) 32 | goal.dts_start = self.timetable.dts_parse(goal.dts_start) 33 | goal.src, goal.dst = op.itemgetter(goal.src, 
goal.dst)(self.timetable.stops) 34 | 35 | journeys = self.router.query_earliest_arrival(goal.src, goal.dst, goal.dts_start) 36 | self.checks.assert_journey_results(test, journeys) 37 | 38 | 39 | def test_journeys_J22209723_J2220952426(self): 40 | self._test_journeys_base('J22209723-J2220952426') 41 | 42 | def test_journeys_J22209843_J222093345(self): 43 | self._test_journeys_base('J22209843-J222093345') 44 | 45 | def test_journeys_J22209730_J22209790(self): 46 | self._test_journeys_base('J22209730-J22209790') 47 | 48 | 49 | def load_tests(loader, tests, pattern): 50 | # XXX: because unittest in pypy3/3.3 doesn't have subTest ctx yet 51 | return unittest.makeSuite(GTFS_Shizuoka_20161013) 52 | -------------------------------------------------------------------------------- /timetable-from-json-dgc.example.json: -------------------------------------------------------------------------------- 1 | {"nodes":[{"id":6,"title":"L1-a/L3-j","x":256,"y":245,"data":{"type":"generic"}},{"id":7,"title":"L1-b/L2-j/L4-c","x":481,"y":260,"data":{"type":"generic"}},{"id":8,"title":"L1-c/L2-i/L3-d","x":755,"y":277,"data":{"type":"generic"}},{"id":9,"title":"L1-d","x":1042,"y":296,"data":{"type":"generic"}},{"id":10,"title":"L1-e/L2-f","x":1204,"y":307,"data":{"type":"generic"}},{"id":13,"title":"L1-f","x":1401,"y":338,"data":{"type":"generic"}},{"id":14,"title":"L2-a/L3-k","x":239,"y":504,"data":{"type":"generic"}},{"id":15,"title":"L2-b/L4-d","x":473,"y":341,"data":{"type":"generic"}},{"id":16,"title":"L2-c/L4-e","x":640,"y":499,"data":{"type":"generic"}},{"id":17,"title":"L2-d/L3-b","x":899,"y":489,"data":{"type":"generic"}},{"id":18,"title":"L2-e","x":1083,"y":333,"data":{"type":"generic"}},{"id":19,"title":"L2-g","x":1305,"y":67,"data":{"type":"generic"}},{"id":20,"title":"L2-h/L3-e","x":899,"y":101,"data":{"type":"generic"}},{"id":21,"title":"L2-k/L3-i","x":368,"y":73,"data":{"type":"generic"}},{"id":22,"title":"L3-a/L4-f","x":1032,"y":648,"data":{"type":"generic"}},{"id":23,"titl
e":"L3-c","x":675,"y":433,"data":{"type":"generic"}},{"id":24,"title":"L3-f","x":806,"y":-39,"data":{"type":"generic"}},{"id":25,"title":"L3-g/L4-b","x":608,"y":-40,"data":{"type":"generic"}},{"id":26,"title":"L3-h","x":440,"y":-53,"data":{"type":"generic"}},{"id":27,"title":"L3-l","x":316,"y":631,"data":{"type":"generic"}},{"id":28,"title":"L4-a","x":638,"y":122,"data":{"type":"generic"}},{"id":29,"title":"L4-g","x":1223,"y":504,"data":{"type":"generic"}}],"edges":[{"source":6,"target":7},{"source":8,"target":9},{"source":9,"target":10},{"source":10,"target":13},{"source":14,"target":15},{"source":15,"target":16},{"source":16,"target":17},{"source":17,"target":18},{"source":18,"target":10},{"source":10,"target":19},{"source":19,"target":20},{"source":20,"target":8},{"source":7,"target":21},{"source":7,"target":8},{"source":22,"target":17},{"source":17,"target":23},{"source":23,"target":8},{"source":20,"target":24},{"source":24,"target":25},{"source":25,"target":26},{"source":26,"target":21},{"source":21,"target":6},{"source":6,"target":14},{"source":14,"target":27},{"source":28,"target":25},{"source":25,"target":7},{"source":7,"target":15},{"source":16,"target":22},{"source":22,"target":29}],"weakEdges":[]} -------------------------------------------------------------------------------- /tb_routing/__init__.py: -------------------------------------------------------------------------------- 1 | import itertools as it, operator as op, functools as ft 2 | from pathlib import Path 3 | import time 4 | 5 | from . 
import engine, vis, gtfs, utils as u, types as t 6 | 7 | 8 | def calc_timer(func, *args, log=u.get_logger('tb.timer'), timer_name=None, **kws): 9 | if not timer_name: 10 | func_base = func if not isinstance(func, ft.partial) else func.func 11 | timer_name = '.'.join([func_base.__module__.strip('__'), func_base.__qualname__]) 12 | log.debug('[{}] Starting...', timer_name) 13 | td = time.monotonic() 14 | data = func(*args, **kws) 15 | td = time.monotonic() - td 16 | log.debug('[{}] Finished in: {:.1f}s', timer_name, td) 17 | return data 18 | 19 | 20 | def init_gtfs_router( 21 | tt_path, cache_path=None, tt_path_dump=None, 22 | conf=None, conf_engine=None, timer_func=None, log=u.get_logger('tb.init') ): 23 | if not conf: conf = gtfs.GTFSConf() 24 | 25 | timetable_func, router_func = gtfs.parse_timetable,\ 26 | ft.partial(engine.TBRoutingEngine, conf=conf_engine, timer_func=timer_func) 27 | if timer_func: 28 | timetable_func, router_func = ( 29 | ft.partial(timer_func, func) for func in [timetable_func, router_func] ) 30 | 31 | tt_path = Path(tt_path) 32 | if tt_path.is_file(): 33 | tt_load = u.pickle_load 34 | if timer_func: tt_load = ft.partial(timer_func, tt_load, timer_name='timetable_load') 35 | timetable = tt_load(tt_path, fail=True) 36 | else: 37 | timetable = timetable_func(tt_path, conf) 38 | if tt_path_dump: u.pickle_dump(timetable, tt_path_dump) 39 | log.debug( 40 | 'Parsed timetable: stops={:,}, footpaths={:,}' 41 | ' (mean-delta={:,.1f}s, mean-options={:,.1f}, same-stop={:,}),' 42 | ' trips={:,} (mean-stops={:,.1f})', 43 | len(timetable.stops), len(timetable.footpaths), 44 | timetable.footpaths.stat_mean_delta(), 45 | timetable.footpaths.stat_mean_delta_count(), 46 | timetable.footpaths.stat_same_stop_count(), 47 | len(timetable.trips), timetable.trips.stat_mean_stops() ) 48 | 49 | if cache_path: cache_path = Path(cache_path) 50 | if cache_path and cache_path.exists(): 51 | with open(str(cache_path), 'rb') as src: 52 | router = router_func(timetable, 
cached_graph=src) 53 | else: 54 | router = router_func(timetable) 55 | if cache_path: 56 | graph_dump = router.graph.dump 57 | if timer_func: graph_dump = ft.partial(timer_func, graph_dump) 58 | with u.safe_replacement(cache_path, 'wb') as dst: graph_dump(dst) 59 | 60 | return timetable, router 61 | -------------------------------------------------------------------------------- /test/simple.py: -------------------------------------------------------------------------------- 1 | import itertools as it, operator as op, functools as ft 2 | from pathlib import Path 3 | import unittest 4 | 5 | from . import _common as c 6 | 7 | 8 | @c.tb.u.attr_struct 9 | class TestTripStop: keys = 'stop_id dts_arr dts_dep' 10 | @c.tb.u.attr_struct 11 | class TestFootpath: keys = 'src dst dt' 12 | 13 | class SimpleTestCase(unittest.TestCase): 14 | 15 | dt_ch = 2*60 # fixed time-delta overhead for changing trips (i.e. p->p footpaths) 16 | 17 | def __init__(self, test_name, test_data): 18 | self.test_name, self.test_data = test_name, test_data 19 | setattr(self, test_name, self.run_test) 20 | super(SimpleTestCase, self).__init__(test_name) 21 | 22 | def init_router(self): 23 | types = c.tb.t.public 24 | trips, stops, footpaths = types.Trips(), types.Stops(), types.Footpaths() 25 | 26 | tt = self.test_data.timetable or dict() 27 | if not set(tt.keys()).difference(['trips', 'footpaths']): 28 | tt_trips, tt_footpaths = (tt.get(k, list()) for k in ['trips', 'footpaths']) 29 | else: tt_trips, tt_footpaths = self.test_data.timetable, list() 30 | 31 | for trip_id, trip_data in tt_trips.items(): 32 | trip = types.Trip() 33 | for stopidx, ts in enumerate(trip_data): 34 | stop_id, dts_arr, dts_dep = c.struct_from_val(ts, TestTripStop, as_tuple=True) 35 | if not dts_arr or dts_arr == 'x': dts_arr = dts_dep 36 | if not dts_dep or dts_dep == 'x': dts_dep = dts_arr 37 | dts_arr, dts_dep = map(c.tb.u.dts_parse, [dts_arr, dts_dep]) 38 | stop = stops.add(types.Stop(stop_id, stop_id, 0, 0)) 39 | 
trip.add(types.TripStop(trip, stopidx, stop, dts_arr, dts_dep)) 40 | trips.add(trip) 41 | 42 | with footpaths.populate() as fp_add: 43 | for spec in tt_footpaths: 44 | src_id, dst_id, delta = c.struct_from_val(spec, TestFootpath, as_tuple=True) 45 | src, dst = (stops.add(types.Stop(s, s, 0, 0)) for s in [src_id, dst_id]) 46 | fp_add(src, dst, delta * 60) 47 | for stop in stops: fp_add(stop, stop, self.dt_ch) 48 | 49 | timetable = types.Timetable(stops, footpaths, trips) 50 | router = c.tb.engine.TBRoutingEngine(timetable, timer_func=c.tb.calc_timer) 51 | checks = c.GraphAssertions(router.graph) 52 | return timetable, router, checks 53 | 54 | def run_test(self): 55 | timetable, router, checks = self.init_router() 56 | checks.assert_journey_components(self.test_data) 57 | goal = c.struct_from_val(self.test_data.goal, c.TestGoal) 58 | goal.dts_start = timetable.dts_parse(goal.dts_start) 59 | goal.src, goal.dst = op.itemgetter(goal.src, goal.dst)(timetable.stops) 60 | if not goal.dts_latest: 61 | journeys = router.query_earliest_arrival(goal.src, goal.dst, goal.dts_start) 62 | else: 63 | goal.dts_latest = timetable.dts_parse(goal.dts_latest) 64 | journeys = router.query_profile(goal.src, goal.dst, goal.dts_start, goal.dts_latest) 65 | checks.assert_journey_results(self.test_data, journeys) 66 | 67 | 68 | class SimpleGraphTests(unittest.TestSuite): 69 | 70 | def __init__(self): 71 | path_file = Path(__file__) 72 | tests, tests_data = list(), c.load_test_data( 73 | path_file.parent, path_file.stem, 'journey-planner-csa' ) 74 | for test_name, test_data in tests_data.items(): 75 | tests.append(SimpleTestCase(test_name, test_data)) 76 | super(SimpleGraphTests, self).__init__(tests) 77 | 78 | 79 | def load_tests(loader, tests, pattern): 80 | # XXX: because unittest in pypy3/3.3 doesn't have subTest ctx yet 81 | return SimpleGraphTests() 82 | -------------------------------------------------------------------------------- /tb_routing/vis.py: 
-------------------------------------------------------------------------------- 1 | # Visualization tools, mostly useful for debugging 2 | 3 | import itertools as it, operator as op, functools as ft 4 | from collections import defaultdict 5 | import contextlib 6 | 7 | from . import utils as u, types as t 8 | 9 | 10 | print_fmt = lambda tpl, *a, file=None, end='\n', **k:\ 11 | print(tpl.format(*a,**k), file=file, end=end) 12 | 13 | dot_str = lambda n: '"{}"'.format(n.replace('"', '\\"')) 14 | dot_html = lambda n: '<{}>'.format(n) 15 | 16 | 17 | @contextlib.contextmanager 18 | def dot_graph(dst, dot_opts, indent=2): 19 | print_fmt('digraph {{', file=dst) 20 | if isinstance(indent, int): indent = ' '*indent 21 | p = lambda tpl, *a, end='\n', **k:\ 22 | print_fmt(indent + tpl, *a, file=dst, end=end, **k) 23 | p('### Defaults') 24 | for t, opts in (dot_opts or dict()).items(): 25 | p('{} [ {} ]'.format(t, ', '.join('{}={}'.format(k, v) for k, v in opts.items()))) 26 | yield p 27 | print_fmt('}}', file=dst) 28 | 29 | 30 | def dot_for_lines(lines, dst, dot_opts=None): 31 | stop_names, stop_edges = defaultdict(set), defaultdict(set) 32 | for line in lines: 33 | stop_prev = None 34 | for n, stop in enumerate(line.stops): 35 | stop_names[stop].add('{}[{}]'.format(line.id, n)) 36 | if stop_prev: stop_edges[stop_prev].add(stop) 37 | stop_prev = stop 38 | 39 | with dot_graph(dst, dot_opts) as p: 40 | 41 | p('') 42 | p('### Labels') 43 | for stop, line_names in stop_names.items(): 44 | label = '{}{}'.format( 45 | stop.name, '
- '.join([''] + sorted(line_names)) ) 46 | name = stop_names[stop] = 'stop-{}'.format(stop.id) 47 | p('{} [label={}]'.format(dot_str(name), dot_html(label))) 48 | 49 | p('') 50 | p('### Edges') 51 | for stop_src, edges in stop_edges.items(): 52 | name_src = stop_names[stop_src] 53 | for stop_dst in edges: 54 | name_dst = stop_names[stop_dst] 55 | p('{} -> {}', *map(dot_str, [name_src, name_dst])) 56 | 57 | 58 | def dot_for_tp_subtree(subtree, dst, dst_to_src=False, dot_opts=None): 59 | assert subtree.prefix, 'Only subtrees are proper graphs' 60 | 61 | def node_name(node, pre=None, pre_type=None): 62 | v, pre_type = node.value, pre_type or dict() 63 | if isinstance(v, t.public.Stop): 64 | v = v.name 65 | if 'stop' in pre_type: v = '{}:{}'.format(pre_type['stop'], v) 66 | elif isinstance(v, t.base.LineStop): 67 | line_id = v.line_id 68 | if isinstance(v.line_id, int): line_id = '{:x}'.format(line_id) 69 | v = '{}:{}[{}]'.format(node.seed, line_id, v.stopidx) 70 | if 'line' in pre_type: v = '{}:{}'.format(pre_type['line'], v) 71 | else: raise ValueError(type(v), v) 72 | if pre: v = '{}:{}'.format(pre, v) 73 | return v 74 | 75 | dot_opts = dot_opts or dict() 76 | dot_opts.setdefault('graph', dict()).setdefault('rankdir', 'LR') 77 | src_dst = ('src dst' if not dst_to_src else 'dst src').split() 78 | with dot_graph(dst, dot_opts) as p: 79 | 80 | stops_src, stops_dst = set(), set() 81 | for k, node_src_set in subtree.tree.items(): 82 | for node_seed, node_src in node_src_set.items(): 83 | name_src = node_name(node_src, pre_type=dict(stop=src_dst[0])) 84 | if isinstance(node_src.value, t.public.Stop): 85 | if node_src.edges_to: stops_src.add(name_src) 86 | else: stops_dst.add(node_name(node_src, pre=src_dst[1])) 87 | for node_dst in node_src.edges_to: 88 | name_dst = node_name(node_dst, pre_type=dict(stop=src_dst[1])) 89 | p('{} -> {}', *map(dot_str, [name_src, name_dst])) 90 | 91 | for subset in filter(None, [stops_src, stops_dst]): 92 | p( 'subgraph {{{{ rank=same;{}; 
}}}}'.format(', '.join(map(dot_str, sorted(subset))))) 93 | -------------------------------------------------------------------------------- /tb_routing/types/pareto.py: -------------------------------------------------------------------------------- 1 | ### TBRoutingEngine internal types - pareto-optimization stuff 2 | ### Mostly used for results and labels in engine algos 3 | 4 | import itertools as it, operator as op, functools as ft 5 | import heapq 6 | 7 | from .. import utils as u 8 | 9 | 10 | @u.attr_struct(cmp=False) 11 | @ft.total_ordering 12 | class PrioItem: 13 | prio = u.attr_init() 14 | value = u.attr_init() 15 | 16 | def __hash__(self): return hash(self.prio) 17 | def __eq__(self, item): return self.prio == item.prio 18 | def __lt__(self, item): return self.prio < item.prio 19 | def __iter__(self): return iter((self.prio, self.value)) 20 | 21 | @classmethod 22 | def get_factory(cls, attr_args): 23 | '''Returns factory to create PrioItem by extracting 24 | specified prio attrs (or extractor func, if callable) from values. 25 | Intended to work with "*attrs" spec, 26 | where either single callable/string or individual attrs get passed.''' 27 | if isinstance(attr_args, str): attr_args = attr_args.split() 28 | if len(attr_args) == 1: 29 | if isinstance(attr_args[0], str): attr_args = attr_args[0].split() 30 | elif callable(attr_args[0]): attr_args = attr_args[0] 31 | if not callable(attr_args): attr_args = op.attrgetter(*attr_args) 32 | return lambda v: cls(attr_args(v), v) 33 | 34 | 35 | class PrioQueue: 36 | def __init__(self, *prio_attrs): 37 | self.items, self.item_func = list(), PrioItem.get_factory(prio_attrs) 38 | def __len__(self): return len(self.items) 39 | def push(self, value): heapq.heappush(self.items, self.item_func(value)) 40 | def pop(self): return heapq.heappop(self.items).value 41 | def peek(self): return self.items[0].value 42 | 43 | 44 | class ParetoSet: 45 | '''ParetoSet with 2 or 3 criterias. 
46 | First two are min-optimal, but the last one is maximized, if used. 47 | Designed to be used with arrival-time, 48 | transfer-count, and - for profile queries - departure-time criterias.''' 49 | 50 | def __init__(self, *dts_n_attrs): 51 | self.items, self.items_exc = list(), list() 52 | self.item_func = PrioItem.get_factory(dts_n_attrs) 53 | 54 | def get_criterias(self, item): 55 | c1, c2 = item.prio[:2] 56 | c3 = item.prio[2] if len(item.prio) > 2 else 0 # always 0 for bi-criteria sets 57 | return c1, c2, c3 58 | 59 | def add(self, value): 60 | '''Check if value is pareto-optimal, and if so, add it 61 | to the set, remove and dominated values and return True.''' 62 | item = self.item_func(value) 63 | item_c1, item_c2, item_c3 = self.get_criterias(item) 64 | for item_chk in list(self.items): 65 | c1, c2, c3 = self.get_criterias(item_chk) 66 | if item_c1 >= c1 and item_c2 >= c2 and item_c3 <= c3: break # dominated 67 | if item_c1 <= c1 and item_c2 <= c2 and item_c3 >= c3: self.items.remove(item_chk) # dominates 68 | else: 69 | self.items.append(item) # nondominated 70 | return True 71 | 72 | def add_exception(self, value): 73 | '''Add value that should not be compared to anything and will always be in the set. 
74 | Example for such special cases are footpath-only 75 | journeys in profile queries that have no fixed arrival/departure times, 76 | hence can't really be compared to other results.''' 77 | self.items_exc.append(value) 78 | 79 | def __len__(self): return len(self.items) + len(self.items_exc) 80 | def __iter__(self): 81 | return iter(it.chain(map(op.attrgetter('value'), self.items), self.items_exc)) 82 | def __repr__(self): return ''.format(list(self)) 83 | 84 | 85 | # Special-case ParetoSet used for common QueryResult values 86 | QueryResultParetoSet = ft.partial(ParetoSet, 'dts_arr n dts_dep') 87 | -------------------------------------------------------------------------------- /doc/OpenTripPlanner-otp-1.0.0-stopid-display.patch: -------------------------------------------------------------------------------- 1 | diff --git a/client/js/otp/layers/StopsLayer.js b/client/js/otp/layers/StopsLayer.js 2 | index 94b8898..8809473 100644 3 | --- a/client/js/otp/layers/StopsLayer.js 4 | +++ b/client/js/otp/layers/StopsLayer.js 5 | @@ -29,7 +29,7 @@ otp.layers.StopsLayer = 6 | 7 | module : null, 8 | 9 | - minimumZoomForStops : 15, 10 | + minimumZoomForStops : 0, 11 | 12 | initialize : function(module) { 13 | var this_ = this; 14 | @@ -107,12 +107,13 @@ otp.layers.StopsLayer = 15 | var routes_stop_trans = _tr('Routes Serving Stop'); 16 | 17 | // TriMet-specific code 18 | + var stopId = stop.id.split(':')[1]; 19 | if(stop.url && stop.url.indexOf("http://trimet.org") === 0) { 20 | - var stopId = stop.id.split(':')[1]; 21 | stop.titleLink = 'http://www.trimet.org/go/cgi-bin/cstops.pl?action=entry&resptype=U&lang=en&noCat=Landmark&Loc=' + stopId; 22 | } 23 | var context = _.clone(stop); 24 | context.agencyStopLinkText = otp.config.agencyStopLinkText || "Agency Stop URL"; 25 | + context.stop_id = stopId; 26 | context.stop_viewer = stop_viewer_trans; 27 | context.routes_on_stop = routes_stop_trans; 28 | context.plan_trip = plan_trip_trans; 29 | diff --git 
a/client/js/otp/layers/layers-templates.html b/client/js/otp/layers/layers-templates.html 30 | index 6ee2845..1bd1d1c 100644 31 | --- a/client/js/otp/layers/layers-templates.html 32 | +++ b/client/js/otp/layers/layers-templates.html 33 | @@ -11,17 +11,19 @@ 34 |
{{desc}}
35 | {{/desc}} 36 | 37 | +
Stop id: {{stop_id}}
38 | + 39 |
40 | [ 41 | - {{stop_viewer}} 42 | + {{stop_viewer}} 43 | {{#url}} 44 | | {{agencyStopLinkText}} 45 | {{/url}} 46 | ] 47 |
48 | -
{{plan_trip}}: [ {{from_stop}} | {{to_stop}} ]
49 | +
{{plan_trip}}: [ {{from_stop}} | {{to_stop}} ]
50 |
51 | - {{routes_on_stop}}: 52 | + {{routes_on_stop}}: 53 |
    54 |
55 | 56 | diff --git a/client/js/otp/modules/planner/ItinerariesWidget.js b/client/js/otp/modules/planner/ItinerariesWidget.js 57 | index fcfbb8d..88c682d 100644 58 | --- a/client/js/otp/modules/planner/ItinerariesWidget.js 59 | +++ b/client/js/otp/modules/planner/ItinerariesWidget.js 60 | @@ -560,7 +560,7 @@ otp.widgets.ItinerariesWidget = 61 | } 62 | 63 | //TRANSLATORS: Depart station / Board at station in itinerary 64 | - var startHtml = '
' + (leg.interlineWithPreviousLeg ? "" + pgettext("itinerary", "Depart") + " " : _tr("Board at ")) +leg.from.name; 65 | + var startHtml = '
' + (leg.interlineWithPreviousLeg ? "" + pgettext("itinerary", "Depart") + " " : _tr("Board at ")) +leg.from.name + " [ " + leg.from.stopId.split(':')[1] + " ]"; 66 | if(otp.config.municoderHostname) { 67 | var spanId = this.newMunicoderRequest(leg.from.lat, leg.from.lon); 68 | startHtml += ''; 69 | @@ -672,7 +672,7 @@ otp.widgets.ItinerariesWidget = 70 | //TRANSLATORS: Stay on board/Alight [at stop name] 71 | var endAction = (nextLeg && nextLeg.interlineWithPreviousLeg) ? _tr("Stay on board") : _tr("Alight"); 72 | //TRANSLATORS: [Stay on board/Alight] at [stop name] 73 | - var endHtml = '
' + endAction + ' ' + _tr('at')+ ' ' +leg.to.name; 74 | + var endHtml = '
' + endAction + ' ' + _tr('at')+ ' ' +leg.to.name + ' [ ' + leg.to.stopId.split(':')[1] + ' ]'; 75 | if(otp.config.municoderHostname) { 76 | spanId = this.newMunicoderRequest(leg.to.lat, leg.to.lon); 77 | endHtml += ''; 78 | -------------------------------------------------------------------------------- /tb_routing/types/tp.py: -------------------------------------------------------------------------------- 1 | ### TBRoutingEngine internal types - transfer patterns: tp-tree and its nodes 2 | 3 | import itertools as it, operator as op, functools as ft 4 | from collections import namedtuple, Counter 5 | 6 | from .. import utils as u 7 | 8 | 9 | @u.attr_struct(repr=False) 10 | class TPNodeID: 11 | prefix = u.attr_init() 12 | t = u.attr_init() 13 | k = u.attr_init() 14 | def __hash__(self): return hash((self.prefix, self.t, self.k)) 15 | def __repr__(self): return ''.format(self) 16 | 17 | @classmethod 18 | def for_k_type(cls, prefix, k): 19 | t = k.__class__.__name__.lower() 20 | return cls(prefix, t, k) 21 | 22 | 23 | @u.attr_struct(repr=False) 24 | class TPNode: 25 | value = u.attr_init() 26 | id = u.attr_init() 27 | edges_to = u.attr_init(set) 28 | seed = u.attr_init_id() 29 | def __hash__(self): return hash(self.id) 30 | def __repr__(self): 31 | return ( '' ).format(self.seed, self.id, len(self.edges_to)) 33 | 34 | 35 | TPTreeCounters = namedtuple('TPTreeCounters', 'total prefix') 36 | TPTreeStats = namedtuple('TPTreeStats', 'nodes nodes_unique t_src t_stop t_line edges') 37 | class TPTreeLookupError(Exception): pass 38 | 39 | class TPTree: 40 | 41 | def __init__(self, tree=None, stats=None, prefix=None): 42 | self.prefix, self.tree = prefix, u.init_if_none(tree, dict) 43 | self.stats = u.init_if_none(stats, lambda: TPTreeCounters(Counter(), Counter())) 44 | 45 | def stat_counts(self): 46 | stats = self.stats.total 47 | count_node_t = lambda t: sum(v for k,v in stats.items() if k[0] == t) 48 | return TPTreeStats( 49 | sum(stats.values()), len(stats), 50 | 
count_node_t('src'), count_node_t('stop'), count_node_t('linestop'), 51 | sum(len(node.edges_to) 52 | for subtree in ([self.tree] if self.prefix else self.tree.values()) 53 | for node_dict in subtree.values() 54 | for node in node_dict.values() ) ) 55 | 56 | def path_exists(self, node_src, node_dst): 57 | queue = [node_src] 58 | while queue: 59 | queue_prev, queue = queue, list() 60 | for node in queue_prev: 61 | if node is node_dst: return True # found path 62 | queue.extend(self[k] for k in node.edges_to) 63 | return False 64 | 65 | def node(self, k, value=None, t=None, no_path_to=None): 66 | '''Returns node with specified k/t or creates new one with value (or k as a fallback value). 67 | If no_path_to node is passed, returned node will never 68 | have a path to it, creating another same-k node if necessary.''' 69 | assert self.prefix, 'Can only add elements to prefixed subtree' 70 | no_path_to = None # XXX: it should be ok to bypass this check, but not 100% sure 71 | if isinstance(k, TPNode): k = k.value 72 | if not t: node_id = TPNodeID.for_k_type(self.prefix, k) 73 | else: node_id = TPNodeID(self.prefix, t, k) 74 | if not value: value = k 75 | if node_id not in self.tree: 76 | node = TPNode(value, node_id) 77 | self.tree[node_id] = {node.seed: node} 78 | self.stats.total[node_id.t, node_id.k] += 1 # nodes by type/key 79 | self.stats.prefix[self.prefix] += 1 # nodes for each prefix 80 | else: 81 | node_dict = self.tree[node_id] 82 | if not no_path_to: node = next(iter(node_dict.values())) 83 | else: # find node with no reverse path or create new one 84 | for node in node_dict.values(): 85 | if not self.path_exists(node, no_path_to): break 86 | else: 87 | node = TPNode(value, node_id) 88 | self.tree[node_id][node.seed] = node 89 | return node 90 | 91 | def _node_id_for_k(self, k, t=None): 92 | if isinstance(k, TPNode): k = k.id 93 | if not isinstance(k, TPNodeID): k = TPNodeID.for_k_type(self.prefix, k) 94 | return k 95 | 96 | def get_all(self, k, t=None): 97 | 
assert self.prefix, 'Only makes sense for subtrees' 98 | return self.tree[self._node_id_for_k(k, t)].values() 99 | 100 | def __getitem__(self, k): 101 | '''Returns subtree for prefix of the main tree, or unique node for 102 | specified node/node-id/k (using both id and seed from node objects!). 103 | If no unique element can be returned, TPTreeLookupError will be raised. 104 | get_all() can be used to fetch duplicate nodes for the same k, or with special t.''' 105 | if not self.prefix: return TPTree(self.tree.setdefault(k, dict()), self.stats, k) 106 | node_dict = self.tree[self._node_id_for_k(k)] 107 | if isinstance(k, TPNode): return node_dict[k.seed] 108 | if len(node_dict) != 1: 109 | raise TPTreeLookupError('Non-unique node(s) for {}: {}'.format(k, node_dict)) 110 | return next(iter(node_dict.values())) 111 | -------------------------------------------------------------------------------- /tb_routing/utils.py: -------------------------------------------------------------------------------- 1 | import itertools as it, operator as op, functools as ft 2 | from pathlib import Path 3 | from collections import UserList 4 | import os, sys, logging, datetime, base64 5 | import contextlib, tempfile, stat, warnings 6 | 7 | import attr 8 | 9 | 10 | class LogMessage: 11 | def __init__(self, fmt, a, k): self.fmt, self.a, self.k = fmt, a, k 12 | def __str__(self): return self.fmt.format(*self.a, **self.k) if self.a or self.k else self.fmt 13 | 14 | class LogStyleAdapter(logging.LoggerAdapter): 15 | def __init__(self, logger, extra=None): 16 | super(LogStyleAdapter, self).__init__(logger, extra or {}) 17 | def log(self, level, msg, *args, **kws): 18 | if not self.isEnabledFor(level): return 19 | log_kws = {} if 'exc_info' not in kws else dict(exc_info=kws.pop('exc_info')) 20 | msg, kws = self.process(msg, kws) 21 | self.logger._log(level, LogMessage(msg, args, kws), (), log_kws) 22 | 23 | get_logger = lambda name: LogStyleAdapter(logging.getLogger(name)) 24 | 25 | 26 | def 
def b64(data):
	'URL-safe base64 of data bytes, with padding stripped.'
	return base64.urlsafe_b64encode(data).rstrip(b'=').decode()

def get_uid_token(chars=4):
	'Short random printable token; chars*6 bits must align to whole bytes.'
	assert chars * 6 % 8 == 0, chars
	return b64(os.urandom(chars * 6 // 8))

def log_lines(log_func, lines, log_func_last=False):
	'''Feed each line to log_func, prefixed by one shared random uid tag,
		so lines of a single multi-line dump can be matched together in logs.
		lines: str (split on newlines) or list of str / (fmt, *args) tuples.
		log_func_last, if passed, is used for the final line instead.'''
	if isinstance(lines, str):
		lines = [line.rstrip() for line in lines.rstrip().split('\n')]
	uid, last = get_uid_token(), len(lines)
	for seq, line in enumerate(lines, 1):
		if isinstance(line, str): line = '[{}] {}', uid, line
		else: line = ['[{}] {}'.format(uid, line[0])] + list(line[1:])
		if log_func_last and seq == last: log_func_last(*line)
		else: log_func(*line)


def attr_struct(cls=None, vals_to_attrs=False, defaults=..., **kws):
	'''Class decorator to cut down on attrs boilerplate:
		"keys" class attribute (str or list) gets converted into attr.ib() fields,
		and with vals_to_attrs=True, plain class attrs become fields with defaults.'''
	if not cls: # invoked with options - return partially-applied decorator
		return ft.partial( attr_struct,
			vals_to_attrs=vals_to_attrs, defaults=defaults, **kws )
	try:
		keys = cls.keys
		del cls.keys
	except AttributeError: keys = list()
	else:
		field_kws = dict()
		if defaults is not ...: field_kws['default'] = defaults
		if isinstance(keys, str): keys = keys.split()
		for k in keys: setattr(cls, k, attr.ib(**field_kws))
	if vals_to_attrs:
		for k, v in vars(cls).items():
			if k.startswith('_') or k in keys or callable(v): continue
			setattr(cls, k, attr.ib(v))
	kws.setdefault('hash', not hasattr(cls, '__hash__'))
	kws.setdefault('slots', True)
	return attr.s(cls, **kws)

def attr_init(factory_or_default=attr.NOTHING, **attr_kws):
	'attr.ib() where callable defaults are treated as factories.'
	if callable(factory_or_default): factory_or_default = attr.Factory(factory_or_default)
	return attr.ib(default=factory_or_default, **attr_kws)

# Auto-incrementing id field - counter state lives in the lambda default arg
attr_init_id = lambda: attr_init(lambda seq=iter(range(2**40)): next(seq))


p = ft.partial(print, flush=True) # print that doesn't sit in buffers

def die(code=0):
	'Close std streams and exit immediately, skipping any cleanup handlers.'
	sys.stdout.close()
	sys.stderr.close()
	os._exit(code)

def coroutine(func):
	'Decorator to auto-prime generator-based coroutines on call.'
	@ft.wraps(func)
	def cr_wrapper(*args, **kws):
		cr = func(*args, **kws)
		next(cr)
		return cr
	return cr_wrapper
def get_any(d, *keys):
	'Return value for first of the keys present in mapping d, or None if none are.'
	for k in keys:
		try: return d[k]
		except KeyError: pass

def init_if_none(v, default):
	'''Return v unless it is None, in which case return default
		(calling it first, if callable - e.g. a type or factory func).'''
	if v is None:
		# bugfix: non-callable defaults used to be discarded ("else v"), returning None
		v = default() if callable(default) else default
	return v

def same_type_and_id(v1, v2):
	'True if v1/v2 are of the exact same type and have equal .id attrs.'
	return type(v1) is type(v2) and v1.id == v2.id

@contextlib.contextmanager
def supress_warnings():
	'''Similar to warnings.catch_warnings(),
		but does not miss stuff like DeprecationWarning.'''
	filters_bak = warnings.filters
	warnings.filterwarnings('ignore')
	try: yield
	finally: warnings.filters[:] = filters_bak

inf = float('inf')

def max(iterable, default=..., _max=max, **kws):
	'Like builtin max(), but with a fallback default= for empty iterables.'
	try: return _max(iterable, **kws)
	except ValueError:
		if default is ...: raise
		return default

def min(iterable, default=..., _min=min, **kws):
	'Like builtin min(), but with a fallback default= for empty iterables.'
	try: return _min(iterable, **kws)
	except ValueError:
		if default is ...: raise
		return default
### Atomic file replacement, pickle caching and day-time ("dts", seconds) helpers.

@contextlib.contextmanager
def safe_replacement(path, *open_args, mode=None, **open_kws):
	'''Context manager yielding a NamedTemporaryFile in the same dir as path,
		which atomically replaces path on successful exit (and is removed otherwise).
		mode: octal permissions int, defaults to those of a pre-existing file, if any.'''
	path = str(path)
	if mode is None:
		try: mode = stat.S_IMODE(os.lstat(path).st_mode)
		except OSError: pass # no pre-existing file to copy mode from
	open_kws.update( delete=False,
		dir=os.path.dirname(path), prefix=os.path.basename(path)+'.' )
	if not open_args: open_kws['mode'] = 'w'
	with tempfile.NamedTemporaryFile(*open_args, **open_kws) as tmp:
		try:
			if mode is not None: os.fchmod(tmp.fileno(), mode)
			yield tmp
			if not tmp.closed: tmp.flush()
			# os.replace() is atomic like rename() on posix, but also portable to windows
			os.replace(tmp.name, path)
		finally:
			try: os.unlink(tmp.name)
			except OSError: pass # already renamed into place


use_pickle_cache = os.environ.get('TB_PICKLE') # default cache path for helpers below

def pickle_dump(state, name=use_pickle_cache or 'state.pickle'):
	'Atomically serialize state object to a pickle cache file.'
	import pickle
	pickle_log = get_logger('pickle') # resolved lazily to avoid import-order issues
	with safe_replacement(name, 'wb') as dst:
		pickle_log.debug('Pickling data (type={}) to: {}', state.__class__.__name__, name)
		pickle.dump(state, dst)

def pickle_load(name=use_pickle_cache or 'state.pickle', fail=False):
	'Load pickled state from cache file, returning None on any error unless fail=True.'
	import pickle
	pickle_log = get_logger('pickle')
	try:
		with open(str(name), 'rb') as src:
			pickle_log.debug('Unpickling data from: {}', name)
			return pickle.load(src)
	except Exception:
		if fail: raise # otherwise best-effort - missing/stale cache is not an error


def dts_parse(dts_str):
	'Parse "HH:MM[:SS]" day-time string (or plain number of seconds) into seconds.'
	if ':' not in dts_str: return float(dts_str)
	dts_vals = dts_str.split(':')
	if len(dts_vals) == 2: dts_vals.append('00')
	assert len(dts_vals) == 3, dts_vals
	return sum(int(n)*k for k, n in zip([3600, 60, 1], dts_vals))

def dts_format(dts):
	'Format seconds-since-midnight value as [days+]HH:MM:SS string.'
	dts_days, dts = divmod(int(dts), 24 * 3600)
	# fractional seconds are truncated by int() above,
	#  so the old always-zero microseconds arg was dropped as dead code
	dts = str(datetime.time(dts // 3600, (dts % 3600) // 60, dts % 60))
	if dts_days: dts = '{}+{}'.format(dts_days, dts)
	return dts
### Randomized timetable-generation helpers.

rand_int = random.randint
# Random int in [a, b], aligned down to step multiples
rand_int_align = lambda a,b,step: random.randint(a//step, b//step)*step

def line_dts_start_end():
	'''Pick random service start/end day-times (in seconds) for a line:
		mostly all-day lines, with some daytime-only and evening-only ones mixed in.'''
	if random.random() < 0.7: # all-day line
		line_dts_start, line_dts_end = rand_int(0, 10), rand_int(18, 24)
	elif random.random() < 0.5: # daytime-only line, at least 2h of service
		line_dts_start = rand_int(5, 17)
		line_dts_end = rand_int(line_dts_start, 23)
		while line_dts_end - line_dts_start < 2:
			line_dts_start = rand_int(5, line_dts_end)
	else: # evening line
		line_dts_start, line_dts_end = rand_int(12, 18), rand_int(20, 24)
	line_dts_start = 3600 * line_dts_start + rand_int_align(0, 55, 5)*60
	line_dts_end = 3600 * line_dts_end
	return line_dts_start, line_dts_end

def dist(stop_a, stop_b):
	'Euclidean distance between two stops (in km, from graph lon/lat coords).'
	dx = abs(stop_a.lon - stop_b.lon)
	dy = abs(stop_a.lat - stop_b.lat)
	return (dx**2 + dy**2)**0.5

def print_dt_stats( stop_a, stop_b,
		stops=None, line_kmh=70, fp_kmh=5, fp_dt_base=2*60 ):
	'''Print distance / footpath-time / line-travel-time estimates between two
		stops (passed as objects, or as ids resolved via stops=), to help
		tweak the calc_dt_* formulas - see comment in Conf above.'''
	stop_a, stop_b = (
		(stops[v] if isinstance(v, (str, int)) else v)
		for v in [stop_a, stop_b] )
	km = dist(stop_a, stop_b)
	# NOTE(review): calc_dt_fp/calc_dt_line appear to be Conf attrs -
	#  confirm these unqualified names actually resolve at module level
	print('\n'.join([
		'Path: {a} -> {b}',
		' distance: {d:.1f} km',
		' dt_fp: {dt_fp:.1f} min',
		' dt_line: {dt_line:.0f} min' ]).format(
			a=stop_a, b=stop_b, d=km,
			dt_fp=calc_dt_fp(km, fp_kmh, fp_dt_base) / 60,
			dt_line=calc_dt_line(km, line_kmh) // 60 ))
def main(args=None):
	'''CLI entry point: parse a json-dgc graph dump, generate a randomized
		timetable (trips + footpaths) for it and pickle the resulting Timetable.'''
	import argparse
	parser = argparse.ArgumentParser(
		description='Generate mock transport network timetable'
			' from a json-dgc (https://github.com/eimink/json-dgc/) graph.'
			' Graph node name format must be: [stop_id ":"] "L" x1 "-" y1 ["/L" x2 "-" y2] ...,'
			' where x values are line-ids and y values are sortable to connect stops of same line.'
			' Examples: L4-b, L1-a/L3-j.'
			' Edge arrows do not matter at all, only node names/positions do.')
	parser.add_argument('dag_json', help='DAG JSON/YAML file saved from json-dgc app.')
	parser.add_argument('tt_pickle', help='Path to store pickled Timetable object to.')
	parser.add_argument('-s', '--seed',
		help='Randomness seed (any string) for generated stuff.'
			' Generated by ordered nodes/edges concatenation by default.')
	opts = parser.parse_args(sys.argv[1:] if args is None else args)

	conf = Conf()

	# Load graph dump, wrapping nodes/edges into attr-access dicts
	with pathlib.Path(opts.dag_json).open() as src: dag = c.yaml_load(src)
	dag = c.dmap(dag)
	dag.edges = list(map(c.dmap, dag.edges))
	dag.nodes = list(map(c.dmap, dag.nodes))

	# Default seed is deterministic wrt graph contents
	seed = opts.seed
	if not seed:
		seed_parts = list()
		for node in sorted(dag.nodes, key=op.itemgetter('id')):
			seed_parts.extend([node.id, node.title])
		for edge in sorted(dag.edges, key=op.itemgetter('source', 'target')):
			seed_parts.extend([edge.source, edge.target])
		seed = '\0'.join(map(str, seed_parts))
	random.seed(seed)

	types = tb.t.public
	trips, stops, footpaths = types.Trips(), types.Stops(), types.Footpaths()
	# Kept around for interactive tweaking of Conf.calc_dt_* formulas
	p_dt_stats = ft.partial( print_dt_stats, stops=stops,
		fp_kmh=conf.fp_kmh, fp_dt_base=conf.fp_dt_base )

	# Parse "stop_id:Lx-y/Lz-w" node titles into per-line (seq, stop) lists
	lines = dict()
	for node in dag.nodes:
		for line_node in node.title.split('/'):
			m = re.search('^(.*?:)?L(.+?)-(.*)$', line_node)
			if not m: raise ValueError(line_node)
			stop_id, line_id, line_seq = m.groups()
			# NOTE(review): stop_id keeps its trailing ":" from the regex - confirm intended
			stop = stops.add(types.Stop(
				stop_id or node.title, node.title, node.x, node.y ))
			lines.setdefault(line_id, list()).append((line_seq, stop))

	# Generate random non-overtaking trips for every line
	for line_id, line in sorted(lines.items()):
		line_dts_start, line_dts_end = line_dts_start_end()
		line_dts_interval = rand_int_align(*conf.line_trip_interval)*60
		line_kmh = rand_int_align(*conf.line_kmh)
		line_stops = list(map(op.itemgetter(1), sorted(line)))

		trip_prev = None
		for trip_seq in range(conf.line_trip_max_count):
			trip_dts_start = line_dts_start + line_dts_interval * trip_seq
			if trip_dts_start > line_dts_end: break
			for attempt in range(conf.reroll_max):
				trip = types.Trip(line_id_hint='L{}'.format(line_id))
				for stopidx, stop in enumerate(line_stops):
					if not trip.stops: dts_arr = trip_dts_start
					else:
						ts = trip.stops[-1]
						dts_arr = ts.dts_dep + divmod(
							calc_dt_line(dist(ts.stop, stop), line_kmh), 60 )[0] * 60
					dts_dep = dts_arr + rand_int_align(*conf.line_stop_linger)*60
					if trip_prev and trip_prev[stopidx].dts_arr >= dts_arr: break # avoid overtaking trips
					trip.add(types.TripStop(trip, stopidx, stop, dts_arr, dts_dep))
				else: break # whole trip generated without overtaking - accept it
			else:
				raise RuntimeError( 'Failed to generate'
					' non-overtaking trips in {:,} tries'.format(conf.reroll_max) )
			trips.add(trip)
			trip_prev = trip

	# Footpaths: same-stop transfer times plus walks within fp_dt_max
	with footpaths.populate() as fp_add:
		fp_delta_max = conf.fp_dt_max * 60
		for stop in stops: fp_add(stop, stop, conf.dt_ch*60)
		for stop_a, stop_b in it.permutations(stops, 2):
			fp_delta = int(calc_dt_fp(dist(stop_a, stop_b), conf.fp_kmh, conf.fp_dt_base))
			if fp_delta <= fp_delta_max: fp_add(stop_a, stop_b, fp_delta)

	timetable = types.Timetable(stops, footpaths, trips)
	with pathlib.Path(opts.tt_pickle).open('wb') as dst: pickle.dump(timetable, dst)

if __name__ == '__main__': sys.exit(main())
testNoRouteBecauseOfMissedConnection: 89 | timetable: 90 | CS1235: 91 | - [A, x, '10:10'] 92 | - [M, '11:35', '11:45'] 93 | - [X, '12:00', x] 94 | CS1236: 95 | - [M, x, '11:30'] 96 | - [N, '11:40', x] 97 | 98 | goal: [A, N, '9:00'] 99 | 100 | journey_set: 101 | 102 | 103 | testRouteToNonTimetabledConnection: 104 | timetable: 105 | trips: 106 | CS1235: 107 | - [A, x, '10:10'] 108 | - [B, '10:20', '10:25'] 109 | - [C, '10:40', x] 110 | footpaths: 111 | - [B, D, 30] 112 | 113 | goal: [A, D, '10:00'] 114 | 115 | journey_set: 116 | journey-A: 117 | stats: ['10:10', '10:50'] 118 | segments: 119 | trip: [trip, A, B] 120 | walk: [fp, B, D] 121 | 122 | 123 | testRouteFromNonTimetabledConnection: 124 | timetable: 125 | trips: 126 | CS1235: 127 | - [A, x, '10:10'] 128 | - [B, '10:20', '10:25'] 129 | - [C, '10:40', '11:00'] 130 | - [D, '11:20', x] 131 | footpaths: 132 | - [X, B, 30] 133 | 134 | goal: [X, C, '9:00'] 135 | 136 | journey_set: 137 | journey-A: 138 | stats: ['9:55', '10:40'] 139 | segments: 140 | walk: [fp, X, B] 141 | trip: [trip, B, C] 142 | 143 | 144 | testRouteWithNonTimetabledConnection: 145 | timetable: 146 | trips: 147 | CS1235: 148 | - [A, x, '10:10'] 149 | - [B, '10:20', '10:25'] 150 | - [C, '10:40', x] 151 | CS1236: 152 | - [M, x, '11:00'] 153 | - [N, '11:50', '11:55'] 154 | - [X, '12:00', x] 155 | footpaths: 156 | - [B, M, 30] 157 | - [C, N, 120] 158 | 159 | goal: [A, X, '10:00'] 160 | 161 | journey_set: 162 | journey-A: 163 | stats: ['10:10', '12:00'] 164 | segments: 165 | trip-1: [trip, A, B] 166 | walk: [fp, B, M] 167 | trip-2: [trip, M, X] 168 | 169 | 170 | testRouteWithNonTimetabledConnectionThatCantBeUsed: 171 | timetable: 172 | trips: 173 | CS1235: 174 | - [A, x, '10:10'] 175 | - [M, '10:35', '10:45'] 176 | - [X, '11:30', x] 177 | CS1236: 178 | - [Y, x, '11:30'] 179 | - [N, '11:40', x] 180 | footpaths: 181 | - [M, Y, 60] 182 | # - [Y, M, 1] 183 | - [X, Y, 1] 184 | 185 | goal: [A, N, '9:00'] 186 | 187 | journey_set: 188 | 189 | 190 | 
testNonTimetableOnly: 191 | timetable: 192 | trips: 193 | CS1235: 194 | - [A, x, '10:00'] 195 | - [B, '10:15', '10:20'] 196 | - [C, '10:45', '11:00'] 197 | - [D, '11:15', x] 198 | footpaths: 199 | - [A, D, 30] 200 | 201 | goal: [A, D, '9:00'] 202 | 203 | journey_set: 204 | journey-A: 205 | stats: ['9:00', '9:30'] 206 | segments: 207 | walk: [fp, A, D] 208 | 209 | 210 | profile: 211 | timetable: 212 | trips: 213 | 214 | # Dominated journey due to early departure with same arrival 215 | CS1232: 216 | - [A, x, '10:00'] 217 | - [C, '11:30', x] 218 | 219 | # This leads to optimal journey for earliest-query, 220 | # with arrival at 12:00 and 3 trips, BUT not good enough for 221 | # profile query, which also has same-arrival journey with less hops. 222 | CS1233: 223 | - [A, x, '10:10'] 224 | - [X1, '11:00', '11:05'] 225 | - [B, '11:20', '11:30'] 226 | - [X2, '12:00', x] 227 | CS1234: 228 | - [Y1, x, '10:50'] 229 | - [B, '11:10', '11:28'] 230 | - [C, '11:30', x] 231 | 232 | # Leads to 10:50-12:00/2 optimal 233 | CS1235: 234 | - [A, x, '10:50'] 235 | - [B, '11:00', '11:05'] 236 | - [C, '11:30', x] 237 | 238 | # Leads to 11:10-13:00/2 optimal 239 | CS1236: 240 | - [A, x, '11:10'] 241 | - [B, '11:35', '11:45'] 242 | - [C, '12:30', x] 243 | 244 | # Optimal because single src-dst trip 245 | CS1237: 246 | - [A, '9:00', '9:20'] 247 | - [Z1, '9:30', '9:40'] 248 | - [Z2, '10:00', '10:10'] 249 | - [Z3, '10:20', '10:30'] 250 | - [Z4, '10:40', '10:50'] 251 | - [Z5, '11:00', '11:10'] 252 | - [Z6, '11:20', '11:30'] 253 | - [Z7, '11:40', '11:50'] 254 | - [B, '12:00', '12:10'] 255 | - [D, '14:00', x] 256 | 257 | # End-trips 258 | CS1238: 259 | - [C, '11:30', '11:40'] 260 | - [D, '12:00', x] 261 | CS1239: 262 | - [C, '12:30', '12:40'] 263 | - [D, '13:00', x] 264 | 265 | footpaths: 266 | - [A, D, 120] # direct footpath, longer than most trips 267 | 268 | goal: [A, D, '9:00', '14:00'] 269 | 270 | journey_set: 271 | journey-A: 272 | stats: ['11:10', '13:00'] 273 | segments: 274 | trip-A: 
### Length-prefixed binary dump/load of homogeneous struct-packed chunk tuples.

struct_dump_header_fmt = '>I' # big-endian uint32 chunk-count prefix

def struct_dumps(chunk_fmt, chunks, chunk_count=None):
	'''Pack iterable of chunk tuples into a bytearray:
		uint32 count header followed by chunk_fmt-packed fixed-size records.
		chunk_count must be passed for iterables without len() (e.g. generators).'''
	header_t = struct.Struct(struct_dump_header_fmt)
	if chunk_count is None: chunk_count = len(chunks)
	chunk_t = struct.Struct(chunk_fmt)
	buff_len = header_t.size + chunk_t.size * chunk_count
	buff = bytearray(buff_len)
	header_t.pack_into(buff, 0, chunk_count)
	n = buff_n = None # bugfix: were left undefined for 0 chunks, raising NameError below
	for n, (buff_n, chunk) in enumerate(zip(
			range(header_t.size, buff_len, chunk_t.size), chunks )):
		chunk_t.pack_into(buff, buff_n, *chunk)
	if chunk_count: # make sure chunks-iterable actually matched chunk_count
		assert n == chunk_count-1 and buff_n == buff_len-chunk_t.size
	return buff

def struct_load_iter(chunk_fmt, stream):
	'Yield chunk tuples back from a stream written via struct_dumps(chunk_fmt, ...).'
	header_t = struct.Struct(struct_dump_header_fmt)
	chunk_count, = header_t.unpack(stream.read(header_t.size))
	chunk_t = struct.Struct(chunk_fmt)
	chunk_buff_len = chunk_t.size * chunk_count
	chunk_buff = stream.read(chunk_buff_len)
	for buff_n in range(0, chunk_buff_len, chunk_t.size):
		yield chunk_t.unpack_from(chunk_buff, buff_n)


class Line:
	'''Line - group of trips with identical stop sequences,
		ordered from earliest-to-latest by arrival time on ALL stops.
		If one trip overtakes another (making
		such strict ordering impossible), trips should be split into different lines.'''
arrival time on ALL stops. 39 | If one trip overtakes another (making 40 | such strict ordering impossible), trips should be split into different lines.''' 41 | 42 | def __init__(self, *trips): self.set_idx = list(trips) 43 | def __repr__(self): 44 | return ''.format('{:x}'.format(self.id) if isinstance(self.id, int) else self.id) 45 | 46 | @property 47 | def stops(self): 48 | 'Sequence of Stops for all of the Trips on this Line.' 49 | return list(map(op.attrgetter('stop'), self.set_idx[0].stops)) 50 | 51 | def hash_trips(self): return hash(tuple(map(op.attrgetter('id'), self.set_idx))) 52 | 53 | _id_cache = None 54 | @property 55 | def id(self): 56 | if not self._id_cache: 57 | # Purely for ease of introspection/debugging 58 | line_id_hints = sorted(set(filter( None, 59 | map(op.attrgetter('line_id_hint'), self.set_idx) ))) 60 | if line_id_hints: self._id_cache = '/'.join(line_id_hints) 61 | else: self._id_cache = self.hash_trips() 62 | return self._id_cache 63 | @id.setter 64 | def id(self, value): self._id_cache = value 65 | 66 | def add(self, *trips): 67 | self.set_idx.extend(trips) 68 | self.set_idx.sort(key=lambda trip: sum(map(op.attrgetter('dts_arr'), trip))) 69 | 70 | def earliest_trip(self, stopidx, dts=0): 71 | for trip in self: 72 | if trip[stopidx].dts_dep >= dts: return trip 73 | 74 | def trips_by_relation(self, trip, *rel_set): 75 | '''Return trips from line with specified SolutionStatus relation(s) *from* trip. 76 | E.g. 
func(t, non_dominated) will return u where t ≺ u.''' 77 | for line_trip in self: 78 | rel = trip.compare(line_trip) 79 | if rel in rel_set: yield line_trip 80 | 81 | def __getitem__(self, k): return self.set_idx[k] 82 | def __hash__(self): return hash(self.id) 83 | def __eq__(self, line): return u.same_type_and_id(self, line) 84 | def __len__(self): return len(self.set_idx) 85 | def __iter__(self): return iter(self.set_idx) 86 | 87 | 88 | @u.attr_struct 89 | class LineStop: 90 | line_id = u.attr_init() 91 | stopidx = u.attr_init() 92 | def __hash__(self): return hash((self.line_id, self.stopidx)) 93 | 94 | 95 | class Lines: 96 | 97 | def __init__(self): 98 | self.idx_stop, self.idx_trip, self.idx_id = dict(), dict(), dict() 99 | 100 | def add(self, *lines): 101 | for line in lines: 102 | for stopidx, ts in enumerate(line[0]): 103 | self.idx_stop.setdefault(ts.stop, list()).append((stopidx, line)) 104 | for trip in line: self.idx_trip[trip] = line 105 | 106 | # Resolve any potential line.id conflicts for named lines 107 | # This should only be used/necessary for "nice" test-graphs 108 | if line.id in self.idx_id and line is not self.idx_id[line.id]: 109 | if self.idx_id[line.id]: 110 | line2 = self.idx_id[line.id] 111 | line2.id = '{}.{:x}'.format(line2.id, line2.hash_trips()) 112 | self.idx_id[line2.id], self.idx_id[line.id] = line2, None 113 | line.id = '{}.{:x}'.format(line.id, line.hash_trips()) 114 | assert line.id not in self.idx_id # trip-id-hash collisions 115 | self.idx_id[line.id] = line 116 | 117 | def lines_with_stop(self, stop): 118 | 'All lines going through stop as (stopidx, line) tuples.' 
119 | return self.idx_stop.get(stop, list()) 120 | 121 | def line_for_trip(self, trip): return self.idx_trip[trip] 122 | 123 | _dump_prefix, _dump_t, _dump_sep = '>I', 'I', 2**32-1 124 | 125 | def dump(self, stream): 126 | dump = array.array(self._dump_t) 127 | for line in self: 128 | for trip in line: 129 | assert trip.id != self._dump_sep, trip 130 | dump.append(trip.id) 131 | dump.append(self._dump_sep) 132 | stream.write(struct.pack(self._dump_prefix, len(dump))) 133 | dump.tofile(stream) 134 | 135 | @classmethod 136 | def load(cls, stream, timetable): 137 | dump, prefix_t = array.array(cls._dump_t), struct.Struct(cls._dump_prefix) 138 | dump_len, = prefix_t.unpack(stream.read(prefix_t.size)) 139 | with u.supress_warnings(): 140 | # DeprecationWarning about fromfile using fromstring internally 141 | dump.fromfile(stream, dump_len) 142 | self, line_trips = cls(), list() 143 | for trip_id in dump: 144 | if trip_id == cls._dump_sep: 145 | self.add(Line(*line_trips)) 146 | line_trips.clear() 147 | continue 148 | line_trips.append(timetable.trips[trip_id]) 149 | assert not line_trips 150 | return self 151 | 152 | def __getitem__(self, line_id): return self.idx_id[line_id] 153 | def __iter__(self): return iter(filter(None, self.idx_id.values())) 154 | def __len__(self): return len(set(map(id, self.idx_trip.values()))) 155 | 156 | 157 | @u.attr_struct 158 | class Transfer: 159 | ts_from = u.attr_init() 160 | ts_to = u.attr_init() 161 | dt = u.attr_init(0) # used for min-footpath ordering 162 | id = u.attr_init_id() 163 | def __hash__(self): return hash(self.id) 164 | def __iter__(self): return iter(u.attr.astuple(self, recurse=False)) 165 | 166 | class TransferSet: 167 | 168 | def __init__(self): self.set_idx, self.set_idx_keys = dict(), dict() 169 | 170 | def add(self, transfer): 171 | # Second mapping is used purely for more efficient O(1) removals 172 | assert transfer.ts_from.dts_arr < transfer.ts_to.dts_dep # sanity check 173 | k1 = transfer.ts_from.trip.id, 
transfer.ts_from.stopidx 174 | if k1 not in self.set_idx: self.set_idx[k1] = dict() 175 | k2 = len(self.set_idx[k1]) 176 | self.set_idx[k1][k2] = transfer 177 | self.set_idx_keys[transfer.id] = k1, k2 178 | 179 | def from_trip_stop(self, ts): 180 | k1 = ts.trip.id, ts.stopidx 181 | return self.set_idx.get(k1, dict()).values() 182 | 183 | _dump_fmt = '>IBIBfI' 184 | 185 | def dump(self, stream): 186 | chunk_iter = ( 187 | ( transfer.ts_from.trip.id, transfer.ts_from.stopidx, 188 | transfer.ts_to.trip.id, transfer.ts_to.stopidx, transfer.dt, transfer.id ) 189 | for transfer in self ) 190 | stream.write(struct_dumps(self._dump_fmt, chunk_iter, len(self))) 191 | 192 | @classmethod 193 | def load(cls, stream, timetable): 194 | self = cls() 195 | for transfer_tuple in struct_load_iter(cls._dump_fmt, stream): 196 | ts_from = timetable.trips[transfer_tuple[0]][transfer_tuple[1]] 197 | ts_to = timetable.trips[transfer_tuple[2]][transfer_tuple[3]] 198 | self.add(Transfer(ts_from, ts_to, transfer_tuple[4], transfer_tuple[5])) 199 | return self 200 | 201 | def __contains__(self, transfer): 202 | k1, k2 = self.set_idx_keys[transfer.id] 203 | return bool(self.set_idx.get(k1, dict()).get(k2)) 204 | def __delitem__(self, transfer): 205 | k1, k2 = self.set_idx_keys.pop(transfer.id) 206 | del self.set_idx[k1][k2] 207 | if not self.set_idx[k1]: del self.set_idx[k1] 208 | def __len__(self): return len(self.set_idx_keys) 209 | def __iter__(self): 210 | for k1, k2 in self.set_idx_keys.values(): yield self.set_idx[k1][k2] 211 | 212 | 213 | @u.attr_struct 214 | class Graph: 215 | keys = 'timetable lines transfers' 216 | def __iter__(self): return iter(u.attr.astuple(self, recurse=False)) 217 | 218 | def dump(self, stream): 219 | self.lines.dump(stream) 220 | self.transfers.dump(stream) 221 | 222 | @classmethod 223 | def load(cls, stream, timetable): 224 | lines = Lines.load(stream, timetable) 225 | transfers = TransferSet.load(stream, timetable) 226 | return cls(timetable, lines, 
#!/usr/bin/env python3
'''CLI frontend for tb_routing: parse GTFS data into a timetable/graph
	and run earliest-arrival / profile / transfer-patterns queries on it.'''

import itertools as it, operator as op, functools as ft
from pathlib import Path
import os, sys, time, re

import tb_routing as tb


def main(args=None):
	'''Parse CLI arguments (sys.argv by default), build/load timetable and
		router, then dispatch to the selected subcommand.
		Calls parser.error() (SystemExit) on invalid usage; returns None otherwise.'''
	conf = tb.gtfs.GTFSConf()
	conf_engine = tb.engine.EngineConf(
		log_progress_for={'lines', 'pre-initial-set', 'pre-reduction', 'transfer-patterns'} )

	import argparse
	parser = argparse.ArgumentParser(
		description='Simple implementation of trip-based graph-db and algorithms.')
	parser.add_argument('gtfs_dir_or_pickle',
		help='Path to gtfs data directory to build'
			' graph from or a pickled timetable object (if points to a file).')

	group = parser.add_argument_group('Basic timetable/parser options')
	group.add_argument('--cache-timetable', metavar='path',
		help='Store parsed timetable data (in pickle format) to specified file.'
			' This file can then be used in place of gtfs dir, and should load much faster.'
			' All of the "Timetable calendar options" only affect how'
				' the timetable data is parsed and that file generated,'
				' and can be dropped after that.')
	group.add_argument('-c', '--cache-precalc', metavar='path',
		help='Precalculation cache file to load (if exists)'
			' or save (if missing) resulting graph data from/to.'
			' This option must only be used with cached'
			' timetable data (see --cache-timetable option).')
	group.add_argument('-s', '--stops-to-stations', action='store_true',
		help='Convert/translate GTFS "stop" ids to "parent_station" ids,'
			' i.e. group all stops on the station into a single one.'
			' Can produce smaller graphs that would be easier to query.')

	group = parser.add_argument_group('Timetable calendar options')
	group.add_argument('-d', '--day', metavar='{ YYYYMMDD | date }',
		help='Specific date when trip is taking place.'
			# Fixed: original concatenation produced "place.Will also" (missing space)
			' Will also make script parse GTFS calendar data and only build'
			' timetable for trips/footpaths/links active on specified day and its vicinity.'
			' Without this option, all trips/etc will be used regardless of calendar info.'
			' Format is either YYYYMMDD or something that "date -d ..." call would parse (e.g.: today).'
			' See also --parse-days-after and --parse-days-before options.')
	group.add_argument('--parse-days-after',
		type=int, default=conf.parse_days, metavar='n',
		help='In addition to date specified with --parse-day,'
			' process trips for specified number of days after it.'
			' This is important to build journeys which e.g. start on 23:00 and end on'
				' the next day - will be impossible to build these without info from there.'
			' Default: %(default)s')
	group.add_argument('--parse-days-before',
		type=int, default=conf.parse_days_pre, metavar='n',
		help='Similar to --parse-days-after, but for loading data from N previous days.'
			' For journeys starting at e.g. 00:10, many trips starting'
				' on a previous day (e.g. just 10min ago) can be useful.'
			' Default: %(default)s')

	group = parser.add_argument_group('Misc/debug options')
	group.add_argument('--dot-for-lines', metavar='path',
		help='Dump Stop/Line graph (in graphviz dot format) to a specified file and exit.')
	group.add_argument('--dot-opts', metavar='yaml-data',
		help='Options for graphviz graph/nodes/edges to use with all'
			' --dot-for-* commands, as a YAML mappings. Example: {graph: {rankdir: LR}}')
	group.add_argument('--engine-conf', metavar='yaml-data',
		help='Override values for EngineConf as a YAML mapping.'
			' Example: {log_progress_steps: 1000}')
	group.add_argument('--debug', action='store_true', help='Verbose operation mode.')

	cmds = parser.add_subparsers(title='Commands', dest='call')


	cmd = cmds.add_parser('cache',
		help='Generate/store all the caches and exit.')


	cmd = cmds.add_parser('query-earliest-arrival',
		help='Run earliest arrival query, output resulting journey set.')
	cmd.add_argument('stop_from', help='Stop ID to query journey from. Example: J22209723_0')
	cmd.add_argument('stop_to', help='Stop ID to query journey to. Example: J2220952426_0')
	cmd.add_argument('day_time', nargs='?', default='00:00',
		help='Day time to start journey at, either as HH:MM,'
			' HH:MM:SS or just seconds int/float. Default: %(default)s')


	cmd = cmds.add_parser('query-profile',
		help='Run profile query, output resulting journey set.')

	group = cmd.add_argument_group('Query parameters')
	group.add_argument('stop_from', help='Stop ID to query journey from. Example: J22209723_0')
	group.add_argument('stop_to', help='Stop ID to query journey to. Example: J2220952426_0')
	group.add_argument('day_time_earliest', nargs='?', default='00:00',
		help='Earliest day time to start journey(s) at, either as HH:MM,'
			' HH:MM:SS or just seconds int/float. Default: %(default)s')
	group.add_argument('day_time_latest', nargs='?', default='24:00',
		help='Latest day time to start journey(s) at, either as HH:MM,'
			' HH:MM:SS or just seconds int/float. Default: %(default)s')

	group = cmd.add_argument_group('Limits')
	group.add_argument('-m', '--max-transfers',
		type=int, metavar='n', default=15,
		help='Max number of transfers (i.e. interchanges)'
			' between journey trips allowed in the results. Default: %(default)s')


	cmd = cmds.add_parser('query-transfer-patterns',
		help='Build/load Transfer-Patterns trie and run queries on it.')

	group = cmd.add_argument_group('Query parameters')
	group.add_argument('stop_from', help='Stop ID to query journey from. Example: J22209723_0')
	group.add_argument('stop_to', help='Stop ID to query journey to. Example: J2220952426_0')
	group.add_argument('day_time_earliest', nargs='?', default='00:00',
		help='Earliest day time to start journey(s) at, either as HH:MM,'
			' HH:MM:SS or just seconds int/float. Default: %(default)s')
	group.add_argument('day_time_latest', nargs='?', default='24:00',
		help='Latest day time to start journey(s) at, either as HH:MM,'
			' HH:MM:SS or just seconds int/float. Default: %(default)s')

	group = cmd.add_argument_group('Limits')
	group.add_argument('-m', '--max-transfers',
		type=int, metavar='n', default=15,
		help='Max number of transfers (i.e. interchanges)'
			' between journey trips allowed in the results. Default: %(default)s')

	group = cmd.add_argument_group('Graph options')
	group.add_argument('--tree-cache', metavar='path',
		help='Pickle cache-file to load (if exists)'
			' or save (if missing) resulting Transfer-Patterns'
			' prefix-tree from/to (see arXiv:1607.01299v2 paper).')

	group = cmd.add_argument_group('Misc/debug options')
	group.add_argument('--dot-for-tp-subtree', metavar='path',
		help='Dump TB-TP subtree graph for specified'
			' stop_from (in graphviz dot format) to a file and exit.')
	group.add_argument('--dot-for-tp-query-tree', metavar='path',
		help='Dump TB-TP query tree graph for specified'
			' stop_from/stop_to pair (in graphviz dot format) to a file and exit.')


	opts = parser.parse_args(sys.argv[1:] if args is None else args)

	tb.u.logging.basicConfig(
		format='%(asctime)s :: %(name)s %(levelname)s :: %(message)s',
		datefmt='%Y-%m-%d %H:%M:%S',
		level=tb.u.logging.DEBUG if opts.debug else tb.u.logging.WARNING )
	# Fix: "log" was used in the --day branch below but never defined in this
	# file, raising NameError whenever "date -d" fallback parsing was reached.
	log = tb.u.get_logger('gtfs-cli')

	tt_path = Path(opts.gtfs_dir_or_pickle)
	cache_path = opts.cache_precalc and Path(opts.cache_precalc)
	if opts.call != 'cache' and (not tt_path.is_file() and cache_path and cache_path.is_file()):
		parser.error( 'Pre-generated --cache-precalc dump can only'
			' be used with cached timetable (see --cache-timetable option).' )

	# Normalize --day to YYYYMMDD, falling back to coreutils "date -d" for fuzzy values
	day = opts.day
	if day:
		m = re.search(r'^\s*(\d{4})\s*-\s*(\d{2})\s*-\s*(\d{2})\s*$', day)
		if m: day = ''.join(m.groups())
		if not (day.isdigit() and len(day) == 8):
			import subprocess
			proc = subprocess.Popen(['date', '-d', day, '+%Y%m%d'], stdout=subprocess.PIPE)
			day = proc.stdout.read().decode().strip()
			if proc.wait() != 0: parser.error('"date -d" failed to parse --day value: {!r}'.format(day))
			log.debug('Parsed --day value via "date -d" subprocess: {!r} -> {}', opts.day, day)

	if opts.stops_to_stations: conf.group_stops_into_stations = True
	if opts.engine_conf:
		import yaml
		for k, v in yaml.safe_load(opts.engine_conf).items():
			if not hasattr(conf_engine, k):
				parser.error('Unrecognized engine conf option: {!r} (value: {!r})'.format(k, v))
			setattr(conf_engine, k, v)

	conf.parse_start_date, conf.parse_days, conf.parse_days_pre =\
		day, opts.parse_days_after, opts.parse_days_before

	timetable, router = tb.init_gtfs_router(
		tt_path, cache_path, tt_path_dump=opts.cache_timetable,
		conf=conf, conf_engine=conf_engine, timer_func=tb.calc_timer )

	dot_opts = dict()
	if opts.dot_opts:
		import yaml
		dot_opts = yaml.safe_load(opts.dot_opts)
	if opts.dot_for_lines:
		with tb.u.safe_replacement(opts.dot_for_lines) as dst:
			tb.vis.dot_for_lines(router.graph.lines, dst, dot_opts=dot_opts)
		return

	if opts.call == 'cache': pass # caches were already generated/stored above

	elif opts.call == 'query-earliest-arrival':
		dts_start = timetable.dts_parse(opts.day_time)
		a, b = timetable.stops[opts.stop_from], timetable.stops[opts.stop_to]
		journeys = router.query_earliest_arrival(a, b, dts_start)
		journeys.pretty_print(timetable.dts_format)

	elif opts.call == 'query-profile':
		dts_edt, dts_ldt = map(timetable.dts_parse, [opts.day_time_earliest, opts.day_time_latest])
		a, b = timetable.stops[opts.stop_from], timetable.stops[opts.stop_to]
		journeys = router.query_profile(a, b, dts_edt, dts_ldt, max_transfers=opts.max_transfers)
		journeys.pretty_print(timetable.dts_format)

	elif opts.call == 'query-transfer-patterns':
		dts_edt, dts_ldt = map(timetable.dts_parse, [opts.day_time_earliest, opts.day_time_latest])
		a, b = timetable.stops[opts.stop_from], timetable.stops[opts.stop_to]

		cache_path = opts.tree_cache
		tp_tree = tb.u.pickle_load(cache_path) if cache_path else None
		tp_router = router.build_tp_engine(tp_tree, max_transfers=opts.max_transfers)
		if not tp_tree and cache_path: tb.u.pickle_dump(tp_router.tree, cache_path)

		if opts.dot_for_tp_subtree:
			with tb.u.safe_replacement(opts.dot_for_tp_subtree) as dst:
				tb.vis.dot_for_tp_subtree(tp_router.tree[a], dst, dst_to_src=True, dot_opts=dot_opts)
			return

		query_tree = tp_router.build_query_tree(a, b)
		if opts.dot_for_tp_query_tree:
			with tb.u.safe_replacement(opts.dot_for_tp_query_tree) as dst:
				tb.vis.dot_for_tp_subtree(query_tree, dst, dot_opts=dot_opts)
			return

		journeys = tp_router.query_profile(a, b, dts_edt, dts_ldt, query_tree)
		journeys.pretty_print(timetable.dts_format)

	else: parser.error('Action not implemented: {}'.format(opts.call))

if __name__ == '__main__': sys.exit(main())
class dmap(ChainMap):
	'''Recursive attribute-access ChainMap for test configs loaded from YAML.
		Generators/lists/tuples of pairs passed to the constructor are converted to
		OrderedDicts; nested mappings from several layers are merged lazily in
		__getitem__ by wrapping them into a child dmap.'''

	# Class-level None so __setattr__'s MRO scan (below) finds "maps"
	# in a __dict__ and assigns it as a real attribute, not a mapping key.
	maps = None

	def __init__(self, *maps, **map0):
		# Convert pair-iterables to OrderedDict; keyword args become the first layer.
		maps = list((v if not isinstance( v,
			(types.GeneratorType, list, tuple) ) else OrderedDict(v)) for v in maps)
		if map0 or not maps: maps = [map0] + maps
		super(dmap, self).__init__(*maps)

	def __repr__(self):
		return '<{} {:x} {}>'.format(
			self.__class__.__name__, id(self), repr(self._asdict()) )

	def _asdict(self):
		# Plain-dict snapshot, recursing into nested dmap values.
		items = dict()
		for k, v in self.items():
			if isinstance(v, self.__class__): v = v._asdict()
			items[k] = v
		return items

	def _set_attr(self, k, v):
		# Bypass __setattr__ to store a real instance attribute.
		self.__dict__[k] = v

	def __iter__(self):
		# Yield each key once, in first-seen order across all layers.
		key_set = dict.fromkeys(set().union(*self.maps), True)
		return filter(lambda k: key_set.pop(k, False), it.chain.from_iterable(self.maps))

	def __getitem__(self, k):
		# Collect mapping-valued matches from every layer to merge them;
		# a non-mapping value wins immediately, except that a None found
		# *after* some mapping layers does not mask those mappings.
		k_maps = list()
		for m in self.maps:
			if k in m:
				if isinstance(m[k], Mapping): k_maps.append(m[k])
				elif not (m[k] is None and k_maps): return m[k]
		if not k_maps: raise KeyError(k)
		return self.__class__(*k_maps)

	def __getattr__(self, k):
		try: return self[k]
		except KeyError: raise AttributeError(k)

	def __setattr__(self, k, v):
		# Names already present on the instance/class hierarchy are set as
		# attributes; everything else goes into the mapping layers.
		for m in map(op.attrgetter('__dict__'), [self] + self.__class__.mro()):
			if k in m:
				self._set_attr(k, v)
				break
		else: self[k] = v

	def __delitem__(self, k):
		# Remove the key from every layer it appears in (no KeyError if absent).
		for m in self.maps:
			if k in m: del m[k]
def yaml_load(stream, dict_cls=OrderedDict, loader_cls=yaml.SafeLoader):
	'''Load YAML from `stream`, preserving mapping order via `dict_cls`
		and with date/timestamp auto-resolution disabled.
		The customized loader class is built once and cached on the function object.'''
	if not hasattr(yaml_load, '_cls'):
		class CustomLoader(loader_cls): pass
		def construct_mapping(loader, node):
			loader.flatten_mapping(node)
			return dict_cls(loader.construct_pairs(node))
		CustomLoader.add_constructor(
			yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping )
		# Do not auto-resolve dates/timestamps, as PyYAML does that badly.
		# Dropping resolvers for digit/sign chars also drops int resolution,
		# so the int resolver is re-added explicitly below.
		res_map = CustomLoader.yaml_implicit_resolvers = CustomLoader.yaml_implicit_resolvers.copy()
		res_int = list('-+0123456789')
		for c in res_int: del res_map[c]
		CustomLoader.add_implicit_resolver(
			'tag:yaml.org,2002:int',
			re.compile(r'''^(?:[-+]?0b[0-1_]+
				|[-+]?0[0-7_]+
				|[-+]?(?:0|[1-9][0-9_]*)
				|[-+]?0x[0-9a-fA-F_]+)$''', re.X), res_int )
		yaml_load._cls = CustomLoader
	return yaml.load(stream, yaml_load._cls)

def load_test_data(path_dir, path_stem, name):
	'Load test data from specified YAML file and return as dmap object.'
	with (path_dir / '{}.test.{}.yaml'.format(path_stem, name)).open() as src:
		return dmap(yaml_load(src))


def struct_from_val(val, cls, as_tuple=False):
	'''Instantiate `cls` from a sequence (positional args) or mapping (keyword args).
		With as_tuple=True, the result is converted back via attrs' astuple().'''
	if isinstance(val, (tuple, list)): val = cls(*val)
	elif isinstance(val, (dmap, dict, OrderedDict)): val = cls(**val)
	else: raise ValueError(val)
	return val if not as_tuple else tb.u.attr.astuple(val)

# Expected journey start/end times, as parsed from test YAML
@tb.u.attr_struct
class JourneyStats: keys = 'start end'

# One journey segment: type is 'trip' or 'fp', src/dst are stop ids
@tb.u.attr_struct
class JourneySeg: keys = 'type src dst'

# Query goal: src/dst stop ids plus departure time(s)
@tb.u.attr_struct
class TestGoal:
	src = tb.u.attr_init()
	dst = tb.u.attr_init()
	dts_start = tb.u.attr_init()
	dts_latest = tb.u.attr_init(None)
'{}.test.{}'.format( 131 | self.path_project.parent.resolve().name, self.path_file.stem ) 132 | self._path_cache_state = defaultdict(lambda: ...) 133 | 134 | def load_test_data(self, name): 135 | return load_test_data(self.path_test, self.path_file.stem, name) 136 | 137 | 138 | _path_unzip = None 139 | @property 140 | def path_unzip(self): 141 | if self._path_unzip: return self._path_unzip 142 | 143 | paths_unzip = [ self.path_test / '{}.data.unzip'.format(self.path_file.stem), 144 | Path(tempfile.gettempdir()) / '{}.data.unzip'.format(self.path_tmp_base) ] 145 | for p in paths_unzip: 146 | if not p.exists(): 147 | try: p.mkdir(parents=True) 148 | except OSError: continue 149 | path_unzip = p 150 | break 151 | else: 152 | raise OSError( 'Failed to find/create path to unzip data to.' 153 | ' Paths checked: {}'.format(' '.join(repr(str(p)) for p in paths_unzip)) ) 154 | 155 | path_done = path_unzip / '.unzip-done.check' 156 | mtime_src = self.path_gtfs_zip.stat().st_mtime 157 | mtime_done = path_done.stat().st_mtime if path_done.exists() else 0 158 | if mtime_done < mtime_src: 159 | shutil.rmtree(str(path_unzip)) 160 | path_unzip.mkdir(parents=True) 161 | mtime_done = None 162 | 163 | if not mtime_done: 164 | with zipfile.ZipFile(str(self.path_gtfs_zip)) as src: src.extractall(str(path_unzip)) 165 | path_done.touch() 166 | 167 | self._path_unzip = path_unzip 168 | return self._path_unzip 169 | 170 | 171 | def _paths_src_mtimes(self): 172 | paths_src = [Path(tb.__file__).parent, path_project] 173 | for root, dirs, files in it.chain.from_iterable(os.walk(str(p)) for p in paths_src): 174 | p = Path(root) 175 | for name in files: yield (p / name).stat().st_mtime 176 | 177 | def _path_cache(self, ext): 178 | path = self._path_cache_state[ext] 179 | if path is not ...: return state 180 | path = self._path_cache_state[ext] = None 181 | 182 | paths_cache = [ self.path_test / '{}.cache.{}'.format(self.path_file.stem, ext), 183 | Path(tempfile.gettempdir()) / 
'{}.cache.{}'.format(self.path_tmp_base, ext) ] 184 | 185 | for p in paths_cache: 186 | if not p.exists(): 187 | try: 188 | p.touch() 189 | p.unlink() 190 | except OSError: continue 191 | path = self._path_cache_state[ext] = p 192 | break 193 | else: 194 | warnings.warn('Failed to find writable cache-path, disabling cache') 195 | warnings.warn( 196 | 'Cache paths checked: {}'.format(' '.join(repr(str(p)) for p in paths_cache)) ) 197 | 198 | mtime_src = max(self._paths_src_mtimes()) 199 | mtime_cache = 0 if not path.exists() else path.stat().st_mtime 200 | if mtime_cache and mtime_src > mtime_cache: 201 | warnings.warn( 'Existing timetable/transfer cache' 202 | ' file is older than code, but using it anyway: {}'.format(path) ) 203 | return path 204 | 205 | @property 206 | def path_cache(self): return self._path_cache('graph.bin') 207 | 208 | @property 209 | def path_timetable(self): return self._path_cache('tt.pickle') 210 | 211 | 212 | 213 | class GraphAssertions: 214 | 215 | dts_slack = 3 * 60 216 | 217 | def __init__(self, graph=None): self.graph = graph 218 | 219 | 220 | def debug_trip_transfers(self, stop1, stop2, stop3, max_km=0.2, max_td=3600, graph=None): 221 | '''Show info on possible T[stop-1] -> T[stop-2] -> U[stop-2] -> U[stop-3] 222 | transfers between trips (both passing stop-2), going only by timetable data.''' 223 | graph = graph or self.graph 224 | stop1, stop2, stop3 = (graph.timetable.stops[s] for s in [stop1, stop2, stop3]) 225 | 226 | for (n1_min, line1), (n2_max, line2) in it.product( 227 | graph.lines.lines_with_stop(stop1), graph.lines.lines_with_stop(stop3) ): 228 | 229 | for ts1 in line1[0]: 230 | if ts1.stop == stop2: break 231 | else: continue 232 | for ts2 in line2[0]: 233 | if ts2.stop == stop2: break 234 | else: continue 235 | n1_max, n2_min = ts1.stopidx, ts2.stopidx 236 | 237 | for ts1, ts2 in it.product(line1[0][n1_min:n1_max+1], line2[0][n2_min:n2_max+1]): 238 | n1, n2 = ts1.stopidx, ts2.stopidx 239 | 240 | if ts1.stop == ts2.stop: 
	def assert_journey_components(self, test, graph=None, verbose=verbose):
		'''Check that lines, trips, footpaths
			and transfers for all test journeys can be found individually.'''
		graph = graph or self.graph
		goal = struct_from_val(test.goal, TestGoal)
		goal_src, goal_dst = op.itemgetter(goal.src, goal.dst)(graph.timetable.stops)
		assert goal_src and goal_dst

		def raise_error(tpl, *args, **kws):
			# jn_name/seg_name are read from the enclosing scope at call time,
			# so the message reflects the journey/segment currently being checked.
			jn_seg = kws.get('err_seg', seg_name)
			jn_seg = ':{}'.format(jn_seg) if jn_seg else ''
			raise AssertionError('[{}{}] {}'.format(jn_name, jn_seg, tpl).format(*args, **kws))

		for jn_name, jn_info in (test.journey_set or dict()).items():
			jn_stats = struct_from_val(jn_info.stats, JourneyStats)
			jn_start, jn_end = map(graph.timetable.dts_parse, [jn_stats.start, jn_stats.end])
			# ts_first/ts_last: candidate TripStops at the goal endpoints;
			# ts_transfer: TripStops reachable at the end of the previous segment.
			ts_first, ts_last, ts_transfer = set(), set(), set()

			# Check segments
			for seg_name, seg in jn_info.segments.items():
				seg = struct_from_val(seg, JourneySeg)
				a, b = op.itemgetter(seg.src, seg.dst)(graph.timetable.stops)
				ts_transfer_chk, ts_transfer_found, line_found = list(ts_transfer), False, False
				ts_transfer.clear()

				if seg.type == 'trip':
					# Find lines passing both a and b (in that order) ...
					for n, line in graph.lines.lines_with_stop(a):
						for m, stop in enumerate(line.stops[n:], n):
							if stop is b: break
						else: continue # line doesn't reach b after a
						for trip in line:
							# ... and verify a transfer from the previous segment exists
							for ts in ts_transfer_chk:
								if not (ts.trip.id == trip.id and ts.stop is a):
									for transfer in graph.transfers.from_trip_stop(ts):
										if transfer.ts_to.stop is trip[n].stop: break
									else: continue # no recorded transfer to this trip
								ts_transfer_found = True
								ts_transfer_chk.clear()
								break
							if a is goal_src: ts_first.add(trip[n])
							if b is goal_dst: ts_last.add(trip[m])
							ts_transfer.add(trip[m])
						line_found = True
					if not line_found: raise_error('No Lines/Trips found for trip-segment')

				elif seg.type == 'fp':
					if not graph.timetable.footpaths.connected(a, b):
						raise_error('No footpath-transfer found between src/dst: {} -> {}', a, b)
					# Previous segment must have ended at the footpath's src stop
					for ts in ts_transfer_chk:
						if ts.stop is not a: continue
						ts_transfer_found = True
						ts_transfer_chk.clear()
						break
					# Next segment can board any trip of any line passing b
					for m, line in graph.lines.lines_with_stop(b):
						for trip in line:
							# if b is goal_dst: ts_last.add(trip[m])
							ts_transfer.add(trip[m])
						line_found = True
					if not line_found and b is not goal_dst:
						raise_error('No Lines/Trips found for footpath-segment dst')

				else: raise NotImplementedError

				if not ts_transfer_found and a is not goal_src:
					raise_error( 'No transfers found from'
						' previous segment (checked: {})', len(ts_transfer_chk) )
				if not ts_transfer and b is not goal_dst:
					raise_error('No transfers found from segment (type={}) end ({!r})', seg.type, seg.dst)

			# Check start/end times
			seg_name = None # errors below are journey-level, not segment-level
			for k, ts_set, chk in [('dts_dep', ts_first, jn_start), ('dts_arr', ts_last, jn_end)]:
				# Closest candidate TripStop time must be within dts_slack of the expected one
				dt_min = min(abs(chk - getattr(ts, k)) for ts in ts_set) if ts_set else 0
				if dt_min > self.dts_slack:
					if verbose:
						print('[{}] All TripStops for {} goal-point:'.format(jn_name, k))
						for ts in ts_set:
							print( ' TripStop(trip_id={}, stopidx={}, stop_id={}, {}={})'\
								.format(ts.trip.id, ts.stopidx, ts.stop.id, k, tb.u.dts_format(getattr(ts, k))) )
						print('[{}] Checking {} against: {}'.format(jn_name, k, tb.u.dts_format(chk)))
					raise_error( 'No trip-stops close to {} goal-point'
						' in time (within {:,}s), min diff: {:,}s', k, self.dts_slack, dt_min )
@u.attr_struct(vals_to_attrs=True)
class GTFSConf:
	'''GTFS parser configuration (attrs-based struct; class values below are defaults).'''

	# Filtering for parser will only produce timetable data (trips/footpaths)
	# for specific days, with ones after parse_start_date having 24h*N time offsets.
	# Trips starting on before parse_start_date (and up to parse_days_pre) will also
	# be processed, so that e.g. journeys starting at midnight on that day can use them.
	parse_start_date = None # datetime.datetime object or YYYYMMDD string
	parse_days = 2 # should be >= 1 and up to max days limit for journey (probably 1-2)
	parse_days_pre = 1 # also >= 1 would make sense

	# gtfs_timezone is only used if parse_start_date is set.
	# It is important to account for stuff like daylight saving time, leap seconds, etc
	# To understand why, answer a question:
	#  how many seconds are between 0:00 and 6:00? (not always 6*3600)
	gtfs_timezone = 'Europe/London' # pytz zone name or datetime.timezone

	group_stops_into_stations = False # use "parent_station" to group all stops into one under its id

	# Options for footpath-generation - not used if transfers.txt is non-empty
	delta_ch = 2*60 # fixed time-delta overhead for changing trips (i.e. p->p footpaths)
	footpath_delta_base = 2*60 # footpath_delta = delta_base + km / speed_kmh
	# NOTE(review): despite the _kmh suffix, this value is km per *second*
	# (5 km/h / 3600), which is what footpath_dt()'s "km / speed_kmh" expects.
	footpath_speed_kmh = 5 / 3600
	footpath_delta_max = 7*60 # all footpaths longer than that are discarded as invalid
	footpath_gen_thresholds = 0, 0.5 # presumably gen-coverage thresholds - confirm against usage


# calendar.txt weekday flag columns, monday-first (matches datetime.weekday() order)
weekday_columns = [ 'monday', 'tuesday',
	'wednesday', 'thursday', 'friday', 'saturday', 'sunday' ]

# calendar_dates.txt exception_type values: "1" = service added, "2" = removed
class CalendarException(enum.Enum): added, removed = '1', '2'
def dt_adjust(dt, d=0, h=0, m=0, s=0, subtract=False):
	'''Apply timedelta objects in a sensible manner,
		where adding N days only adjusts date, never time.
		Note that in general: "dt - delta != dt + (-delta)",
		hence `subtract` and negative values are only allowed in `d`.
		`d` can also be a datetime.timedelta (decomposed into days + h/m/s).
		Requires tz-aware `dt` with a pytz-style tzinfo (localize/normalize).'''
	if isinstance(d, datetime.timedelta):
		# Fix: timedelta instances only expose days/seconds/microseconds -
		# the original read non-existent .hours/.minutes/.milliseconds/.weeks
		# attributes (AttributeError), and asserted on `d` after rebinding it.
		delta = d
		assert not delta.microseconds, delta # sub-second adjustments unsupported
		d, rem = delta.days, delta.seconds
		h, rem = divmod(rem, 3600)
		m, s = divmod(rem, 60)
	if h == m == s == 0: # adding days should only adjust date, not time
		if d == 0: return dt
		if d < 0:
			assert not subtract, [d, subtract]
			d, subtract = -d, True
		dt = (dt + datetime.timedelta(d)) if not subtract else (dt - datetime.timedelta(d))
		# Re-localize so the wall-clock time survives DST boundaries
		return dt.tzinfo.localize(dt.replace(tzinfo=None))
	else:
		assert not d, 'Adjusting both date by days= and time - probably a bug'
		assert h >= 0 and m >= 0 and s >= 0
		delta = datetime.timedelta(hours=h, minutes=m, seconds=s)
		# normalize() accounts for DST jumps when crossing them in absolute time
		return dt.tzinfo.normalize((dt + delta) if not subtract else (dt - delta))
def iter_gtfs_tuples(gtfs_dir, filename, empty_if_missing=False, yield_fields=False):
	'''Yield one namedtuple per CSV row of the specified GTFS file.
		With yield_fields=True, the (stripped) header field list is yielded first.
		With empty_if_missing=True, a missing/unreadable file produces nothing
		(or just an empty field list) instead of raising.'''
	log.debug('Processing gtfs file: {}', filename)
	if filename.endswith('.txt'): filename = filename[:-4]
	# "stop_times" -> "StopTime": singular CamelCase name for the row type
	type_name = ''.join(word.title() for word in filename.rstrip('s').split('_'))
	path = gtfs_dir / '{}.txt'.format(filename)
	if empty_if_missing and not os.access(str(path), os.R_OK):
		if yield_fields: yield list()
		return
	with path.open(encoding='utf-8-sig') as src:
		rows = csv.reader(src)
		header = [field.strip() for field in next(rows)]
		row_cls = namedtuple(type_name, header)
		if yield_fields: yield header
		for row in rows:
			try: yield row_cls(*row)
			except TypeError: # row length doesn't match the header
				log.debug('Skipping bogus CSV line (file: {}): {!r}', path, row)
def get_timespan_info( svc_calendar, svc_exceptions,
		parse_start_date, parse_days, parse_days_pre,
		gtfs_timezone, gtfs_date_fmt='%Y%m%d' ):
	'''Return TimespanInfo with map of services to days when they are operating within it.
		`svc_calendar`: {service_id: ServiceCalendarEntry},
		`svc_exceptions`: {service_id: {CalendarException: set(date_str)}}.'''
	if isinstance(gtfs_timezone, str):
		assert pytz, ['pytz is required for processing timezone spec', gtfs_timezone]
		gtfs_timezone = pytz.timezone(gtfs_timezone)
	dt_cls, tz = datetime.datetime, gtfs_timezone

	# Build the [dt_min, dt_max] window and an ordered date_str -> datetime map
	dt_start = parse_start_date
	if isinstance(parse_start_date, str):
		dt_start = tz.localize(dt_cls.strptime(dt_start, gtfs_date_fmt))
	dt_min = dt_adjust(dt_start, d=parse_days_pre, subtract=True)
	date_map = list( dt_adjust(dt_min, d=n)
		for n in range(parse_days + parse_days_pre + 1) )
	date_min_str, date_max_str = (d.strftime(gtfs_date_fmt) for d in [dt_min, date_map[-1]])
	date_map = OrderedDict((d.strftime(gtfs_date_fmt), d) for d in date_map)

	svc_days = dict() # {service_id: {date_str: datetime}}
	for svc_id, sce in svc_calendar.items():
		if sce.date_start > date_max_str or sce.date_end < date_min_str: continue
		days = svc_days.setdefault(svc_id, dict())

		# Apply any service-specific exceptions.
		# Renamed from "parse_days" - the original shadowed the parameter.
		day_candidates = dict((date_str, (False, date)) for date_str, date in date_map.items())
		# Fix: svc_exceptions[svc_id] maps CalendarException -> set of date
		# strings, so iterate .items() and then each date set - the original
		# tried to unpack the exception keys directly.
		for exc, exc_dates in svc_exceptions[svc_id].items():
			for date_str in exc_dates:
				if not (date_min_str <= date_str <= date_max_str): continue
				if exc == CalendarException.added:
					# Fix: was "date.strptime" - a leftover comprehension-scoped
					# name no longer in scope here (NameError).
					day_candidates[date_str] = True, tz.localize(dt_cls.strptime(date_str, gtfs_date_fmt))
				elif exc == CalendarException.removed: day_candidates.pop(date_str, None)
				else: raise ValueError(exc) # fix: was raising the "t" types-module import

		# Add datetime to svc_days for each date that service is operating on
		for date_str, (exc, dt) in sorted(day_candidates.items()):
			if not exc: # regular (non-exception) days must match calendar range/weekdays
				if date_str < sce.date_start: continue
				elif date_str > sce.date_end: break
				if not sce.weekdays[dt.weekday()]: continue
			days[date_str] = dt

	# Add days for exception-only services, not mentioned in svc_calendar at all
	for svc_id, excs in svc_exceptions.items():
		if svc_id in svc_calendar: continue
		for date_str in excs[CalendarException.added]:
			if not (date_min_str <= date_str <= date_max_str): continue
			dt = tz.localize(dt_cls.strptime(date_str, gtfs_date_fmt))
			svc_days.setdefault(svc_id, dict())[date_str] = dt

	if not svc_days:
		log.info('No services were found to be operational on specified days')

	return t.public.TimespanInfo(
		dt_start, dt_min, svc_days, date_map, date_min_str, date_max_str )
def offset_to_dts(dt_min, dt, offset):
	'''Convert day `dt` + time-of-day `offset` (GTFSTimeOffset) to seconds
		relative to `dt_min`; with dt=None, just the flat offset seconds.'''
	if dt is None: return offset.flat
	return (offset.apply_to_datetime(dt) - dt_min).total_seconds()

def calculate_trip_dts(dt_min, dt, offset_arr, offset_dep, trip=None):
	'''Calculate relative timestamps ("dts" floats of seconds) for
		arrival/departure GTFSTimeOffsets on a specific day (`dt` datetime).
		"relative" to `dt_min` datetime - start of the parsed interval.
		If both dt_min and dt are passed as None, offsets are simply taken from 0.
		`trip` (optional, backward-compatible addition): partially-built Trip,
		used to fill a missing arrival time from the preceding stop.'''
	# Fill-in missing offsets before any dts math. Fix: the original
	# referenced undefined names "trip"/"ts" here (NameError), and ran this
	# only after the dt-None early return, so a missing offset crashed there too.
	if not offset_arr:
		if not trip: # first stop of the trip (or no trip context) - arrival ~ departure
			if offset_dep: offset_arr = offset_dep
			else:
				raise ValueError(
					'Missing both arrival and departure times for trip stop (trip={!r})'.format(trip) )
		# "scheduled based on the nearest preceding timed stop"
		# NOTE(review): assumes trip[-1] exposes offset_dep - confirm against TripStop fields
		else: offset_arr = trip[-1].offset_dep
	if not offset_dep: offset_dep = offset_arr
	assert offset_arr and offset_dep
	if dt is None:
		# Either both dt_min and dt are None or neither,
		# otherwise dts values won't make sense according to one of them.
		assert dt_min is None
		return offset_arr.flat, offset_dep.flat
	return tuple(offset_to_dts(dt_min, dt, o) for o in [offset_arr, offset_dep])
scipy) or spatial dbs 202 | lon1, lat1, lon2, lat2 = ( 203 | math.radians(float(v)) for v in 204 | [stop_a.lon, stop_a.lat, stop_b.lon, stop_b.lat] ) 205 | km = 6367 * 2 * math.asin(math.sqrt( 206 | math.sin((lat2 - lat1)/2)**2 + 207 | math.cos(lat1) * math.cos(lat2) * math.sin((lon2 - lon1)/2)**2 )) 208 | return delta_base + km / speed_kmh 209 | 210 | 211 | def parse_timetable(gtfs_dir, conf): 212 | 'Parse Timetable from GTFS data directory.' 213 | # Stops/footpaths that don't belong to trips are discarded here 214 | 215 | ### Calculate processing timespan / calendar and map of services operating there 216 | if conf.parse_start_date: 217 | svc_calendar = dict() 218 | for s in iter_gtfs_tuples(gtfs_dir, 'calendar', empty_if_missing=True): 219 | weekdays = list(bool(int(getattr(s, k))) for k in weekday_columns) 220 | svc_calendar[s.service_id] = ServiceCalendarEntry(s.start_date, s.end_date, weekdays) 221 | 222 | svc_exceptions = defaultdict(ft.partial(defaultdict, set)) 223 | for s in iter_gtfs_tuples(gtfs_dir, 'calendar_dates', empty_if_missing=True): 224 | svc_exceptions[s.service_id][CalendarException(s.exception_type)].add(s.date) 225 | 226 | timespan_info = get_timespan_info( svc_calendar, svc_exceptions, 227 | conf.parse_start_date, conf.parse_days, conf.parse_days_pre, conf.gtfs_timezone ) 228 | else: timespan_info = t.public.TimespanInfo() 229 | 230 | ### Stops (incl. 
grouping by station) 231 | stop_dict, stop_sets = dict(), dict() # {id: stop}, {id: station_stops} 232 | for s in iter_gtfs_tuples(gtfs_dir, 'stops'): 233 | stop = t.public.Stop(s.stop_id, s.stop_name, float(s.stop_lon), float(s.stop_lat)) 234 | stop_set_id = s.parent_station or s.stop_id 235 | stop_dict[s.stop_id] = stop_set_id, stop 236 | if not s.parent_station: stop_sets[s.stop_id] = {stop} 237 | else: 238 | stop_sets[s.stop_id] = stop_sets.setdefault(stop_set_id, set()) 239 | stop_sets[stop_set_id].add(stop) 240 | if conf.group_stops_into_stations: 241 | for stop_id in stop_dict: # resolve all stops to stations 242 | stop_dict[stop_id] = stop_dict[stop_dict[stop_id][0]] 243 | stop_dict, stop_sets = ( 244 | dict((k, stop) for k, (k_set, stop) in stop_dict.items()), 245 | dict((k, stop_sets[k_set]) for k, (k_set, stop) in stop_dict.items()) ) 246 | 247 | ### Trips 248 | trip_stops = defaultdict(list) 249 | for s in iter_gtfs_tuples(gtfs_dir, 'stop_times'): trip_stops[s.trip_id].append(s) 250 | 251 | trips, stops = t.public.Trips(), t.public.Stops() 252 | for s in iter_gtfs_tuples(gtfs_dir, 'trips'): 253 | if timespan_info.dt_start: 254 | days = timespan_info.service_days.get(s.service_id) 255 | if not days: continue 256 | else: days = {None: None} 257 | for dt in days.values(): 258 | trip, offset_arr_prev = t.public.Trip(), None 259 | for stopidx, ts in enumerate( 260 | sorted(trip_stops[s.trip_id], key=lambda s: int(s.stop_sequence)) ): 261 | offset_arr, offset_dep = map(GTFSTimeOffset.parse, [ts.arrival_time, ts.departure_time]) 262 | if offset_arr_prev is not None: 263 | if offset_arr < offset_arr_prev: offset_arr.d += 1 # assuming bogus 24:00 -> 00:00 264 | offset_arr_prev = offset_arr 265 | dts_arr, dts_dep = calculate_trip_dts(timespan_info.dt_min, dt, offset_arr, offset_dep) 266 | stop = stops.add(stop_dict[ts.stop_id]) 267 | trip.add(t.public.TripStop(trip, stopidx, stop, dts_arr, dts_dep)) 268 | if trip: trips.add(trip) 269 | 270 | ### Footpaths 271 | 
footpaths, fp_samestop_count, fp_synth = t.public.Footpaths(), 0, False 272 | with footpaths.populate() as fp_add: 273 | get_stop_set = lambda stop_id: list(filter(stops.get, stop_sets.get(stop_id, list()))) 274 | 275 | transfers = iter_gtfs_tuples(gtfs_dir, 'transfers', empty_if_missing=True, yield_fields=True) 276 | transfers_fields = next(transfers) 277 | if 'min_transfer_time' not in transfers_fields: 278 | if transfers_fields: 279 | # Can maybe be used as a hint about which transfers to generate/skip 280 | log.info('Skipping transfers.txt file as it has no "min_transfer_time" field') 281 | else: 282 | for s in transfers: 283 | stops_from, stops_to = map(get_stop_set, [s.from_stop_id, s.to_stop_id]) 284 | if not (stops_from and stops_to): continue 285 | delta = int(s.min_transfer_time) 286 | for stop_from, stop_to in it.product(stops_from, stops_to): 287 | if stop_from == stop_to: fp_samestop_count += 1 288 | fp_add(stop_from, stop_to, delta) 289 | 290 | # links.txt is specific to gbrail.info and has 291 | # transfers that are only valid for specific date/time intervals 292 | for s in iter_gtfs_tuples(gtfs_dir, 'links', empty_if_missing=True): 293 | stops_from, stops_to = map(get_stop_set, [s.from_stop_id, s.to_stop_id]) 294 | if not (stops_from and stops_to): continue 295 | delta = int(s.link_secs) 296 | if not timespan_info.dt_start: # not using calendar info 297 | fp_add(stop_from, stop_to, delta) 298 | continue 299 | if ( s.start_date > timespan_info.date_max_str 300 | or s.end_date < timespan_info.date_min_str ): continue 301 | for dt in timespan_info.date_map.values(): 302 | if not bool(int(getattr(s, weekday_columns[dt.weekday()]))): continue 303 | dts_min, dts_max = ( 304 | offset_to_dts(timespan_info.dt_min, dt, GTFSTimeOffset.parse(v)) 305 | for v in [s.start_time, s.end_time] ) 306 | for stop_from, stop_to in it.product(stops_from, stops_to): 307 | if stop_from == stop_to: fp_samestop_count += 1 308 | fp_add(stop_from, stop_to, delta, dts_min, 
dts_max) 309 | 310 | if len(stops): 311 | fp_min, fp_min_samestop = conf.footpath_gen_thresholds 312 | if len(footpaths) / len(stops) <= fp_min: 313 | log.debug('No transfers/links data found, generating synthetic footpaths from lon/lat') 314 | fp_synth, fp_delta_func = True, ft.partial( footpath_dt, 315 | delta_base=conf.footpath_delta_base, speed_kmh=conf.footpath_speed_kmh ) 316 | for stop_a, stop_b in it.permutations(list(stops), 2): 317 | delta = fp_delta_func(stop_a, stop_b) 318 | if delta <= conf.footpath_delta_max: fp_add(stop_a, stop_b, delta) 319 | if fp_samestop_count / len(stops) <= fp_min_samestop: 320 | if not fp_synth: 321 | log.debug( 322 | 'Generating missing same-stop footpaths (delta_ch={}),' 323 | ' because source data seem to have very few of them - {} for {} stops', 324 | conf.delta_ch, fp_samestop_count, len(stops) ) 325 | for stop in stops: 326 | if footpaths.connected(stop, stop): continue 327 | fp_add(stop, stop, conf.delta_ch) 328 | fp_samestop_count += 1 329 | 330 | return t.public.Timetable(stops, footpaths, trips, timespan_info) 331 | -------------------------------------------------------------------------------- /tb_routing/types/public.py: -------------------------------------------------------------------------------- 1 | import itertools as it, operator as op, functools as ft 2 | from collections import namedtuple, defaultdict 3 | import enum, datetime, contextlib 4 | 5 | from .. import utils as u 6 | 7 | 8 | class SolutionStatus(enum.Enum): 9 | 'Used as a result for solution (e.g. Trip) comparisons.' 10 | dominated = False 11 | non_dominated = True 12 | equal = None 13 | undecidable = ... 14 | 15 | @classmethod 16 | def better_if(cls, check): 17 | return [cls.dominated, cls.non_dominated][bool(check)] 18 | 19 | 20 | ### TBRoutingEngine input data 21 | 22 | # "We consider public transit networks defined by an aperiodic 23 | # timetable, consisting of a set of stops, a set of footpaths and a set of trips." 
@u.attr_struct(repr=False, cmp=False)
class Stop:
	'''Single stop (or whole station, if stops are grouped into stations).
		Hashed/compared by id only, so instances are interned via Stops.add().'''
	keys = 'id name lon lat'
	def __hash__(self): return hash(self.id)
	def __eq__(self, stop): return u.same_type_and_id(self, stop)
	def __repr__(self):
		# NOTE(review): format strings below look garbled by extraction
		#  (likely something like '<Stop {}>' with angle-bracket content
		#  stripped) - confirm against vcs before relying on this repr.
		if self.id == self.name: return ''.format(self.id)
		return ''.format(self.name, self.id)

class Stops:
	'Set of unique Stop objects, interned/indexed by stop id.'

	def __init__(self): self.set_idx = dict()

	def add(self, stop):
		'Return interned Stop for stop.id, adding it if not known yet.'
		if stop.id in self.set_idx: stop = self.set_idx[stop.id]
		else: self.set_idx[stop.id] = stop
		return stop

	def get(self, stop):
		'Lookup by Stop object or id, returning None if not present.'
		if isinstance(stop, Stop): stop = stop.id
		if stop not in self.set_idx: return
		return self.set_idx[stop]

	def __getitem__(self, stop_id): return self.set_idx[stop_id]
	def __len__(self): return len(self.set_idx)
	def __iter__(self): return iter(self.set_idx.values())


class Footpath:
	'''All footpath variants between one (stop_from, stop_to) pair,
		each with a walk-time delta and a [dts_min, dts_max] validity interval.'''

	def __init__(self):
		self.delta_tuples = list() # [(delta, dts_min, dts_max), ...]

	def add(self, delta, dts_min, dts_max):
		self.delta_tuples.append((delta, dts_min, dts_max))
	def discard_longer(self, delta_max):
		self.delta_tuples = list(filter(lambda t: t[0] <= delta_max, self.delta_tuples))
	def finalize(self):
		# Sort by delta asc and freeze - filtered_deltas() relies on this order
		if len(self.delta_tuples) > 1: self.delta_tuples.sort()
		self.delta_tuples = tuple(self.delta_tuples)

	def _check_src_dst(self, delta, dts_min, dts_max, dts_src, dts_dst):
		'''Return True if dts min/max are within src/dst constraints.
			I.e. whether footpath can take place with
				specified arrival-to-src and departure-from-dst times.
			None in place of src/dst times is interpreted as "any".
			Full length (delta) of footpath must fit into dts_min/max interval for it to be valid.'''
		src, dst = dts_src is not None, dts_dst is not None
		if not (src or dst): return True
		if not src: dts_src = dts_min
		elif dts_min: dts_src = max(dts_min, dts_src)
		if not dst: dts_dst = dts_max
		elif dts_max: dts_dst = min(dts_max, dts_dst)
		return dts_dst - dts_src >= delta

	def filtered_deltas(self, dts_src=None, dts_dst=None):
		'Return asc-sorted time deltas for footpaths within given constraints.'
		for delta, dts_min, dts_max in self.delta_tuples:
			if not self._check_src_dst(delta, dts_min, dts_max, dts_src, dts_dst): continue
			yield delta

	def get_shortest(self, **fp_constraints):
		'''Return shortest time delta for valid footpath
			between stops within given constraints, or None if it cannot be found.'''
		try: return next(self.filtered_deltas(**fp_constraints))
		except StopIteration: return None

	def valid_at(self, **fp_constraints):
		'Return (as bool) whether footpath is valid between stops within given constraints.'
		return self.get_shortest(**fp_constraints) is not None

	def stat_delta_sum(self):
		return 0 if not self.delta_tuples else\
			sum(map(op.itemgetter(0), self.delta_tuples))

	def __len__(self): return len(self.delta_tuples)


class Footpaths:
	'''All footpaths between stop pairs,
		indexed both by source stop and by destination stop.'''

	_stats_cache_t = namedtuple(
		'StatsCache', 'delta_sum delta_count ch_count conn_count' )
	_stats_cache = None

	def __init__(self):
		self.set_idx_to, self.set_idx_from = dict(), dict()
		self.fp0 = Footpath() # shared empty placeholder for missing pairs

	def __getstate__(self):
		# Fix: copy the dict so pickling does not pop the stats cache
		#  from (i.e. mutate) the live instance.
		state = self.__dict__.copy()
		state.pop('_stats_cache', None)
		return state

	def _add(self, stop_a, stop_b, delta, dts_min=0, dts_max=u.inf):
		try: fp = self.set_idx_to[stop_a][stop_b]
		except KeyError:
			fp = Footpath()
			self.set_idx_to.setdefault(stop_a, dict())[stop_b] = fp
			self.set_idx_from.setdefault(stop_b, dict())[stop_a] = fp
		fp.add(delta, dts_min, dts_max)

	@contextlib.contextmanager
	def populate(self):
		'Context manager yielding add-footpath func; finalizes all footpaths on exit.'
		try: yield self._add
		finally:
			for k1, k2, fp in self: fp.finalize()
			self._stats_cache = None

	def get(self, stop_from, stop_to):
		'Return Footpath for a stop pair, or an empty placeholder if none exists.'
		try: return self.set_idx_to[stop_from][stop_to]
		except KeyError: return self.fp0

	def _filtered_stop_fp_tuples(self, idx_items, fp_constraints):
		for stop, fp in idx_items:
			if not fp.valid_at(**fp_constraints): continue
			yield stop, fp

	def to_stops_from(self, stop, **fp_constraints):
		'''Return (stop, fp) tuples only for
			stops that have valid footpaths within given constraints.'''
		return self._filtered_stop_fp_tuples(
			self.set_idx_to.get(stop, dict()).items(), fp_constraints )

	def from_stops_to(self, stop, **fp_constraints):
		'''Return (stop, fp) tuples only for
			stops that have valid footpaths within given constraints.'''
		return self._filtered_stop_fp_tuples(
			self.set_idx_from.get(stop, dict()).items(), fp_constraints )

	def time_delta(self, stop_from, stop_to, default=None, **fp_constraints):
		'Shortest valid footpath delta between two stops, or default if none.'
		delta = self.get(stop_from, stop_to).get_shortest(**fp_constraints)
		if delta is None: delta = default
		return delta

	def connected(self, stop_a, stop_b, **fp_constraints):
		'''Return (as bool) whether footpath in any
			direction exists between two stops within given constraints.'''
		for a, b in [(stop_a, stop_b), (stop_b, stop_a)]:
			delta = self.time_delta(a, b, **fp_constraints)
			if delta is not None: break
		else: return False
		return delta is not u.inf

	def _stats(self):
		# Lazily-computed aggregate stats, invalidated by populate()
		if not self._stats_cache:
			delta_sum = delta_count = ch_count = conn_count = 0
			for k1, fps_from_k1 in self.set_idx_to.items():
				for k2, fp in fps_from_k1.items():
					if not fp: continue
					delta_sum += fp.stat_delta_sum()
					delta_count += len(fp)
					conn_count += 1
					if k1 == k2: ch_count += 1 # same-stop interchange
			self._stats_cache = self._stats_cache_t(
				delta_sum, delta_count, ch_count, conn_count )
		return self._stats_cache

	def stat_mean_delta(self):
		s = self._stats()
		return (s.delta_sum / s.delta_count) if s.delta_count else 0
	def stat_mean_delta_count(self):
		s = self._stats()
		return (s.delta_count / s.conn_count) if s.conn_count else 0
	def stat_same_stop_count(self): return self._stats().ch_count

	def __iter__(self):
		for k1, k1_fps in list(self.set_idx_to.items()):
			for k2, fp in list(k1_fps.items()): yield k1, k2, fp
	def __len__(self): return self._stats().conn_count


@u.attr_struct(repr=False)
class TripStop:
	'One scheduled (stop, arrival, departure) point within a Trip.'
	trip = u.attr_init()
	stopidx = u.attr_init()
	stop = u.attr_init()
	dts_arr = u.attr_init()
	dts_dep = u.attr_init()

	@classmethod
	def dummy_for_stop(cls, stop, dts_arr=0, dts_dep=0):
		'Make a trip-less TripStop, e.g. for query src/dst endpoints.'
		return cls(None, 0, stop, dts_arr, dts_dep)

	def __hash__(self): return hash((self.trip, self.stopidx))
	def __repr__(self): # mostly to avoid recursion
		return ( 'TripStop('
			'trip_id={line_id_hint}{trip_id}, stopidx={0.stopidx},'
			' stop_id={0.stop.id}, dts_arr={0.dts_arr}, dts_dep={0.dts_dep})' )\
			.format( self,
				trip_id=self.trip.id if self.trip else None,
				line_id_hint='{}:'.format(self.trip.line_id_hint)
					if self.trip and self.trip.line_id_hint else '' )

@u.attr_struct(repr=False, cmp=False)
class Trip:
	'Ordered sequence of TripStops with non-decreasing times.'
	stops = u.attr_init(list)
	id = u.attr_init_id()
	line_id_hint = u.attr_init(None) # can be set for introspection/debugging

	def add(self, stop):
		assert stop.dts_arr <= stop.dts_dep
		assert not self.stops or self.stops[-1].dts_dep <= stop.dts_arr
		self.stops.append(stop)

	def compare(self, trip):
		'Return SolutionStatus for this trip as compared to other trip.'
		# Set of per-stop "earlier arrival" comparisons, with ties dropped
		check = set(
			(None if sa.dts_arr == sb.dts_arr else sa.dts_arr < sb.dts_arr)
			for sa, sb in zip(self, trip) ).difference([None])
		if len(check) == 1: return SolutionStatus(check.pop())
		if not check: return SolutionStatus.equal
		return SolutionStatus.undecidable

	def __hash__(self): return hash(self.id)
	def __eq__(self, trip): return u.same_type_and_id(self, trip)
	def __repr__(self): # mostly to avoid recursion
		return 'Trip(id={line_id_hint}{0.id}, stops={stops})'.format(
			self, stops=len(self.stops),
			line_id_hint='{}:'.format(self.line_id_hint) if self.line_id_hint else '' )

	def __getitem__(self, n): return self.stops[n]
	def __len__(self): return len(self.stops)
	def __iter__(self): return iter(self.stops)

class Trips:
	'Set of Trip objects, indexed by trip id.'

	def __init__(self): self.set_idx = dict()

	def add(self, trip):
		assert len(trip) >= 2, trip # one-stop trips are useless for routing
		self.set_idx[trip.id] = trip

	def stat_mean_stops(self):
		if not len(self): return 0
		return (sum(len(t) for t in self) / len(self))

	def __getitem__(self, trip_id): return self.set_idx[trip_id]
	def __len__(self): return len(self.set_idx)
	def __iter__(self): return iter(self.set_idx.values())


@u.attr_struct(slots=False, defaults=None)
class TimespanInfo:
	'Parsed-calendar timespan info; all fields None when calendar data is unused.'
	keys = 'dt_start dt_min service_days date_map date_min_str date_max_str'

	_dts_start_cache = None
	@property
	def dts_start(self):
		'Offset (seconds) of dt_start from dt_min, cached after first access.'
		if self._dts_start_cache is not None: return self._dts_start_cache
		self._dts_start_cache = 0 if not self.dt_min\
			else (self.dt_start - self.dt_min).total_seconds()
		return self._dts_start_cache


@u.attr_struct
class Timetable:
	'Aggregate of all input data for the routing engine.'
	stops = u.attr_init()
	footpaths = u.attr_init()
	trips = u.attr_init()
	timespan = u.attr_init(TimespanInfo)

	def dts_relative(self, dts, dt=None):
		'Convert day-time dts to one relative to the parsed timespan start.'
		if not self.timespan.dt_min: return dts
		if not dt: return self.timespan.dts_start + dts
		return dts + (dt - self.timespan.dt_min).total_seconds()

	def dts_parse(self, day_time_str, dt=None):
		return self.dts_relative(u.dts_parse(day_time_str), dt)

	def dts_format(self, dts):
		return u.dts_format(dts - self.timespan.dts_start)



### TBRoutingEngine query result

JourneyTrip = namedtuple('JTrip', 'ts_from ts_to')
JourneyFp = namedtuple('JFootpath', 'stop_from stop_to delta')

@u.attr_struct(slots=False, repr=False, cmp=False)
class Journey:
	'Single journey - sequence of JourneyTrip/JourneyFp segments.'
	dts_start = u.attr_init()
	segments = u.attr_init(list)

	_stats_cache_t = namedtuple(
		'StatsCache', 'id dts_arr dts_dep trip_count fp_count' )
	_stats_cache = None

	def _stats(self):
		'Lazily compute id/arrival/departure/segment-count stats over segments.'
		if not self._stats_cache:
			dts_arr = trip_count = fp_count = 0
			dts_dep, dts_dep_fp, hash_vals = None, 0, list()
			for seg in self.segments:
				if isinstance(seg, JourneyTrip):
					trip_count += 1
					dts_arr = seg.ts_to.dts_arr
					hash_vals.append(seg.ts_from.trip)
					# Departure = first trip departure minus any leading footpaths
					if dts_dep is None: dts_dep = seg.ts_from.dts_dep - dts_dep_fp
				elif isinstance(seg, JourneyFp):
					fp_count += 1
					dts_arr = dts_arr + seg.delta
					hash_vals.append(seg)
					if dts_dep is None: dts_dep_fp += seg.delta
			if dts_dep is None: # no trips, only footpaths
				dts_dep, dts_arr = self.dts_start, self.dts_start + dts_arr
			self._stats_cache = self._stats_cache_t(
				hash(tuple(hash_vals)), dts_arr, dts_dep, trip_count, fp_count )
		return self._stats_cache

	def copy(self):
		attrs = u.attr.asdict(self)
		attrs['segments'] = self.segments.copy()
		return Journey(**attrs)

	def append_trip(self, *jtrip_args, **jtrip_kws):
		self.segments.append(JourneyTrip(*jtrip_args, **jtrip_kws))
		self._stats_cache = None
		return self

	def append_fp(self, stop_from, stop_to, dt):
		# Zero-length / same-stop footpaths are not stored
		if not (stop_from == stop_to or dt == 0):
			self.segments.append(JourneyFp(stop_from, stop_to, dt))
			self._stats_cache = None
		return self

	def compare(self, jn2, _ss=SolutionStatus):
		'Return SolutionStatus for this journey as compared to other journey.'
		jn1 = self
		if jn1.dts_arr == jn2.dts_arr and jn1.trip_count == jn2.trip_count:
			if jn1.dts_dep != jn2.dts_dep: return _ss.better_if(jn1.dts_dep > jn2.dts_dep)
			if jn1.fp_count != jn2.fp_count: return _ss.better_if(jn1.fp_count < jn2.fp_count)
			return _ss.equal
		if jn1.dts_arr >= jn2.dts_arr and jn1.trip_count >= jn2.trip_count: return _ss.dominated
		if jn1.dts_arr <= jn2.dts_arr and jn1.trip_count <= jn2.trip_count: return _ss.non_dominated
		# NOTE(review): falls through to implicit None for strictly incomparable
		#  journeys - possibly should be _ss.undecidable (as in Trip.compare);
		#  confirm how callers in the engine handle this before changing.

	def __len__(self): return len(self.segments)
	def __iter__(self): return iter(self.segments)
	def __hash__(self): return self.id
	def __eq__(self, journey): return u.same_type_and_id(self, journey)

	def __getattr__(self, k):
		if k in self._stats_cache_t._fields: return getattr(self._stats(), k)
		# Fix: original delegated to super().__getattr__(k), which does not
		#  exist on object and raised a confusing AttributeError about 'super'.
		raise AttributeError(k)

	def __repr__(self):
		points = list()
		for seg in self.segments:
			if isinstance(seg, JourneyTrip):
				if not points:
					points.append(
						'{0.trip.id}:{0.stopidx}:{0.stop.id}:{0.stop.name} [{dts_dep}]'\
						.format(seg.ts_from, dts_dep=u.dts_format(seg.ts_from.dts_dep)) )
				points.append(
					'{0.trip.id}:{0.stopidx}:{0.stop.id}:{0.stop.name} [{dts_arr}]'\
					.format(seg.ts_to, dts_arr=u.dts_format(seg.ts_to.dts_arr)) )
			elif isinstance(seg, JourneyFp):
				# Fix: JourneyFp field is "delta" (see namedtuple above and
				#  pretty_print below) - "seg.dt" raised AttributeError here.
				points.append('(fp-to={0.id}:{0.name} dt={1})'.format(
					seg.stop_to, datetime.timedelta(seconds=int(seg.delta)) ))
		# NOTE(review): outer format string looks garbled by extraction
		#  (likely something like '<Journey: {}>') - confirm against vcs.
		return ''.format(' - '.join(points))

	def pretty_print(self, dts_format_func=None, indent=0, **print_kws):
		'Print journey in a human-readable multi-line form.'
		if not dts_format_func: dts_format_func = u.dts_format
		p = lambda tpl,*a,**k: print(' '*indent + tpl.format(*a,**k), **print_kws)
		stop_id_ext = lambda stop:\
			' [{}]'.format(stop.id) if stop.id != stop.name else ''

		p( 'Journey {:x} (arrival: {}, trips: {}, duration: {}):',
			self.id, dts_format_func(self.dts_arr), self.trip_count,
			u.dts_format(self.dts_arr - self.dts_dep) )
		# Nested-indent prefixes below restored from README sample output -
		#  the extraction collapsed runs of spaces inside these literals.
		for seg in self.segments:
			if isinstance(seg, JourneyTrip):
				trip_id = seg.ts_from.trip.id
				if seg.ts_from.trip.line_id_hint:
					trip_id = '{}:{}'.format(seg.ts_from.trip.line_id_hint, trip_id)
				p('  trip [{}]:', trip_id)
				p( '    from (dep at {dts_dep}): {0.stopidx}:{0.stop.name}{stop_id}',
					seg.ts_from,
					stop_id=stop_id_ext(seg.ts_from.stop),
					dts_dep=dts_format_func(seg.ts_from.dts_dep) )
				p( '    to (arr at {dts_arr}): {0.stopidx}:{0.stop.name}{stop_id}',
					seg.ts_to,
					stop_id=stop_id_ext(seg.ts_to.stop),
					dts_arr=dts_format_func(seg.ts_to.dts_arr) )
			elif isinstance(seg, JourneyFp):
				p('  footpath (time: {}):', datetime.timedelta(seconds=int(seg.delta)))
				p('    from: {0.name}{stop_id}', seg.stop_from, stop_id=stop_id_ext(seg.stop_from))
				p('    to: {0.name}{stop_id}', seg.stop_to, stop_id=stop_id_ext(seg.stop_to))


@u.attr_struct
class JourneySet:
	'Set of journeys, as returned from engine queries.'
	journeys = u.attr_init(set)

	def add(self, journey): self.journeys.add(journey)

	def __len__(self): return len(self.journeys)
	def __iter__(self): return iter(self.journeys)

	def pretty_print(self, dts_format_func=None, indent=0, **print_kws):
		print(' '*indent + 'Journey set ({}):'.format(len(self.journeys)))
		for journey in sorted( self.journeys,
				key=op.attrgetter('dts_dep', 'dts_arr', 'dts_start', 'id') ):
			print()
			journey.pretty_print(dts_format_func=dts_format_func, indent=indent+2, **print_kws)
-------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ======================================== 2 | trip-based-public-transit-routing-algo 3 | ======================================== 4 | ---------------------------------------------------------------------- 5 | Python implementation of trip-based public transit routing algorithm 6 | ---------------------------------------------------------------------- 7 | 8 | Implementation of the fast graph-based transit-routing algorithm from the 9 | following papers: 10 | 11 | - Trip-Based Public Transit Routing (`arXiv:1504.07149v2`_, 2015) 12 | - Trip-Based Public Transit Routing Using Condensed Search Trees 13 | (`arXiv:1607.01299v2`_, 2016) 14 | 15 | ...with source data parsed (by cli script) from `GTFS feeds 16 | `_. 17 | 18 | See "Links" section below for more references. 19 | 20 | Not focused on performance too much, mostly data structures layout and 21 | algo-correctness, i.e. just a proof of concept or reference code, 22 | not suitable for any kind of production use. 23 | 24 | | 25 | 26 | .. contents:: 27 | :backlinks: none 28 | 29 | 30 | 31 | Usage 32 | ----- 33 | 34 | There's command-line ``gtfs-tb-routing.py`` script that builds timetable from 35 | GTFS source data, initializes routing engine with it and runs queries on that, 36 | but routing engine itself can be used separately. 37 | 38 | Regardless of interface, highly recommend using PyPy3 (3.3+) to run the thing, 39 | as it gives orders-of-magnitude performance boost here over CPython, and 40 | transfer-set pre-calculation with large datasets can take a while. 41 | 42 | No dedicated attempt at parallelization or memory optimizations is made here, 43 | so it might take much longer than necessary and eat all the RAM regardless. 
44 | 45 | 46 | Command-line script 47 | ``````````````````` 48 | 49 | Usage: ``./gtfs-tb-routing.py [options] gtfs-data-dir-or-file command ...`` 50 | 51 | For example, to run a profile query on an (unpacked) GTFS data from specified 52 | dir and pretty-print resulting (pareto-optimal) JourneySet to stdout, 53 | ``query-profile`` command can be used:: 54 | 55 | ./gtfs-tb-routing.py gtfs-data query-profile stop-A stop-B 56 | 57 | See ``./gtfs-tb-routing.py --help`` command output for a full list of all 58 | supported/implemented commands and options. 59 | 60 | Some sample GTFS data zips can be found in ``test/`` directory. 61 | 62 | Links to many open real-world GTFS feeds are available at `transit.land 63 | `_ repository. 64 | 65 | Example usage:: 66 | 67 | % unzip test/gtfs_shizuoka.data.2016-10-13.zip -d gtfs-shizuoka 68 | Archive: test/gtfs_shizuoka.data.2016-10-13.zip 69 | inflating: gtfs-shizuoka/agency.txt 70 | inflating: gtfs-shizuoka/routes.txt 71 | inflating: gtfs-shizuoka/trips.txt 72 | inflating: gtfs-shizuoka/stops.txt 73 | inflating: gtfs-shizuoka/calendar_dates.txt 74 | inflating: gtfs-shizuoka/stop_times.txt 75 | inflating: gtfs-shizuoka/shapes.txt 76 | 77 | % ./gtfs-tb-routing.py gtfs-shizuoka \ 78 | --debug --day 2016-10-14 \ 79 | --cache-timetable gtfs-shizuoka.pickle \ 80 | --cache-precalc gtfs-shizuoka.cache cache 81 | 82 | % ./gtfs-tb-routing.py gtfs-shizuoka.pickle -c gtfs-shizuoka.cache \ 83 | query-earliest-arrival J22209723_0 J2220952426_0 84 | 85 | Journey set (1): 86 | Journey 33883d2af26ea29d (arrival: 08:43:00, trips: 2, duration: 02:33:00): 87 | trip [333]: 88 | from (dep at 06:10:00): 10:小川 [J22209723_0] 89 | to (arr at 06:55:00): 49:島田駅 北口2番のりば [J222093340_2] 90 | trip [341]: 91 | from (dep at 08:35:00): 20:島田駅 北口2番のりば [J222093340_2] 92 | to (arr at 08:43:00): 28:ばらの丘一丁目 [J2220952426_0] 93 | 94 | % ./gtfs-tb-routing.py gtfs-shizuoka.pickle -c gtfs-shizuoka.cache \ 95 | query-earliest-arrival J22209843_0 J222093345_0 96 | 97 | Journey 
set (1): 98 | Journey 35cd107483780a29 (arrival: 07:41:00, trips: 2, duration: 00:39:00): 99 | trip [458]: 100 | from (dep at 07:02:00): 1:田代環境プラザ [J22209843_0] 101 | to (arr at 07:26:00): 20:島田駅 北口1番のりば [J222093340_1] 102 | footpath (time: 0:02:16): 103 | from: 島田駅 北口1番のりば [J222093340_1] 104 | to: 島田駅 北口2番のりば [J222093340_2] 105 | trip [26]: 106 | from (dep at 07:33:00): 38:島田駅 北口2番のりば [J222093340_2] 107 | to (arr at 07:41:00): 45:島田市民病院 [J222093345_0] 108 | 109 | 110 | % ./gtfs-tb-routing.py gtfs-shizuoka.pickle -c gtfs-shizuoka.cache \ 111 | query-profile J22209723_0 J2220952426_0 112 | 113 | Journey set (7): 114 | 115 | Journey 3387dfa2a4cb3956 (arrival: 08:43:00, trips: 2, duration: 01:23:00): 116 | trip [174]: 117 | from (dep at 07:20:00): 10:小川 [J22209723_0] 118 | to (arr at 08:05:00): 49:島田駅 北口2番のりば [J222093340_2] 119 | trip [341]: 120 | from (dep at 08:35:00): 20:島田駅 北口2番のりば [J222093340_2] 121 | to (arr at 08:43:00): 28:ばらの丘一丁目 [J2220952426_0] 122 | 123 | Journey 338759bc528596df (arrival: 10:53:00, trips: 2, duration: 02:33:00): 124 | trip [54]: 125 | from (dep at 08:20:00): 10:小川 [J22209723_0] 126 | to (arr at 09:05:00): 49:島田駅 北口2番のりば [J222093340_2] 127 | trip [94]: 128 | from (dep at 10:45:00): 20:島田駅 北口2番のりば [J222093340_2] 129 | to (arr at 10:53:00): 28:ばらの丘一丁目 [J2220952426_0] 130 | ... 131 | 132 | 133 | Note that ``cache`` command is used before queries to cache both timetable (for 134 | a specific day and its vicinity) and precalculation result (lines, transfer set) 135 | to avoid doing that for every subsequent query. 136 | 137 | Queries above do not use calendar data, i.e. all trips from the timetable are 138 | considered to be valid. 139 | 140 | To use calendar data, **always specify -d/--day** (and maybe ``--parse-days`` 141 | and ``--parse-days-pre`` options) when building graph - i.e. "cache" command, or 142 | when running query without cache opts. 
143 | 144 | Use ``--debug`` option to see pre-calculation progress (useful for large datasets) 145 | and misc other stats and logging. 146 | 147 | 148 | Python REPL (and IPython/Jupyter) 149 | ````````````````````````````````` 150 | 151 | It'd probably make sense to generate graph cache beforehand, i.e. by running:: 152 | 153 | % ./gtfs-tb-routing.py \ 154 | gtfs-gbrail --stops-as-stations --debug --day 2017-05-13 \ 155 | --cache-timetable gtfs-gbrail.pickle --cache-precalc gtfs-gbrail.cache cache 156 | 157 | That will create "gtfs-gbrail.pickle" and "gtfs-gbrail.cache" files from source 158 | data, which take much less time to load than building whole graph from GTFS (for 159 | circa-2017 gbrain.info data on pypy 3.3 and circa-2012 desktop it takes ~30min). 160 | 161 | Be sure to run the REPL in the project dir or have tb_routing importable there 162 | in some other fashion. 163 | 164 | :: 165 | 166 | Python 3.3.5 (ea9979b550eeae87924dc4bef06070e8f8d0e22f, Oct 12 2016, 11:31:15) 167 | [PyPy 5.5.0-alpha0 with GCC 6.2.1 20160830] on linux 168 | Type "help", "copyright", "credits" or "license" for more information. 169 | And now for something completely different: ``apparently confusion is a 170 | feature'' 171 | 172 | >>>> import tb_routing as tb 173 | >>>> tt, r = tb.init_gtfs_router('gtfs-gbrail.pickle', 'gtfs-gbrail.cache') 174 | >>>> journeys = r.query_profile('DIS', 'WWW') 175 | >>>> journeys 176 | ... 177 | >>>> journeys.pretty_print() 178 | ... 179 | 180 | This should allow for easier tinkering, without needing to reload data caches on 181 | every query. 182 | 183 | 184 | Routing engine 185 | `````````````` 186 | 187 | ``tb_routing.engine`` module implements actual routing, and can be used with any 188 | kind of timetable data source, passed as a ``tb_routing.types.public.Timetable`` 189 | to it on init. 190 | 191 | Subsequent queries to engine instance return ``tb_routing.types.public.JourneySet``. 
192 | 193 | See `test/simple.py `_ for example of how such Timetable can be 194 | constructed and queried with trivial test-data. 195 | 196 | 197 | Requirements 198 | ```````````` 199 | 200 | - Python 3.x 201 | - `attrs `_ 202 | - (only if gtfs calendar.txt is used) `pytz `_ 203 | - (for tests only) `PyYAML `_ 204 | - (for Python<3.4 only) `pathlib `_ 205 | - (for Python<3.4 only) `enum34 `_ 206 | 207 | To install all these on any random system (to ``~/.local/`` with ``--user``):: 208 | 209 | % python3 --version 210 | Python 3.3.5 (ea9979b550eeae87924dc4bef06070e8f8d0e22f, Oct 12 2016, 11:31:15) 211 | [PyPy 5.5.0-alpha0 with GCC 6.2.1 20160830] 212 | 213 | % python3 -m ensurepip --user 214 | % python3 -m pip install --user attrs pyyaml pytz 215 | 216 | ## For python<3.4 only, but safe to run on later ones as well 217 | % python3 -m pip install --user pathlib enum34 218 | 219 | ## Done, run the app/tests 220 | % ./gtfs-tb-routing.py --help 221 | ... 222 | % python3 -m unittest test.all 223 | 224 | Alternatively, run ``python3 -m virtualenv tb-routing-venv && 225 | . tb-routing-venv/bin/activate`` before above commands to have these modules 226 | installed into "tb-routing-venv" dir, if `virtualenv `_ 227 | module is installed/available (can be installed via pip same as others above). 228 | 229 | 230 | 231 | Notes 232 | ----- 233 | 234 | Some less obvious things are described in this section. 235 | 236 | 237 | Calendar data 238 | ````````````` 239 | 240 | Real-world GTFS feeds usually have calendar.txt or calendar_dates.txt files in 241 | them (and e.g. gbrail.info even has links.txt for time-dependent footpaths), 242 | which define whether specific sets of trips (services) are valid/invalid for 243 | specific date/time ranges. 
244 | 245 | In addition to providing correct results, this info can be used to greatly 246 | reduce the initial timetable (by not considering all trips that aren't valid for 247 | specific day) and transfer set size (as some transfers aren't valid due to time 248 | when trips' services operate). 249 | 250 | So to work with any real-world feed, be sure to use ``-d/--day`` option (and 251 | associated ones), as that'd both improve performance and provide correct results. 252 | 253 | Default is to parse and consider all trips to be valid for all days. 254 | 255 | 256 | Generated transfers/footpaths 257 | ````````````````````````````` 258 | 259 | Not all GTFS data contains (optional) transfers.txt files, and sometimes these 260 | are very slim or just empty. 261 | 262 | Algorithm used here relies on having both "footpath" links between different 263 | stops and even within same stop ("interchange time" - how soon one can board 264 | different trip after exiting from the last one at the same stop). 265 | 266 | So gtfs parser module, by default, generates footpaths based on stop locations 267 | (lon/lat) and a bunch of static parameters (like 2 min "base" 268 | interchange/walking time and 5 km/h walking speed), if such data is missing or 269 | doesn't even contain half of interchange times for stops. 270 | 271 | Such generation process can be configured somewhat via ``tb_routing.gtfs.GTFSConf``. 272 | 273 | 274 | Journey optimality criteria 275 | ```````````````````````````` 276 | 277 | Trip-Based algorithm, as described in the `arXiv:1504.07149v2`_ paper optimizes 278 | earliest-arrival queries for two criteria: 279 | 280 | - Earliest arrival time. 281 | - Minimal number of transfers. 282 | 283 | Profile queries there have additional criteria - latest departure time. 284 | 285 | Result of this algorithm is a pareto-optimal set of trip-sequences (i.e. graph 286 | nodes) that lead to optimal set of these parameters.
287 | 288 | To construct journey info from such nodes (trips) in a deterministic and 289 | somewhat sensible fashion, additional "minimal footpath time" criteria is used to 290 | pick optimal edges (footpaths/interchanges), with earliest optimal footpath 291 | preferred over later ones in case of ties. 292 | 293 | 294 | Caching 295 | ``````` 296 | 297 | ``--cache-timetable`` and ``-c/--precalc-cache`` options allow caching of 298 | gtfs-processing/pre-computation results and re-use them between queries, which 299 | can be very useful when working with non-trivial (e.g. real-world) datasets. 300 | 301 | These options can and should be used together, or at least in that order, as 302 | tuples in TransferSet dumped with ``-c/--precalc-cache`` refer to ids of objects 303 | in Timetable. 304 | 305 | ``./gtfs-tb-routing.py ... --cache-timetable ... --cache-precalc ... cache`` 306 | command can be used to simply generate all the caches and exit. 307 | 308 | ``--cache-timetable`` uses pickle serialization, so can be quite slow, 309 | especially when saving data. 310 | 311 | 312 | Tests 313 | ````` 314 | 315 | Commands to run tests from checkout directory:: 316 | 317 | % python3 -m unittest test.all 318 | % python3 -m unittest test.gtfs_shizuoka 319 | % python3 -m unittest -vf test.simple 320 | 321 | ``test.all.case`` also provides global index of all test cases by name:: 322 | 323 | % python3 -m unittest test.all.case.test_journeys_J22209723_J2220952426 324 | % python3 -m unittest test.all.case.testMultipleRoutes 325 | 326 | 327 | Performance optimization 328 | ```````````````````````` 329 | 330 | Pre-calculation in Trip-Based routing algorithm, as noted in paper, is very 331 | suitable for further optimization, especially on multi-core systems, where each 332 | trip in the main loop there can be processed in parallel with minimal 333 | synchronization.
334 | 335 | Python does not provide an easy way to optimize such processing, especially due 336 | to slow serialization of high-level objects and lack of support for cpu-bound 337 | threads working in shared memory. 338 | 339 | Workarounds are possible, but it's probably not worth considering python code 340 | for any kind of production use. 341 | 342 | 343 | Generating timetables from json-dgc graphs 344 | `````````````````````````````````````````` 345 | 346 | `json-dgc `_ is a simple d3-based tool to 347 | interactively draw and save/load directed graphs to/from JSON. 348 | 349 | It can be used to draw some testing transport network, using nodes as stops, 350 | positioning them as they'd be on a flat map (to auto-generate footpaths to ones 351 | that are close) and naming/connecting them according to trip-lines. 352 | 353 | .. figure:: doc/example-images/json-dgc.jpg 354 | :alt: json-dgc webui with loaded example graph 355 | 356 | ``timetable-from-json-dgc.py`` script can then be used to convert saved JSON 357 | graph into a pickled timetable, with trips auto-generated to run with regular 358 | intervals (and some fixed speed) along drawn lines, and footpaths connecting 359 | stops that are close enough. 360 | 361 | Script requires node names to have following format:: 362 | 363 | [:]L-[/L-]... 364 | 365 | Where "line" is an arbitrary id for line (group of non-overtaking trips over 366 | same stops at diff times), and "seq" is a string to sort stops for this line by, 367 | e.g. stops/nodes [L1-a, L1-b, L1-c] will be grouped into same line with 3 stops 368 | in that "a-b-c" order (alphasort). 369 | 370 | Names like "L1-f/L5-a/L3-m" can be used when multiple lines pass through same stop. 371 | Drawn edges aren't actually used by the script, node names/positions should have 372 | all the necessary info. 373 | 374 | See script itself for all the constants like train/footpath speeds, line trips 375 | first/last times, intervals, stop arrival-departure deltas, etc. 
376 | 377 | ``timetable-from-json-dgc.example.json`` is an example JSON graph, as produced 378 | by json-dgc, and can be loaded/tweaked there or used as a template to generate 379 | with some other tool (just two lists of all nodes / edges). 380 | 381 | Generated timetable pickle file can be loaded by ``gtfs-tb-routing.py`` cli 382 | script by simply pointing it to a file with pickled timetable instead of gtfs 383 | dir. 384 | 385 | 386 | Using graphviz to render internal graphs 387 | ```````````````````````````````````````` 388 | 389 | ``gtfs-tb-routing.py`` script has ``--dot-...`` options to dump various internal 390 | graphs in `graphviz "dot" format `_, 391 | which can then be rendered by `graphviz `_, one of its 392 | wrappers or any similar tool. 393 | 394 | When visualized, such graphs can be useful to understand what's happening 395 | "under the hood" and easily identify potential issues at a glance. 396 | 397 | For example, to render all stops and lines connecting them from 398 | ``timetable-from-json-dgc.example.json`` graph above and then open it in 399 | `xdot `_ graphviz wrapper, 400 | following commands can be used:: 401 | 402 | % ./gtfs-tb-routing.py -t tt.pickle \ 403 | --dot-for-lines lines.dot query-profile L2-a/L3-k L2-k/L3-i 404 | % xdot lines.dot 405 | 406 | .. figure:: doc/example-images/dot-for-lines.jpg 407 | :alt: xdot showing dot-for-lines graph fragment 408 | 409 | Or, to render a tree of transfer-patterns for a specified source stop:: 410 | 411 | % ./gtfs-tb-routing.py -t tt.pickle \ 412 | query-transfer-patterns --dot-for-tp-subtree tp-subtree.dot L2-a/L3-k L2-k/L3-i 413 | % xdot tp-subtree.dot 414 | 415 | .. figure:: doc/example-images/dot-for-tp-subtree.jpg 416 | :alt: xdot showing dot-for-tp-subtree graph fragment 417 | 418 | See ``-h/--help`` output for the script and relevant subcommands for more of these. 
419 | 420 | 421 | 422 | Missing things 423 | -------------- 424 | 425 | - Transfer Patterns tree storage optimization ("Splitting Trees") as per 426 | `arXiv:1607.01299v2`_ paper. 427 | 428 | Proved to be rather difficult to implement in a more-or-less comprehensible 429 | fashion, complicates code quite a lot, is rather boring and slows down the 430 | actual queries, hence somewhat dubious. 431 | 432 | - Profile queries using query trees can probably benefit from optimizations 433 | described in "Multi-criteria Shortest Paths in Time-Dependent Train Networks" 434 | paper. 435 | 436 | - Some additional data from GTFS can be used, e.g. frequences.txt and names for 437 | services/trips/lines for more comprehensible results and easier introspection. 438 | 439 | - Storing data in some db instead of memory and loading it selectively seem to 440 | be the most obvious practical optimization. 441 | 442 | - Some interactive querying interface for long-running instance (e.g. webui or 443 | ipynb) can be useful for all kinds of experiments, in addition to existing 444 | caching code. 
445 | 446 | 447 | 448 | Links 449 | ----- 450 | 451 | Papers/docs directly related to this project: 452 | 453 | - Trip-Based Public Transit Routing (`arXiv:1504.07149v2`_, 2015) 454 | 455 | - Trip-Based Public Transit Routing Using Condensed Search Trees 456 | (`arXiv:1607.01299v2`_, 2016) 457 | 458 | This paper relies heavily on algorithms and concepts described in: 459 | 460 | - Fast Routing in Very Large Public Transportation Networks using Transfer Patterns 461 | (`ACM:1888969 `_, 462 | `transferpatterns.pdf `_, 2010) 463 | 464 | - Multi-criteria Shortest Paths in Time-Dependent Train Networks 465 | (`ACM:1788914 `_, 466 | `DisserMullerHannemannEtal2008.pdf 467 | `_, 468 | 2008) 469 | 470 | - `General Transit Feed Specification (GTFS) format info 471 | `_ 472 | 473 | More on the subject: 474 | 475 | - `Topical github awesome-transit list-repo `_ 476 | 477 | - `OpenTripPlanner (OTP) project `_ + `Bibliography.md there 478 | `_ 479 | 480 | Includes implementation of `RAPTOR 481 | `_ -like 482 | RoundBasedProfileRouter (see RepeatedRaptorProfileRouter.java and PR-1922 there). 483 | 484 | - `Graphserver project `_ 485 | 486 | - `transit.land open GTFS transit data feeds/repository `_ 487 | 488 | - Github orgs/groups related to transportation maps/routing: 489 | 490 | - `open-track `_ 491 | - `OpenTransport `_ 492 | 493 | 494 | .. _arXiv\:1504.07149v2: https://arxiv.org/abs/1504.07149 495 | .. _arXiv\:1607.01299v2: https://arxiv.org/abs/1607.01299 496 | -------------------------------------------------------------------------------- /tb_routing/engine.py: -------------------------------------------------------------------------------- 1 | import itertools as it, operator as op, functools as ft 2 | from collections import defaultdict, namedtuple, Counter 3 | 4 | from . 
import utils as u, types as t 5 | 6 | 7 | @u.attr_struct(vals_to_attrs=True) 8 | class EngineConf: 9 | log_progress_for = None # or a set/list of prefixes 10 | log_progress_steps = 30 11 | 12 | 13 | def timer(self_or_func, func=None, *args, **kws): 14 | 'Calculation call wrapper for timer/progress logging.' 15 | if not func: return lambda s,*a,**k: s.timer_wrapper(self_or_func, s, *a, **k) 16 | return self_or_func.timer_wrapper(func, *args, **kws) 17 | 18 | 19 | def jtrips_to_journeys(footpaths, stop_src, stop_dst, dts_src, results): 20 | 'Convert list/set of QueryResults to JourneySet with proper journey descriptions.' 21 | JourneySoFar = namedtuple('JSF', 'ts_src journey prio') # unfinished journey up to ts_src 22 | 23 | journeys = t.public.JourneySet() 24 | for result in results: 25 | jtrips = result.jtrips 26 | queue = [JourneySoFar( 27 | t.public.TripStop.dummy_for_stop(stop_src), 28 | t.public.Journey(dts_src), prio=0 )] 29 | 30 | for trip in it.chain(jtrips, [None]): # +1 iteration to add fp to stop_dst 31 | queue_prev, queue = queue, list() 32 | for jsf in queue_prev: 33 | 34 | if not trip: # final footpath to stop_dst 35 | ts_list = jsf.ts_src.trip[jsf.ts_src.stopidx+1:] if jsf.ts_src.trip else [jsf.ts_src] 36 | for ts in ts_list: 37 | fp_delta = 0 if ts.stop == stop_dst else\ 38 | footpaths.time_delta(ts.stop, stop_dst, dts_src=ts.dts_arr) 39 | if fp_delta is None: continue 40 | jn = jsf.journey.copy() 41 | if ts.trip: jn.append_trip(jsf.ts_src, ts) 42 | jn.append_fp(ts.stop, stop_dst, fp_delta) 43 | queue.append(JourneySoFar(None, jn, jsf.prio + fp_delta)) 44 | 45 | elif not jsf.ts_src.trip: # footpath from stop_src, not a trip 46 | for ts in trip: 47 | fp_delta = 0 if jsf.ts_src.stop == ts.stop else\ 48 | footpaths.time_delta(jsf.ts_src.stop, ts.stop, dts_dst=ts.dts_dep) 49 | if fp_delta is None: continue 50 | jn = jsf.journey.copy().append_fp(jsf.ts_src.stop, ts.stop, fp_delta) 51 | queue.append(JourneySoFar(ts, jn, jsf.prio + fp_delta)) 52 | 53 | 
else: # footpath from previous trip - common case 54 | for ts1, ts2 in it.product(jsf.ts_src.trip[jsf.ts_src.stopidx+1:], trip): 55 | fp_delta = footpaths.time_delta( 56 | ts1.stop, ts2.stop, dts_src=ts1.dts_arr, dts_dst=ts2.dts_dep ) 57 | if fp_delta is None: continue 58 | jn = jsf.journey.copy() 59 | jn.append_trip(jsf.ts_src, ts1) 60 | jn.append_fp(ts1.stop, ts2.stop, fp_delta) 61 | queue.append(JourneySoFar(ts2, jn, jsf.prio + fp_delta)) 62 | 63 | if queue: 64 | best_jsf = min(queue, key=op.attrgetter('prio')) 65 | journeys.add(best_jsf.journey) 66 | 67 | return journeys 68 | 69 | 70 | class TimetableError(Exception): pass 71 | 72 | class TBRoutingEngine: 73 | 74 | graph = None 75 | 76 | def __init__(self, timetable, conf=None, cached_graph=None, timer_func=None): 77 | '''Creates Trip-Based Routing Engine from Timetable data.''' 78 | self.conf, self.log = conf or EngineConf(), u.get_logger('tb') 79 | self.timer_wrapper = timer_func if timer_func else lambda f,*a,**k: f(*a,**k) 80 | self.jtrips_to_journeys = ft.partial(self.timer_wrapper, jtrips_to_journeys) 81 | 82 | if not cached_graph: 83 | lines = self.timetable_lines(timetable) 84 | transfers = self.precalc_transfer_set(timetable, lines) 85 | graph = t.base.Graph(timetable, lines, transfers) 86 | else: 87 | graph = self.timer_wrapper(t.base.Graph.load, cached_graph, timetable) 88 | self.graph = graph 89 | 90 | @u.coroutine 91 | def progress_iter(self, prefix, n_max, steps=None, n=0): 92 | 'Progress logging helper coroutine for long calculations.' 
93 | prefix_set = self.conf.log_progress_for 94 | if not prefix_set or prefix not in prefix_set: 95 | while True: yield # dry-run 96 | if not steps: steps = self.conf.log_progress_steps 97 | steps = min(n_max, steps) 98 | step_n = steps and n_max / steps 99 | msg_tpl = '[{{}}] Step {{:>{0}.0f}} / {{:{0}d}}{{}}'.format(len(str(steps))) 100 | while True: 101 | dn_msg = yield 102 | if isinstance(dn_msg, tuple): dn, msg = dn_msg 103 | elif isinstance(dn_msg, int): dn, msg = dn_msg, None 104 | else: dn, msg = 1, dn_msg 105 | n += dn 106 | if n == dn or n % step_n < 1: 107 | if msg: 108 | if not isinstance(msg, str): msg = msg[0].format(*msg[1:]) 109 | msg = ': {}'.format(msg) 110 | self.log.debug(msg_tpl, prefix, n / step_n, steps, msg or '') 111 | 112 | 113 | @timer 114 | def timetable_lines(self, timetable): 115 | 'Line (pre-)calculation from Timetable data.' 116 | 117 | line_trips = defaultdict(list) 118 | line_stops = lambda trip: tuple(map(op.attrgetter('stop'), trip)) 119 | for trip in timetable.trips: 120 | dts_chk = -1 121 | for ts in trip: # sanity check 122 | if not (ts.dts_arr >= dts_chk and ts.dts_arr <= ts.dts_dep): 123 | u.log_lines( self.log.debug, 124 | [('Time jumps backwards for stops of the trip: {}', trip)] 125 | + list((' {}', ts) for ts in trip) ) 126 | raise TimetableError('Time jumps backwards for stops of the trip', trip) 127 | dts_chk = ts.dts_dep 128 | line_trips[line_stops(trip)].append(trip) 129 | 130 | lines, progress = t.base.Lines(), self.progress_iter('lines', len(line_trips)) 131 | for trips in line_trips.values(): 132 | progress.send(['line-count={:,}', len(lines)]) 133 | lines_for_stopseq = list() 134 | 135 | # Split same-stops trips into non-overtaking groups 136 | for trip_a in trips: 137 | for line in lines_for_stopseq: 138 | for trip_b in line: 139 | ordering = trip_a.compare(trip_b) 140 | if ordering is ordering.undecidable: break 141 | else: 142 | line.add(trip_a) 143 | break 144 | else: # failed to find line to group trip into 
145 | lines_for_stopseq.append(t.base.Line(trip_a)) 146 | 147 | lines.add(*lines_for_stopseq) 148 | 149 | return lines 150 | 151 | @timer 152 | def precalc_transfer_set(self, timetable, lines): 153 | # Steps here are merged from 3 separate steps in the paper 154 | transfers = t.base.TransferSet() 155 | 156 | def update_min_time(min_time_map, stop, dts): 157 | if dts < min_time_map.get(stop, u.inf): 158 | min_time_map[stop] = dts 159 | return True 160 | return False 161 | 162 | counts, progress = Counter(), self.progress_iter('pre-initial-set', len(timetable.trips)) 163 | for n, trip_t in enumerate(timetable.trips): 164 | progress.send([ 'transfer-set-size={:,} processed-trips={:,}, discarded' 165 | ' u-turns={:,} subopt={:,}', len(transfers), n, counts['uturns'], counts['worse'] ]) 166 | min_time_arr, min_time_ch = dict(), dict() 167 | 168 | for i in range(len(trip_t)-1, 0, -1): # first stop of the trip is skipped 169 | ts_p = trip_t[i] 170 | 171 | reachable_stops = list() 172 | update_min_time(min_time_arr, ts_p.stop, ts_p.dts_arr) 173 | for stop_q, fp in timetable.footpaths.to_stops_from(ts_p.stop): 174 | fp_delta = fp.get_shortest(dts_src=ts_p.dts_arr) 175 | if fp_delta is None: continue 176 | dts_q = ts_p.dts_arr + fp_delta 177 | update_min_time(min_time_arr, stop_q, dts_q) 178 | update_min_time(min_time_ch, stop_q, dts_q) 179 | reachable_stops.append((stop_q, fp_delta, dts_q)) 180 | 181 | for stop_q, transfer_fp_delta, dts_q in reachable_stops: 182 | for j, line in lines.lines_with_stop(stop_q): 183 | if j == len(line[0]) - 1: continue # transfers to last stop make no sense 184 | trip_u = line.earliest_trip(j, dts_q) 185 | if not trip_u: continue # all trips for L(q) have departed by dts_q 186 | ts_q = trip_u[j] 187 | 188 | if not ( 189 | line != lines.line_for_trip(trip_t) 190 | or trip_u.compare(trip_t) is t.public.SolutionStatus.non_dominated 191 | or j < i ): continue 192 | 193 | # U-turn transfers 194 | try: ts_t, ts_u = trip_t[i-1], trip_u[j+1] 195 | 
except IndexError: continue # transfers from-start/to-end of t/u trips 196 | if ts_t.stop == ts_u.stop: 197 | delta = timetable.footpaths.time_delta( 198 | ts_t.stop, ts_u.stop, dts_src=ts_t.dts_arr, dts_dst=ts_u.dts_dep ) 199 | if delta is not None and ts_t.dts_arr + delta <= ts_u.dts_dep: 200 | counts['uturns'] += 1 201 | continue 202 | 203 | # No-improvement transfers 204 | keep = False 205 | for k in range(j+1, len(trip_u)): 206 | ts = trip_u[k] 207 | keep = keep | update_min_time(min_time_arr, ts.stop, ts.dts_arr) 208 | for stop, fp in timetable.footpaths.to_stops_from(ts_u.stop): 209 | fp_delta = fp.get_shortest(dts_src=ts_u.dts_arr) 210 | if fp_delta is None: continue 211 | dts = ts_u.dts_arr + fp_delta 212 | keep = keep | update_min_time(min_time_arr, stop, dts) 213 | keep = keep | update_min_time(min_time_ch, stop, dts) 214 | if not keep: 215 | counts['worse'] += 1 216 | continue 217 | 218 | transfers.add(t.base.Transfer(ts_p, ts_q, transfer_fp_delta)) 219 | 220 | self.log.debug( 'Discarded u-turns={:,}' 221 | ' no-improvement={:,}', counts['uturns'], counts['worse'] ) 222 | self.log.debug('Resulting transfer set size: {:,}', len(transfers)) 223 | return transfers 224 | 225 | 226 | @timer 227 | def query_earliest_arrival(self, stop_src, stop_dst, dts_src): 228 | '''Algorithm 4: Earliest arrival query. 
229 | Actually a bicriteria query that finds 230 | min-transfer journeys as well, just called that in the paper.''' 231 | # XXX: special case of profile-query, should be merged into that 232 | timetable, lines, transfers = self.graph 233 | 234 | TripSegment = namedtuple('TripSeg', 'trip stopidx_a stopidx_b journey') 235 | results = t.pareto.QueryResultParetoSet() 236 | R, Q = dict(), dict() 237 | 238 | def enqueue(trip, i, n, jtrips, _ss=t.public.SolutionStatus): 239 | i_max = len(trip) - 1 # for the purposes of "infinity" here 240 | if i >= R.get(trip, i_max): return 241 | Q.setdefault(n, list()).append( 242 | TripSegment(trip, i, R.get(trip, i_max), jtrips.copy()) ) 243 | for trip_u in lines.line_for_trip(trip)\ 244 | .trips_by_relation(trip, _ss.non_dominated, _ss.equal): 245 | R[trip_u] = min(i, R.get(trip_u, i_max)) 246 | 247 | # Trips-to-destination index is used here instead of lines-to-destination, 248 | # because footpath time deltas are tied to each trip's stop times, and can't be 249 | # generalized to lines with multiple arrival-times per stop, as it is in the algo.
250 | trips_to_dst = dict() # {trip: (i, fp_delta)} 251 | for stop_q, fp in timetable.footpaths.from_stops_to(stop_dst): 252 | if stop_q == stop_dst: fp = None 253 | for i, line in lines.lines_with_stop(stop_q): 254 | for trip in line: 255 | fp_delta = 0 if fp is None else fp.get_shortest(dts_src=trip[i].dts_arr) 256 | if fp_delta is None: continue 257 | trips_to_dst[trip] = i, fp_delta 258 | 259 | # Queue initial set of trips (reachable from stop_src) to examine 260 | for stop_q, fp in timetable.footpaths.to_stops_from(stop_src): 261 | fp_delta = fp.get_shortest(dts_src=dts_src) if stop_q != stop_src else 0 262 | if fp_delta is None: continue 263 | dts_q, jtrips = dts_src + fp_delta, list() 264 | if stop_q == stop_dst: 265 | results.add(t.base.QueryResult(dts_q, 0, jtrips)) 266 | continue # can't be beaten on time or transfers - can only be extended 267 | for i, line in lines.lines_with_stop(stop_q): 268 | trip = line.earliest_trip(i, dts_q) 269 | if trip: enqueue(trip, i, 0, jtrips) 270 | 271 | # Main loop 272 | t_min, n = u.inf, 0 273 | while Q: 274 | for trip, b, e, jtrips in Q.pop(n): 275 | jtrips = jtrips + [trip] 276 | 277 | # Check if trip reaches stop_dst (or its footpath-vicinity) directly 278 | if trip in trips_to_dst: 279 | i_dst, fp_delta = trips_to_dst[trip] 280 | if b < i_dst: # can't reach previous stops, and b->b trips make no sense 281 | dts_dst = trip[i_dst].dts_arr + fp_delta 282 | if dts_dst < t_min: 283 | t_min = dts_dst 284 | results.add(t.base.QueryResult(dts_dst, n, jtrips)) 285 | 286 | for i in range(b+1, e+1): # b < i <= e 287 | if trip[i].dts_arr >= t_min: break # after +1 transfer, it's guaranteed to be dominated 288 | for transfer in transfers.from_trip_stop(trip[i]): 289 | if transfer.ts_to.dts_arr >= t_min: continue 290 | enqueue(transfer.ts_to.trip, transfer.ts_to.stopidx, n+1, jtrips) 291 | 292 | n += 1 293 | 294 | return self.jtrips_to_journeys( 295 | timetable.footpaths, stop_src, stop_dst, dts_src, results ) 296 | 297 | 298 | 
@timer 299 | def query_profile(self, stop_src, stop_dst, dts_edt=None, dts_ldt=None, max_transfers=15): 300 | '''Profile query, returning a list of pareto-optimal JourneySet results with Journeys 301 | from stop_src to stop_dst, with departure at stop_src in a day-time (dts) interval 302 | from dts_edt (earliest departure time) to dts_ldt (latest).''' 303 | timetable, lines, transfers = self.graph 304 | if dts_edt is None: dts_edt = timetable.dts_parse('00:00') 305 | if dts_ldt is None: dts_ldt = timetable.dts_parse('24:00') 306 | 307 | DepartureCriteriaCheck = namedtuple('DCCheck', 'trip stopidx dts_src journey') 308 | TripSegment = namedtuple('TripSeg', 'trip stopidx_a stopidx_b journey') 309 | 310 | results = t.pareto.QueryResultParetoSet() 311 | R, Q = dict(), dict() 312 | 313 | def enqueue(trip, i, n, jtrips, _ss=t.public.SolutionStatus): 314 | i_max = len(trip) - 1 # for the purposes of "infinity" here 315 | # Labels here are set for "n, trip" instead of "trip", so that 316 | # they can be reused after n jumps back to 0 (see main loop below). 317 | if i >= R.get((n, trip), i_max): return 318 | Q.setdefault(n, list()).append( 319 | TripSegment(trip, i, R.get((n, trip), i_max), jtrips.copy()) ) 320 | for trip_u in lines.line_for_trip(trip)\ 321 | .trips_by_relation(trip, _ss.non_dominated, _ss.equal): 322 | i_min = min(i, R.get((n, trip_u), i_max)) 323 | for m in range(n, max_transfers): R[m, trip_u] = i_min # NOTE(review): query_profile_all_to_all uses range(n, max_transfers+1) for the same label update -- confirm whether the last transfer level is intentionally excluded here 324 | 325 | # Trips-to-destination index is used here instead of lines-to-destination, 326 | # because footpath time deltas are tied to each trip's stop times, and can't be 327 | # generalized to lines with multiple arrival-times per stop, as it is in the algo.
328 | trips_to_dst = dict() # {trip: (i, fp_delta)} 329 | for stop_q, fp in timetable.footpaths.from_stops_to(stop_dst): 330 | if stop_q == stop_dst: fp = None 331 | for i, line in lines.lines_with_stop(stop_q): 332 | for trip in line: 333 | fp_delta = 0 if fp is None else fp.get_shortest(dts_src=trip[i].dts_arr) 334 | if fp_delta is None: continue 335 | trips_to_dst[trip] = i, fp_delta 336 | 337 | # Same as with earliest-arrival, queue set of trips reachable from stop_src, 338 | # but instead of queuing all checks (one for each trip) with same departure time, 339 | # queue one DepartureCriteriaCheck for every departure time of each trip from 340 | # these reachable stops. 341 | profile_queue = list() 342 | for stop_q, fp in timetable.footpaths.to_stops_from(stop_src): 343 | if stop_q == stop_src: fp = None 344 | if stop_q == stop_dst: 345 | # Direct src-to-dst footpath can't be easily compared to 346 | # other results, as it has no fixed departure/arrival times, 347 | # hence added here as a special "exceptional" result. 348 | results.add_exception(t.base.QueryResult(None, 0, list())) 349 | for i, line in lines.lines_with_stop(stop_q): 350 | for trip in line: 351 | fp_delta = 0 if fp is None else\ 352 | fp.get_shortest(dts_src=dts_edt, dts_dst=trip[i].dts_dep) 353 | if fp_delta is None: continue 354 | dts_min, dts_max = trip[i].dts_arr - fp_delta, trip[i].dts_dep - fp_delta 355 | if not (dts_edt <= dts_max and dts_ldt >= dts_min): continue 356 | profile_queue.append(DepartureCriteriaCheck(trip, i, min(dts_ldt, dts_max), list())) 357 | # Latest departures are processed first because labels (R) are reused for the whole query, 358 | # and journeys with later-dep-time dominate earlier, so they are processed first and all 359 | # trips with earlier departure not improving on arrival time (not passing check in enqueue()) 360 | # will be suboptimal anyway, hence skipped. 
361 | profile_queue.sort(key=op.attrgetter('dts_src'), reverse=True) # latest-to-earliest 362 | 363 | t_min_idx = dict() # indexed by n, so that it can be reused, same as R. 364 | for dts_src, checks in it.groupby(profile_queue, op.attrgetter('dts_src')): 365 | # Each iteration of this loop is same as an earliest-arrival query, 366 | # with starting set of trips (with same departure time) pulled from profile_queue. 367 | # Labels for trips (R) can be reused, cutting down amount of work for 2+ checks dramatically. 368 | n = 0 369 | for trip, stopidx, dts_src, jtrips in checks: enqueue(trip, stopidx, n, jtrips) 370 | 371 | while Q and n < max_transfers: 372 | t_min = t_min_idx.get(n, u.inf) 373 | for trip, b, e, jtrips in Q.pop(n): 374 | jtrips = jtrips + [trip] 375 | 376 | # Check if trip reaches stop_dst (or its footpath-vicinity) directly 377 | if trip in trips_to_dst: 378 | i_dst, fp_delta = trips_to_dst[trip] 379 | if b < i_dst: # can't reach previous stops, and b->b trips make no sense 380 | dts_dst = trip[i_dst].dts_arr + fp_delta 381 | if dts_dst < t_min: 382 | t_min_idx[n] = dts_dst 383 | results.add(t.base.QueryResult(dts_dst, n, jtrips, dts_src)) 384 | 385 | # Check if trip can lead to nondominated journeys, and queue trips reachable from it 386 | for i in range(b+1, e+1): # b < i <= e 387 | if trip[i].dts_arr >= t_min: break # after +1 transfer, it's guaranteed to be dominated 388 | for transfer in transfers.from_trip_stop(trip[i]): 389 | if transfer.ts_to.dts_arr >= t_min: continue 390 | enqueue(transfer.ts_to.trip, transfer.ts_to.stopidx, n+1, jtrips) 391 | 392 | n += 1 393 | Q.clear() # to flush n > max_transfers leftovers there 394 | 395 | return self.jtrips_to_journeys( 396 | timetable.footpaths, stop_src, stop_dst, dts_edt, results ) 397 | 398 | 399 | def query_profile_all_to_all(self, max_transfers=15): 400 | 'Run all-to-all profile query, yielding (stop_src, stop_labels) tuples.' 
		# NOTE(review): this is the tail of an all-to-all profile-query generator
		#  whose "def" line is above this chunk; it yields (stop_src, stop_labels)
		#  pairs consumed by build_tp_tree() below. `max_transfers` and
		#  `self.progress_iter` presumably come from that enclosing scope — confirm.
		# To avoid duplicating paper-1 algos' weird naming/types here:
		#  R -> trip_labels: Mapping[(n, Trip), int]
		#  Q -> queue: Sequence[TripSegment] (no point using Q-mapping here)

		timetable, lines, transfers = self.graph

		# Check-record for a possible departure: trip boarded at trip[stopidx],
		#  with dts_src = departure time minus the footpath delta to reach it.
		DepartureCriteriaCheck = namedtuple('DCCheck', 'trip stopidx dts_src ts_list')
		# Segment of a trip still to be scanned: stops stopidx_a..stopidx_b (exclusive of a).
		TripSegment = namedtuple('TripSeg', 'trip stopidx_a stopidx_b ts_list')
		# Pareto-set of StopLabels, dominated by (arrival time, transfer count, departure time).
		StopLabelSet = ft.partial( t.pareto.ParetoSet,
			lambda v: (v.dts_arr, len(v.ts_list) - 1, v.dts_dep) )

		stop_labels = dict() # {stop: StopLabelSet of labels with ts_list path from stop_src}
		trip_tails_checked = dict() # {(n_transfers, trip): earliest_checked_stopidx}

		def enqueue(trip, i, ts_list, _ss=t.public.SolutionStatus):
			'Ensures that each TripStop is only ever processed once via trip_tails_checked index.'
			# NOTE: `queue` here is a closure over the list rebound per-round below.
			n, i_max = len(ts_list), len(trip) - 1
			if i >= trip_tails_checked.get((n, trip), i_max): return
			queue.append(TripSegment(trip, i, trip_tails_checked.get((n, trip), i_max), ts_list.copy()))
			# Propagate the new earliest-checked index to all same-or-later
			#  trips of the line, for this and all higher transfer counts.
			for trip_u in lines.line_for_trip(trip)\
					.trips_by_relation(trip, _ss.non_dominated, _ss.equal):
				i_min = min(i, trip_tails_checked.get((n, trip_u), i_max))
				for m in range(n, max_transfers+1): trip_tails_checked[m, trip_u] = i_min

		for stop_src in timetable.stops:
			stop_labels.clear()
			trip_tails_checked.clear()

			# Collect every possible departure reachable from stop_src
			#  (directly or via footpath), then scan latest-to-earliest.
			profile_queue = list()
			for stop_q, fp in timetable.footpaths.to_stops_from(stop_src):
				if stop_q == stop_src: fp = None # boarding at the source stop itself, no walk
				for i, line in lines.lines_with_stop(stop_q):
					for trip in line:
						fp_delta = 0 if fp is None else fp.get_shortest(dts_dst=trip[i].dts_dep)
						if fp_delta is None: continue # footpath can't make this departure
						profile_queue.append(
							DepartureCriteriaCheck(trip, i, trip[i].dts_dep - fp_delta, list()) )
			profile_queue.sort(key=op.attrgetter('dts_src'), reverse=True) # latest-to-earliest

			# groupby relies on the sort above - one BFS per distinct departure time
			for dts_src, checks in it.groupby(profile_queue, op.attrgetter('dts_src')):
				queue = list()
				for trip, stopidx, dts_src, ts_list in checks: enqueue(trip, stopidx, ts_list)

				# Breadth-first scan over trip segments, one round per transfer
				for n in range(0, max_transfers):
					if not queue: break
					queue_prev, queue = queue, list()
					for trip, b, e, ts_list in queue_prev:
						ts_list = ts_list + [trip[b]] # trip[b] is transfer.ts_to - internal tree node
						for i in range(b+1, e+1): # b < i <= e
							ts = trip[i]

							# Update labels for all stops reachable from this TripStop
							for stop_q, fp in timetable.footpaths.to_stops_from(ts.stop):
								if stop_q == stop_src: continue # don't label the source itself
								fp_delta = fp.get_shortest(dts_src=trip[i].dts_arr)
								if fp_delta is None: continue
								stop_q_arr = ts.dts_arr + fp_delta
								if stop_q not in stop_labels: stop_labels[stop_q] = StopLabelSet()
								# NOTE(review): dts_src is passed as the label's dts_dep field
								#  (pareto key reads v.dts_dep) - verify StopLabel field order.
								stop_labels[stop_q].add(t.base.StopLabel(dts_src, stop_q_arr, ts_list))

							# Queue tails of all trips transferable-to from here
							for transfer in transfers.from_trip_stop(ts):
								enqueue(transfer.ts_to.trip, transfer.ts_to.stopidx, ts_list)

			yield stop_src, stop_labels

	@timer
	def build_tp_tree(self, **query_kws):
		'''Run all-to-all profile query to build Transfer-Patterns
			prefix-tree of stop_dst->stop_src Line connections.

			Returns a t.tp.TPTree with one per-source subtree: paths run
			dst-stop -> LineStop -> ... -> LineStop -> src node, so they can
			later be walked backwards by build_query_tree().
			query_kws are passed through to query_profile_all_to_all().'''
		timetable, lines, transfers = self.graph

		tree = t.tp.TPTree() # adj-lists, with nodes being either Stop or Line objects
		subtree_stats = Counter() # running totals for the progress log below

		progress = self.progress_iter('transfer-patterns', len(timetable.stops))
		for stop_src, stop_labels in self.query_profile_all_to_all(**query_kws):
			# Mean per-subtree nodes/depth/dst-count so far, for progress display only
			means = subtree_stats['count']
			if means == 0: means = [0, 0, 0]
			else: means = list(int(subtree_stats[k] / means) for k in ['nodes', 'depth', 'dst'])
			progress.send([
				'tree-nodes={:,} (unique={:,}),'
					' subtree means: nodes={:,} depth={:,} breadth/dst-count={:,}',
				sum(tree.stats.total.values()), len(tree.stats.total) ] + means)

			# Record each pareto-optimal label as a dst->...->src path of LineStop nodes
			subtree, subtree_depth = tree[stop_src], list()
			node_src = subtree.node(stop_src, t='src')
			for stop_dst, sl_set in stop_labels.items():
				node_dst = subtree.node(stop_dst)
				for sl in sl_set:
					node, depth = node_dst, 0
					for ts in reversed(sl.ts_list):
						# no_path_to=node prevents reusing a LineStop node that
						#  would create a path back to the node being linked from
						node_prev, node = node, subtree.node(
							t.base.LineStop(lines.line_for_trip(ts.trip).id, ts.stopidx), no_path_to=node )
						node_prev.edges_to.add(node)
						depth += 1
					node.edges_to.add(node_src)
					subtree_depth.append(depth)
			subtree_stats.update(dict( count=1, dst=len(stop_labels),
				depth=u.max(subtree_depth, 0), nodes=tree.stats.prefix[stop_src] ))

		self.log.debug(
			'Search-tree stats: nodes={0.nodes:,} (unique={0.nodes_unique:,},'
				' src={0.t_src:,}, dst={0.t_stop:,}, line-stops={0.t_line:,}), edges={0.edges:,}',
			tree.stat_counts() )
		return tree

	def build_tp_engine(self, tp_tree=None, **tree_opts):
		'''Return a TBTPRoutingEngine over this graph,
			building the Transfer-Patterns tree first if not supplied.'''
		if not tp_tree: tp_tree = self.build_tp_tree(**tree_opts)
		return TBTPRoutingEngine(self.graph, tp_tree, self.conf, timer_func=self.timer_wrapper)



class TBTPRoutingEngine:
	'''Routing engine that answers queries using a precomputed
		Transfer-Patterns prefix-tree (as built by build_tp_tree above).'''

	graph = tree = None # set in __init__; class-level defaults only

	def __init__(self, graph, tp_tree, conf=None, timer_func=None):
		# timer_func, if given, wraps calls for timing/instrumentation;
		#  otherwise a pass-through lambda is used.
		self.conf, self.log = conf or EngineConf(), u.get_logger('tb.tp')
		self.graph, self.tree = graph, tp_tree
		self.timer_wrapper = timer_func if timer_func else lambda f,*a,**k: f(*a,**k)
		self.jtrips_to_journeys = ft.partial(self.timer_wrapper, jtrips_to_journeys)

	@timer
	def build_query_tree(self, stop_src, stop_dst):
		'''Extract the src->...->dst portion of the Transfer-Patterns
			subtree for stop_src into a new TPTree with edges reversed
			(src->dst direction), reusing LineStop nodes.
			Returns None if no src-dst paths exist in the tree.'''
		query_tree = t.tp.TPTree(prefix=stop_src)
		subtree = self.tree[stop_src]

		# BFS from the dst node; tp_tree edges point towards stop_src
		queue = [(subtree[stop_dst], list())]
		while queue:
			queue_prev, queue = queue, list()
			for node, path in queue_prev:
				path = path + [node]
				for k in node.edges_to:
					node_k = subtree[k]
					if node_k.value != stop_src:
						queue.append((node_k, path.copy()))
						continue

					# Add src->...->dst path to query_tree, reusing LineStop nodes
					node = query_tree.node(node_k)
					for node_next in reversed(path): # reverse() because tp_tree has dst->...->src paths
						node_next = query_tree.node(node_next, no_path_to=node)
						node.edges_to.add(node_next)
						node = node_next

		qt_stats = query_tree.stat_counts()
		self.log.debug(
			'Query-tree stats: nodes={0.nodes:,} (unique={0.nodes_unique:,},'
				' stops={0.t_stop:,}, line-stops={0.t_line:,}), edges={0.edges:,}', qt_stats )
		return query_tree if qt_stats.nodes > 0 else None

	@timer
	def query_profile(self, stop_src, stop_dst, dts_edt, dts_ldt, query_tree=..., max_transfers=15):
		'''Profile query over the [dts_edt, dts_ldt] departure window.

			Runs a label-correcting search over the query tree
			(built on demand unless query_tree is passed explicitly;
			pass None to short-circuit to an empty result).
			Returns journeys via jtrips_to_journeys().
			NOTE(review): max_transfers is accepted but not referenced
			in this body - confirm whether it is intentionally unused here.'''
		timetable, lines, transfers = self.graph
		if query_tree is ...: query_tree = self.build_query_tree(stop_src, stop_dst)
		if not query_tree: return list()

		# Label: earliest usable departure, current TripStop, transfer count, trips so far
		NodeLabel = namedtuple('NodeLabel', 'dts_start ts n journey')
		NodeLabelCheck = namedtuple('NodeLabelChk', 'node label')

		node_labels = defaultdict(ft.partial(t.pareto.ParetoSet, 'ts.dts_arr n dts_start'))
		# Pop order: latest departure first, then earliest arrival, then fewest transfers
		prio_queue = t.pareto.PrioQueue(lambda v: (-v.label.dts_start, v.label.ts.dts_arr, v.label.n))
		results = t.pareto.QueryResultParetoSet()

		# Queue starting points for each trip of the lines reachable from stop_src node
		for node in query_tree[stop_src].edges_to:
			if node.value == stop_dst:
				# Direct src-to-dst footpath can't be easily compared to
				#  other results, as it has no fixed departure/arrival times,
				#  hence added here as a special "exceptional" result.
				results.add_exception(t.base.QueryResult(None, 0, list()))
				continue
			ls_line, ls_stopidx = lines[node.value.line_id], node.value.stopidx
			ls_stop = ls_line.stops[ls_stopidx]
			fp = None if ls_stop == stop_src else timetable.footpaths.get(stop_src, ls_stop)
			for trip in ls_line:
				ts = trip[ls_stopidx]
				fp_delta = 0 if fp is None else fp.get_shortest(dts_src=dts_edt, dts_dst=ts.dts_dep)
				if fp_delta is None: continue
				# Departure window at stop_src that can catch this trip
				dts_min, dts_max = ts.dts_arr - fp_delta, ts.dts_dep - fp_delta
				if not (dts_edt <= dts_max and dts_ldt >= dts_min): continue
				prio_queue.push(NodeLabelCheck(
					node, NodeLabel(min(dts_ldt, dts_max), ts, 0, [trip]) ))

		# Main loop
		while prio_queue:
			node_src, label_src = prio_queue.pop()

			for node in node_src.edges_to:
				if node.value != stop_dst:
					ls_line, ls_stopidx = lines[node.value.line_id], node.value.stopidx
					stop = ls_line.stops[ls_stopidx]
				else: ls_line, stop = None, node.value # ... -> stop_dst

				if not ls_line: # lineN -> stop_dst
					# Earliest arrival at stop_dst over all remaining stops of current trip
					dts = min(
						(ts.dts_arr + timetable.footpaths.time_delta(
							ts.stop, stop, dts_src=ts.dts_arr, default=u.inf ))
						for ts in label_src.ts.trip[label_src.ts.stopidx+1:] )
					assert dts < u.inf # must be at least one, otherwise tp_tree is wrong
					node_label = NodeLabel( label_src.dts_start,
						t.public.TripStop.dummy_for_stop(stop, dts_arr=dts),
						label_src.n, label_src.journey )

				else: # lineN -> lineN+1
					# All transfers from remaining stops of current trip
					#  that land on this exact LineStop of the next line
					node_transfers = list( transfer
						for ts in label_src.ts.trip[label_src.ts.stopidx+1:]
						for transfer in transfers.from_trip_stop(ts)
						if transfer.ts_to.stopidx == ls_stopidx
							and lines.line_for_trip(transfer.ts_to.trip) == ls_line )
					if node_transfers:
						transfer = min(node_transfers, key=op.attrgetter('ts_to.dts_arr'))
						node_label = NodeLabel( label_src.dts_start,
							transfer.ts_to, label_src.n+1, label_src.journey + [transfer.ts_to.trip] )
					else: node_label = None # only possible for other trips of node_src

				# ParetoSet.add() returns truthy only if label is non-dominated
				if node_label and node_labels[node].add(node_label):
					prio_queue.push(NodeLabelCheck(node, node_label))

		for label in node_labels[query_tree[stop_dst]]:
			results.add(t.base.QueryResult(label.ts.dts_arr, label.n, label.journey, label.dts_start))
		return self.jtrips_to_journeys(timetable.footpaths, stop_src, stop_dst, dts_edt, results)