├── README.md ├── display.py └── lib ├── __init__.py ├── __init__.pyc ├── google ├── __init__.py ├── __init__.pyc └── protobuf │ ├── __init__.py │ ├── __init__.pyc │ ├── compiler │ ├── __init__.py │ ├── __init__.pyc │ ├── plugin_pb2.py │ └── plugin_pb2.pyc │ ├── descriptor.py │ ├── descriptor.pyc │ ├── descriptor_pb2.py │ ├── descriptor_pb2.pyc │ ├── internal │ ├── __init__.py │ ├── __init__.pyc │ ├── api_implementation.py │ ├── api_implementation.pyc │ ├── containers.py │ ├── containers.pyc │ ├── cpp_message.py │ ├── cpp_message.pyc │ ├── decoder.py │ ├── decoder.pyc │ ├── encoder.py │ ├── encoder.pyc │ ├── message_listener.py │ ├── message_listener.pyc │ ├── python_message.py │ ├── python_message.pyc │ ├── type_checkers.py │ ├── type_checkers.pyc │ ├── wire_format.py │ └── wire_format.pyc │ ├── message.py │ ├── message.pyc │ ├── reflection.py │ ├── reflection.pyc │ ├── service.py │ ├── service.pyc │ ├── service_reflection.py │ ├── service_reflection.pyc │ ├── text_format.py │ └── text_format.pyc ├── gtfs-realtime.proto ├── gtfs_realtime_pb2.py └── gtfs_realtime_pb2.pyc /README.md: -------------------------------------------------------------------------------- 1 | gtfs-realtime-display 2 | ===================== 3 | 4 | Google Transit Feed Specification viewer 5 | 6 | This is a super-simple script that will show the contents of a Protocol Buffer file that follows the GTFS-realtime Protocol Buffer standard. 7 | 8 | **Usage** 9 | > python display.py --file FeedFile.pb 10 | 11 | or 12 | > python display.py --url 'http://www.example.com/FeedFile.pb' 13 | 14 | And saving the output to a file is easy: 15 | > python display.py --file FeedFile.pb > output.txt 16 | 17 | **Dependencies** 18 | * Python 2.x (not sure if Python 3 will work) 19 | 20 | *Note:* I removed the dependency to install Google's Python [Protocol Buffers](http://code.google.com/p/protobuf/downloads/list) module, as this can be confusing to those just starting out with Python (the audience of this script). 
-------------------------------------------------------------------------------- /display.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/python 2 | 3 | import argparse 4 | import sys 5 | import urllib2 6 | 7 | sys.path.append("lib") 8 | 9 | from lib import gtfs_realtime_pb2 10 | 11 | def Display(feed_message): 12 | print feed_message 13 | 14 | parser = argparse.ArgumentParser(description='Read a GTFS-realtime Protocol Buffer file.') 15 | parser.add_argument("-f", "--file", dest="file", 16 | help="file to read", metavar="FILE") 17 | parser.add_argument("-u", "--url", dest="url", 18 | help="URL to read") 19 | 20 | args = vars(parser.parse_args()) 21 | 22 | if args['file'] is None and args['url'] is None: 23 | raise ValueError('File or URL required.') 24 | 25 | feed_message = gtfs_realtime_pb2.FeedMessage() 26 | 27 | if args['file']: 28 | f = open(args['file'], "rb") 29 | feed_message.ParseFromString(f.read()) 30 | f.close() 31 | else: 32 | response = urllib2.urlopen(args['url']) 33 | feed_message.ParseFromString(response.read()) 34 | 35 | Display(feed_message) 36 | -------------------------------------------------------------------------------- /lib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/__init__.py -------------------------------------------------------------------------------- /lib/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/__init__.pyc -------------------------------------------------------------------------------- /lib/google/__init__.py: -------------------------------------------------------------------------------- 1 | __import__('pkg_resources').declare_namespace(__name__) 2 | 
-------------------------------------------------------------------------------- /lib/google/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/google/__init__.pyc -------------------------------------------------------------------------------- /lib/google/protobuf/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/google/protobuf/__init__.py -------------------------------------------------------------------------------- /lib/google/protobuf/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/google/protobuf/__init__.pyc -------------------------------------------------------------------------------- /lib/google/protobuf/compiler/__init__.py: -------------------------------------------------------------------------------- 1 | __import__("pkg_resources").declare_namespace(__name__) -------------------------------------------------------------------------------- /lib/google/protobuf/compiler/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/google/protobuf/compiler/__init__.pyc -------------------------------------------------------------------------------- /lib/google/protobuf/compiler/plugin_pb2.py: -------------------------------------------------------------------------------- 1 | # Generated by the protocol buffer compiler. DO NOT EDIT! 
2 | 3 | from google.protobuf import descriptor 4 | from google.protobuf import message 5 | from google.protobuf import reflection 6 | from google.protobuf import descriptor_pb2 7 | # @@protoc_insertion_point(imports) 8 | 9 | 10 | import google.protobuf.descriptor_pb2 11 | 12 | DESCRIPTOR = descriptor.FileDescriptor( 13 | name='google/protobuf/compiler/plugin.proto', 14 | package='google.protobuf.compiler', 15 | serialized_pb='\n%google/protobuf/compiler/plugin.proto\x12\x18google.protobuf.compiler\x1a google/protobuf/descriptor.proto\"}\n\x14\x43odeGeneratorRequest\x12\x18\n\x10\x66ile_to_generate\x18\x01 \x03(\t\x12\x11\n\tparameter\x18\x02 \x01(\t\x12\x38\n\nproto_file\x18\x0f \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\"\xaa\x01\n\x15\x43odeGeneratorResponse\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\x42\n\x04\x66ile\x18\x0f \x03(\x0b\x32\x34.google.protobuf.compiler.CodeGeneratorResponse.File\x1a>\n\x04\x46ile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0finsertion_point\x18\x02 \x01(\t\x12\x0f\n\x07\x63ontent\x18\x0f \x01(\t') 16 | 17 | 18 | 19 | 20 | _CODEGENERATORREQUEST = descriptor.Descriptor( 21 | name='CodeGeneratorRequest', 22 | full_name='google.protobuf.compiler.CodeGeneratorRequest', 23 | filename=None, 24 | file=DESCRIPTOR, 25 | containing_type=None, 26 | fields=[ 27 | descriptor.FieldDescriptor( 28 | name='file_to_generate', full_name='google.protobuf.compiler.CodeGeneratorRequest.file_to_generate', index=0, 29 | number=1, type=9, cpp_type=9, label=3, 30 | has_default_value=False, default_value=[], 31 | message_type=None, enum_type=None, containing_type=None, 32 | is_extension=False, extension_scope=None, 33 | options=None), 34 | descriptor.FieldDescriptor( 35 | name='parameter', full_name='google.protobuf.compiler.CodeGeneratorRequest.parameter', index=1, 36 | number=2, type=9, cpp_type=9, label=1, 37 | has_default_value=False, default_value=unicode("", "utf-8"), 38 | message_type=None, enum_type=None, containing_type=None, 39 | 
is_extension=False, extension_scope=None, 40 | options=None), 41 | descriptor.FieldDescriptor( 42 | name='proto_file', full_name='google.protobuf.compiler.CodeGeneratorRequest.proto_file', index=2, 43 | number=15, type=11, cpp_type=10, label=3, 44 | has_default_value=False, default_value=[], 45 | message_type=None, enum_type=None, containing_type=None, 46 | is_extension=False, extension_scope=None, 47 | options=None), 48 | ], 49 | extensions=[ 50 | ], 51 | nested_types=[], 52 | enum_types=[ 53 | ], 54 | options=None, 55 | is_extendable=False, 56 | extension_ranges=[], 57 | serialized_start=101, 58 | serialized_end=226, 59 | ) 60 | 61 | 62 | _CODEGENERATORRESPONSE_FILE = descriptor.Descriptor( 63 | name='File', 64 | full_name='google.protobuf.compiler.CodeGeneratorResponse.File', 65 | filename=None, 66 | file=DESCRIPTOR, 67 | containing_type=None, 68 | fields=[ 69 | descriptor.FieldDescriptor( 70 | name='name', full_name='google.protobuf.compiler.CodeGeneratorResponse.File.name', index=0, 71 | number=1, type=9, cpp_type=9, label=1, 72 | has_default_value=False, default_value=unicode("", "utf-8"), 73 | message_type=None, enum_type=None, containing_type=None, 74 | is_extension=False, extension_scope=None, 75 | options=None), 76 | descriptor.FieldDescriptor( 77 | name='insertion_point', full_name='google.protobuf.compiler.CodeGeneratorResponse.File.insertion_point', index=1, 78 | number=2, type=9, cpp_type=9, label=1, 79 | has_default_value=False, default_value=unicode("", "utf-8"), 80 | message_type=None, enum_type=None, containing_type=None, 81 | is_extension=False, extension_scope=None, 82 | options=None), 83 | descriptor.FieldDescriptor( 84 | name='content', full_name='google.protobuf.compiler.CodeGeneratorResponse.File.content', index=2, 85 | number=15, type=9, cpp_type=9, label=1, 86 | has_default_value=False, default_value=unicode("", "utf-8"), 87 | message_type=None, enum_type=None, containing_type=None, 88 | is_extension=False, extension_scope=None, 89 | 
options=None), 90 | ], 91 | extensions=[ 92 | ], 93 | nested_types=[], 94 | enum_types=[ 95 | ], 96 | options=None, 97 | is_extendable=False, 98 | extension_ranges=[], 99 | serialized_start=337, 100 | serialized_end=399, 101 | ) 102 | 103 | _CODEGENERATORRESPONSE = descriptor.Descriptor( 104 | name='CodeGeneratorResponse', 105 | full_name='google.protobuf.compiler.CodeGeneratorResponse', 106 | filename=None, 107 | file=DESCRIPTOR, 108 | containing_type=None, 109 | fields=[ 110 | descriptor.FieldDescriptor( 111 | name='error', full_name='google.protobuf.compiler.CodeGeneratorResponse.error', index=0, 112 | number=1, type=9, cpp_type=9, label=1, 113 | has_default_value=False, default_value=unicode("", "utf-8"), 114 | message_type=None, enum_type=None, containing_type=None, 115 | is_extension=False, extension_scope=None, 116 | options=None), 117 | descriptor.FieldDescriptor( 118 | name='file', full_name='google.protobuf.compiler.CodeGeneratorResponse.file', index=1, 119 | number=15, type=11, cpp_type=10, label=3, 120 | has_default_value=False, default_value=[], 121 | message_type=None, enum_type=None, containing_type=None, 122 | is_extension=False, extension_scope=None, 123 | options=None), 124 | ], 125 | extensions=[ 126 | ], 127 | nested_types=[_CODEGENERATORRESPONSE_FILE, ], 128 | enum_types=[ 129 | ], 130 | options=None, 131 | is_extendable=False, 132 | extension_ranges=[], 133 | serialized_start=229, 134 | serialized_end=399, 135 | ) 136 | 137 | _CODEGENERATORREQUEST.fields_by_name['proto_file'].message_type = google.protobuf.descriptor_pb2._FILEDESCRIPTORPROTO 138 | _CODEGENERATORRESPONSE_FILE.containing_type = _CODEGENERATORRESPONSE; 139 | _CODEGENERATORRESPONSE.fields_by_name['file'].message_type = _CODEGENERATORRESPONSE_FILE 140 | DESCRIPTOR.message_types_by_name['CodeGeneratorRequest'] = _CODEGENERATORREQUEST 141 | DESCRIPTOR.message_types_by_name['CodeGeneratorResponse'] = _CODEGENERATORRESPONSE 142 | 143 | class CodeGeneratorRequest(message.Message): 144 | 
__metaclass__ = reflection.GeneratedProtocolMessageType 145 | DESCRIPTOR = _CODEGENERATORREQUEST 146 | 147 | # @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorRequest) 148 | 149 | class CodeGeneratorResponse(message.Message): 150 | __metaclass__ = reflection.GeneratedProtocolMessageType 151 | 152 | class File(message.Message): 153 | __metaclass__ = reflection.GeneratedProtocolMessageType 154 | DESCRIPTOR = _CODEGENERATORRESPONSE_FILE 155 | 156 | # @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorResponse.File) 157 | DESCRIPTOR = _CODEGENERATORRESPONSE 158 | 159 | # @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorResponse) 160 | 161 | # @@protoc_insertion_point(module_scope) 162 | -------------------------------------------------------------------------------- /lib/google/protobuf/compiler/plugin_pb2.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/google/protobuf/compiler/plugin_pb2.pyc -------------------------------------------------------------------------------- /lib/google/protobuf/descriptor.py: -------------------------------------------------------------------------------- 1 | # Protocol Buffers - Google's data interchange format 2 | # Copyright 2008 Google Inc. All rights reserved. 3 | # http://code.google.com/p/protobuf/ 4 | # 5 | # Redistribution and use in source and binary forms, with or without 6 | # modification, are permitted provided that the following conditions are 7 | # met: 8 | # 9 | # * Redistributions of source code must retain the above copyright 10 | # notice, this list of conditions and the following disclaimer. 
11 | # * Redistributions in binary form must reproduce the above 12 | # copyright notice, this list of conditions and the following disclaimer 13 | # in the documentation and/or other materials provided with the 14 | # distribution. 15 | # * Neither the name of Google Inc. nor the names of its 16 | # contributors may be used to endorse or promote products derived from 17 | # this software without specific prior written permission. 18 | # 19 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | """Descriptors essentially contain exactly the information found in a .proto 32 | file, in types that make this information accessible in Python. 33 | """ 34 | 35 | __author__ = 'robinson@google.com (Will Robinson)' 36 | 37 | 38 | from google.protobuf.internal import api_implementation 39 | 40 | 41 | if api_implementation.Type() == 'cpp': 42 | from google.protobuf.internal import cpp_message 43 | 44 | 45 | class Error(Exception): 46 | """Base error for this module.""" 47 | 48 | 49 | class DescriptorBase(object): 50 | 51 | """Descriptors base class. 52 | 53 | This class is the base of all descriptor classes. It provides common options 54 | related functionaility. 
55 | 56 | Attributes: 57 | has_options: True if the descriptor has non-default options. Usually it 58 | is not necessary to read this -- just call GetOptions() which will 59 | happily return the default instance. However, it's sometimes useful 60 | for efficiency, and also useful inside the protobuf implementation to 61 | avoid some bootstrapping issues. 62 | """ 63 | 64 | def __init__(self, options, options_class_name): 65 | """Initialize the descriptor given its options message and the name of the 66 | class of the options message. The name of the class is required in case 67 | the options message is None and has to be created. 68 | """ 69 | self._options = options 70 | self._options_class_name = options_class_name 71 | 72 | # Does this descriptor have non-default options? 73 | self.has_options = options is not None 74 | 75 | def GetOptions(self): 76 | """Retrieves descriptor options. 77 | 78 | This method returns the options set or creates the default options for the 79 | descriptor. 80 | """ 81 | if self._options: 82 | return self._options 83 | from google.protobuf import descriptor_pb2 84 | try: 85 | options_class = getattr(descriptor_pb2, self._options_class_name) 86 | except AttributeError: 87 | raise RuntimeError('Unknown options class name %s!' % 88 | (self._options_class_name)) 89 | self._options = options_class() 90 | return self._options 91 | 92 | 93 | class _NestedDescriptorBase(DescriptorBase): 94 | """Common class for descriptors that can be nested.""" 95 | 96 | def __init__(self, options, options_class_name, name, full_name, 97 | file, containing_type, serialized_start=None, 98 | serialized_end=None): 99 | """Constructor. 100 | 101 | Args: 102 | options: Protocol message options or None 103 | to use default message options. 104 | options_class_name: (str) The class name of the above options. 105 | 106 | name: (str) Name of this protocol message type. 
107 | full_name: (str) Fully-qualified name of this protocol message type, 108 | which will include protocol "package" name and the name of any 109 | enclosing types. 110 | file: (FileDescriptor) Reference to file info. 111 | containing_type: if provided, this is a nested descriptor, with this 112 | descriptor as parent, otherwise None. 113 | serialized_start: The start index (inclusive) in block in the 114 | file.serialized_pb that describes this descriptor. 115 | serialized_end: The end index (exclusive) in block in the 116 | file.serialized_pb that describes this descriptor. 117 | """ 118 | super(_NestedDescriptorBase, self).__init__( 119 | options, options_class_name) 120 | 121 | self.name = name 122 | # TODO(falk): Add function to calculate full_name instead of having it in 123 | # memory? 124 | self.full_name = full_name 125 | self.file = file 126 | self.containing_type = containing_type 127 | 128 | self._serialized_start = serialized_start 129 | self._serialized_end = serialized_end 130 | 131 | def GetTopLevelContainingType(self): 132 | """Returns the root if this is a nested type, or itself if its the root.""" 133 | desc = self 134 | while desc.containing_type is not None: 135 | desc = desc.containing_type 136 | return desc 137 | 138 | def CopyToProto(self, proto): 139 | """Copies this to the matching proto in descriptor_pb2. 140 | 141 | Args: 142 | proto: An empty proto instance from descriptor_pb2. 143 | 144 | Raises: 145 | Error: If self couldnt be serialized, due to to few constructor arguments. 146 | """ 147 | if (self.file is not None and 148 | self._serialized_start is not None and 149 | self._serialized_end is not None): 150 | proto.ParseFromString(self.file.serialized_pb[ 151 | self._serialized_start:self._serialized_end]) 152 | else: 153 | raise Error('Descriptor does not contain serialization.') 154 | 155 | 156 | class Descriptor(_NestedDescriptorBase): 157 | 158 | """Descriptor for a protocol message type. 
159 | 160 | A Descriptor instance has the following attributes: 161 | 162 | name: (str) Name of this protocol message type. 163 | full_name: (str) Fully-qualified name of this protocol message type, 164 | which will include protocol "package" name and the name of any 165 | enclosing types. 166 | 167 | containing_type: (Descriptor) Reference to the descriptor of the 168 | type containing us, or None if this is top-level. 169 | 170 | fields: (list of FieldDescriptors) Field descriptors for all 171 | fields in this type. 172 | fields_by_number: (dict int -> FieldDescriptor) Same FieldDescriptor 173 | objects as in |fields|, but indexed by "number" attribute in each 174 | FieldDescriptor. 175 | fields_by_name: (dict str -> FieldDescriptor) Same FieldDescriptor 176 | objects as in |fields|, but indexed by "name" attribute in each 177 | FieldDescriptor. 178 | 179 | nested_types: (list of Descriptors) Descriptor references 180 | for all protocol message types nested within this one. 181 | nested_types_by_name: (dict str -> Descriptor) Same Descriptor 182 | objects as in |nested_types|, but indexed by "name" attribute 183 | in each Descriptor. 184 | 185 | enum_types: (list of EnumDescriptors) EnumDescriptor references 186 | for all enums contained within this type. 187 | enum_types_by_name: (dict str ->EnumDescriptor) Same EnumDescriptor 188 | objects as in |enum_types|, but indexed by "name" attribute 189 | in each EnumDescriptor. 190 | enum_values_by_name: (dict str -> EnumValueDescriptor) Dict mapping 191 | from enum value name to EnumValueDescriptor for that value. 192 | 193 | extensions: (list of FieldDescriptor) All extensions defined directly 194 | within this message type (NOT within a nested type). 195 | extensions_by_name: (dict, string -> FieldDescriptor) Same FieldDescriptor 196 | objects as |extensions|, but indexed by "name" attribute of each 197 | FieldDescriptor. 198 | 199 | is_extendable: Does this type define any extension ranges? 
200 | 201 | options: (descriptor_pb2.MessageOptions) Protocol message options or None 202 | to use default message options. 203 | 204 | file: (FileDescriptor) Reference to file descriptor. 205 | """ 206 | 207 | def __init__(self, name, full_name, filename, containing_type, fields, 208 | nested_types, enum_types, extensions, options=None, 209 | is_extendable=True, extension_ranges=None, file=None, 210 | serialized_start=None, serialized_end=None): 211 | """Arguments to __init__() are as described in the description 212 | of Descriptor fields above. 213 | 214 | Note that filename is an obsolete argument, that is not used anymore. 215 | Please use file.name to access this as an attribute. 216 | """ 217 | super(Descriptor, self).__init__( 218 | options, 'MessageOptions', name, full_name, file, 219 | containing_type, serialized_start=serialized_start, 220 | serialized_end=serialized_start) 221 | 222 | # We have fields in addition to fields_by_name and fields_by_number, 223 | # so that: 224 | # 1. Clients can index fields by "order in which they're listed." 225 | # 2. Clients can easily iterate over all fields with the terse 226 | # syntax: for f in descriptor.fields: ... 
227 | self.fields = fields 228 | for field in self.fields: 229 | field.containing_type = self 230 | self.fields_by_number = dict((f.number, f) for f in fields) 231 | self.fields_by_name = dict((f.name, f) for f in fields) 232 | 233 | self.nested_types = nested_types 234 | self.nested_types_by_name = dict((t.name, t) for t in nested_types) 235 | 236 | self.enum_types = enum_types 237 | for enum_type in self.enum_types: 238 | enum_type.containing_type = self 239 | self.enum_types_by_name = dict((t.name, t) for t in enum_types) 240 | self.enum_values_by_name = dict( 241 | (v.name, v) for t in enum_types for v in t.values) 242 | 243 | self.extensions = extensions 244 | for extension in self.extensions: 245 | extension.extension_scope = self 246 | self.extensions_by_name = dict((f.name, f) for f in extensions) 247 | self.is_extendable = is_extendable 248 | self.extension_ranges = extension_ranges 249 | 250 | self._serialized_start = serialized_start 251 | self._serialized_end = serialized_end 252 | 253 | def CopyToProto(self, proto): 254 | """Copies this to a descriptor_pb2.DescriptorProto. 255 | 256 | Args: 257 | proto: An empty descriptor_pb2.DescriptorProto. 258 | """ 259 | # This function is overriden to give a better doc comment. 260 | super(Descriptor, self).CopyToProto(proto) 261 | 262 | 263 | # TODO(robinson): We should have aggressive checking here, 264 | # for example: 265 | # * If you specify a repeated field, you should not be allowed 266 | # to specify a default value. 267 | # * [Other examples here as needed]. 268 | # 269 | # TODO(robinson): for this and other *Descriptor classes, we 270 | # might also want to lock things down aggressively (e.g., 271 | # prevent clients from setting the attributes). Having 272 | # stronger invariants here in general will reduce the number 273 | # of runtime checks we must do in reflection.py... 274 | class FieldDescriptor(DescriptorBase): 275 | 276 | """Descriptor for a single field in a .proto file. 
277 | 278 | A FieldDescriptor instance has the following attriubtes: 279 | 280 | name: (str) Name of this field, exactly as it appears in .proto. 281 | full_name: (str) Name of this field, including containing scope. This is 282 | particularly relevant for extensions. 283 | index: (int) Dense, 0-indexed index giving the order that this 284 | field textually appears within its message in the .proto file. 285 | number: (int) Tag number declared for this field in the .proto file. 286 | 287 | type: (One of the TYPE_* constants below) Declared type. 288 | cpp_type: (One of the CPPTYPE_* constants below) C++ type used to 289 | represent this field. 290 | 291 | label: (One of the LABEL_* constants below) Tells whether this 292 | field is optional, required, or repeated. 293 | has_default_value: (bool) True if this field has a default value defined, 294 | otherwise false. 295 | default_value: (Varies) Default value of this field. Only 296 | meaningful for non-repeated scalar fields. Repeated fields 297 | should always set this to [], and non-repeated composite 298 | fields should always set this to None. 299 | 300 | containing_type: (Descriptor) Descriptor of the protocol message 301 | type that contains this field. Set by the Descriptor constructor 302 | if we're passed into one. 303 | Somewhat confusingly, for extension fields, this is the 304 | descriptor of the EXTENDED message, not the descriptor 305 | of the message containing this field. (See is_extension and 306 | extension_scope below). 307 | message_type: (Descriptor) If a composite field, a descriptor 308 | of the message type contained in this field. Otherwise, this is None. 309 | enum_type: (EnumDescriptor) If this field contains an enum, a 310 | descriptor of that enum. Otherwise, this is None. 311 | 312 | is_extension: True iff this describes an extension field. 313 | extension_scope: (Descriptor) Only meaningful if is_extension is True. 314 | Gives the message that immediately contains this extension field. 
315 | Will be None iff we're a top-level (file-level) extension field. 316 | 317 | options: (descriptor_pb2.FieldOptions) Protocol message field options or 318 | None to use default field options. 319 | """ 320 | 321 | # Must be consistent with C++ FieldDescriptor::Type enum in 322 | # descriptor.h. 323 | # 324 | # TODO(robinson): Find a way to eliminate this repetition. 325 | TYPE_DOUBLE = 1 326 | TYPE_FLOAT = 2 327 | TYPE_INT64 = 3 328 | TYPE_UINT64 = 4 329 | TYPE_INT32 = 5 330 | TYPE_FIXED64 = 6 331 | TYPE_FIXED32 = 7 332 | TYPE_BOOL = 8 333 | TYPE_STRING = 9 334 | TYPE_GROUP = 10 335 | TYPE_MESSAGE = 11 336 | TYPE_BYTES = 12 337 | TYPE_UINT32 = 13 338 | TYPE_ENUM = 14 339 | TYPE_SFIXED32 = 15 340 | TYPE_SFIXED64 = 16 341 | TYPE_SINT32 = 17 342 | TYPE_SINT64 = 18 343 | MAX_TYPE = 18 344 | 345 | # Must be consistent with C++ FieldDescriptor::CppType enum in 346 | # descriptor.h. 347 | # 348 | # TODO(robinson): Find a way to eliminate this repetition. 349 | CPPTYPE_INT32 = 1 350 | CPPTYPE_INT64 = 2 351 | CPPTYPE_UINT32 = 3 352 | CPPTYPE_UINT64 = 4 353 | CPPTYPE_DOUBLE = 5 354 | CPPTYPE_FLOAT = 6 355 | CPPTYPE_BOOL = 7 356 | CPPTYPE_ENUM = 8 357 | CPPTYPE_STRING = 9 358 | CPPTYPE_MESSAGE = 10 359 | MAX_CPPTYPE = 10 360 | 361 | # Must be consistent with C++ FieldDescriptor::Label enum in 362 | # descriptor.h. 363 | # 364 | # TODO(robinson): Find a way to eliminate this repetition. 365 | LABEL_OPTIONAL = 1 366 | LABEL_REQUIRED = 2 367 | LABEL_REPEATED = 3 368 | MAX_LABEL = 3 369 | 370 | def __init__(self, name, full_name, index, number, type, cpp_type, label, 371 | default_value, message_type, enum_type, containing_type, 372 | is_extension, extension_scope, options=None, 373 | has_default_value=True): 374 | """The arguments are as described in the description of FieldDescriptor 375 | attributes above. 376 | 377 | Note that containing_type may be None, and may be set later if necessary 378 | (to deal with circular references between message types, for example). 
379 | Likewise for extension_scope. 380 | """ 381 | super(FieldDescriptor, self).__init__(options, 'FieldOptions') 382 | self.name = name 383 | self.full_name = full_name 384 | self.index = index 385 | self.number = number 386 | self.type = type 387 | self.cpp_type = cpp_type 388 | self.label = label 389 | self.has_default_value = has_default_value 390 | self.default_value = default_value 391 | self.containing_type = containing_type 392 | self.message_type = message_type 393 | self.enum_type = enum_type 394 | self.is_extension = is_extension 395 | self.extension_scope = extension_scope 396 | if api_implementation.Type() == 'cpp': 397 | if is_extension: 398 | self._cdescriptor = cpp_message.GetExtensionDescriptor(full_name) 399 | else: 400 | self._cdescriptor = cpp_message.GetFieldDescriptor(full_name) 401 | else: 402 | self._cdescriptor = None 403 | 404 | 405 | class EnumDescriptor(_NestedDescriptorBase): 406 | 407 | """Descriptor for an enum defined in a .proto file. 408 | 409 | An EnumDescriptor instance has the following attributes: 410 | 411 | name: (str) Name of the enum type. 412 | full_name: (str) Full name of the type, including package name 413 | and any enclosing type(s). 414 | 415 | values: (list of EnumValueDescriptors) List of the values 416 | in this enum. 417 | values_by_name: (dict str -> EnumValueDescriptor) Same as |values|, 418 | but indexed by the "name" field of each EnumValueDescriptor. 419 | values_by_number: (dict int -> EnumValueDescriptor) Same as |values|, 420 | but indexed by the "number" field of each EnumValueDescriptor. 421 | containing_type: (Descriptor) Descriptor of the immediate containing 422 | type of this enum, or None if this is an enum defined at the 423 | top level in a .proto file. Set by Descriptor's constructor 424 | if we're passed into one. 425 | file: (FileDescriptor) Reference to file descriptor. 426 | options: (descriptor_pb2.EnumOptions) Enum options message or 427 | None to use default enum options. 
428 | """ 429 | 430 | def __init__(self, name, full_name, filename, values, 431 | containing_type=None, options=None, file=None, 432 | serialized_start=None, serialized_end=None): 433 | """Arguments are as described in the attribute description above. 434 | 435 | Note that filename is an obsolete argument, that is not used anymore. 436 | Please use file.name to access this as an attribute. 437 | """ 438 | super(EnumDescriptor, self).__init__( 439 | options, 'EnumOptions', name, full_name, file, 440 | containing_type, serialized_start=serialized_start, 441 | serialized_end=serialized_start) 442 | 443 | self.values = values 444 | for value in self.values: 445 | value.type = self 446 | self.values_by_name = dict((v.name, v) for v in values) 447 | self.values_by_number = dict((v.number, v) for v in values) 448 | 449 | self._serialized_start = serialized_start 450 | self._serialized_end = serialized_end 451 | 452 | def CopyToProto(self, proto): 453 | """Copies this to a descriptor_pb2.EnumDescriptorProto. 454 | 455 | Args: 456 | proto: An empty descriptor_pb2.EnumDescriptorProto. 457 | """ 458 | # This function is overriden to give a better doc comment. 459 | super(EnumDescriptor, self).CopyToProto(proto) 460 | 461 | 462 | class EnumValueDescriptor(DescriptorBase): 463 | 464 | """Descriptor for a single value within an enum. 465 | 466 | name: (str) Name of this value. 467 | index: (int) Dense, 0-indexed index giving the order that this 468 | value appears textually within its enum in the .proto file. 469 | number: (int) Actual number assigned to this enum value. 470 | type: (EnumDescriptor) EnumDescriptor to which this value 471 | belongs. Set by EnumDescriptor's constructor if we're 472 | passed into one. 473 | options: (descriptor_pb2.EnumValueOptions) Enum value options message or 474 | None to use default enum value options options. 
475 | """ 476 | 477 | def __init__(self, name, index, number, type=None, options=None): 478 | """Arguments are as described in the attribute description above.""" 479 | super(EnumValueDescriptor, self).__init__(options, 'EnumValueOptions') 480 | self.name = name 481 | self.index = index 482 | self.number = number 483 | self.type = type 484 | 485 | 486 | class ServiceDescriptor(_NestedDescriptorBase): 487 | 488 | """Descriptor for a service. 489 | 490 | name: (str) Name of the service. 491 | full_name: (str) Full name of the service, including package name. 492 | index: (int) 0-indexed index giving the order that this services 493 | definition appears withing the .proto file. 494 | methods: (list of MethodDescriptor) List of methods provided by this 495 | service. 496 | options: (descriptor_pb2.ServiceOptions) Service options message or 497 | None to use default service options. 498 | file: (FileDescriptor) Reference to file info. 499 | """ 500 | 501 | def __init__(self, name, full_name, index, methods, options=None, file=None, 502 | serialized_start=None, serialized_end=None): 503 | super(ServiceDescriptor, self).__init__( 504 | options, 'ServiceOptions', name, full_name, file, 505 | None, serialized_start=serialized_start, 506 | serialized_end=serialized_end) 507 | self.index = index 508 | self.methods = methods 509 | # Set the containing service for each method in this service. 510 | for method in self.methods: 511 | method.containing_service = self 512 | 513 | def FindMethodByName(self, name): 514 | """Searches for the specified method, and returns its descriptor.""" 515 | for method in self.methods: 516 | if name == method.name: 517 | return method 518 | return None 519 | 520 | def CopyToProto(self, proto): 521 | """Copies this to a descriptor_pb2.ServiceDescriptorProto. 522 | 523 | Args: 524 | proto: An empty descriptor_pb2.ServiceDescriptorProto. 525 | """ 526 | # This function is overriden to give a better doc comment. 
527 | super(ServiceDescriptor, self).CopyToProto(proto) 528 | 529 | 530 | class MethodDescriptor(DescriptorBase): 531 | 532 | """Descriptor for a method in a service. 533 | 534 | name: (str) Name of the method within the service. 535 | full_name: (str) Full name of method. 536 | index: (int) 0-indexed index of the method inside the service. 537 | containing_service: (ServiceDescriptor) The service that contains this 538 | method. 539 | input_type: The descriptor of the message that this method accepts. 540 | output_type: The descriptor of the message that this method returns. 541 | options: (descriptor_pb2.MethodOptions) Method options message or 542 | None to use default method options. 543 | """ 544 | 545 | def __init__(self, name, full_name, index, containing_service, 546 | input_type, output_type, options=None): 547 | """The arguments are as described in the description of MethodDescriptor 548 | attributes above. 549 | 550 | Note that containing_service may be None, and may be set later if necessary. 551 | """ 552 | super(MethodDescriptor, self).__init__(options, 'MethodOptions') 553 | self.name = name 554 | self.full_name = full_name 555 | self.index = index 556 | self.containing_service = containing_service 557 | self.input_type = input_type 558 | self.output_type = output_type 559 | 560 | 561 | class FileDescriptor(DescriptorBase): 562 | """Descriptor for a file. Mimics the descriptor_pb2.FileDescriptorProto. 563 | 564 | name: name of file, relative to root of source tree. 565 | package: name of the package 566 | serialized_pb: (str) Byte string of serialized 567 | descriptor_pb2.FileDescriptorProto. 
568 | """ 569 | 570 | def __init__(self, name, package, options=None, serialized_pb=None): 571 | """Constructor.""" 572 | super(FileDescriptor, self).__init__(options, 'FileOptions') 573 | 574 | self.message_types_by_name = {} 575 | self.name = name 576 | self.package = package 577 | self.serialized_pb = serialized_pb 578 | if (api_implementation.Type() == 'cpp' and 579 | self.serialized_pb is not None): 580 | cpp_message.BuildFile(self.serialized_pb) 581 | 582 | def CopyToProto(self, proto): 583 | """Copies this to a descriptor_pb2.FileDescriptorProto. 584 | 585 | Args: 586 | proto: An empty descriptor_pb2.FileDescriptorProto. 587 | """ 588 | proto.ParseFromString(self.serialized_pb) 589 | 590 | 591 | def _ParseOptions(message, string): 592 | """Parses serialized options. 593 | 594 | This helper function is used to parse serialized options in generated 595 | proto2 files. It must not be used outside proto2. 596 | """ 597 | message.ParseFromString(string) 598 | return message 599 | -------------------------------------------------------------------------------- /lib/google/protobuf/descriptor.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/google/protobuf/descriptor.pyc -------------------------------------------------------------------------------- /lib/google/protobuf/descriptor_pb2.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/google/protobuf/descriptor_pb2.pyc -------------------------------------------------------------------------------- /lib/google/protobuf/internal/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/google/protobuf/internal/__init__.py -------------------------------------------------------------------------------- /lib/google/protobuf/internal/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/google/protobuf/internal/__init__.pyc -------------------------------------------------------------------------------- /lib/google/protobuf/internal/api_implementation.py: -------------------------------------------------------------------------------- 1 | # Protocol Buffers - Google's data interchange format 2 | # Copyright 2008 Google Inc. All rights reserved. 3 | # http://code.google.com/p/protobuf/ 4 | # 5 | # Redistribution and use in source and binary forms, with or without 6 | # modification, are permitted provided that the following conditions are 7 | # met: 8 | # 9 | # * Redistributions of source code must retain the above copyright 10 | # notice, this list of conditions and the following disclaimer. 11 | # * Redistributions in binary form must reproduce the above 12 | # copyright notice, this list of conditions and the following disclaimer 13 | # in the documentation and/or other materials provided with the 14 | # distribution. 15 | # * Neither the name of Google Inc. nor the names of its 16 | # contributors may be used to endorse or promote products derived from 17 | # this software without specific prior written permission. 18 | # 19 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 23 | # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | """ 32 | This module is the central entity that determines which implementation of the 33 | API is used. 34 | """ 35 | 36 | __author__ = 'petar@google.com (Petar Petrov)' 37 | 38 | import os 39 | # This environment variable can be used to switch to a certain implementation 40 | # of the Python API. Right now only 'python' and 'cpp' are valid values. Any 41 | # other value will be ignored. 42 | _implementation_type = os.getenv('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', 43 | 'python') 44 | 45 | 46 | if _implementation_type != 'python': 47 | # For now, by default use the pure-Python implementation. 48 | # The code below checks if the C extension is available and 49 | # uses it if it is available. 50 | _implementation_type = 'cpp' 51 | ## Determine automatically which implementation to use. 52 | #try: 53 | # from google.protobuf.internal import cpp_message 54 | # _implementation_type = 'cpp' 55 | #except ImportError, e: 56 | # _implementation_type = 'python' 57 | 58 | 59 | # Usage of this function is discouraged. Clients shouldn't care which 60 | # implementation of the API is in use. Note that there is no guarantee 61 | # that differences between APIs will be maintained. 62 | # Please don't use this function if possible. 
63 | def Type(): 64 | return _implementation_type 65 | -------------------------------------------------------------------------------- /lib/google/protobuf/internal/api_implementation.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/google/protobuf/internal/api_implementation.pyc -------------------------------------------------------------------------------- /lib/google/protobuf/internal/containers.py: -------------------------------------------------------------------------------- 1 | # Protocol Buffers - Google's data interchange format 2 | # Copyright 2008 Google Inc. All rights reserved. 3 | # http://code.google.com/p/protobuf/ 4 | # 5 | # Redistribution and use in source and binary forms, with or without 6 | # modification, are permitted provided that the following conditions are 7 | # met: 8 | # 9 | # * Redistributions of source code must retain the above copyright 10 | # notice, this list of conditions and the following disclaimer. 11 | # * Redistributions in binary form must reproduce the above 12 | # copyright notice, this list of conditions and the following disclaimer 13 | # in the documentation and/or other materials provided with the 14 | # distribution. 15 | # * Neither the name of Google Inc. nor the names of its 16 | # contributors may be used to endorse or promote products derived from 17 | # this software without specific prior written permission. 18 | # 19 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 23 | # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | """Contains container classes to represent different protocol buffer types. 32 | 33 | This file defines container classes which represent categories of protocol 34 | buffer field types which need extra maintenance. Currently these categories 35 | are: 36 | - Repeated scalar fields - These are all repeated fields which aren't 37 | composite (e.g. they are of simple types like int32, string, etc). 38 | - Repeated composite fields - Repeated fields which are composite. This 39 | includes groups and nested messages. 40 | """ 41 | 42 | __author__ = 'petar@google.com (Petar Petrov)' 43 | 44 | 45 | class BaseContainer(object): 46 | 47 | """Base container class.""" 48 | 49 | # Minimizes memory usage and disallows assignment to other attributes. 50 | __slots__ = ['_message_listener', '_values'] 51 | 52 | def __init__(self, message_listener): 53 | """ 54 | Args: 55 | message_listener: A MessageListener implementation. 56 | The RepeatedScalarFieldContainer will call this object's 57 | Modified() method when it is modified. 
58 | """ 59 | self._message_listener = message_listener 60 | self._values = [] 61 | 62 | def __getitem__(self, key): 63 | """Retrieves item by the specified key.""" 64 | return self._values[key] 65 | 66 | def __len__(self): 67 | """Returns the number of elements in the container.""" 68 | return len(self._values) 69 | 70 | def __ne__(self, other): 71 | """Checks if another instance isn't equal to this one.""" 72 | # The concrete classes should define __eq__. 73 | return not self == other 74 | 75 | def __hash__(self): 76 | raise TypeError('unhashable object') 77 | 78 | def __repr__(self): 79 | return repr(self._values) 80 | 81 | def sort(self, sort_function=cmp): 82 | self._values.sort(sort_function) 83 | 84 | 85 | class RepeatedScalarFieldContainer(BaseContainer): 86 | 87 | """Simple, type-checked, list-like container for holding repeated scalars.""" 88 | 89 | # Disallows assignment to other attributes. 90 | __slots__ = ['_type_checker'] 91 | 92 | def __init__(self, message_listener, type_checker): 93 | """ 94 | Args: 95 | message_listener: A MessageListener implementation. 96 | The RepeatedScalarFieldContainer will call this object's 97 | Modified() method when it is modified. 98 | type_checker: A type_checkers.ValueChecker instance to run on elements 99 | inserted into this container. 100 | """ 101 | super(RepeatedScalarFieldContainer, self).__init__(message_listener) 102 | self._type_checker = type_checker 103 | 104 | def append(self, value): 105 | """Appends an item to the list. Similar to list.append().""" 106 | self._type_checker.CheckValue(value) 107 | self._values.append(value) 108 | if not self._message_listener.dirty: 109 | self._message_listener.Modified() 110 | 111 | def insert(self, key, value): 112 | """Inserts the item at the specified position. 
Similar to list.insert().""" 113 | self._type_checker.CheckValue(value) 114 | self._values.insert(key, value) 115 | if not self._message_listener.dirty: 116 | self._message_listener.Modified() 117 | 118 | def extend(self, elem_seq): 119 | """Extends by appending the given sequence. Similar to list.extend().""" 120 | if not elem_seq: 121 | return 122 | 123 | new_values = [] 124 | for elem in elem_seq: 125 | self._type_checker.CheckValue(elem) 126 | new_values.append(elem) 127 | self._values.extend(new_values) 128 | self._message_listener.Modified() 129 | 130 | def MergeFrom(self, other): 131 | """Appends the contents of another repeated field of the same type to this 132 | one. We do not check the types of the individual fields. 133 | """ 134 | self._values.extend(other._values) 135 | self._message_listener.Modified() 136 | 137 | def remove(self, elem): 138 | """Removes an item from the list. Similar to list.remove().""" 139 | self._values.remove(elem) 140 | self._message_listener.Modified() 141 | 142 | def __setitem__(self, key, value): 143 | """Sets the item on the specified position.""" 144 | self._type_checker.CheckValue(value) 145 | self._values[key] = value 146 | self._message_listener.Modified() 147 | 148 | def __getslice__(self, start, stop): 149 | """Retrieves the subset of items from between the specified indices.""" 150 | return self._values[start:stop] 151 | 152 | def __setslice__(self, start, stop, values): 153 | """Sets the subset of items from between the specified indices.""" 154 | new_values = [] 155 | for value in values: 156 | self._type_checker.CheckValue(value) 157 | new_values.append(value) 158 | self._values[start:stop] = new_values 159 | self._message_listener.Modified() 160 | 161 | def __delitem__(self, key): 162 | """Deletes the item at the specified position.""" 163 | del self._values[key] 164 | self._message_listener.Modified() 165 | 166 | def __delslice__(self, start, stop): 167 | """Deletes the subset of items from between the specified 
indices.""" 168 | del self._values[start:stop] 169 | self._message_listener.Modified() 170 | 171 | def __eq__(self, other): 172 | """Compares the current instance with another one.""" 173 | if self is other: 174 | return True 175 | # Special case for the same type which should be common and fast. 176 | if isinstance(other, self.__class__): 177 | return other._values == self._values 178 | # We are presumably comparing against some other sequence type. 179 | return other == self._values 180 | 181 | 182 | class RepeatedCompositeFieldContainer(BaseContainer): 183 | 184 | """Simple, list-like container for holding repeated composite fields.""" 185 | 186 | # Disallows assignment to other attributes. 187 | __slots__ = ['_message_descriptor'] 188 | 189 | def __init__(self, message_listener, message_descriptor): 190 | """ 191 | Note that we pass in a descriptor instead of the generated directly, 192 | since at the time we construct a _RepeatedCompositeFieldContainer we 193 | haven't yet necessarily initialized the type that will be contained in the 194 | container. 195 | 196 | Args: 197 | message_listener: A MessageListener implementation. 198 | The RepeatedCompositeFieldContainer will call this object's 199 | Modified() method when it is modified. 200 | message_descriptor: A Descriptor instance describing the protocol type 201 | that should be present in this container. We'll use the 202 | _concrete_class field of this descriptor when the client calls add(). 203 | """ 204 | super(RepeatedCompositeFieldContainer, self).__init__(message_listener) 205 | self._message_descriptor = message_descriptor 206 | 207 | def add(self, **kwargs): 208 | """Adds a new element at the end of the list and returns it. Keyword 209 | arguments may be used to initialize the element. 
210 | """ 211 | new_element = self._message_descriptor._concrete_class(**kwargs) 212 | new_element._SetListener(self._message_listener) 213 | self._values.append(new_element) 214 | if not self._message_listener.dirty: 215 | self._message_listener.Modified() 216 | return new_element 217 | 218 | def extend(self, elem_seq): 219 | """Extends by appending the given sequence of elements of the same type 220 | as this one, copying each individual message. 221 | """ 222 | message_class = self._message_descriptor._concrete_class 223 | listener = self._message_listener 224 | values = self._values 225 | for message in elem_seq: 226 | new_element = message_class() 227 | new_element._SetListener(listener) 228 | new_element.MergeFrom(message) 229 | values.append(new_element) 230 | listener.Modified() 231 | 232 | def MergeFrom(self, other): 233 | """Appends the contents of another repeated field of the same type to this 234 | one, copying each individual message. 235 | """ 236 | self.extend(other._values) 237 | 238 | def __getslice__(self, start, stop): 239 | """Retrieves the subset of items from between the specified indices.""" 240 | return self._values[start:stop] 241 | 242 | def __delitem__(self, key): 243 | """Deletes the item at the specified position.""" 244 | del self._values[key] 245 | self._message_listener.Modified() 246 | 247 | def __delslice__(self, start, stop): 248 | """Deletes the subset of items from between the specified indices.""" 249 | del self._values[start:stop] 250 | self._message_listener.Modified() 251 | 252 | def __eq__(self, other): 253 | """Compares the current instance with another one.""" 254 | if self is other: 255 | return True 256 | if not isinstance(other, self.__class__): 257 | raise TypeError('Can only compare repeated composite fields against ' 258 | 'other repeated composite fields.') 259 | return self._values == other._values 260 | -------------------------------------------------------------------------------- 
/lib/google/protobuf/internal/containers.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/google/protobuf/internal/containers.pyc -------------------------------------------------------------------------------- /lib/google/protobuf/internal/cpp_message.py: -------------------------------------------------------------------------------- 1 | # Protocol Buffers - Google's data interchange format 2 | # Copyright 2008 Google Inc. All rights reserved. 3 | # http://code.google.com/p/protobuf/ 4 | # 5 | # Redistribution and use in source and binary forms, with or without 6 | # modification, are permitted provided that the following conditions are 7 | # met: 8 | # 9 | # * Redistributions of source code must retain the above copyright 10 | # notice, this list of conditions and the following disclaimer. 11 | # * Redistributions in binary form must reproduce the above 12 | # copyright notice, this list of conditions and the following disclaimer 13 | # in the documentation and/or other materials provided with the 14 | # distribution. 15 | # * Neither the name of Google Inc. nor the names of its 16 | # contributors may be used to endorse or promote products derived from 17 | # this software without specific prior written permission. 18 | # 19 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 23 | # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | """Contains helper functions used to create protocol message classes from 32 | Descriptor objects at runtime backed by the protocol buffer C++ API. 33 | """ 34 | 35 | __author__ = 'petar@google.com (Petar Petrov)' 36 | 37 | import operator 38 | from google.protobuf.internal import _net_proto2___python 39 | from google.protobuf import message 40 | 41 | 42 | _LABEL_REPEATED = _net_proto2___python.LABEL_REPEATED 43 | _LABEL_OPTIONAL = _net_proto2___python.LABEL_OPTIONAL 44 | _CPPTYPE_MESSAGE = _net_proto2___python.CPPTYPE_MESSAGE 45 | _TYPE_MESSAGE = _net_proto2___python.TYPE_MESSAGE 46 | 47 | 48 | def GetDescriptorPool(): 49 | """Creates a new DescriptorPool C++ object.""" 50 | return _net_proto2___python.NewCDescriptorPool() 51 | 52 | 53 | _pool = GetDescriptorPool() 54 | 55 | 56 | def GetFieldDescriptor(full_field_name): 57 | """Searches for a field descriptor given a full field name.""" 58 | return _pool.FindFieldByName(full_field_name) 59 | 60 | 61 | def BuildFile(content): 62 | """Registers a new proto file in the underlying C++ descriptor pool.""" 63 | _net_proto2___python.BuildFile(content) 64 | 65 | 66 | def GetExtensionDescriptor(full_extension_name): 67 | """Searches for extension descriptor given a full field name.""" 68 | return _pool.FindExtensionByName(full_extension_name) 69 | 70 | 71 | def NewCMessage(full_message_name): 72 | """Creates a new C++ protocol message by its name.""" 73 | 
return _net_proto2___python.NewCMessage(full_message_name) 74 | 75 | 76 | def ScalarProperty(cdescriptor): 77 | """Returns a scalar property for the given descriptor.""" 78 | 79 | def Getter(self): 80 | return self._cmsg.GetScalar(cdescriptor) 81 | 82 | def Setter(self, value): 83 | self._cmsg.SetScalar(cdescriptor, value) 84 | 85 | return property(Getter, Setter) 86 | 87 | 88 | def CompositeProperty(cdescriptor, message_type): 89 | """Returns a Python property the given composite field.""" 90 | 91 | def Getter(self): 92 | sub_message = self._composite_fields.get(cdescriptor.name, None) 93 | if sub_message is None: 94 | cmessage = self._cmsg.NewSubMessage(cdescriptor) 95 | sub_message = message_type._concrete_class(__cmessage=cmessage) 96 | self._composite_fields[cdescriptor.name] = sub_message 97 | return sub_message 98 | 99 | return property(Getter) 100 | 101 | 102 | class RepeatedScalarContainer(object): 103 | """Container for repeated scalar fields.""" 104 | 105 | __slots__ = ['_message', '_cfield_descriptor', '_cmsg'] 106 | 107 | def __init__(self, msg, cfield_descriptor): 108 | self._message = msg 109 | self._cmsg = msg._cmsg 110 | self._cfield_descriptor = cfield_descriptor 111 | 112 | def append(self, value): 113 | self._cmsg.AddRepeatedScalar( 114 | self._cfield_descriptor, value) 115 | 116 | def extend(self, sequence): 117 | for element in sequence: 118 | self.append(element) 119 | 120 | def insert(self, key, value): 121 | values = self[slice(None, None, None)] 122 | values.insert(key, value) 123 | self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values) 124 | 125 | def remove(self, value): 126 | values = self[slice(None, None, None)] 127 | values.remove(value) 128 | self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values) 129 | 130 | def __setitem__(self, key, value): 131 | values = self[slice(None, None, None)] 132 | values[key] = value 133 | self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values) 134 | 135 | def 
__getitem__(self, key): 136 | return self._cmsg.GetRepeatedScalar(self._cfield_descriptor, key) 137 | 138 | def __delitem__(self, key): 139 | self._cmsg.DeleteRepeatedField(self._cfield_descriptor, key) 140 | 141 | def __len__(self): 142 | return len(self[slice(None, None, None)]) 143 | 144 | def __eq__(self, other): 145 | if self is other: 146 | return True 147 | if not operator.isSequenceType(other): 148 | raise TypeError( 149 | 'Can only compare repeated scalar fields against sequences.') 150 | # We are presumably comparing against some other sequence type. 151 | return other == self[slice(None, None, None)] 152 | 153 | def __ne__(self, other): 154 | return not self == other 155 | 156 | def __hash__(self): 157 | raise TypeError('unhashable object') 158 | 159 | def sort(self, sort_function=cmp): 160 | values = self[slice(None, None, None)] 161 | values.sort(sort_function) 162 | self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values) 163 | 164 | 165 | def RepeatedScalarProperty(cdescriptor): 166 | """Returns a Python property the given repeated scalar field.""" 167 | 168 | def Getter(self): 169 | container = self._composite_fields.get(cdescriptor.name, None) 170 | if container is None: 171 | container = RepeatedScalarContainer(self, cdescriptor) 172 | self._composite_fields[cdescriptor.name] = container 173 | return container 174 | 175 | def Setter(self, new_value): 176 | raise AttributeError('Assignment not allowed to repeated field ' 177 | '"%s" in protocol message object.' % cdescriptor.name) 178 | 179 | doc = 'Magic attribute generated for "%s" proto field.' 
% cdescriptor.name 180 | return property(Getter, Setter, doc=doc) 181 | 182 | 183 | class RepeatedCompositeContainer(object): 184 | """Container for repeated composite fields.""" 185 | 186 | __slots__ = ['_message', '_subclass', '_cfield_descriptor', '_cmsg'] 187 | 188 | def __init__(self, msg, cfield_descriptor, subclass): 189 | self._message = msg 190 | self._cmsg = msg._cmsg 191 | self._subclass = subclass 192 | self._cfield_descriptor = cfield_descriptor 193 | 194 | def add(self, **kwargs): 195 | cmessage = self._cmsg.AddMessage(self._cfield_descriptor) 196 | return self._subclass(__cmessage=cmessage, __owner=self._message, **kwargs) 197 | 198 | def extend(self, elem_seq): 199 | """Extends by appending the given sequence of elements of the same type 200 | as this one, copying each individual message. 201 | """ 202 | for message in elem_seq: 203 | self.add().MergeFrom(message) 204 | 205 | def MergeFrom(self, other): 206 | for message in other[:]: 207 | self.add().MergeFrom(message) 208 | 209 | def __getitem__(self, key): 210 | cmessages = self._cmsg.GetRepeatedMessage( 211 | self._cfield_descriptor, key) 212 | subclass = self._subclass 213 | if not isinstance(cmessages, list): 214 | return subclass(__cmessage=cmessages, __owner=self._message) 215 | 216 | return [subclass(__cmessage=m, __owner=self._message) for m in cmessages] 217 | 218 | def __delitem__(self, key): 219 | self._cmsg.DeleteRepeatedField( 220 | self._cfield_descriptor, key) 221 | 222 | def __len__(self): 223 | return self._cmsg.FieldLength(self._cfield_descriptor) 224 | 225 | def __eq__(self, other): 226 | """Compares the current instance with another one.""" 227 | if self is other: 228 | return True 229 | if not isinstance(other, self.__class__): 230 | raise TypeError('Can only compare repeated composite fields against ' 231 | 'other repeated composite fields.') 232 | messages = self[slice(None, None, None)] 233 | other_messages = other[slice(None, None, None)] 234 | return messages == 
other_messages 235 | 236 | def __hash__(self): 237 | raise TypeError('unhashable object') 238 | 239 | def sort(self, sort_function=cmp): 240 | messages = [] 241 | for index in range(len(self)): 242 | # messages[i][0] is where the i-th element of the new array has to come 243 | # from. 244 | # messages[i][1] is where the i-th element of the old array has to go. 245 | messages.append([index, 0, self[index]]) 246 | messages.sort(lambda x,y: sort_function(x[2], y[2])) 247 | 248 | # Remember which position each elements has to move to. 249 | for i in range(len(messages)): 250 | messages[messages[i][0]][1] = i 251 | 252 | # Apply the transposition. 253 | for i in range(len(messages)): 254 | from_position = messages[i][0] 255 | if i == from_position: 256 | continue 257 | self._cmsg.SwapRepeatedFieldElements( 258 | self._cfield_descriptor, i, from_position) 259 | messages[messages[i][1]][0] = from_position 260 | 261 | 262 | def RepeatedCompositeProperty(cdescriptor, message_type): 263 | """Returns a Python property for the given repeated composite field.""" 264 | 265 | def Getter(self): 266 | container = self._composite_fields.get(cdescriptor.name, None) 267 | if container is None: 268 | container = RepeatedCompositeContainer( 269 | self, cdescriptor, message_type._concrete_class) 270 | self._composite_fields[cdescriptor.name] = container 271 | return container 272 | 273 | def Setter(self, new_value): 274 | raise AttributeError('Assignment not allowed to repeated field ' 275 | '"%s" in protocol message object.' % cdescriptor.name) 276 | 277 | doc = 'Magic attribute generated for "%s" proto field.' 
% cdescriptor.name 278 | return property(Getter, Setter, doc=doc) 279 | 280 | 281 | class ExtensionDict(object): 282 | """Extension dictionary added to each protocol message.""" 283 | 284 | def __init__(self, msg): 285 | self._message = msg 286 | self._cmsg = msg._cmsg 287 | self._values = {} 288 | 289 | def __setitem__(self, extension, value): 290 | from google.protobuf import descriptor 291 | if not isinstance(extension, descriptor.FieldDescriptor): 292 | raise KeyError('Bad extension %r.' % (extension,)) 293 | cdescriptor = extension._cdescriptor 294 | if (cdescriptor.label != _LABEL_OPTIONAL or 295 | cdescriptor.cpp_type == _CPPTYPE_MESSAGE): 296 | raise TypeError('Extension %r is repeated and/or a composite type.' % ( 297 | extension.full_name,)) 298 | self._cmsg.SetScalar(cdescriptor, value) 299 | self._values[extension] = value 300 | 301 | def __getitem__(self, extension): 302 | from google.protobuf import descriptor 303 | if not isinstance(extension, descriptor.FieldDescriptor): 304 | raise KeyError('Bad extension %r.' % (extension,)) 305 | 306 | cdescriptor = extension._cdescriptor 307 | if (cdescriptor.label != _LABEL_REPEATED and 308 | cdescriptor.cpp_type != _CPPTYPE_MESSAGE): 309 | return self._cmsg.GetScalar(cdescriptor) 310 | 311 | ext = self._values.get(extension, None) 312 | if ext is not None: 313 | return ext 314 | 315 | ext = self._CreateNewHandle(extension) 316 | self._values[extension] = ext 317 | return ext 318 | 319 | def ClearExtension(self, extension): 320 | from google.protobuf import descriptor 321 | if not isinstance(extension, descriptor.FieldDescriptor): 322 | raise KeyError('Bad extension %r.' % (extension,)) 323 | self._cmsg.ClearFieldByDescriptor(extension._cdescriptor) 324 | if extension in self._values: 325 | del self._values[extension] 326 | 327 | def HasExtension(self, extension): 328 | from google.protobuf import descriptor 329 | if not isinstance(extension, descriptor.FieldDescriptor): 330 | raise KeyError('Bad extension %r.' 
% (extension,)) 331 | return self._cmsg.HasFieldByDescriptor(extension._cdescriptor) 332 | 333 | def _FindExtensionByName(self, name): 334 | """Tries to find a known extension with the specified name. 335 | 336 | Args: 337 | name: Extension full name. 338 | 339 | Returns: 340 | Extension field descriptor. 341 | """ 342 | return self._message._extensions_by_name.get(name, None) 343 | 344 | def _CreateNewHandle(self, extension): 345 | cdescriptor = extension._cdescriptor 346 | if (cdescriptor.label != _LABEL_REPEATED and 347 | cdescriptor.cpp_type == _CPPTYPE_MESSAGE): 348 | cmessage = self._cmsg.NewSubMessage(cdescriptor) 349 | return extension.message_type._concrete_class(__cmessage=cmessage) 350 | 351 | if cdescriptor.label == _LABEL_REPEATED: 352 | if cdescriptor.cpp_type == _CPPTYPE_MESSAGE: 353 | return RepeatedCompositeContainer( 354 | self._message, cdescriptor, extension.message_type._concrete_class) 355 | else: 356 | return RepeatedScalarContainer(self._message, cdescriptor) 357 | # This shouldn't happen! 358 | assert False 359 | return None 360 | 361 | 362 | def NewMessage(message_descriptor, dictionary): 363 | """Creates a new protocol message *class*.""" 364 | _AddClassAttributesForNestedExtensions(message_descriptor, dictionary) 365 | _AddEnumValues(message_descriptor, dictionary) 366 | _AddDescriptors(message_descriptor, dictionary) 367 | 368 | 369 | def InitMessage(message_descriptor, cls): 370 | """Constructs a new message instance (called before instance's __init__).""" 371 | cls._extensions_by_name = {} 372 | _AddInitMethod(message_descriptor, cls) 373 | _AddMessageMethods(message_descriptor, cls) 374 | _AddPropertiesForExtensions(message_descriptor, cls) 375 | 376 | 377 | def _AddDescriptors(message_descriptor, dictionary): 378 | """Sets up a new protocol message class dictionary. 379 | 380 | Args: 381 | message_descriptor: A Descriptor instance describing this message type. 
382 | dictionary: Class dictionary to which we'll add a '__slots__' entry. 383 | """ 384 | dictionary['__descriptors'] = {} 385 | for field in message_descriptor.fields: 386 | dictionary['__descriptors'][field.name] = GetFieldDescriptor( 387 | field.full_name) 388 | 389 | dictionary['__slots__'] = list(dictionary['__descriptors'].iterkeys()) + [ 390 | '_cmsg', '_owner', '_composite_fields', 'Extensions'] 391 | 392 | 393 | def _AddEnumValues(message_descriptor, dictionary): 394 | """Sets class-level attributes for all enum fields defined in this message. 395 | 396 | Args: 397 | message_descriptor: Descriptor object for this message type. 398 | dictionary: Class dictionary that should be populated. 399 | """ 400 | for enum_type in message_descriptor.enum_types: 401 | for enum_value in enum_type.values: 402 | dictionary[enum_value.name] = enum_value.number 403 | 404 | 405 | def _AddClassAttributesForNestedExtensions(message_descriptor, dictionary): 406 | """Adds class attributes for the nested extensions.""" 407 | extension_dict = message_descriptor.extensions_by_name 408 | for extension_name, extension_field in extension_dict.iteritems(): 409 | assert extension_name not in dictionary 410 | dictionary[extension_name] = extension_field 411 | 412 | 413 | def _AddInitMethod(message_descriptor, cls): 414 | """Adds an __init__ method to cls.""" 415 | 416 | # Create and attach message field properties to the message class. 417 | # This can be done just once per message class, since property setters and 418 | # getters are passed the message instance. 419 | # This makes message instantiation extremely fast, and at the same time it 420 | # doesn't require the creation of property objects for each message instance, 421 | # which saves a lot of memory. 
422 | for field in message_descriptor.fields: 423 | field_cdescriptor = cls.__descriptors[field.name] 424 | if field.label == _LABEL_REPEATED: 425 | if field.cpp_type == _CPPTYPE_MESSAGE: 426 | value = RepeatedCompositeProperty(field_cdescriptor, field.message_type) 427 | else: 428 | value = RepeatedScalarProperty(field_cdescriptor) 429 | elif field.cpp_type == _CPPTYPE_MESSAGE: 430 | value = CompositeProperty(field_cdescriptor, field.message_type) 431 | else: 432 | value = ScalarProperty(field_cdescriptor) 433 | setattr(cls, field.name, value) 434 | 435 | # Attach a constant with the field number. 436 | constant_name = field.name.upper() + '_FIELD_NUMBER' 437 | setattr(cls, constant_name, field.number) 438 | 439 | def Init(self, **kwargs): 440 | """Message constructor.""" 441 | cmessage = kwargs.pop('__cmessage', None) 442 | if cmessage is None: 443 | self._cmsg = NewCMessage(message_descriptor.full_name) 444 | else: 445 | self._cmsg = cmessage 446 | 447 | # Keep a reference to the owner, as the owner keeps a reference to the 448 | # underlying protocol buffer message. 449 | owner = kwargs.pop('__owner', None) 450 | if owner is not None: 451 | self._owner = owner 452 | 453 | self.Extensions = ExtensionDict(self) 454 | self._composite_fields = {} 455 | 456 | for field_name, field_value in kwargs.iteritems(): 457 | field_cdescriptor = self.__descriptors.get(field_name, None) 458 | if field_cdescriptor is None: 459 | raise ValueError('Protocol message has no "%s" field.' 
% field_name) 460 | if field_cdescriptor.label == _LABEL_REPEATED: 461 | if field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE: 462 | for val in field_value: 463 | getattr(self, field_name).add().MergeFrom(val) 464 | else: 465 | getattr(self, field_name).extend(field_value) 466 | elif field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE: 467 | getattr(self, field_name).MergeFrom(field_value) 468 | else: 469 | setattr(self, field_name, field_value) 470 | 471 | Init.__module__ = None 472 | Init.__doc__ = None 473 | cls.__init__ = Init 474 | 475 | 476 | def _IsMessageSetExtension(field): 477 | """Checks if a field is a message set extension.""" 478 | return (field.is_extension and 479 | field.containing_type.has_options and 480 | field.containing_type.GetOptions().message_set_wire_format and 481 | field.type == _TYPE_MESSAGE and 482 | field.message_type == field.extension_scope and 483 | field.label == _LABEL_OPTIONAL) 484 | 485 | 486 | def _AddMessageMethods(message_descriptor, cls): 487 | """Adds the methods to a protocol message class.""" 488 | if message_descriptor.is_extendable: 489 | 490 | def ClearExtension(self, extension): 491 | self.Extensions.ClearExtension(extension) 492 | 493 | def HasExtension(self, extension): 494 | return self.Extensions.HasExtension(extension) 495 | 496 | def HasField(self, field_name): 497 | return self._cmsg.HasField(field_name) 498 | 499 | def ClearField(self, field_name): 500 | if field_name in self._composite_fields: 501 | del self._composite_fields[field_name] 502 | self._cmsg.ClearField(field_name) 503 | 504 | def Clear(self): 505 | return self._cmsg.Clear() 506 | 507 | def IsInitialized(self, errors=None): 508 | if self._cmsg.IsInitialized(): 509 | return True 510 | if errors is not None: 511 | errors.extend(self.FindInitializationErrors()); 512 | return False 513 | 514 | def SerializeToString(self): 515 | if not self.IsInitialized(): 516 | raise message.EncodeError( 517 | 'Message is missing required fields: ' + 518 | 
','.join(self.FindInitializationErrors())) 519 | return self._cmsg.SerializeToString() 520 | 521 | def SerializePartialToString(self): 522 | return self._cmsg.SerializePartialToString() 523 | 524 | def ParseFromString(self, serialized): 525 | self.Clear() 526 | self.MergeFromString(serialized) 527 | 528 | def MergeFromString(self, serialized): 529 | byte_size = self._cmsg.MergeFromString(serialized) 530 | if byte_size < 0: 531 | raise message.DecodeError('Unable to merge from string.') 532 | return byte_size 533 | 534 | def MergeFrom(self, msg): 535 | if not isinstance(msg, cls): 536 | raise TypeError( 537 | "Parameter to MergeFrom() must be instance of same class.") 538 | self._cmsg.MergeFrom(msg._cmsg) 539 | 540 | def CopyFrom(self, msg): 541 | self._cmsg.CopyFrom(msg._cmsg) 542 | 543 | def ByteSize(self): 544 | return self._cmsg.ByteSize() 545 | 546 | def SetInParent(self): 547 | return self._cmsg.SetInParent() 548 | 549 | def ListFields(self): 550 | all_fields = [] 551 | field_list = self._cmsg.ListFields() 552 | fields_by_name = cls.DESCRIPTOR.fields_by_name 553 | for is_extension, field_name in field_list: 554 | if is_extension: 555 | extension = cls._extensions_by_name[field_name] 556 | all_fields.append((extension, self.Extensions[extension])) 557 | else: 558 | field_descriptor = fields_by_name[field_name] 559 | all_fields.append( 560 | (field_descriptor, getattr(self, field_name))) 561 | all_fields.sort(key=lambda item: item[0].number) 562 | return all_fields 563 | 564 | def FindInitializationErrors(self): 565 | return self._cmsg.FindInitializationErrors() 566 | 567 | def __str__(self): 568 | return self._cmsg.DebugString() 569 | 570 | def __eq__(self, other): 571 | if self is other: 572 | return True 573 | if not isinstance(other, self.__class__): 574 | return False 575 | return self.ListFields() == other.ListFields() 576 | 577 | def __ne__(self, other): 578 | return not self == other 579 | 580 | def __hash__(self): 581 | raise TypeError('unhashable 
object') 582 | 583 | def __unicode__(self): 584 | return text_format.MessageToString(self, as_utf8=True).decode('utf-8') 585 | 586 | # Attach the local methods to the message class. 587 | for key, value in locals().copy().iteritems(): 588 | if key not in ('key', 'value', '__builtins__', '__name__', '__doc__'): 589 | setattr(cls, key, value) 590 | 591 | # Static methods: 592 | 593 | def RegisterExtension(extension_handle): 594 | extension_handle.containing_type = cls.DESCRIPTOR 595 | cls._extensions_by_name[extension_handle.full_name] = extension_handle 596 | 597 | if _IsMessageSetExtension(extension_handle): 598 | # MessageSet extension. Also register under type name. 599 | cls._extensions_by_name[ 600 | extension_handle.message_type.full_name] = extension_handle 601 | cls.RegisterExtension = staticmethod(RegisterExtension) 602 | 603 | def FromString(string): 604 | msg = cls() 605 | msg.MergeFromString(string) 606 | return msg 607 | cls.FromString = staticmethod(FromString) 608 | 609 | 610 | 611 | def _AddPropertiesForExtensions(message_descriptor, cls): 612 | """Adds properties for all fields in this protocol message type.""" 613 | extension_dict = message_descriptor.extensions_by_name 614 | for extension_name, extension_field in extension_dict.iteritems(): 615 | constant_name = extension_name.upper() + '_FIELD_NUMBER' 616 | setattr(cls, constant_name, extension_field.number) 617 | -------------------------------------------------------------------------------- /lib/google/protobuf/internal/cpp_message.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/google/protobuf/internal/cpp_message.pyc -------------------------------------------------------------------------------- /lib/google/protobuf/internal/decoder.py: -------------------------------------------------------------------------------- 1 | # Protocol Buffers - 
Google's data interchange format 2 | # Copyright 2008 Google Inc. All rights reserved. 3 | # http://code.google.com/p/protobuf/ 4 | # 5 | # Redistribution and use in source and binary forms, with or without 6 | # modification, are permitted provided that the following conditions are 7 | # met: 8 | # 9 | # * Redistributions of source code must retain the above copyright 10 | # notice, this list of conditions and the following disclaimer. 11 | # * Redistributions in binary form must reproduce the above 12 | # copyright notice, this list of conditions and the following disclaimer 13 | # in the documentation and/or other materials provided with the 14 | # distribution. 15 | # * Neither the name of Google Inc. nor the names of its 16 | # contributors may be used to endorse or promote products derived from 17 | # this software without specific prior written permission. 18 | # 19 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | """Code for decoding protocol buffer primitives. 32 | 33 | This code is very similar to encoder.py -- read the docs for that module first. 
34 | 35 | A "decoder" is a function with the signature: 36 | Decode(buffer, pos, end, message, field_dict) 37 | The arguments are: 38 | buffer: The string containing the encoded message. 39 | pos: The current position in the string. 40 | end: The position in the string where the current message ends. May be 41 | less than len(buffer) if we're reading a sub-message. 42 | message: The message object into which we're parsing. 43 | field_dict: message._fields (avoids a hashtable lookup). 44 | The decoder reads the field and stores it into field_dict, returning the new 45 | buffer position. A decoder for a repeated field may proactively decode all of 46 | the elements of that field, if they appear consecutively. 47 | 48 | Note that decoders may throw any of the following: 49 | IndexError: Indicates a truncated message. 50 | struct.error: Unpacking of a fixed-width field failed. 51 | message.DecodeError: Other errors. 52 | 53 | Decoders are expected to raise an exception if they are called with pos > end. 54 | This allows callers to be lax about bounds checking: it's fine to read past 55 | "end" as long as you are sure that someone else will notice and throw an 56 | exception later on. 57 | 58 | Something up the call stack is expected to catch IndexError and struct.error 59 | and convert them to message.DecodeError. 60 | 61 | Decoders are constructed using decoder constructors with the signature: 62 | MakeDecoder(field_number, is_repeated, is_packed, key, new_default) 63 | The arguments are: 64 | field_number: The field number of the field we want to decode. 65 | is_repeated: Is the field a repeated field? (bool) 66 | is_packed: Is the field a packed field? (bool) 67 | key: The key to use when looking up the field within field_dict. 68 | (This is actually the FieldDescriptor but nothing in this 69 | file should depend on that.) 70 | new_default: A function which takes a message object as a parameter and 71 | returns a new instance of the default value for this field.
72 | (This is called for repeated fields and sub-messages, when an 73 | instance does not already exist.) 74 | 75 | As with encoders, we define a decoder constructor for every type of field. 76 | Then, for every field of every message class we construct an actual decoder. 77 | That decoder goes into a dict indexed by tag, so when we decode a message 78 | we repeatedly read a tag, look up the corresponding decoder, and invoke it. 79 | """ 80 | 81 | __author__ = 'kenton@google.com (Kenton Varda)' 82 | 83 | import struct 84 | from google.protobuf.internal import encoder 85 | from google.protobuf.internal import wire_format 86 | from google.protobuf import message 87 | 88 | 89 | # This will overflow and thus become IEEE-754 "infinity". We would use 90 | # "float('inf')" but it doesn't work on Windows pre-Python-2.6. 91 | _POS_INF = 1e10000 92 | _NEG_INF = -_POS_INF 93 | _NAN = _POS_INF * 0 94 | 95 | 96 | # This is not for optimization, but rather to avoid conflicts with local 97 | # variables named "message". 98 | _DecodeError = message.DecodeError 99 | 100 | 101 | def _VarintDecoder(mask): 102 | """Return a decoder for a basic varint value (does not include tag). 103 | 104 | Decoded values will be bitwise-anded with the given mask before being 105 | returned, e.g. to limit them to 32 bits. The returned decoder does not 106 | take the usual "end" parameter -- the caller is expected to do bounds checking 107 | after the fact (often the caller can defer such checking until later). The 108 | decoder returns a (value, new_pos) pair.
109 | """ 110 | 111 | local_ord = ord 112 | def DecodeVarint(buffer, pos): 113 | result = 0 114 | shift = 0 115 | while 1: 116 | b = local_ord(buffer[pos]) 117 | result |= ((b & 0x7f) << shift) 118 | pos += 1 119 | if not (b & 0x80): 120 | result &= mask 121 | return (result, pos) 122 | shift += 7 123 | if shift >= 64: 124 | raise _DecodeError('Too many bytes when decoding varint.') 125 | return DecodeVarint 126 | 127 | 128 | def _SignedVarintDecoder(mask): 129 | """Like _VarintDecoder() but decodes signed values.""" 130 | 131 | local_ord = ord 132 | def DecodeVarint(buffer, pos): 133 | result = 0 134 | shift = 0 135 | while 1: 136 | b = local_ord(buffer[pos]) 137 | result |= ((b & 0x7f) << shift) 138 | pos += 1 139 | if not (b & 0x80): 140 | if result > 0x7fffffffffffffff: 141 | result -= (1 << 64) 142 | result |= ~mask 143 | else: 144 | result &= mask 145 | return (result, pos) 146 | shift += 7 147 | if shift >= 64: 148 | raise _DecodeError('Too many bytes when decoding varint.') 149 | return DecodeVarint 150 | 151 | 152 | _DecodeVarint = _VarintDecoder((1 << 64) - 1) 153 | _DecodeSignedVarint = _SignedVarintDecoder((1 << 64) - 1) 154 | 155 | # Use these versions for values which must be limited to 32 bits. 156 | _DecodeVarint32 = _VarintDecoder((1 << 32) - 1) 157 | _DecodeSignedVarint32 = _SignedVarintDecoder((1 << 32) - 1) 158 | 159 | 160 | def ReadTag(buffer, pos): 161 | """Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple. 162 | 163 | We return the raw bytes of the tag rather than decoding them. The raw 164 | bytes can then be used to look up the proper decoder. This effectively allows 165 | us to trade some work that would be done in pure-python (decoding a varint) 166 | for work that is done in C (searching for a byte string in a hash table). 167 | In a low-level language it would be much cheaper to decode the varint and 168 | use that, but not in Python. 
169 | """ 170 | 171 | start = pos 172 | while ord(buffer[pos]) & 0x80: 173 | pos += 1 174 | pos += 1 175 | return (buffer[start:pos], pos) 176 | 177 | 178 | # -------------------------------------------------------------------- 179 | 180 | 181 | def _SimpleDecoder(wire_type, decode_value): 182 | """Return a constructor for a decoder for fields of a particular type. 183 | 184 | Args: 185 | wire_type: The field's wire type. 186 | decode_value: A function which decodes an individual value, e.g. 187 | _DecodeVarint() 188 | """ 189 | 190 | def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default): 191 | if is_packed: 192 | local_DecodeVarint = _DecodeVarint 193 | def DecodePackedField(buffer, pos, end, message, field_dict): 194 | value = field_dict.get(key) 195 | if value is None: 196 | value = field_dict.setdefault(key, new_default(message)) 197 | (endpoint, pos) = local_DecodeVarint(buffer, pos) 198 | endpoint += pos 199 | if endpoint > end: 200 | raise _DecodeError('Truncated message.') 201 | while pos < endpoint: 202 | (element, pos) = decode_value(buffer, pos) 203 | value.append(element) 204 | if pos > endpoint: 205 | del value[-1] # Discard corrupt value. 206 | raise _DecodeError('Packed element was truncated.') 207 | return pos 208 | return DecodePackedField 209 | elif is_repeated: 210 | tag_bytes = encoder.TagBytes(field_number, wire_type) 211 | tag_len = len(tag_bytes) 212 | def DecodeRepeatedField(buffer, pos, end, message, field_dict): 213 | value = field_dict.get(key) 214 | if value is None: 215 | value = field_dict.setdefault(key, new_default(message)) 216 | while 1: 217 | (element, new_pos) = decode_value(buffer, pos) 218 | value.append(element) 219 | # Predict that the next tag is another copy of the same repeated 220 | # field. 221 | pos = new_pos + tag_len 222 | if buffer[new_pos:pos] != tag_bytes or new_pos >= end: 223 | # Prediction failed. Return. 
224 | if new_pos > end: 225 | raise _DecodeError('Truncated message.') 226 | return new_pos 227 | return DecodeRepeatedField 228 | else: 229 | def DecodeField(buffer, pos, end, message, field_dict): 230 | (field_dict[key], pos) = decode_value(buffer, pos) 231 | if pos > end: 232 | del field_dict[key] # Discard corrupt value. 233 | raise _DecodeError('Truncated message.') 234 | return pos 235 | return DecodeField 236 | 237 | return SpecificDecoder 238 | 239 | 240 | def _ModifiedDecoder(wire_type, decode_value, modify_value): 241 | """Like SimpleDecoder but additionally invokes modify_value on every value 242 | before storing it. Usually modify_value is ZigZagDecode. 243 | """ 244 | 245 | # Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but 246 | # not enough to make a significant difference. 247 | 248 | def InnerDecode(buffer, pos): 249 | (result, new_pos) = decode_value(buffer, pos) 250 | return (modify_value(result), new_pos) 251 | return _SimpleDecoder(wire_type, InnerDecode) 252 | 253 | 254 | def _StructPackDecoder(wire_type, format): 255 | """Return a constructor for a decoder for a fixed-width field. 256 | 257 | Args: 258 | wire_type: The field's wire type. 259 | format: The format string to pass to struct.unpack(). 260 | """ 261 | 262 | value_size = struct.calcsize(format) 263 | local_unpack = struct.unpack 264 | 265 | # Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but 266 | # not enough to make a significant difference. 267 | 268 | # Note that we expect someone up-stack to catch struct.error and convert 269 | # it to _DecodeError -- this way we don't have to set up exception- 270 | # handling blocks every time we parse one value. 
271 | 272 | def InnerDecode(buffer, pos): 273 | new_pos = pos + value_size 274 | result = local_unpack(format, buffer[pos:new_pos])[0] 275 | return (result, new_pos) 276 | return _SimpleDecoder(wire_type, InnerDecode) 277 | 278 | 279 | def _FloatDecoder(): 280 | """Returns a decoder for a float field. 281 | 282 | This code works around a bug in struct.unpack for non-finite 32-bit 283 | floating-point values. 284 | """ 285 | 286 | local_unpack = struct.unpack 287 | 288 | def InnerDecode(buffer, pos): 289 | # We expect a 32-bit value in little-endian byte order. Bit 1 is the sign 290 | # bit, bits 2-9 represent the exponent, and bits 10-32 are the significand. 291 | new_pos = pos + 4 292 | float_bytes = buffer[pos:new_pos] 293 | 294 | # If this value has all its exponent bits set, then it's non-finite. 295 | # In Python 2.4, struct.unpack will convert it to a finite 64-bit value. 296 | # To avoid that, we parse it specially. 297 | if ((float_bytes[3] in '\x7F\xFF') 298 | and (float_bytes[2] >= '\x80')): 299 | # If at least one significand bit is set... 300 | if float_bytes[0:3] != '\x00\x00\x80': 301 | return (_NAN, new_pos) 302 | # If sign bit is set... 303 | if float_bytes[3] == '\xFF': 304 | return (_NEG_INF, new_pos) 305 | return (_POS_INF, new_pos) 306 | 307 | # Note that we expect someone up-stack to catch struct.error and convert 308 | # it to _DecodeError -- this way we don't have to set up exception- 309 | # handling blocks every time we parse one value. 310 | result = local_unpack('= '\xF0') 334 | and (double_bytes[0:7] != '\x00\x00\x00\x00\x00\x00\xF0')): 335 | return (_NAN, new_pos) 336 | 337 | # Note that we expect someone up-stack to catch struct.error and convert 338 | # it to _DecodeError -- this way we don't have to set up exception- 339 | # handling blocks every time we parse one value. 
340 | result = local_unpack(' end: 396 | raise _DecodeError('Truncated string.') 397 | value.append(local_unicode(buffer[pos:new_pos], 'utf-8')) 398 | # Predict that the next tag is another copy of the same repeated field. 399 | pos = new_pos + tag_len 400 | if buffer[new_pos:pos] != tag_bytes or new_pos == end: 401 | # Prediction failed. Return. 402 | return new_pos 403 | return DecodeRepeatedField 404 | else: 405 | def DecodeField(buffer, pos, end, message, field_dict): 406 | (size, pos) = local_DecodeVarint(buffer, pos) 407 | new_pos = pos + size 408 | if new_pos > end: 409 | raise _DecodeError('Truncated string.') 410 | field_dict[key] = local_unicode(buffer[pos:new_pos], 'utf-8') 411 | return new_pos 412 | return DecodeField 413 | 414 | 415 | def BytesDecoder(field_number, is_repeated, is_packed, key, new_default): 416 | """Returns a decoder for a bytes field.""" 417 | 418 | local_DecodeVarint = _DecodeVarint 419 | 420 | assert not is_packed 421 | if is_repeated: 422 | tag_bytes = encoder.TagBytes(field_number, 423 | wire_format.WIRETYPE_LENGTH_DELIMITED) 424 | tag_len = len(tag_bytes) 425 | def DecodeRepeatedField(buffer, pos, end, message, field_dict): 426 | value = field_dict.get(key) 427 | if value is None: 428 | value = field_dict.setdefault(key, new_default(message)) 429 | while 1: 430 | (size, pos) = local_DecodeVarint(buffer, pos) 431 | new_pos = pos + size 432 | if new_pos > end: 433 | raise _DecodeError('Truncated string.') 434 | value.append(buffer[pos:new_pos]) 435 | # Predict that the next tag is another copy of the same repeated field. 436 | pos = new_pos + tag_len 437 | if buffer[new_pos:pos] != tag_bytes or new_pos == end: 438 | # Prediction failed. Return. 
439 | return new_pos 440 | return DecodeRepeatedField 441 | else: 442 | def DecodeField(buffer, pos, end, message, field_dict): 443 | (size, pos) = local_DecodeVarint(buffer, pos) 444 | new_pos = pos + size 445 | if new_pos > end: 446 | raise _DecodeError('Truncated string.') 447 | field_dict[key] = buffer[pos:new_pos] 448 | return new_pos 449 | return DecodeField 450 | 451 | 452 | def GroupDecoder(field_number, is_repeated, is_packed, key, new_default): 453 | """Returns a decoder for a group field.""" 454 | 455 | end_tag_bytes = encoder.TagBytes(field_number, 456 | wire_format.WIRETYPE_END_GROUP) 457 | end_tag_len = len(end_tag_bytes) 458 | 459 | assert not is_packed 460 | if is_repeated: 461 | tag_bytes = encoder.TagBytes(field_number, 462 | wire_format.WIRETYPE_START_GROUP) 463 | tag_len = len(tag_bytes) 464 | def DecodeRepeatedField(buffer, pos, end, message, field_dict): 465 | value = field_dict.get(key) 466 | if value is None: 467 | value = field_dict.setdefault(key, new_default(message)) 468 | while 1: 469 | value = field_dict.get(key) 470 | if value is None: 471 | value = field_dict.setdefault(key, new_default(message)) 472 | # Read sub-message. 473 | pos = value.add()._InternalParse(buffer, pos, end) 474 | # Read end tag. 475 | new_pos = pos+end_tag_len 476 | if buffer[pos:new_pos] != end_tag_bytes or new_pos > end: 477 | raise _DecodeError('Missing group end tag.') 478 | # Predict that the next tag is another copy of the same repeated field. 479 | pos = new_pos + tag_len 480 | if buffer[new_pos:pos] != tag_bytes or new_pos == end: 481 | # Prediction failed. Return. 482 | return new_pos 483 | return DecodeRepeatedField 484 | else: 485 | def DecodeField(buffer, pos, end, message, field_dict): 486 | value = field_dict.get(key) 487 | if value is None: 488 | value = field_dict.setdefault(key, new_default(message)) 489 | # Read sub-message. 490 | pos = value._InternalParse(buffer, pos, end) 491 | # Read end tag. 
492 | new_pos = pos+end_tag_len 493 | if buffer[pos:new_pos] != end_tag_bytes or new_pos > end: 494 | raise _DecodeError('Missing group end tag.') 495 | return new_pos 496 | return DecodeField 497 | 498 | 499 | def MessageDecoder(field_number, is_repeated, is_packed, key, new_default): 500 | """Returns a decoder for a message field.""" 501 | 502 | local_DecodeVarint = _DecodeVarint 503 | 504 | assert not is_packed 505 | if is_repeated: 506 | tag_bytes = encoder.TagBytes(field_number, 507 | wire_format.WIRETYPE_LENGTH_DELIMITED) 508 | tag_len = len(tag_bytes) 509 | def DecodeRepeatedField(buffer, pos, end, message, field_dict): 510 | value = field_dict.get(key) 511 | if value is None: 512 | value = field_dict.setdefault(key, new_default(message)) 513 | while 1: 514 | value = field_dict.get(key) 515 | if value is None: 516 | value = field_dict.setdefault(key, new_default(message)) 517 | # Read length. 518 | (size, pos) = local_DecodeVarint(buffer, pos) 519 | new_pos = pos + size 520 | if new_pos > end: 521 | raise _DecodeError('Truncated message.') 522 | # Read sub-message. 523 | if value.add()._InternalParse(buffer, pos, new_pos) != new_pos: 524 | # The only reason _InternalParse would return early is if it 525 | # encountered an end-group tag. 526 | raise _DecodeError('Unexpected end-group tag.') 527 | # Predict that the next tag is another copy of the same repeated field. 528 | pos = new_pos + tag_len 529 | if buffer[new_pos:pos] != tag_bytes or new_pos == end: 530 | # Prediction failed. Return. 531 | return new_pos 532 | return DecodeRepeatedField 533 | else: 534 | def DecodeField(buffer, pos, end, message, field_dict): 535 | value = field_dict.get(key) 536 | if value is None: 537 | value = field_dict.setdefault(key, new_default(message)) 538 | # Read length. 539 | (size, pos) = local_DecodeVarint(buffer, pos) 540 | new_pos = pos + size 541 | if new_pos > end: 542 | raise _DecodeError('Truncated message.') 543 | # Read sub-message. 
544 | if value._InternalParse(buffer, pos, new_pos) != new_pos: 545 | # The only reason _InternalParse would return early is if it encountered 546 | # an end-group tag. 547 | raise _DecodeError('Unexpected end-group tag.') 548 | return new_pos 549 | return DecodeField 550 | 551 | 552 | # -------------------------------------------------------------------- 553 | 554 | MESSAGE_SET_ITEM_TAG = encoder.TagBytes(1, wire_format.WIRETYPE_START_GROUP) 555 | 556 | def MessageSetItemDecoder(extensions_by_number): 557 | """Returns a decoder for a MessageSet item. 558 | 559 | The parameter is the _extensions_by_number map for the message class. 560 | 561 | The message set message looks like this: 562 | message MessageSet { 563 | repeated group Item = 1 { 564 | required int32 type_id = 2; 565 | required string message = 3; 566 | } 567 | } 568 | """ 569 | 570 | type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT) 571 | message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED) 572 | item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP) 573 | 574 | local_ReadTag = ReadTag 575 | local_DecodeVarint = _DecodeVarint 576 | local_SkipField = SkipField 577 | 578 | def DecodeItem(buffer, pos, end, message, field_dict): 579 | type_id = -1 580 | message_start = -1 581 | message_end = -1 582 | 583 | # Technically, type_id and message can appear in any order, so we need 584 | # a little loop here. 
585 | while 1: 586 | (tag_bytes, pos) = local_ReadTag(buffer, pos) 587 | if tag_bytes == type_id_tag_bytes: 588 | (type_id, pos) = local_DecodeVarint(buffer, pos) 589 | elif tag_bytes == message_tag_bytes: 590 | (size, message_start) = local_DecodeVarint(buffer, pos) 591 | pos = message_end = message_start + size 592 | elif tag_bytes == item_end_tag_bytes: 593 | break 594 | else: 595 | pos = SkipField(buffer, pos, end, tag_bytes) 596 | if pos == -1: 597 | raise _DecodeError('Missing group end tag.') 598 | 599 | if pos > end: 600 | raise _DecodeError('Truncated message.') 601 | 602 | if type_id == -1: 603 | raise _DecodeError('MessageSet item missing type_id.') 604 | if message_start == -1: 605 | raise _DecodeError('MessageSet item missing message.') 606 | 607 | extension = extensions_by_number.get(type_id) 608 | if extension is not None: 609 | value = field_dict.get(extension) 610 | if value is None: 611 | value = field_dict.setdefault( 612 | extension, extension.message_type._concrete_class()) 613 | if value._InternalParse(buffer, message_start,message_end) != message_end: 614 | # The only reason _InternalParse would return early is if it encountered 615 | # an end-group tag. 616 | raise _DecodeError('Unexpected end-group tag.') 617 | 618 | return pos 619 | 620 | return DecodeItem 621 | 622 | # -------------------------------------------------------------------- 623 | # Optimization is not as heavy here because calls to SkipField() are rare, 624 | # except for handling end-group tags. 625 | 626 | def _SkipVarint(buffer, pos, end): 627 | """Skip a varint value. Returns the new position.""" 628 | 629 | while ord(buffer[pos]) & 0x80: 630 | pos += 1 631 | pos += 1 632 | if pos > end: 633 | raise _DecodeError('Truncated message.') 634 | return pos 635 | 636 | def _SkipFixed64(buffer, pos, end): 637 | """Skip a fixed64 value. 
Returns the new position.""" 638 | 639 | pos += 8 640 | if pos > end: 641 | raise _DecodeError('Truncated message.') 642 | return pos 643 | 644 | def _SkipLengthDelimited(buffer, pos, end): 645 | """Skip a length-delimited value. Returns the new position.""" 646 | 647 | (size, pos) = _DecodeVarint(buffer, pos) 648 | pos += size 649 | if pos > end: 650 | raise _DecodeError('Truncated message.') 651 | return pos 652 | 653 | def _SkipGroup(buffer, pos, end): 654 | """Skip sub-group. Returns the new position.""" 655 | 656 | while 1: 657 | (tag_bytes, pos) = ReadTag(buffer, pos) 658 | new_pos = SkipField(buffer, pos, end, tag_bytes) 659 | if new_pos == -1: 660 | return pos 661 | pos = new_pos 662 | 663 | def _EndGroup(buffer, pos, end): 664 | """Skipping an END_GROUP tag returns -1 to tell the parent loop to break.""" 665 | 666 | return -1 667 | 668 | def _SkipFixed32(buffer, pos, end): 669 | """Skip a fixed32 value. Returns the new position.""" 670 | 671 | pos += 4 672 | if pos > end: 673 | raise _DecodeError('Truncated message.') 674 | return pos 675 | 676 | def _RaiseInvalidWireType(buffer, pos, end): 677 | """Skip function for unknown wire types. Raises an exception.""" 678 | 679 | raise _DecodeError('Tag had invalid wire type.') 680 | 681 | def _FieldSkipper(): 682 | """Constructs the SkipField function.""" 683 | 684 | WIRETYPE_TO_SKIPPER = [ 685 | _SkipVarint, 686 | _SkipFixed64, 687 | _SkipLengthDelimited, 688 | _SkipGroup, 689 | _EndGroup, 690 | _SkipFixed32, 691 | _RaiseInvalidWireType, 692 | _RaiseInvalidWireType, 693 | ] 694 | 695 | wiretype_mask = wire_format.TAG_TYPE_MASK 696 | local_ord = ord 697 | 698 | def SkipField(buffer, pos, end, tag_bytes): 699 | """Skips a field with the specified tag. 700 | 701 | |pos| should point to the byte immediately after the tag. 702 | 703 | Returns: 704 | The new position (after the tag value), or -1 if the tag is an end-group 705 | tag (in which case the calling loop should break). 
706 | """ 707 | 708 | # The wire type is always in the first byte since varints are little-endian. 709 | wire_type = local_ord(tag_bytes[0]) & wiretype_mask 710 | return WIRETYPE_TO_SKIPPER[wire_type](buffer, pos, end) 711 | 712 | return SkipField 713 | 714 | SkipField = _FieldSkipper() 715 | -------------------------------------------------------------------------------- /lib/google/protobuf/internal/decoder.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/google/protobuf/internal/decoder.pyc -------------------------------------------------------------------------------- /lib/google/protobuf/internal/encoder.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/google/protobuf/internal/encoder.pyc -------------------------------------------------------------------------------- /lib/google/protobuf/internal/message_listener.py: -------------------------------------------------------------------------------- 1 | # Protocol Buffers - Google's data interchange format 2 | # Copyright 2008 Google Inc. All rights reserved. 3 | # http://code.google.com/p/protobuf/ 4 | # 5 | # Redistribution and use in source and binary forms, with or without 6 | # modification, are permitted provided that the following conditions are 7 | # met: 8 | # 9 | # * Redistributions of source code must retain the above copyright 10 | # notice, this list of conditions and the following disclaimer. 11 | # * Redistributions in binary form must reproduce the above 12 | # copyright notice, this list of conditions and the following disclaimer 13 | # in the documentation and/or other materials provided with the 14 | # distribution. 15 | # * Neither the name of Google Inc. 
nor the names of its 16 | # contributors may be used to endorse or promote products derived from 17 | # this software without specific prior written permission. 18 | # 19 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | """Defines a listener interface for observing certain 32 | state transitions on Message objects. 33 | 34 | Also defines a null implementation of this interface. 35 | """ 36 | 37 | __author__ = 'robinson@google.com (Will Robinson)' 38 | 39 | 40 | class MessageListener(object): 41 | 42 | """Listens for modifications made to a message. Meant to be registered via 43 | Message._SetListener(). 44 | 45 | Attributes: 46 | dirty: If True, then calling Modified() would be a no-op. This can be 47 | used to avoid these calls entirely in the common case. 48 | """ 49 | 50 | def Modified(self): 51 | """Called every time the message is modified in such a way that the parent 52 | message may need to be updated. This currently means either: 53 | (a) The message was modified for the first time, so the parent message 54 | should henceforth mark the message as present. 55 | (b) The message's cached byte size became dirty -- i.e. 
the message was 56 | modified for the first time after a previous call to ByteSize(). 57 | Therefore the parent should also mark its byte size as dirty. 58 | Note that (a) implies (b), since new objects start out with a client cached 59 | size (zero). However, we document (a) explicitly because it is important. 60 | 61 | Modified() will *only* be called in response to one of these two events -- 62 | not every time the sub-message is modified. 63 | 64 | Note that if the listener's |dirty| attribute is true, then calling 65 | Modified at the moment would be a no-op, so it can be skipped. Performance- 66 | sensitive callers should check this attribute directly before calling since 67 | it will be true most of the time. 68 | """ 69 | 70 | raise NotImplementedError 71 | 72 | 73 | class NullMessageListener(object): 74 | 75 | """No-op MessageListener implementation.""" 76 | 77 | def Modified(self): 78 | pass 79 | -------------------------------------------------------------------------------- /lib/google/protobuf/internal/message_listener.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/google/protobuf/internal/message_listener.pyc -------------------------------------------------------------------------------- /lib/google/protobuf/internal/python_message.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/google/protobuf/internal/python_message.pyc -------------------------------------------------------------------------------- /lib/google/protobuf/internal/type_checkers.py: -------------------------------------------------------------------------------- 1 | # Protocol Buffers - Google's data interchange format 2 | # Copyright 2008 Google Inc. All rights reserved. 
3 | # http://code.google.com/p/protobuf/ 4 | # 5 | # Redistribution and use in source and binary forms, with or without 6 | # modification, are permitted provided that the following conditions are 7 | # met: 8 | # 9 | # * Redistributions of source code must retain the above copyright 10 | # notice, this list of conditions and the following disclaimer. 11 | # * Redistributions in binary form must reproduce the above 12 | # copyright notice, this list of conditions and the following disclaimer 13 | # in the documentation and/or other materials provided with the 14 | # distribution. 15 | # * Neither the name of Google Inc. nor the names of its 16 | # contributors may be used to endorse or promote products derived from 17 | # this software without specific prior written permission. 18 | # 19 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | """Provides type checking routines. 32 | 33 | This module defines type checking utilities in the forms of dictionaries: 34 | 35 | VALUE_CHECKERS: A dictionary of field types and a value validation object. 36 | TYPE_TO_BYTE_SIZE_FN: A dictionary with field types and a size computing 37 | function. 
38 | TYPE_TO_SERIALIZE_METHOD: A dictionary with field types and serialization 39 | function. 40 | FIELD_TYPE_TO_WIRE_TYPE: A dictionary with field typed and their 41 | coresponding wire types. 42 | TYPE_TO_DESERIALIZE_METHOD: A dictionary with field types and deserialization 43 | function. 44 | """ 45 | 46 | __author__ = 'robinson@google.com (Will Robinson)' 47 | 48 | from google.protobuf.internal import decoder 49 | from google.protobuf.internal import encoder 50 | from google.protobuf.internal import wire_format 51 | from google.protobuf import descriptor 52 | 53 | _FieldDescriptor = descriptor.FieldDescriptor 54 | 55 | 56 | def GetTypeChecker(cpp_type, field_type): 57 | """Returns a type checker for a message field of the specified types. 58 | 59 | Args: 60 | cpp_type: C++ type of the field (see descriptor.py). 61 | field_type: Protocol message field type (see descriptor.py). 62 | 63 | Returns: 64 | An instance of TypeChecker which can be used to verify the types 65 | of values assigned to a field of the specified type. 66 | """ 67 | if (cpp_type == _FieldDescriptor.CPPTYPE_STRING and 68 | field_type == _FieldDescriptor.TYPE_STRING): 69 | return UnicodeValueChecker() 70 | return _VALUE_CHECKERS[cpp_type] 71 | 72 | 73 | # None of the typecheckers below make any attempt to guard against people 74 | # subclassing builtin types and doing weird things. We're not trying to 75 | # protect against malicious clients here, just people accidentally shooting 76 | # themselves in the foot in obvious ways. 77 | 78 | class TypeChecker(object): 79 | 80 | """Type checker used to catch type errors as early as possible 81 | when the client is setting scalar fields in protocol messages. 
82 | """ 83 | 84 | def __init__(self, *acceptable_types): 85 | self._acceptable_types = acceptable_types 86 | 87 | def CheckValue(self, proposed_value): 88 | if not isinstance(proposed_value, self._acceptable_types): 89 | message = ('%.1024r has type %s, but expected one of: %s' % 90 | (proposed_value, type(proposed_value), self._acceptable_types)) 91 | raise TypeError(message) 92 | 93 | 94 | # IntValueChecker and its subclasses perform integer type-checks 95 | # and bounds-checks. 96 | class IntValueChecker(object): 97 | 98 | """Checker used for integer fields. Performs type-check and range check.""" 99 | 100 | def CheckValue(self, proposed_value): 101 | if not isinstance(proposed_value, (int, long)): 102 | message = ('%.1024r has type %s, but expected one of: %s' % 103 | (proposed_value, type(proposed_value), (int, long))) 104 | raise TypeError(message) 105 | if not self._MIN <= proposed_value <= self._MAX: 106 | raise ValueError('Value out of range: %d' % proposed_value) 107 | 108 | 109 | class UnicodeValueChecker(object): 110 | 111 | """Checker used for string fields.""" 112 | 113 | def CheckValue(self, proposed_value): 114 | if not isinstance(proposed_value, (str, unicode)): 115 | message = ('%.1024r has type %s, but expected one of: %s' % 116 | (proposed_value, type(proposed_value), (str, unicode))) 117 | raise TypeError(message) 118 | 119 | # If the value is of type 'str' make sure that it is in 7-bit ASCII 120 | # encoding. 121 | if isinstance(proposed_value, str): 122 | try: 123 | unicode(proposed_value, 'ascii') 124 | except UnicodeDecodeError: 125 | raise ValueError('%.1024r has type str, but isn\'t in 7-bit ASCII ' 126 | 'encoding. Non-ASCII strings must be converted to ' 127 | 'unicode objects before being added.' % 128 | (proposed_value)) 129 | 130 | 131 | class Int32ValueChecker(IntValueChecker): 132 | # We're sure to use ints instead of longs here since comparison may be more 133 | # efficient. 
134 | _MIN = -2147483648 135 | _MAX = 2147483647 136 | 137 | 138 | class Uint32ValueChecker(IntValueChecker): 139 | _MIN = 0 140 | _MAX = (1 << 32) - 1 141 | 142 | 143 | class Int64ValueChecker(IntValueChecker): 144 | _MIN = -(1 << 63) 145 | _MAX = (1 << 63) - 1 146 | 147 | 148 | class Uint64ValueChecker(IntValueChecker): 149 | _MIN = 0 150 | _MAX = (1 << 64) - 1 151 | 152 | 153 | # Type-checkers for all scalar CPPTYPEs. 154 | _VALUE_CHECKERS = { 155 | _FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(), 156 | _FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(), 157 | _FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(), 158 | _FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(), 159 | _FieldDescriptor.CPPTYPE_DOUBLE: TypeChecker( 160 | float, int, long), 161 | _FieldDescriptor.CPPTYPE_FLOAT: TypeChecker( 162 | float, int, long), 163 | _FieldDescriptor.CPPTYPE_BOOL: TypeChecker(bool, int), 164 | _FieldDescriptor.CPPTYPE_ENUM: Int32ValueChecker(), 165 | _FieldDescriptor.CPPTYPE_STRING: TypeChecker(str), 166 | } 167 | 168 | 169 | # Map from field type to a function F, such that F(field_num, value) 170 | # gives the total byte size for a value of the given type. This 171 | # byte size includes tag information and any other additional space 172 | # associated with serializing "value". 
173 | TYPE_TO_BYTE_SIZE_FN = { 174 | _FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize, 175 | _FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize, 176 | _FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize, 177 | _FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize, 178 | _FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize, 179 | _FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize, 180 | _FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize, 181 | _FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize, 182 | _FieldDescriptor.TYPE_STRING: wire_format.StringByteSize, 183 | _FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize, 184 | _FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize, 185 | _FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize, 186 | _FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize, 187 | _FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize, 188 | _FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize, 189 | _FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize, 190 | _FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize, 191 | _FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize 192 | } 193 | 194 | 195 | # Maps from field types to encoder constructors. 
196 | TYPE_TO_ENCODER = { 197 | _FieldDescriptor.TYPE_DOUBLE: encoder.DoubleEncoder, 198 | _FieldDescriptor.TYPE_FLOAT: encoder.FloatEncoder, 199 | _FieldDescriptor.TYPE_INT64: encoder.Int64Encoder, 200 | _FieldDescriptor.TYPE_UINT64: encoder.UInt64Encoder, 201 | _FieldDescriptor.TYPE_INT32: encoder.Int32Encoder, 202 | _FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Encoder, 203 | _FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Encoder, 204 | _FieldDescriptor.TYPE_BOOL: encoder.BoolEncoder, 205 | _FieldDescriptor.TYPE_STRING: encoder.StringEncoder, 206 | _FieldDescriptor.TYPE_GROUP: encoder.GroupEncoder, 207 | _FieldDescriptor.TYPE_MESSAGE: encoder.MessageEncoder, 208 | _FieldDescriptor.TYPE_BYTES: encoder.BytesEncoder, 209 | _FieldDescriptor.TYPE_UINT32: encoder.UInt32Encoder, 210 | _FieldDescriptor.TYPE_ENUM: encoder.EnumEncoder, 211 | _FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Encoder, 212 | _FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Encoder, 213 | _FieldDescriptor.TYPE_SINT32: encoder.SInt32Encoder, 214 | _FieldDescriptor.TYPE_SINT64: encoder.SInt64Encoder, 215 | } 216 | 217 | 218 | # Maps from field types to sizer constructors. 
219 | TYPE_TO_SIZER = { 220 | _FieldDescriptor.TYPE_DOUBLE: encoder.DoubleSizer, 221 | _FieldDescriptor.TYPE_FLOAT: encoder.FloatSizer, 222 | _FieldDescriptor.TYPE_INT64: encoder.Int64Sizer, 223 | _FieldDescriptor.TYPE_UINT64: encoder.UInt64Sizer, 224 | _FieldDescriptor.TYPE_INT32: encoder.Int32Sizer, 225 | _FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Sizer, 226 | _FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Sizer, 227 | _FieldDescriptor.TYPE_BOOL: encoder.BoolSizer, 228 | _FieldDescriptor.TYPE_STRING: encoder.StringSizer, 229 | _FieldDescriptor.TYPE_GROUP: encoder.GroupSizer, 230 | _FieldDescriptor.TYPE_MESSAGE: encoder.MessageSizer, 231 | _FieldDescriptor.TYPE_BYTES: encoder.BytesSizer, 232 | _FieldDescriptor.TYPE_UINT32: encoder.UInt32Sizer, 233 | _FieldDescriptor.TYPE_ENUM: encoder.EnumSizer, 234 | _FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Sizer, 235 | _FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Sizer, 236 | _FieldDescriptor.TYPE_SINT32: encoder.SInt32Sizer, 237 | _FieldDescriptor.TYPE_SINT64: encoder.SInt64Sizer, 238 | } 239 | 240 | 241 | # Maps from field type to a decoder constructor. 
242 | TYPE_TO_DECODER = { 243 | _FieldDescriptor.TYPE_DOUBLE: decoder.DoubleDecoder, 244 | _FieldDescriptor.TYPE_FLOAT: decoder.FloatDecoder, 245 | _FieldDescriptor.TYPE_INT64: decoder.Int64Decoder, 246 | _FieldDescriptor.TYPE_UINT64: decoder.UInt64Decoder, 247 | _FieldDescriptor.TYPE_INT32: decoder.Int32Decoder, 248 | _FieldDescriptor.TYPE_FIXED64: decoder.Fixed64Decoder, 249 | _FieldDescriptor.TYPE_FIXED32: decoder.Fixed32Decoder, 250 | _FieldDescriptor.TYPE_BOOL: decoder.BoolDecoder, 251 | _FieldDescriptor.TYPE_STRING: decoder.StringDecoder, 252 | _FieldDescriptor.TYPE_GROUP: decoder.GroupDecoder, 253 | _FieldDescriptor.TYPE_MESSAGE: decoder.MessageDecoder, 254 | _FieldDescriptor.TYPE_BYTES: decoder.BytesDecoder, 255 | _FieldDescriptor.TYPE_UINT32: decoder.UInt32Decoder, 256 | _FieldDescriptor.TYPE_ENUM: decoder.EnumDecoder, 257 | _FieldDescriptor.TYPE_SFIXED32: decoder.SFixed32Decoder, 258 | _FieldDescriptor.TYPE_SFIXED64: decoder.SFixed64Decoder, 259 | _FieldDescriptor.TYPE_SINT32: decoder.SInt32Decoder, 260 | _FieldDescriptor.TYPE_SINT64: decoder.SInt64Decoder, 261 | } 262 | 263 | # Maps from field type to expected wiretype. 
264 | FIELD_TYPE_TO_WIRE_TYPE = { 265 | _FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64, 266 | _FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32, 267 | _FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT, 268 | _FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT, 269 | _FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT, 270 | _FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64, 271 | _FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32, 272 | _FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT, 273 | _FieldDescriptor.TYPE_STRING: 274 | wire_format.WIRETYPE_LENGTH_DELIMITED, 275 | _FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP, 276 | _FieldDescriptor.TYPE_MESSAGE: 277 | wire_format.WIRETYPE_LENGTH_DELIMITED, 278 | _FieldDescriptor.TYPE_BYTES: 279 | wire_format.WIRETYPE_LENGTH_DELIMITED, 280 | _FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT, 281 | _FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT, 282 | _FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32, 283 | _FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64, 284 | _FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT, 285 | _FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT, 286 | } 287 | -------------------------------------------------------------------------------- /lib/google/protobuf/internal/type_checkers.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/google/protobuf/internal/type_checkers.pyc -------------------------------------------------------------------------------- /lib/google/protobuf/internal/wire_format.py: -------------------------------------------------------------------------------- 1 | # Protocol Buffers - Google's data interchange format 2 | # Copyright 2008 Google Inc. All rights reserved. 
3 | # http://code.google.com/p/protobuf/ 4 | # 5 | # Redistribution and use in source and binary forms, with or without 6 | # modification, are permitted provided that the following conditions are 7 | # met: 8 | # 9 | # * Redistributions of source code must retain the above copyright 10 | # notice, this list of conditions and the following disclaimer. 11 | # * Redistributions in binary form must reproduce the above 12 | # copyright notice, this list of conditions and the following disclaimer 13 | # in the documentation and/or other materials provided with the 14 | # distribution. 15 | # * Neither the name of Google Inc. nor the names of its 16 | # contributors may be used to endorse or promote products derived from 17 | # this software without specific prior written permission. 18 | # 19 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | """Constants and static functions to support protocol buffer wire format.""" 32 | 33 | __author__ = 'robinson@google.com (Will Robinson)' 34 | 35 | import struct 36 | from google.protobuf import descriptor 37 | from google.protobuf import message 38 | 39 | 40 | TAG_TYPE_BITS = 3 # Number of bits used to hold type info in a proto tag. 
41 | TAG_TYPE_MASK = (1 << TAG_TYPE_BITS) - 1 # 0x7 42 | 43 | # These numbers identify the wire type of a protocol buffer value. 44 | # We use the least-significant TAG_TYPE_BITS bits of the varint-encoded 45 | # tag-and-type to store one of these WIRETYPE_* constants. 46 | # These values must match WireType enum in google/protobuf/wire_format.h. 47 | WIRETYPE_VARINT = 0 48 | WIRETYPE_FIXED64 = 1 49 | WIRETYPE_LENGTH_DELIMITED = 2 50 | WIRETYPE_START_GROUP = 3 51 | WIRETYPE_END_GROUP = 4 52 | WIRETYPE_FIXED32 = 5 53 | _WIRETYPE_MAX = 5 54 | 55 | 56 | # Bounds for various integer types. 57 | INT32_MAX = int((1 << 31) - 1) 58 | INT32_MIN = int(-(1 << 31)) 59 | UINT32_MAX = (1 << 32) - 1 60 | 61 | INT64_MAX = (1 << 63) - 1 62 | INT64_MIN = -(1 << 63) 63 | UINT64_MAX = (1 << 64) - 1 64 | 65 | # "struct" format strings that will encode/decode the specified formats. 66 | FORMAT_UINT32_LITTLE_ENDIAN = '> TAG_TYPE_BITS), (tag & TAG_TYPE_MASK) 98 | 99 | 100 | def ZigZagEncode(value): 101 | """ZigZag Transform: Encodes signed integers so that they can be 102 | effectively used with varint encoding. See wire_format.h for 103 | more details. 104 | """ 105 | if value >= 0: 106 | return value << 1 107 | return (value << 1) ^ (~0) 108 | 109 | 110 | def ZigZagDecode(value): 111 | """Inverse of ZigZagEncode().""" 112 | if not value & 0x1: 113 | return value >> 1 114 | return (value >> 1) ^ (~0) 115 | 116 | 117 | 118 | # The *ByteSize() functions below return the number of bytes required to 119 | # serialize "field number + type" information and then serialize the value. 120 | 121 | 122 | def Int32ByteSize(field_number, int32): 123 | return Int64ByteSize(field_number, int32) 124 | 125 | 126 | def Int32ByteSizeNoTag(int32): 127 | return _VarUInt64ByteSizeNoTag(0xffffffffffffffff & int32) 128 | 129 | 130 | def Int64ByteSize(field_number, int64): 131 | # Have to convert to uint before calling UInt64ByteSize(). 
132 | return UInt64ByteSize(field_number, 0xffffffffffffffff & int64) 133 | 134 | 135 | def UInt32ByteSize(field_number, uint32): 136 | return UInt64ByteSize(field_number, uint32) 137 | 138 | 139 | def UInt64ByteSize(field_number, uint64): 140 | return TagByteSize(field_number) + _VarUInt64ByteSizeNoTag(uint64) 141 | 142 | 143 | def SInt32ByteSize(field_number, int32): 144 | return UInt32ByteSize(field_number, ZigZagEncode(int32)) 145 | 146 | 147 | def SInt64ByteSize(field_number, int64): 148 | return UInt64ByteSize(field_number, ZigZagEncode(int64)) 149 | 150 | 151 | def Fixed32ByteSize(field_number, fixed32): 152 | return TagByteSize(field_number) + 4 153 | 154 | 155 | def Fixed64ByteSize(field_number, fixed64): 156 | return TagByteSize(field_number) + 8 157 | 158 | 159 | def SFixed32ByteSize(field_number, sfixed32): 160 | return TagByteSize(field_number) + 4 161 | 162 | 163 | def SFixed64ByteSize(field_number, sfixed64): 164 | return TagByteSize(field_number) + 8 165 | 166 | 167 | def FloatByteSize(field_number, flt): 168 | return TagByteSize(field_number) + 4 169 | 170 | 171 | def DoubleByteSize(field_number, double): 172 | return TagByteSize(field_number) + 8 173 | 174 | 175 | def BoolByteSize(field_number, b): 176 | return TagByteSize(field_number) + 1 177 | 178 | 179 | def EnumByteSize(field_number, enum): 180 | return UInt32ByteSize(field_number, enum) 181 | 182 | 183 | def StringByteSize(field_number, string): 184 | return BytesByteSize(field_number, string.encode('utf-8')) 185 | 186 | 187 | def BytesByteSize(field_number, b): 188 | return (TagByteSize(field_number) 189 | + _VarUInt64ByteSizeNoTag(len(b)) 190 | + len(b)) 191 | 192 | 193 | def GroupByteSize(field_number, message): 194 | return (2 * TagByteSize(field_number) # START and END group. 
195 | + message.ByteSize()) 196 | 197 | 198 | def MessageByteSize(field_number, message): 199 | return (TagByteSize(field_number) 200 | + _VarUInt64ByteSizeNoTag(message.ByteSize()) 201 | + message.ByteSize()) 202 | 203 | 204 | def MessageSetItemByteSize(field_number, msg): 205 | # First compute the sizes of the tags. 206 | # There are 2 tags for the beginning and ending of the repeated group, that 207 | # is field number 1, one with field number 2 (type_id) and one with field 208 | # number 3 (message). 209 | total_size = (2 * TagByteSize(1) + TagByteSize(2) + TagByteSize(3)) 210 | 211 | # Add the number of bytes for type_id. 212 | total_size += _VarUInt64ByteSizeNoTag(field_number) 213 | 214 | message_size = msg.ByteSize() 215 | 216 | # The number of bytes for encoding the length of the message. 217 | total_size += _VarUInt64ByteSizeNoTag(message_size) 218 | 219 | # The size of the message. 220 | total_size += message_size 221 | return total_size 222 | 223 | 224 | def TagByteSize(field_number): 225 | """Returns the bytes required to serialize a tag with this field number.""" 226 | # Just pass in type 0, since the type won't affect the tag+type size. 227 | return _VarUInt64ByteSizeNoTag(PackTag(field_number, 0)) 228 | 229 | 230 | # Private helper function for the *ByteSize() functions above. 231 | 232 | def _VarUInt64ByteSizeNoTag(uint64): 233 | """Returns the number of bytes required to serialize a single varint 234 | using boundary value comparisons. (unrolled loop optimization -WPierce) 235 | uint64 must be unsigned. 
236 | """ 237 | if uint64 <= 0x7f: return 1 238 | if uint64 <= 0x3fff: return 2 239 | if uint64 <= 0x1fffff: return 3 240 | if uint64 <= 0xfffffff: return 4 241 | if uint64 <= 0x7ffffffff: return 5 242 | if uint64 <= 0x3ffffffffff: return 6 243 | if uint64 <= 0x1ffffffffffff: return 7 244 | if uint64 <= 0xffffffffffffff: return 8 245 | if uint64 <= 0x7fffffffffffffff: return 9 246 | if uint64 > UINT64_MAX: 247 | raise message.EncodeError('Value out of range: %d' % uint64) 248 | return 10 249 | 250 | 251 | NON_PACKABLE_TYPES = ( 252 | descriptor.FieldDescriptor.TYPE_STRING, 253 | descriptor.FieldDescriptor.TYPE_GROUP, 254 | descriptor.FieldDescriptor.TYPE_MESSAGE, 255 | descriptor.FieldDescriptor.TYPE_BYTES 256 | ) 257 | 258 | 259 | def IsTypePackable(field_type): 260 | """Return true iff packable = true is valid for fields of this type. 261 | 262 | Args: 263 | field_type: a FieldDescriptor::Type value. 264 | 265 | Returns: 266 | True iff fields of this type are packable. 267 | """ 268 | return field_type not in NON_PACKABLE_TYPES 269 | -------------------------------------------------------------------------------- /lib/google/protobuf/internal/wire_format.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/google/protobuf/internal/wire_format.pyc -------------------------------------------------------------------------------- /lib/google/protobuf/message.py: -------------------------------------------------------------------------------- 1 | # Protocol Buffers - Google's data interchange format 2 | # Copyright 2008 Google Inc. All rights reserved. 
3 | # http://code.google.com/p/protobuf/ 4 | # 5 | # Redistribution and use in source and binary forms, with or without 6 | # modification, are permitted provided that the following conditions are 7 | # met: 8 | # 9 | # * Redistributions of source code must retain the above copyright 10 | # notice, this list of conditions and the following disclaimer. 11 | # * Redistributions in binary form must reproduce the above 12 | # copyright notice, this list of conditions and the following disclaimer 13 | # in the documentation and/or other materials provided with the 14 | # distribution. 15 | # * Neither the name of Google Inc. nor the names of its 16 | # contributors may be used to endorse or promote products derived from 17 | # this software without specific prior written permission. 18 | # 19 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | # TODO(robinson): We should just make these methods all "pure-virtual" and move 32 | # all implementation out, into reflection.py for now. 
"""Abstract base class definition for protocol messages."""

__author__ = 'robinson@google.com (Will Robinson)'


class Error(Exception):
  """Base class for protobuf-related errors."""


class DecodeError(Error):
  """Raised when deserialization of wire-format data fails."""


class EncodeError(Error):
  """Raised when a message cannot be serialized."""


class Message(object):

  """Abstract base class for protocol messages.

  Concrete message classes are almost always emitted by the protocol
  compiler; they subclass Message and supply real implementations for
  the abstract methods declared below.  Instances additionally expose
  an Extensions attribute (with __getitem__/__setitem__) and a static
  RegisterExtension(extension_field) method.
  """

  # Keep the base class free of a per-instance __dict__; generated
  # subclasses declare their own slots.
  __slots__ = []

  # Generated subclasses overwrite this with their Descriptor object.
  DESCRIPTOR = None

  def __deepcopy__(self, memo=None):
    # A fresh instance merged from self is a deep copy; the memo dict
    # is not consulted here.
    duplicate = type(self)()
    duplicate.MergeFrom(self)
    return duplicate

  def __eq__(self, other_msg):
    raise NotImplementedError

  def __ne__(self, other_msg):
    # Delegate to __eq__ and negate; writing "self != other_msg" here
    # would recurse forever.
    return not self == other_msg

  def __hash__(self):
    raise TypeError('unhashable object')

  def __str__(self):
    raise NotImplementedError

  def __unicode__(self):
    raise NotImplementedError

  def MergeFrom(self, other_msg):
    """Merges the contents of other_msg into this message.

    Singular scalar fields set in other_msg overwrite the corresponding
    fields here, repeated fields are appended, and singular sub-messages
    and groups are merged recursively.

    Args:
      other_msg: Message to merge into the current message.
    """
    raise NotImplementedError

  def CopyFrom(self, other_msg):
    """Replaces this message's contents with those of other_msg.

    Equivalent to Clear() followed by MergeFrom(other_msg); copying a
    message onto itself is a no-op.

    Args:
      other_msg: Message to copy into the current one.
    """
    if self is other_msg:
      return
    self.Clear()
    self.MergeFrom(other_msg)

  def Clear(self):
    """Clears all data that was set in the message."""
    raise NotImplementedError

  def SetInParent(self):
    """Marks this sub-message as present in its parent.

    Assigning a field of a sub-message does this automatically; call it
    explicitly only to make an empty sub-message count as present.  If
    you find yourself using this, you may want to reconsider your
    design.
    """
    raise NotImplementedError

  def IsInitialized(self):
    """Returns True iff every required field of the message is set."""
    raise NotImplementedError

  def MergeFromString(self, serialized):
    """Merges serialized protocol buffer data into this message.

    For each field found in |serialized| that is already present here:
    repeated fields are appended to, scalars are overwritten, and
    non-repeated composites are merged recursively.

    Args:
      serialized: Any object that allows buffer(serialized), i.e. one
        exposing a string of bytes via the buffer interface.

    Returns:
      The number of bytes consumed from |serialized|.  For ordinary
      messages this is len(serialized); for groups it is generally
      less, since reading stops at an END_GROUP tag (whose own bytes
      are not counted in the result).
    """
    raise NotImplementedError

  def ParseFromString(self, serialized):
    """Clears this message, then merges from the given bytes."""
    self.Clear()
    self.MergeFromString(serialized)

  def SerializeToString(self):
    """Serializes the protocol message to a binary string.

    Returns:
      The wire-format bytes of the message, provided it is initialized
      (all required fields set).

    Raises:
      message.EncodeError: if the message isn't initialized.
    """
    raise NotImplementedError

  def SerializePartialToString(self):
    """Serializes the message without checking initialization.

    Like SerializeToString, but skips the required-field check.

    Returns:
      The wire-format bytes of the (possibly partial) message.
    """
    raise NotImplementedError

  def ListFields(self):
    """Returns (FieldDescriptor, value) pairs for all non-empty fields.

    A singular field is non-empty when HasField() would return True; a
    repeated field is non-empty when it has at least one element.  The
    result is ordered by field number.
    """
    raise NotImplementedError

  def HasField(self, field_name):
    """Returns whether the named field is set.

    Raises:
      ValueError: if field_name is not defined in the message
        descriptor.
    """
    raise NotImplementedError

  def ClearField(self, field_name):
    raise NotImplementedError

  def HasExtension(self, extension_handle):
    raise NotImplementedError

  def ClearExtension(self, extension_handle):
    raise NotImplementedError

  def ByteSize(self):
    """Returns the serialized size of this message.

    Recursively calls ByteSize() on all contained messages.
    """
    raise NotImplementedError

  def _SetListener(self, message_listener):
    """Internal: registers a state-transition listener.

    Clients should not call this directly.  The listener implements the
    MessageListener interface in internal/message_listener.py and is
    used to register back-edges from children to parents so that "has"
    bits and byte-size-dirty bits propagate whenever a descendant is
    modified.  Passing None unregisters any existing listener; only one
    listener is kept at a time.
    """
    raise NotImplementedError


# --- repository dump boundary: /lib/google/protobuf/message.pyc (binary) ---
# --- repository dump boundary: /lib/google/protobuf/reflection.py ---
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
11 | # * Redistributions in binary form must reproduce the above 12 | # copyright notice, this list of conditions and the following disclaimer 13 | # in the documentation and/or other materials provided with the 14 | # distribution. 15 | # * Neither the name of Google Inc. nor the names of its 16 | # contributors may be used to endorse or promote products derived from 17 | # this software without specific prior written permission. 18 | # 19 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | # This code is meant to work on Python 2.4 and above only. 32 | 33 | """Contains a metaclass and helper functions used to create 34 | protocol message classes from Descriptor objects at runtime. 35 | 36 | Recall that a metaclass is the "type" of a class. 37 | (A class is to a metaclass what an instance is to a class.) 38 | 39 | In this case, we use the GeneratedProtocolMessageType metaclass 40 | to inject all the useful functionality into the classes 41 | output by the protocol compiler at compile-time. 42 | 43 | The upshot of all this is that the real implementation 44 | details for ALL pure-Python protocol buffers are *here in 45 | this file*. 
46 | """ 47 | 48 | __author__ = 'robinson@google.com (Will Robinson)' 49 | 50 | 51 | from google.protobuf.internal import api_implementation 52 | from google.protobuf import descriptor as descriptor_mod 53 | _FieldDescriptor = descriptor_mod.FieldDescriptor 54 | 55 | 56 | if api_implementation.Type() == 'cpp': 57 | from google.protobuf.internal import cpp_message 58 | _NewMessage = cpp_message.NewMessage 59 | _InitMessage = cpp_message.InitMessage 60 | else: 61 | from google.protobuf.internal import python_message 62 | _NewMessage = python_message.NewMessage 63 | _InitMessage = python_message.InitMessage 64 | 65 | 66 | class GeneratedProtocolMessageType(type): 67 | 68 | """Metaclass for protocol message classes created at runtime from Descriptors. 69 | 70 | We add implementations for all methods described in the Message class. We 71 | also create properties to allow getting/setting all fields in the protocol 72 | message. Finally, we create slots to prevent users from accidentally 73 | "setting" nonexistent fields in the protocol message, which then wouldn't get 74 | serialized / deserialized properly. 75 | 76 | The protocol compiler currently uses this metaclass to create protocol 77 | message classes at runtime. Clients can also manually create their own 78 | classes at runtime, as in this example: 79 | 80 | mydescriptor = Descriptor(.....) 81 | class MyProtoClass(Message): 82 | __metaclass__ = GeneratedProtocolMessageType 83 | DESCRIPTOR = mydescriptor 84 | myproto_instance = MyProtoClass() 85 | myproto.foo_field = 23 86 | ... 87 | """ 88 | 89 | # Must be consistent with the protocol-compiler code in 90 | # proto2/compiler/internal/generator.*. 91 | _DESCRIPTOR_KEY = 'DESCRIPTOR' 92 | 93 | def __new__(cls, name, bases, dictionary): 94 | """Custom allocation for runtime-generated class types. 95 | 96 | We override __new__ because this is apparently the only place 97 | where we can meaningfully set __slots__ on the class we're creating(?). 
98 | (The interplay between metaclasses and slots is not very well-documented). 99 | 100 | Args: 101 | name: Name of the class (ignored, but required by the 102 | metaclass protocol). 103 | bases: Base classes of the class we're constructing. 104 | (Should be message.Message). We ignore this field, but 105 | it's required by the metaclass protocol 106 | dictionary: The class dictionary of the class we're 107 | constructing. dictionary[_DESCRIPTOR_KEY] must contain 108 | a Descriptor object describing this protocol message 109 | type. 110 | 111 | Returns: 112 | Newly-allocated class. 113 | """ 114 | descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY] 115 | _NewMessage(descriptor, dictionary) 116 | superclass = super(GeneratedProtocolMessageType, cls) 117 | 118 | new_class = superclass.__new__(cls, name, bases, dictionary) 119 | setattr(descriptor, '_concrete_class', new_class) 120 | return new_class 121 | 122 | def __init__(cls, name, bases, dictionary): 123 | """Here we perform the majority of our work on the class. 124 | We add enum getters, an __init__ method, implementations 125 | of all Message methods, and properties for all fields 126 | in the protocol type. 127 | 128 | Args: 129 | name: Name of the class (ignored, but required by the 130 | metaclass protocol). 131 | bases: Base classes of the class we're constructing. 132 | (Should be message.Message). We ignore this field, but 133 | it's required by the metaclass protocol 134 | dictionary: The class dictionary of the class we're 135 | constructing. dictionary[_DESCRIPTOR_KEY] must contain 136 | a Descriptor object describing this protocol message 137 | type. 
138 | """ 139 | descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY] 140 | _InitMessage(descriptor, cls) 141 | superclass = super(GeneratedProtocolMessageType, cls) 142 | superclass.__init__(name, bases, dictionary) 143 | -------------------------------------------------------------------------------- /lib/google/protobuf/reflection.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/google/protobuf/reflection.pyc -------------------------------------------------------------------------------- /lib/google/protobuf/service.py: -------------------------------------------------------------------------------- 1 | # Protocol Buffers - Google's data interchange format 2 | # Copyright 2008 Google Inc. All rights reserved. 3 | # http://code.google.com/p/protobuf/ 4 | # 5 | # Redistribution and use in source and binary forms, with or without 6 | # modification, are permitted provided that the following conditions are 7 | # met: 8 | # 9 | # * Redistributions of source code must retain the above copyright 10 | # notice, this list of conditions and the following disclaimer. 11 | # * Redistributions in binary form must reproduce the above 12 | # copyright notice, this list of conditions and the following disclaimer 13 | # in the documentation and/or other materials provided with the 14 | # distribution. 15 | # * Neither the name of Google Inc. nor the names of its 16 | # contributors may be used to endorse or promote products derived from 17 | # this software without specific prior written permission. 18 | # 19 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | # A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""DEPRECATED: Declares the RPC service interfaces.

These abstract interfaces underpin proto2 RPC services while remaining
independent of any particular RPC implementation, so proto2 services
can sit on top of a variety of transports.  Starting with version
2.3.0, RPC implementations should no longer build on these interfaces;
they should instead provide code-generator plugins that emit
implementation-specific code, which is both more natural for the
implementation in use and avoids needless layers of indirection.
"""

__author__ = 'petar@google.com (Petar Petrov)'


class RpcException(Exception):
  """Raised when a blocking RPC method call fails."""
  pass


class Service(object):

  """Abstract base interface for protocol-buffer-based RPC services.

  Concrete services (implemented by servers or as stubs) subclass this
  interface.  Its methods let callers invoke a service's methods
  without knowing the exact service type at compile time, analogous to
  the Message interface.
  """

  # NOTE: deliberately declared without self; generated classes replace
  # this with a staticmethod (see service_reflection._ServiceBuilder).
  def GetDescriptor():
    """Retrieves this service's descriptor."""
    raise NotImplementedError

  def CallMethod(self, method_descriptor, rpc_controller,
                 request, done):
    """Invokes the service method named by method_descriptor.

    When "done" is None the call blocks and the response message is
    returned directly; otherwise the call is asynchronous and "done"
    is invoked later with the response value.  A blocking call raises
    RpcException on error.

    Preconditions:
      * method_descriptor.service == GetDescriptor
      * request is of the exact class returned by
        GetRequestClass(method).
      * request must not be modified once the call has started.
      * rpc_controller is of the correct type for the RPC
        implementation in use; for stubs, that is determined by the
        stub's RpcChannel.

    Postconditions:
      * "done" runs when the method completes -- possibly before
        CallMethod() returns, possibly at some point in the future.
      * If the RPC failed, "done" receives None as the response value;
        further details are available from the RpcController.
    """
    raise NotImplementedError

  def GetRequestClass(self, method_descriptor):
    """Returns the request message class for the specified method.

    CallMethod() requires the request to be a particular Message
    subclass; this is how callers obtain that class.

    Example:
      method = service.GetDescriptor().FindMethodByName("Foo")
      request = stub.GetRequestClass(method)()
      request.ParseFromString(input)
      service.CallMethod(method, request, callback)
    """
    raise NotImplementedError

  def GetResponseClass(self, method_descriptor):
    """Returns the response message class for the specified method.

    Not strictly needed -- the RpcChannel's CallMethod constructs the
    response message itself -- but provided in case the caller wants to
    know the response type in advance.
    """
    raise NotImplementedError


class RpcController(object):

  """Mediates a single RPC method call.

  A controller's primary purpose is to expose implementation-specific
  settings and RPC-level error information.  The methods here are a
  "least common denominator" feature set expected of every
  implementation; specific implementations may offer more advanced
  features (deadline propagation, for instance).
  """

  # ---- Client-side methods ----

  def Reset(self):
    """Returns the controller to its initial state for reuse.

    Must not be called while an RPC is in progress.
    """
    raise NotImplementedError

  def Failed(self):
    """Returns True if the finished call failed.

    The possible failure reasons depend on the RPC implementation.
    Must not be called before the call has finished; when it returns
    True, the contents of the response message are undefined.
    """
    raise NotImplementedError

  def ErrorText(self):
    """If Failed is true, returns a human-readable description of the error."""
    raise NotImplementedError

  def StartCancel(self):
    """Advises the RPC system to cancel the call.

    The system may cancel immediately, after a delay, or not at all.
    If the call is canceled, "done" is still invoked and the
    controller reports the call as failed at that time.
    """
    raise NotImplementedError

  # ---- Server-side methods ----

  def SetFailed(self, reason):
    """Records a failure, making the client-side Failed() return True.

    "reason" is incorporated into the text returned by ErrorText().
    Machine-readable failure information belongs in the response
    protocol buffer instead -- do NOT use SetFailed() for it.
    """
    raise NotImplementedError

  def IsCanceled(self):
    """Returns True if the client canceled the RPC.

    When true, the server may as well give up on building a reply, but
    must still invoke the final "done" callback.
    """
    raise NotImplementedError

  def NotifyOnCancel(self, callback):
    """Arranges for callback to run when the RPC is canceled.

    The callback always fires exactly once: on cancellation, after
    normal completion if the RPC is never canceled, or immediately if
    the RPC is already canceled.  Call this no more than once per
    request.
    """
    raise NotImplementedError


class RpcChannel(object):

  """Abstract interface for an RPC channel.

  A channel represents a communication line to a service, which may be
  running on another machine.  Rather than using a channel directly,
  wrap it in a stub {@link Service}:

  Example:
    RpcChannel channel = rpcImpl.Channel("remotehost.example.com:1234")
    RpcController controller = rpcImpl.Controller()
    MyService service = MyService_Stub(channel)
    service.MyMethod(controller, request, callback)
  """

  def CallMethod(self, method_descriptor, rpc_controller,
                 request, response_class, done):
    """Calls the remote method identified by the descriptor.

    The signature mirrors Service.CallMethod(), with one relaxed
    requirement: the request need not be of a specific class, as long
    as its descriptor equals method.input_type.
    """
    raise NotImplementedError


# --- repository dump boundary: /lib/google/protobuf/service.pyc (binary) ---
# --- repository dump boundary: /lib/google/protobuf/service_reflection.py ---
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
3 | # http://code.google.com/p/protobuf/ 4 | # 5 | # Redistribution and use in source and binary forms, with or without 6 | # modification, are permitted provided that the following conditions are 7 | # met: 8 | # 9 | # * Redistributions of source code must retain the above copyright 10 | # notice, this list of conditions and the following disclaimer. 11 | # * Redistributions in binary form must reproduce the above 12 | # copyright notice, this list of conditions and the following disclaimer 13 | # in the documentation and/or other materials provided with the 14 | # distribution. 15 | # * Neither the name of Google Inc. nor the names of its 16 | # contributors may be used to endorse or promote products derived from 17 | # this software without specific prior written permission. 18 | # 19 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | """Contains metaclasses used to create protocol service and service stub 32 | classes from ServiceDescriptor objects at runtime. 33 | 34 | The GeneratedServiceType and GeneratedServiceStubType metaclasses are used to 35 | inject all useful functionality into the classes output by the protocol 36 | compiler at compile-time. 
37 | """ 38 | 39 | __author__ = 'petar@google.com (Petar Petrov)' 40 | 41 | 42 | class GeneratedServiceType(type): 43 | 44 | """Metaclass for service classes created at runtime from ServiceDescriptors. 45 | 46 | Implementations for all methods described in the Service class are added here 47 | by this class. We also create properties to allow getting/setting all fields 48 | in the protocol message. 49 | 50 | The protocol compiler currently uses this metaclass to create protocol service 51 | classes at runtime. Clients can also manually create their own classes at 52 | runtime, as in this example: 53 | 54 | mydescriptor = ServiceDescriptor(.....) 55 | class MyProtoService(service.Service): 56 | __metaclass__ = GeneratedServiceType 57 | DESCRIPTOR = mydescriptor 58 | myservice_instance = MyProtoService() 59 | ... 60 | """ 61 | 62 | _DESCRIPTOR_KEY = 'DESCRIPTOR' 63 | 64 | def __init__(cls, name, bases, dictionary): 65 | """Creates a message service class. 66 | 67 | Args: 68 | name: Name of the class (ignored, but required by the metaclass 69 | protocol). 70 | bases: Base classes of the class being constructed. 71 | dictionary: The class dictionary of the class being constructed. 72 | dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object 73 | describing this protocol service type. 74 | """ 75 | # Don't do anything if this class doesn't have a descriptor. This happens 76 | # when a service class is subclassed. 77 | if GeneratedServiceType._DESCRIPTOR_KEY not in dictionary: 78 | return 79 | descriptor = dictionary[GeneratedServiceType._DESCRIPTOR_KEY] 80 | service_builder = _ServiceBuilder(descriptor) 81 | service_builder.BuildService(cls) 82 | 83 | 84 | class GeneratedServiceStubType(GeneratedServiceType): 85 | 86 | """Metaclass for service stubs created at runtime from ServiceDescriptors. 87 | 88 | This class has similar responsibilities as GeneratedServiceType, except that 89 | it creates the service stub classes. 
90 | """ 91 | 92 | _DESCRIPTOR_KEY = 'DESCRIPTOR' 93 | 94 | def __init__(cls, name, bases, dictionary): 95 | """Creates a message service stub class. 96 | 97 | Args: 98 | name: Name of the class (ignored, here). 99 | bases: Base classes of the class being constructed. 100 | dictionary: The class dictionary of the class being constructed. 101 | dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object 102 | describing this protocol service type. 103 | """ 104 | super(GeneratedServiceStubType, cls).__init__(name, bases, dictionary) 105 | # Don't do anything if this class doesn't have a descriptor. This happens 106 | # when a service stub is subclassed. 107 | if GeneratedServiceStubType._DESCRIPTOR_KEY not in dictionary: 108 | return 109 | descriptor = dictionary[GeneratedServiceStubType._DESCRIPTOR_KEY] 110 | service_stub_builder = _ServiceStubBuilder(descriptor) 111 | service_stub_builder.BuildServiceStub(cls) 112 | 113 | 114 | class _ServiceBuilder(object): 115 | 116 | """This class constructs a protocol service class using a service descriptor. 117 | 118 | Given a service descriptor, this class constructs a class that represents 119 | the specified service descriptor. One service builder instance constructs 120 | exactly one service class. That means all instances of that class share the 121 | same builder. 122 | """ 123 | 124 | def __init__(self, service_descriptor): 125 | """Initializes an instance of the service class builder. 126 | 127 | Args: 128 | service_descriptor: ServiceDescriptor to use when constructing the 129 | service class. 130 | """ 131 | self.descriptor = service_descriptor 132 | 133 | def BuildService(self, cls): 134 | """Constructs the service class. 135 | 136 | Args: 137 | cls: The class that will be constructed. 138 | """ 139 | 140 | # CallMethod needs to operate with an instance of the Service class. 
This 141 | # internal wrapper function exists only to be able to pass the service 142 | # instance to the method that does the real CallMethod work. 143 | def _WrapCallMethod(srvc, method_descriptor, 144 | rpc_controller, request, callback): 145 | return self._CallMethod(srvc, method_descriptor, 146 | rpc_controller, request, callback) 147 | self.cls = cls 148 | cls.CallMethod = _WrapCallMethod 149 | cls.GetDescriptor = staticmethod(lambda: self.descriptor) 150 | cls.GetDescriptor.__doc__ = "Returns the service descriptor." 151 | cls.GetRequestClass = self._GetRequestClass 152 | cls.GetResponseClass = self._GetResponseClass 153 | for method in self.descriptor.methods: 154 | setattr(cls, method.name, self._GenerateNonImplementedMethod(method)) 155 | 156 | def _CallMethod(self, srvc, method_descriptor, 157 | rpc_controller, request, callback): 158 | """Calls the method described by a given method descriptor. 159 | 160 | Args: 161 | srvc: Instance of the service for which this method is called. 162 | method_descriptor: Descriptor that represent the method to call. 163 | rpc_controller: RPC controller to use for this method's execution. 164 | request: Request protocol message. 165 | callback: A callback to invoke after the method has completed. 166 | """ 167 | if method_descriptor.containing_service != self.descriptor: 168 | raise RuntimeError( 169 | 'CallMethod() given method descriptor for wrong service type.') 170 | method = getattr(srvc, method_descriptor.name) 171 | return method(rpc_controller, request, callback) 172 | 173 | def _GetRequestClass(self, method_descriptor): 174 | """Returns the class of the request protocol message. 175 | 176 | Args: 177 | method_descriptor: Descriptor of the method for which to return the 178 | request protocol message class. 179 | 180 | Returns: 181 | A class that represents the input protocol message of the specified 182 | method. 
183 | """ 184 | if method_descriptor.containing_service != self.descriptor: 185 | raise RuntimeError( 186 | 'GetRequestClass() given method descriptor for wrong service type.') 187 | return method_descriptor.input_type._concrete_class 188 | 189 | def _GetResponseClass(self, method_descriptor): 190 | """Returns the class of the response protocol message. 191 | 192 | Args: 193 | method_descriptor: Descriptor of the method for which to return the 194 | response protocol message class. 195 | 196 | Returns: 197 | A class that represents the output protocol message of the specified 198 | method. 199 | """ 200 | if method_descriptor.containing_service != self.descriptor: 201 | raise RuntimeError( 202 | 'GetResponseClass() given method descriptor for wrong service type.') 203 | return method_descriptor.output_type._concrete_class 204 | 205 | def _GenerateNonImplementedMethod(self, method): 206 | """Generates and returns a method that can be set for a service methods. 207 | 208 | Args: 209 | method: Descriptor of the service method for which a method is to be 210 | generated. 211 | 212 | Returns: 213 | A method that can be added to the service class. 214 | """ 215 | return lambda inst, rpc_controller, request, callback: ( 216 | self._NonImplementedMethod(method.name, rpc_controller, callback)) 217 | 218 | def _NonImplementedMethod(self, method_name, rpc_controller, callback): 219 | """The body of all methods in the generated service class. 220 | 221 | Args: 222 | method_name: Name of the method being executed. 223 | rpc_controller: RPC controller used to execute this method. 224 | callback: A callback which will be invoked when the method finishes. 225 | """ 226 | rpc_controller.SetFailed('Method %s not implemented.' % method_name) 227 | callback(None) 228 | 229 | 230 | class _ServiceStubBuilder(object): 231 | 232 | """Constructs a protocol service stub class using a service descriptor. 233 | 234 | Given a service descriptor, this class constructs a suitable stub class. 
235 | A stub is just a type-safe wrapper around an RpcChannel which emulates a 236 | local implementation of the service. 237 | 238 | One service stub builder instance constructs exactly one class. It means all 239 | instances of that class share the same service stub builder. 240 | """ 241 | 242 | def __init__(self, service_descriptor): 243 | """Initializes an instance of the service stub class builder. 244 | 245 | Args: 246 | service_descriptor: ServiceDescriptor to use when constructing the 247 | stub class. 248 | """ 249 | self.descriptor = service_descriptor 250 | 251 | def BuildServiceStub(self, cls): 252 | """Constructs the stub class. 253 | 254 | Args: 255 | cls: The class that will be constructed. 256 | """ 257 | 258 | def _ServiceStubInit(stub, rpc_channel): 259 | stub.rpc_channel = rpc_channel 260 | self.cls = cls 261 | cls.__init__ = _ServiceStubInit 262 | for method in self.descriptor.methods: 263 | setattr(cls, method.name, self._GenerateStubMethod(method)) 264 | 265 | def _GenerateStubMethod(self, method): 266 | return (lambda inst, rpc_controller, request, callback=None: 267 | self._StubMethod(inst, method, rpc_controller, request, callback)) 268 | 269 | def _StubMethod(self, stub, method_descriptor, 270 | rpc_controller, request, callback): 271 | """The body of all service methods in the generated stub class. 272 | 273 | Args: 274 | stub: Stub instance. 275 | method_descriptor: Descriptor of the invoked method. 276 | rpc_controller: Rpc controller to execute the method. 277 | request: Request protocol message. 278 | callback: A callback to execute when the method finishes. 279 | Returns: 280 | Response message (in case of blocking call). 
281 | """ 282 | return stub.rpc_channel.CallMethod( 283 | method_descriptor, rpc_controller, request, 284 | method_descriptor.output_type._concrete_class, callback) 285 | -------------------------------------------------------------------------------- /lib/google/protobuf/service_reflection.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/google/protobuf/service_reflection.pyc -------------------------------------------------------------------------------- /lib/google/protobuf/text_format.py: -------------------------------------------------------------------------------- 1 | # Protocol Buffers - Google's data interchange format 2 | # Copyright 2008 Google Inc. All rights reserved. 3 | # http://code.google.com/p/protobuf/ 4 | # 5 | # Redistribution and use in source and binary forms, with or without 6 | # modification, are permitted provided that the following conditions are 7 | # met: 8 | # 9 | # * Redistributions of source code must retain the above copyright 10 | # notice, this list of conditions and the following disclaimer. 11 | # * Redistributions in binary form must reproduce the above 12 | # copyright notice, this list of conditions and the following disclaimer 13 | # in the documentation and/or other materials provided with the 14 | # distribution. 15 | # * Neither the name of Google Inc. nor the names of its 16 | # contributors may be used to endorse or promote products derived from 17 | # this software without specific prior written permission. 18 | # 19 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 23 | # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | """Contains routines for printing protocol messages in text format.""" 32 | 33 | __author__ = 'kenton@google.com (Kenton Varda)' 34 | 35 | import cStringIO 36 | import re 37 | 38 | from collections import deque 39 | from google.protobuf.internal import type_checkers 40 | from google.protobuf import descriptor 41 | 42 | __all__ = [ 'MessageToString', 'PrintMessage', 'PrintField', 43 | 'PrintFieldValue', 'Merge' ] 44 | 45 | 46 | # Infinity and NaN are not explicitly supported by Python pre-2.6, and 47 | # float('inf') does not work on Windows (pre-2.6). 48 | _INFINITY = 1e10000 # overflows, thus will actually be infinity. 
49 | _NAN = _INFINITY * 0 50 | 51 | 52 | class ParseError(Exception): 53 | """Thrown in case of ASCII parsing error.""" 54 | 55 | 56 | def MessageToString(message, as_utf8=False, as_one_line=False): 57 | out = cStringIO.StringIO() 58 | PrintMessage(message, out, as_utf8=as_utf8, as_one_line=as_one_line) 59 | result = out.getvalue() 60 | out.close() 61 | if as_one_line: 62 | return result.rstrip() 63 | return result 64 | 65 | 66 | def PrintMessage(message, out, indent=0, as_utf8=False, as_one_line=False): 67 | for field, value in message.ListFields(): 68 | if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: 69 | for element in value: 70 | PrintField(field, element, out, indent, as_utf8, as_one_line) 71 | else: 72 | PrintField(field, value, out, indent, as_utf8, as_one_line) 73 | 74 | 75 | def PrintField(field, value, out, indent=0, as_utf8=False, as_one_line=False): 76 | """Print a single field name/value pair. For repeated fields, the value 77 | should be a single element.""" 78 | 79 | out.write(' ' * indent); 80 | if field.is_extension: 81 | out.write('[') 82 | if (field.containing_type.GetOptions().message_set_wire_format and 83 | field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and 84 | field.message_type == field.extension_scope and 85 | field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL): 86 | out.write(field.message_type.full_name) 87 | else: 88 | out.write(field.full_name) 89 | out.write(']') 90 | elif field.type == descriptor.FieldDescriptor.TYPE_GROUP: 91 | # For groups, use the capitalized name. 92 | out.write(field.message_type.name) 93 | else: 94 | out.write(field.name) 95 | 96 | if field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE: 97 | # The colon is optional in this case, but our cross-language golden files 98 | # don't include it. 
99 | out.write(': ') 100 | 101 | PrintFieldValue(field, value, out, indent, as_utf8, as_one_line) 102 | if as_one_line: 103 | out.write(' ') 104 | else: 105 | out.write('\n') 106 | 107 | 108 | def PrintFieldValue(field, value, out, indent=0, 109 | as_utf8=False, as_one_line=False): 110 | """Print a single field value (not including name). For repeated fields, 111 | the value should be a single element.""" 112 | 113 | if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: 114 | if as_one_line: 115 | out.write(' { ') 116 | PrintMessage(value, out, indent, as_utf8, as_one_line) 117 | out.write('}') 118 | else: 119 | out.write(' {\n') 120 | PrintMessage(value, out, indent + 2, as_utf8, as_one_line) 121 | out.write(' ' * indent + '}') 122 | elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM: 123 | out.write(field.enum_type.values_by_number[value].name) 124 | elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING: 125 | out.write('\"') 126 | if type(value) is unicode: 127 | out.write(_CEscape(value.encode('utf-8'), as_utf8)) 128 | else: 129 | out.write(_CEscape(value, as_utf8)) 130 | out.write('\"') 131 | elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL: 132 | if value: 133 | out.write("true") 134 | else: 135 | out.write("false") 136 | else: 137 | out.write(str(value)) 138 | 139 | 140 | def Merge(text, message): 141 | """Merges an ASCII representation of a protocol message into a message. 142 | 143 | Args: 144 | text: Message ASCII representation. 145 | message: A protocol buffer message to merge into. 146 | 147 | Raises: 148 | ParseError: On ASCII parsing problems. 149 | """ 150 | tokenizer = _Tokenizer(text) 151 | while not tokenizer.AtEnd(): 152 | _MergeField(tokenizer, message) 153 | 154 | 155 | def _MergeField(tokenizer, message): 156 | """Merges a single protocol message field into a message. 157 | 158 | Args: 159 | tokenizer: A tokenizer to parse the field name and values. 
160 | message: A protocol message to record the data. 161 | 162 | Raises: 163 | ParseError: In case of ASCII parsing problems. 164 | """ 165 | message_descriptor = message.DESCRIPTOR 166 | if tokenizer.TryConsume('['): 167 | name = [tokenizer.ConsumeIdentifier()] 168 | while tokenizer.TryConsume('.'): 169 | name.append(tokenizer.ConsumeIdentifier()) 170 | name = '.'.join(name) 171 | 172 | if not message_descriptor.is_extendable: 173 | raise tokenizer.ParseErrorPreviousToken( 174 | 'Message type "%s" does not have extensions.' % 175 | message_descriptor.full_name) 176 | field = message.Extensions._FindExtensionByName(name) 177 | if not field: 178 | raise tokenizer.ParseErrorPreviousToken( 179 | 'Extension "%s" not registered.' % name) 180 | elif message_descriptor != field.containing_type: 181 | raise tokenizer.ParseErrorPreviousToken( 182 | 'Extension "%s" does not extend message type "%s".' % ( 183 | name, message_descriptor.full_name)) 184 | tokenizer.Consume(']') 185 | else: 186 | name = tokenizer.ConsumeIdentifier() 187 | field = message_descriptor.fields_by_name.get(name, None) 188 | 189 | # Group names are expected to be capitalized as they appear in the 190 | # .proto file, which actually matches their type names, not their field 191 | # names. 192 | if not field: 193 | field = message_descriptor.fields_by_name.get(name.lower(), None) 194 | if field and field.type != descriptor.FieldDescriptor.TYPE_GROUP: 195 | field = None 196 | 197 | if (field and field.type == descriptor.FieldDescriptor.TYPE_GROUP and 198 | field.message_type.name != name): 199 | field = None 200 | 201 | if not field: 202 | raise tokenizer.ParseErrorPreviousToken( 203 | 'Message type "%s" has no field named "%s".' 
% ( 204 | message_descriptor.full_name, name)) 205 | 206 | if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: 207 | tokenizer.TryConsume(':') 208 | 209 | if tokenizer.TryConsume('<'): 210 | end_token = '>' 211 | else: 212 | tokenizer.Consume('{') 213 | end_token = '}' 214 | 215 | if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: 216 | if field.is_extension: 217 | sub_message = message.Extensions[field].add() 218 | else: 219 | sub_message = getattr(message, field.name).add() 220 | else: 221 | if field.is_extension: 222 | sub_message = message.Extensions[field] 223 | else: 224 | sub_message = getattr(message, field.name) 225 | sub_message.SetInParent() 226 | 227 | while not tokenizer.TryConsume(end_token): 228 | if tokenizer.AtEnd(): 229 | raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token)) 230 | _MergeField(tokenizer, sub_message) 231 | else: 232 | _MergeScalarField(tokenizer, message, field) 233 | 234 | 235 | def _MergeScalarField(tokenizer, message, field): 236 | """Merges a single protocol message scalar field into a message. 237 | 238 | Args: 239 | tokenizer: A tokenizer to parse the field value. 240 | message: A protocol message to record the data. 241 | field: The descriptor of the field to be merged. 242 | 243 | Raises: 244 | ParseError: In case of ASCII parsing problems. 245 | RuntimeError: On runtime errors. 
246 | """ 247 | tokenizer.Consume(':') 248 | value = None 249 | 250 | if field.type in (descriptor.FieldDescriptor.TYPE_INT32, 251 | descriptor.FieldDescriptor.TYPE_SINT32, 252 | descriptor.FieldDescriptor.TYPE_SFIXED32): 253 | value = tokenizer.ConsumeInt32() 254 | elif field.type in (descriptor.FieldDescriptor.TYPE_INT64, 255 | descriptor.FieldDescriptor.TYPE_SINT64, 256 | descriptor.FieldDescriptor.TYPE_SFIXED64): 257 | value = tokenizer.ConsumeInt64() 258 | elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32, 259 | descriptor.FieldDescriptor.TYPE_FIXED32): 260 | value = tokenizer.ConsumeUint32() 261 | elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64, 262 | descriptor.FieldDescriptor.TYPE_FIXED64): 263 | value = tokenizer.ConsumeUint64() 264 | elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT, 265 | descriptor.FieldDescriptor.TYPE_DOUBLE): 266 | value = tokenizer.ConsumeFloat() 267 | elif field.type == descriptor.FieldDescriptor.TYPE_BOOL: 268 | value = tokenizer.ConsumeBool() 269 | elif field.type == descriptor.FieldDescriptor.TYPE_STRING: 270 | value = tokenizer.ConsumeString() 271 | elif field.type == descriptor.FieldDescriptor.TYPE_BYTES: 272 | value = tokenizer.ConsumeByteString() 273 | elif field.type == descriptor.FieldDescriptor.TYPE_ENUM: 274 | # Enum can be specified by a number (the enum value), or by 275 | # a string literal (the enum name). 276 | enum_descriptor = field.enum_type 277 | if tokenizer.LookingAtInteger(): 278 | number = tokenizer.ConsumeInt32() 279 | enum_value = enum_descriptor.values_by_number.get(number, None) 280 | if enum_value is None: 281 | raise tokenizer.ParseErrorPreviousToken( 282 | 'Enum type "%s" has no value with number %d.' 
% ( 283 | enum_descriptor.full_name, number)) 284 | else: 285 | identifier = tokenizer.ConsumeIdentifier() 286 | enum_value = enum_descriptor.values_by_name.get(identifier, None) 287 | if enum_value is None: 288 | raise tokenizer.ParseErrorPreviousToken( 289 | 'Enum type "%s" has no value named %s.' % ( 290 | enum_descriptor.full_name, identifier)) 291 | value = enum_value.number 292 | else: 293 | raise RuntimeError('Unknown field type %d' % field.type) 294 | 295 | if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: 296 | if field.is_extension: 297 | message.Extensions[field].append(value) 298 | else: 299 | getattr(message, field.name).append(value) 300 | else: 301 | if field.is_extension: 302 | message.Extensions[field] = value 303 | else: 304 | setattr(message, field.name, value) 305 | 306 | 307 | class _Tokenizer(object): 308 | """Protocol buffer ASCII representation tokenizer. 309 | 310 | This class handles the lower level string parsing by splitting it into 311 | meaningful tokens. 312 | 313 | It was directly ported from the Java protocol buffer API. 
314 | """ 315 | 316 | _WHITESPACE = re.compile('(\\s|(#.*$))+', re.MULTILINE) 317 | _TOKEN = re.compile( 318 | '[a-zA-Z_][0-9a-zA-Z_+-]*|' # an identifier 319 | '[0-9+-][0-9a-zA-Z_.+-]*|' # a number 320 | '\"([^\"\n\\\\]|\\\\.)*(\"|\\\\?$)|' # a double-quoted string 321 | '\'([^\'\n\\\\]|\\\\.)*(\'|\\\\?$)') # a single-quoted string 322 | _IDENTIFIER = re.compile('\w+') 323 | _INTEGER_CHECKERS = [type_checkers.Uint32ValueChecker(), 324 | type_checkers.Int32ValueChecker(), 325 | type_checkers.Uint64ValueChecker(), 326 | type_checkers.Int64ValueChecker()] 327 | _FLOAT_INFINITY = re.compile('-?inf(inity)?f?', re.IGNORECASE) 328 | _FLOAT_NAN = re.compile("nanf?", re.IGNORECASE) 329 | 330 | def __init__(self, text_message): 331 | self._text_message = text_message 332 | 333 | self._position = 0 334 | self._line = -1 335 | self._column = 0 336 | self._token_start = None 337 | self.token = '' 338 | self._lines = deque(text_message.split('\n')) 339 | self._current_line = '' 340 | self._previous_line = 0 341 | self._previous_column = 0 342 | self._SkipWhitespace() 343 | self.NextToken() 344 | 345 | def AtEnd(self): 346 | """Checks the end of the text was reached. 347 | 348 | Returns: 349 | True iff the end was reached. 350 | """ 351 | return self.token == '' 352 | 353 | def _PopLine(self): 354 | while len(self._current_line) <= self._column: 355 | if not self._lines: 356 | self._current_line = '' 357 | return 358 | self._line += 1 359 | self._column = 0 360 | self._current_line = self._lines.popleft() 361 | 362 | def _SkipWhitespace(self): 363 | while True: 364 | self._PopLine() 365 | match = self._WHITESPACE.match(self._current_line, self._column) 366 | if not match: 367 | break 368 | length = len(match.group(0)) 369 | self._column += length 370 | 371 | def TryConsume(self, token): 372 | """Tries to consume a given piece of text. 373 | 374 | Args: 375 | token: Text to consume. 376 | 377 | Returns: 378 | True iff the text was consumed. 
379 | """ 380 | if self.token == token: 381 | self.NextToken() 382 | return True 383 | return False 384 | 385 | def Consume(self, token): 386 | """Consumes a piece of text. 387 | 388 | Args: 389 | token: Text to consume. 390 | 391 | Raises: 392 | ParseError: If the text couldn't be consumed. 393 | """ 394 | if not self.TryConsume(token): 395 | raise self._ParseError('Expected "%s".' % token) 396 | 397 | def LookingAtInteger(self): 398 | """Checks if the current token is an integer. 399 | 400 | Returns: 401 | True iff the current token is an integer. 402 | """ 403 | if not self.token: 404 | return False 405 | c = self.token[0] 406 | return (c >= '0' and c <= '9') or c == '-' or c == '+' 407 | 408 | def ConsumeIdentifier(self): 409 | """Consumes protocol message field identifier. 410 | 411 | Returns: 412 | Identifier string. 413 | 414 | Raises: 415 | ParseError: If an identifier couldn't be consumed. 416 | """ 417 | result = self.token 418 | if not self._IDENTIFIER.match(result): 419 | raise self._ParseError('Expected identifier.') 420 | self.NextToken() 421 | return result 422 | 423 | def ConsumeInt32(self): 424 | """Consumes a signed 32bit integer number. 425 | 426 | Returns: 427 | The integer parsed. 428 | 429 | Raises: 430 | ParseError: If a signed 32bit integer couldn't be consumed. 431 | """ 432 | try: 433 | result = self._ParseInteger(self.token, is_signed=True, is_long=False) 434 | except ValueError, e: 435 | raise self._IntegerParseError(e) 436 | self.NextToken() 437 | return result 438 | 439 | def ConsumeUint32(self): 440 | """Consumes an unsigned 32bit integer number. 441 | 442 | Returns: 443 | The integer parsed. 444 | 445 | Raises: 446 | ParseError: If an unsigned 32bit integer couldn't be consumed. 
447 | """ 448 | try: 449 | result = self._ParseInteger(self.token, is_signed=False, is_long=False) 450 | except ValueError, e: 451 | raise self._IntegerParseError(e) 452 | self.NextToken() 453 | return result 454 | 455 | def ConsumeInt64(self): 456 | """Consumes a signed 64bit integer number. 457 | 458 | Returns: 459 | The integer parsed. 460 | 461 | Raises: 462 | ParseError: If a signed 64bit integer couldn't be consumed. 463 | """ 464 | try: 465 | result = self._ParseInteger(self.token, is_signed=True, is_long=True) 466 | except ValueError, e: 467 | raise self._IntegerParseError(e) 468 | self.NextToken() 469 | return result 470 | 471 | def ConsumeUint64(self): 472 | """Consumes an unsigned 64bit integer number. 473 | 474 | Returns: 475 | The integer parsed. 476 | 477 | Raises: 478 | ParseError: If an unsigned 64bit integer couldn't be consumed. 479 | """ 480 | try: 481 | result = self._ParseInteger(self.token, is_signed=False, is_long=True) 482 | except ValueError, e: 483 | raise self._IntegerParseError(e) 484 | self.NextToken() 485 | return result 486 | 487 | def ConsumeFloat(self): 488 | """Consumes a floating point number. 489 | 490 | Returns: 491 | The number parsed. 492 | 493 | Raises: 494 | ParseError: If a floating point number couldn't be consumed. 495 | """ 496 | text = self.token 497 | if self._FLOAT_INFINITY.match(text): 498 | self.NextToken() 499 | if text.startswith('-'): 500 | return -_INFINITY 501 | return _INFINITY 502 | 503 | if self._FLOAT_NAN.match(text): 504 | self.NextToken() 505 | return _NAN 506 | 507 | try: 508 | result = float(text) 509 | except ValueError, e: 510 | raise self._FloatParseError(e) 511 | self.NextToken() 512 | return result 513 | 514 | def ConsumeBool(self): 515 | """Consumes a boolean value. 516 | 517 | Returns: 518 | The bool parsed. 519 | 520 | Raises: 521 | ParseError: If a boolean value couldn't be consumed. 
522 | """ 523 | if self.token in ('true', 't', '1'): 524 | self.NextToken() 525 | return True 526 | elif self.token in ('false', 'f', '0'): 527 | self.NextToken() 528 | return False 529 | else: 530 | raise self._ParseError('Expected "true" or "false".') 531 | 532 | def ConsumeString(self): 533 | """Consumes a string value. 534 | 535 | Returns: 536 | The string parsed. 537 | 538 | Raises: 539 | ParseError: If a string value couldn't be consumed. 540 | """ 541 | bytes = self.ConsumeByteString() 542 | try: 543 | return unicode(bytes, 'utf-8') 544 | except UnicodeDecodeError, e: 545 | raise self._StringParseError(e) 546 | 547 | def ConsumeByteString(self): 548 | """Consumes a byte array value. 549 | 550 | Returns: 551 | The array parsed (as a string). 552 | 553 | Raises: 554 | ParseError: If a byte array value couldn't be consumed. 555 | """ 556 | list = [self._ConsumeSingleByteString()] 557 | while len(self.token) > 0 and self.token[0] in ('\'', '"'): 558 | list.append(self._ConsumeSingleByteString()) 559 | return "".join(list) 560 | 561 | def _ConsumeSingleByteString(self): 562 | """Consume one token of a string literal. 563 | 564 | String literals (whether bytes or text) can come in multiple adjacent 565 | tokens which are automatically concatenated, like in C or Python. This 566 | method only consumes one token. 567 | """ 568 | text = self.token 569 | if len(text) < 1 or text[0] not in ('\'', '"'): 570 | raise self._ParseError('Exptected string.') 571 | 572 | if len(text) < 2 or text[-1] != text[0]: 573 | raise self._ParseError('String missing ending quote.') 574 | 575 | try: 576 | result = _CUnescape(text[1:-1]) 577 | except ValueError, e: 578 | raise self._ParseError(str(e)) 579 | self.NextToken() 580 | return result 581 | 582 | def _ParseInteger(self, text, is_signed=False, is_long=False): 583 | """Parses an integer. 584 | 585 | Args: 586 | text: The text to parse. 587 | is_signed: True if a signed integer must be parsed. 
588 | is_long: True if a long integer must be parsed. 589 | 590 | Returns: 591 | The integer value. 592 | 593 | Raises: 594 | ValueError: Thrown iff the text is not a valid integer. 595 | """ 596 | pos = 0 597 | if text.startswith('-'): 598 | pos += 1 599 | 600 | base = 10 601 | if text.startswith('0x', pos) or text.startswith('0X', pos): 602 | base = 16 603 | elif text.startswith('0', pos): 604 | base = 8 605 | 606 | # Do the actual parsing. Exception handling is propagated to caller. 607 | result = int(text, base) 608 | 609 | # Check if the integer is sane. Exceptions handled by callers. 610 | checker = self._INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)] 611 | checker.CheckValue(result) 612 | return result 613 | 614 | def ParseErrorPreviousToken(self, message): 615 | """Creates and *returns* a ParseError for the previously read token. 616 | 617 | Args: 618 | message: A message to set for the exception. 619 | 620 | Returns: 621 | A ParseError instance. 622 | """ 623 | return ParseError('%d:%d : %s' % ( 624 | self._previous_line + 1, self._previous_column + 1, message)) 625 | 626 | def _ParseError(self, message): 627 | """Creates and *returns* a ParseError for the current token.""" 628 | return ParseError('%d:%d : %s' % ( 629 | self._line + 1, self._column - len(self.token) + 1, message)) 630 | 631 | def _IntegerParseError(self, e): 632 | return self._ParseError('Couldn\'t parse integer: ' + str(e)) 633 | 634 | def _FloatParseError(self, e): 635 | return self._ParseError('Couldn\'t parse number: ' + str(e)) 636 | 637 | def _StringParseError(self, e): 638 | return self._ParseError('Couldn\'t parse string: ' + str(e)) 639 | 640 | def NextToken(self): 641 | """Reads the next meaningful token.""" 642 | self._previous_line = self._line 643 | self._previous_column = self._column 644 | 645 | self._column += len(self.token) 646 | self._SkipWhitespace() 647 | 648 | if not self._lines and len(self._current_line) <= self._column: 649 | self.token = '' 650 | return 651 | 
652 | match = self._TOKEN.match(self._current_line, self._column) 653 | if match: 654 | token = match.group(0) 655 | self.token = token 656 | else: 657 | self.token = self._current_line[self._column] 658 | 659 | 660 | # text.encode('string_escape') does not seem to satisfy our needs as it 661 | # encodes unprintable characters using two-digit hex escapes whereas our 662 | # C++ unescaping function allows hex escapes to be any length. So, 663 | # "\0011".encode('string_escape') ends up being "\\x011", which will be 664 | # decoded in C++ as a single-character string with char code 0x11. 665 | def _CEscape(text, as_utf8): 666 | def escape(c): 667 | o = ord(c) 668 | if o == 10: return r"\n" # optional escape 669 | if o == 13: return r"\r" # optional escape 670 | if o == 9: return r"\t" # optional escape 671 | if o == 39: return r"\'" # optional escape 672 | 673 | if o == 34: return r'\"' # necessary escape 674 | if o == 92: return r"\\" # necessary escape 675 | 676 | # necessary escapes 677 | if not as_utf8 and (o >= 127 or o < 32): return "\\%03o" % o 678 | return c 679 | return "".join([escape(c) for c in text]) 680 | 681 | 682 | _CUNESCAPE_HEX = re.compile('\\\\x([0-9a-fA-F]{2}|[0-9a-fA-F])') 683 | 684 | 685 | def _CUnescape(text): 686 | def ReplaceHex(m): 687 | return chr(int(m.group(0)[2:], 16)) 688 | # This is required because the 'string_escape' encoding doesn't 689 | # allow single-digit hex escapes (like '\xf'). 
690 | result = _CUNESCAPE_HEX.sub(ReplaceHex, text) 691 | return result.decode('string_escape') 692 | -------------------------------------------------------------------------------- /lib/google/protobuf/text_format.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/google/protobuf/text_format.pyc -------------------------------------------------------------------------------- /lib/gtfs-realtime.proto: -------------------------------------------------------------------------------- 1 | // Copyright 2011 Google Inc 2 | // 3 | // The content of this file is licensed under the Creative Commons Attribution 4 | // 3.0 License. 5 | // 6 | // Protocol definition file for GTFS-realtime. 7 | // 8 | // GTFS-realtime lets transit agencies provide consumers with realtime 9 | // information about disruptions to their service (stations closed, lines not 10 | // operating, important delays etc), location of their vehicles and expected 11 | // arrival times. 12 | // 13 | // This protocol is published at: 14 | // http://developers.google.com/transit/gtfs-realtime/ 15 | 16 | syntax = "proto2"; 17 | 18 | option java_package = "com.google.transit.realtime"; 19 | package transit_realtime; 20 | 21 | // The contents of a feed message. 22 | // A feed is a continuous stream of feed messages. Each message in the stream is 23 | // obtained as a response to an appropriate HTTP GET request. 24 | // A realtime feed is always defined with relation to an existing GTFS feed. 25 | // All the entity ids are resolved with respect to the GTFS feed. 26 | // 27 | // A feed depends on some external configuration: 28 | // - The corresponding GTFS feed. 29 | // - Feed application (updates, positions or alerts). A feed should contain only 30 | // items of one specified application; all the other entities will be ignored. 
// - Polling frequency
message FeedMessage {
  // Metadata about this feed and feed message.
  required FeedHeader header = 1;

  // Contents of the feed.
  repeated FeedEntity entity = 2;
}

// Metadata about a feed, included in feed messages.
message FeedHeader {
  // Version of the feed specification. The current version is 1.0.
  required string gtfs_realtime_version = 1;

  // Determines whether the current fetch is incremental. Currently,
  // DIFFERENTIAL mode is unsupported and behavior is unspecified for feeds
  // that use this mode; its exact semantics are still being discussed on
  // the GTFS-realtime mailing list, and the documentation will be updated
  // when those discussions are finalized.
  enum Incrementality {
    FULL_DATASET = 0;
    DIFFERENTIAL = 1;
  }
  optional Incrementality incrementality = 2 [default = FULL_DATASET];

  // Moment when the content of this feed was created (in server time). In
  // POSIX time (i.e., number of seconds since January 1st 1970 00:00:00
  // UTC).
  optional uint64 timestamp = 3;

  // Namespace reserved for 3rd-party extensions to the spec.
  extensions 1000 to 1999;
}

// A definition (or update) of an entity in the transit feed.
message FeedEntity {
  // The ids are used only to provide incrementality support. The id should
  // be unique within a FeedMessage; consequent FeedMessages may contain
  // FeedEntities with the same id. In a DIFFERENTIAL update, the new
  // FeedEntity with some id replaces (or deletes - see is_deleted below)
  // the old FeedEntity with the same id.
  // The actual GTFS entities (e.g. stations, routes, trips) referenced by
  // the feed must be specified by explicit selectors (see EntitySelector
  // below for more info).
  required string id = 1;

  // Whether this entity is to be deleted. Relevant only for incremental
  // fetches.
  optional bool is_deleted = 2 [default = false];

  // Data about the entity itself. Exactly one of the following fields must
  // be present (unless the entity is being deleted).
  optional TripUpdate trip_update = 3;
  optional VehiclePosition vehicle = 4;
  optional Alert alert = 5;
}

//
// Entities used in the feed.
//

// Realtime update of the progress of a vehicle along a trip.
// Depending on the value of ScheduleRelationship, a TripUpdate can specify:
// - A trip that proceeds along the schedule.
// - A trip that proceeds along a route but has no fixed schedule.
// - A trip that has been added or removed with regard to the schedule.
//
// Updates can describe future, predicted arrival/departure events, or past
// events that already occurred. Normally, updates should get more precise
// and more certain (see uncertainty below) as the events get closer to the
// current time; even when that is not possible, information for past
// events should be precise and certain. In particular, if an update points
// to a time in the past but its uncertainty is not 0, the client should
// conclude that the update is a (wrong) prediction and that the trip has
// not completed yet.
//
// Note that an update can describe a trip that is already completed: it is
// enough to provide an update for the last stop of the trip. If the time
// of that stop is in the past, the client will conclude that the whole
// trip is in the past (it is possible, although inconsequential, to also
// provide updates for preceding stops). This option is most relevant for a
// trip that has completed ahead of schedule but, according to the
// schedule, is still proceeding at the current time - removing the updates
// for this trip could make the client assume that it is still proceeding.
// The feed provider is allowed, but not required, to purge past updates;
// this is one case where purging would be practically useful.
message TripUpdate {
  // The Trip that this message applies to. There can be at most one
  // TripUpdate entity for each actual trip instance. If there is none,
  // that means there is no prediction information available. It does *not*
  // mean that the trip is progressing according to schedule.
  required TripDescriptor trip = 1;

  // Additional information on the vehicle that is serving this trip.
  optional VehicleDescriptor vehicle = 3;

  // Timing information for a single predicted event (either arrival or
  // departure). Timing consists of delay and/or estimated time, and
  // uncertainty.
  // - delay should be used when the prediction is given relative to some
  //   existing schedule in GTFS.
  // - time should be given whether there is a predicted schedule or not.
  //   If both time and delay are specified, time takes precedence
  //   (although normally, time, if given for a scheduled trip, should be
  //   equal to the scheduled time in GTFS + delay).
  // Uncertainty applies equally to both time and delay. It roughly
  // specifies the expected error in true delay (but note, its precise
  // statistical meaning is not yet defined). It is possible for the
  // uncertainty to be 0, for example for trains that are driven under
  // computer timing control.
  message StopTimeEvent {
    // Delay (in seconds) can be positive (meaning that the vehicle is
    // late) or negative (meaning that the vehicle is ahead of schedule).
    // Delay of 0 means that the vehicle is exactly on time.
    optional int32 delay = 1;

    // Event as absolute time, in Unix time (i.e., number of seconds since
    // January 1st 1970 00:00:00 UTC).
    optional int64 time = 2;

    // If uncertainty is omitted, it is interpreted as unknown. If the
    // prediction is unknown or too uncertain, the delay (or time) field
    // should be empty; in such a case, the uncertainty field is ignored.
    // To specify a completely certain prediction, set its uncertainty
    // to 0.
    optional int32 uncertainty = 3;

    // Namespace reserved for 3rd-party extensions to the spec.
    extensions 1000 to 1999;
  }

  // Realtime update for arrival and/or departure events for a given stop
  // on a trip. Updates can be supplied for both past and future events.
  // The producer is allowed, although not required, to drop past events.
  message StopTimeUpdate {
    // The update is linked to a specific stop either through stop_sequence
    // or stop_id, so one of the fields below must necessarily be set.
    // See the documentation in TripDescriptor for more information.

    // Must be the same as in stop_times.txt in the corresponding GTFS
    // feed.
    optional uint32 stop_sequence = 1;
    // Must be the same as in stops.txt in the corresponding GTFS feed.
    optional string stop_id = 4;

    optional StopTimeEvent arrival = 2;
    optional StopTimeEvent departure = 3;

    // The relation between this StopTime and the static schedule.
    enum ScheduleRelationship {
      // The vehicle is proceeding in accordance with its static schedule
      // of stops, although not necessarily according to the times of the
      // schedule. At least one of arrival and departure must be provided.
      // If the schedule for this stop contains both arrival and departure
      // times then so must this update; an update with only an arrival,
      // say, where the schedule has both, indicates that the trip is
      // terminating early at this stop.
      SCHEDULED = 0;

      // The stop is skipped, i.e., the vehicle will not stop at this stop.
      // Arrival and departure are optional.
      SKIPPED = 1;

      // No data is given for this stop. The main intention for this value
      // is to give predictions for only part of a trip: if the last update
      // for a trip has a NO_DATA specifier, then StopTimes for the rest of
      // the stops in the trip are considered unspecified as well. Neither
      // arrival nor departure should be supplied.
      NO_DATA = 2;
    }
    optional ScheduleRelationship schedule_relationship = 5
        [default = SCHEDULED];

    // Namespace reserved for 3rd-party extensions to the spec.
    extensions 1000 to 1999;
  }

  // Updates to StopTimes for the trip (both future, i.e., predictions, and
  // in some cases past ones, i.e., those that already happened). The
  // updates must be sorted by stop_sequence, and apply to all the
  // following stops of the trip up to the next specified one.
  //
  // Example 1:
  // For a trip with 20 stops, a StopTimeUpdate with arrival delay and
  // departure delay of 0 for the stop_sequence of the current stop means
  // that the trip is exactly on time.
  //
  // Example 2:
  // For the same trip instance, 3 StopTimeUpdates are provided:
  // - delay of 5 min for stop_sequence 3
  // - delay of 1 min for stop_sequence 8
  // - delay of unspecified duration for stop_sequence 10
  // This will be interpreted as:
  // - stop_sequences 3,4,5,6,7 have a delay of 5 min.
  // - stop_sequences 8,9 have a delay of 1 min.
  // - stop_sequences 10,... have unknown delay.
  repeated StopTimeUpdate stop_time_update = 2;

  // Moment at which the vehicle's real-time progress was measured. In
  // POSIX time (i.e., the number of seconds since January 1st 1970
  // 00:00:00 UTC).
  optional uint64 timestamp = 4;

  // Namespace reserved for 3rd-party extensions to the spec.
  extensions 1000 to 1999;
}

// Realtime positioning information for a given vehicle.
message VehiclePosition {
  // The Trip that this vehicle is serving. Can be empty or partial if the
  // vehicle can not be identified with a given trip instance.
  optional TripDescriptor trip = 1;

  // Additional information on the vehicle that is serving this trip.
  optional VehicleDescriptor vehicle = 8;

  // Current position of this vehicle.
  optional Position position = 2;

  // The stop sequence index of the current stop. The meaning of
  // current_stop_sequence (i.e., the stop that it refers to) is determined
  // by current_status. If current_status is missing, IN_TRANSIT_TO is
  // assumed.
  optional uint32 current_stop_sequence = 3;
  // Identifies the current stop. The value must be the same as in
  // stops.txt in the corresponding GTFS feed.
  optional string stop_id = 7;

  enum VehicleStopStatus {
    // The vehicle is just about to arrive at the stop (on a stop display,
    // the vehicle symbol typically flashes).
    INCOMING_AT = 0;

    // The vehicle is standing at the stop.
    STOPPED_AT = 1;

    // The vehicle has departed and is in transit to the next stop.
    IN_TRANSIT_TO = 2;
  }
  // The exact status of the vehicle with respect to the current stop.
  // Ignored if current_stop_sequence is missing.
  optional VehicleStopStatus current_status = 4 [default = IN_TRANSIT_TO];

  // Moment at which the vehicle's position was measured. In POSIX time
  // (i.e., number of seconds since January 1st 1970 00:00:00 UTC).
  optional uint64 timestamp = 5;

  // Congestion level that is affecting this vehicle.
  enum CongestionLevel {
    UNKNOWN_CONGESTION_LEVEL = 0;
    RUNNING_SMOOTHLY = 1;
    STOP_AND_GO = 2;
    CONGESTION = 3;
    SEVERE_CONGESTION = 4;  // People leaving their cars.
  }
  optional CongestionLevel congestion_level = 6;

  // Namespace reserved for 3rd-party extensions to the spec.
  extensions 1000 to 1999;
}

// An alert, indicating some sort of incident in the public transit network.
message Alert {
  // Time when the alert should be shown to the user. If missing, the alert
  // will be shown as long as it appears in the feed. If multiple ranges
  // are given, the alert will be shown during all of them.
  repeated TimeRange active_period = 1;

  // Entities whose users we should notify of this alert.
  repeated EntitySelector informed_entity = 5;

  // Cause of this alert.
  enum Cause {
    UNKNOWN_CAUSE = 1;
    OTHER_CAUSE = 2;    // Not machine-representable.
    TECHNICAL_PROBLEM = 3;
    STRIKE = 4;         // Public transit agency employees stopped working.
    DEMONSTRATION = 5;  // People are blocking the streets.
    ACCIDENT = 6;
    HOLIDAY = 7;
    WEATHER = 8;
    MAINTENANCE = 9;
    CONSTRUCTION = 10;
    POLICE_ACTIVITY = 11;
    MEDICAL_EMERGENCY = 12;
  }
  optional Cause cause = 6 [default = UNKNOWN_CAUSE];

  // What is the effect of this problem on the affected entity.
  enum Effect {
    NO_SERVICE = 1;
    REDUCED_SERVICE = 2;

    // We don't care about INsignificant delays: they are hard to detect,
    // have little impact on the user, and would clutter the results as
    // they are too frequent.
    SIGNIFICANT_DELAYS = 3;

    DETOUR = 4;
    ADDITIONAL_SERVICE = 5;
    MODIFIED_SERVICE = 6;
    OTHER_EFFECT = 7;
    UNKNOWN_EFFECT = 8;
    STOP_MOVED = 9;
  }
  optional Effect effect = 7 [default = UNKNOWN_EFFECT];

  // The URL which provides additional information about the alert.
  optional TranslatedString url = 8;

  // Alert header. Contains a short summary of the alert text as
  // plain-text.
  optional TranslatedString header_text = 10;

  // Full description for the alert as plain-text. The information in the
  // description should add to the information of the header.
  optional TranslatedString description_text = 11;

  // Namespace reserved for 3rd-party extensions to the spec.
  extensions 1000 to 1999;
}

//
// Low level data structures used above.
//

// A time interval. The interval is considered active at time 't' if 't' is
// greater than or equal to the start time and less than the end time.
message TimeRange {
  // Start time, in POSIX time (i.e., number of seconds since January 1st
  // 1970 00:00:00 UTC). If missing, the interval starts at minus infinity.
  optional uint64 start = 1;

  // End time, in POSIX time (i.e., number of seconds since January 1st
  // 1970 00:00:00 UTC). If missing, the interval ends at plus infinity.
  optional uint64 end = 2;
}

// A position.
message Position {
  // Degrees North, in the WGS-84 coordinate system.
  required float latitude = 1;

  // Degrees East, in the WGS-84 coordinate system.
  required float longitude = 2;

  // Bearing, in degrees, clockwise from North, i.e., 0 is North and 90 is
  // East. This can be the compass bearing, or the direction towards the
  // next stop or intermediate location. It should not be a direction
  // deduced from the sequence of previous positions, which clients can
  // compute from previous data.
  optional float bearing = 3;

  // Odometer value, in meters.
  optional double odometer = 4;
  // Momentary speed measured by the vehicle, in meters per second.
  optional float speed = 5;

  // Namespace reserved for 3rd-party extensions to the spec.
  extensions 1000 to 1999;
}

// A descriptor that identifies an instance of a GTFS trip, or all instances
// of a trip along a route.
// - To specify a single trip instance, set trip_id (and if necessary,
//   start_time). If route_id is also set, it should be the same as the one
//   the given trip corresponds to.
// - To specify all the trips along a given route, set only route_id. Note
//   that if the trip_id is not known, then stop sequence ids in TripUpdate
//   are not sufficient, and stop_ids must be provided as well. In
//   addition, absolute arrival/departure times must be provided.
message TripDescriptor {
  // The trip_id from the GTFS feed that this selector refers to. For
  // non-frequency-expanded trips, this field is enough to uniquely
  // identify the trip. For frequency-expanded trips, start_time and
  // start_date might also be necessary.
  optional string trip_id = 1;

  // The route_id from the GTFS that this selector refers to.
  optional string route_id = 5;

  // The scheduled start time of this trip instance. This field should be
  // given only if the trip is frequency-expanded in the GTFS feed. The
  // value must precisely correspond to the start_time specified for the
  // route in the GTFS feed plus some multiple of headway_secs. The format
  // of the field is the same as that of GTFS/frequencies.txt/start_time,
  // e.g., 11:15:35 or 25:15:35.
  optional string start_time = 2;

  // The scheduled start date of this trip instance, in YYYYMMDD format.
  // Must be provided to disambiguate trips that are so late as to collide
  // with a scheduled trip on the next day. For example, for a train that
  // departs at 8:00 and 20:00 every day and is 12 hours late, there would
  // be two distinct trips at the same time. This field can be provided but
  // is not mandatory for schedules in which such collisions are impossible
  // - for example, a service running on an hourly schedule where a vehicle
  // that is one hour late is not considered to be related to the schedule
  // anymore.
  optional string start_date = 3;

  // The relation between this trip and the static schedule. If a trip is
  // performed in accordance with a temporary schedule, not reflected in
  // GTFS, then it shouldn't be marked as SCHEDULED, but likely as ADDED.
  enum ScheduleRelationship {
    // Trip that is running in accordance with its GTFS schedule, or is
    // close enough to the scheduled trip to be associated with it.
    SCHEDULED = 0;

    // An extra trip that was added in addition to a running schedule, for
    // example, to replace a broken vehicle or to respond to sudden
    // passenger load.
    ADDED = 1;

    // A trip that is running with no schedule associated to it, for
    // example, if there is no schedule at all.
    UNSCHEDULED = 2;

    // A trip that existed in the schedule but was removed.
    CANCELED = 3;

    // A trip that replaces a portion of static schedule. If the trip
    // selector identifies a certain trip instance, then only that instance
    // is replaced; if the selector identifies a route, then all the trips
    // along that route are replaced.
    //
    // The replacement applies only to the portion of the trip supplied.
    // For instance, consider a route that goes through stops A,B,C,D,E,F,
    // and a REPLACEMENT trip that provides data for stops A,B,C. Then, the
    // times for stops D,E,F are still taken from the static schedule.
    //
    // A feed might supply several REPLACEMENT trips. In this case, the
    // portion of static schedule that is replaced is the union of what is
    // defined by all the feeds. Normally, all the REPLACEMENT trips should
    // either correspond to the same route or to individual trip instances.
    REPLACEMENT = 5;
  }
  optional ScheduleRelationship schedule_relationship = 4;

  // Namespace reserved for 3rd-party extensions to the spec.
  extensions 1000 to 1999;
}

// Identification information for the vehicle performing the trip.
message VehicleDescriptor {
  // Internal system identification of the vehicle. Should be unique per
  // vehicle, and can be used for tracking the vehicle as it proceeds
  // through the system.
  optional string id = 1;

  // User visible label, i.e., something that must be shown to the
  // passenger to help identify the correct vehicle.
  optional string label = 2;

  // The license plate of the vehicle.
  optional string license_plate = 3;

  // Namespace reserved for 3rd-party extensions to the spec.
  extensions 1000 to 1999;
}

// A selector for an entity in a GTFS feed. The values of the fields should
// correspond to the appropriate fields in the GTFS feed. At least one
// specifier must be given; if several are given, the matching has to apply
// to all the given specifiers.
message EntitySelector {
  optional string agency_id = 1;
  optional string route_id = 2;
  // Corresponds to route_type in GTFS.
  optional int32 route_type = 3;
  optional TripDescriptor trip = 4;
  optional string stop_id = 5;

  // Namespace reserved for 3rd-party extensions to the spec.
  extensions 1000 to 1999;
}

// An internationalized message containing per-language versions of a
// snippet of text or a URL. One of the strings from a message will be
// picked up. The resolution proceeds as follows:
// 1. If the UI language matches the language code of a translation, the
//    first matching translation is picked.
// 2. If a default UI language (e.g., English) matches the language code of
//    a translation, the first matching translation is picked.
// 3. If some translation has an unspecified language code, that
//    translation is picked.
message TranslatedString {
  message Translation {
    // A UTF-8 string containing the message.
    required string text = 1;
    // BCP-47 language code. Can be omitted if the language is unknown or
    // if no i18n is done at all for the feed. At most one translation is
    // allowed to have an unspecified language tag.
    optional string language = 2;
  }
  // At least one translation must be provided.
  repeated Translation translation = 1;
}

--------------------------------------------------------------------------------
/lib/gtfs_realtime_pb2.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bretwalker/gtfs-realtime-display/33230f9bd8234ddc598e127b0cd3533c8b25ea2f/lib/gtfs_realtime_pb2.pyc
--------------------------------------------------------------------------------