├── .gitignore
├── .idea
│   └── vcs.xml
├── .travis.yml
├── LICENSE
├── README.md
├── __init__.py
├── img
│   └── production_deployment_diagram.png
├── influxdbds.py
├── influxdbmeta.py
├── local.py
├── requirements.txt
├── server.py
├── test_data
│   └── test_metadata.xml
└── tests.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 |
27 | # PyInstaller
28 | # Usually these files are written by a python script from a template
29 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
30 | *.manifest
31 | *.spec
32 |
33 | # Installer logs
34 | pip-log.txt
35 | pip-delete-this-directory.txt
36 |
37 | # Unit test / coverage reports
38 | htmlcov/
39 | .tox/
40 | .coverage
41 | .coverage.*
42 | .cache
43 | nosetests.xml
44 | coverage.xml
45 | *,cover
46 | .hypothesis/
47 |
48 | # Translations
49 | *.mo
50 | *.pot
51 |
52 | # Django stuff:
53 | *.log
54 | local_settings.py
55 |
56 | # Flask stuff:
57 | instance/
58 | .webassets-cache
59 |
60 | # Scrapy stuff:
61 | .scrapy
62 |
63 | # Sphinx documentation
64 | docs/_build/
65 |
66 | # PyBuilder
67 | target/
68 |
69 | # IPython Notebook
70 | .ipynb_checkpoints
71 |
72 | # pyenv
73 | .python-version
74 |
75 | # celery beat schedule file
76 | celerybeat-schedule
77 |
78 | # dotenv
79 | .env
80 |
81 | # virtualenv
82 | venv/
83 | ENV/
84 |
85 | # Spyder project settings
86 | .spyderproject
87 |
88 | # Rope project settings
89 | .ropeproject
90 | /*.conf
91 | .idea/
92 | /generated.xml
93 |
94 | # any generated xml files
95 | *.xml
96 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project version="4">
3 |   <component name="VcsDirectoryMappings">
4 |     <mapping directory="$PROJECT_DIR$" vcs="Git" />
5 |   </component>
6 | </project>
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | python:
3 | - "2.7"
4 | install: "pip install -r requirements.txt responses"
5 | script: python tests.py
6 | notifications:
7 | email: false
8 | slack:
9 | rooms:
10 | - secure: RUi6TJ5a0+kSt3kPI4fB048/Euz8+IX2cRZuwUJ0FMuFKKiZDz9e7ltfFfkeglyEM32qdtzJAX6xhFxOkUFigs8iGkWGZqsVuehOYDXI7Kqsd6WSKyiPio48VApi/9ILSONeQWT0bqvxQ/jsA5XpYoZBCmDPL99u1DPO3oMt2fkhhFibZMOgkqEtK6CPa8eFZOvLC+tJOQr7fBPlPreEgvGIC3ZL7nBPonHhS2eLCBOdJUyzv4EPH/N3Js0kowLNL6NfPg1v9AVzfVtKDyKqdAnUpdPqsvQw+S985rTYBZZ+4VihcGMzzAOSyp3qYieYHvGgsOY/N+x3ytx7RUp7p82b6WMEg0D5MYNXZlL6HbwWob/vRuvYp9d4zdlUohO8FE4AssrBTBtGXZng68Eng9uo2F4Kc5flSDdrCifPs2tb1mlTWnmwogVEQcWzUeHZEHdBCYlE/PyzWQCFSWMkMV63MeweAVOpSye15Hv64/QmybSVRclGOAGPH/iKsgWc99hCIC11I56tZY3Cb0d7eakVRuemYXeAXHRjnUDai0XCt6Pp/DgRClyw8a/IugsI6mBKwVYUXGJMEumPrS11cu8tIvBNbHseP4NYwX8hUysy3TklJAQsp7lHQFUqkqaHf41eg0aawFsyl5FftYwwmAEBIsMW7E6K2np8qMZtt6c=
11 | on_success: always
12 | on_failure: always
13 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 Synergetic Engineering
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # influxdb_odata
2 |
3 | [![Build Status](https://travis-ci.org/Synergetic-Engineering/odata-influxdb.svg?branch=master)](https://travis-ci.org/Synergetic-Engineering/odata-influxdb) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
4 |
5 | This project allows you to use the [OData REST API](http://www.odata.org/) to access [InfluxDB](https://www.influxdata.com/) data.
6 |
7 | This enables software such as Power BI, Excel, SAP, Drupal, and LINQPad to access InfluxDB data.
8 | (_Note: only some of these have been tested._)
9 |
10 | ## Requirements:
11 |
12 | Python: currently requires Python 2 (tested on 2.7).
13 | 
14 | [pyslet](https://github.com/swl10/pyslet): provides the OData functionality
15 | used in this project.
16 |
17 | ## Usage:
18 |
19 | Run the following command to generate a sample config file:
20 |
21 | `python server.py --makeSampleConfig`
22 |
23 | Update the `dsn` in the conf file to reflect your InfluxDB server location.
24 |
25 | You can change the hostname/port for the API server by updating
26 | `service_advertise_root`, `server_listen_interface`, and `server_listen_port`
27 | in the conf file.
28 |
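The generated `sample.conf` should look roughly like this (defaults taken from `get_sample_config()` in `server.py`):

```
[server]
service_advertise_root = http://localhost:8080
server_listen_interface = 127.0.0.1
server_listen_port = 8080

[metadata]
; set autogenerate to "no" for quicker startup of the server if you know your influxdb structure has not changed
autogenerate = yes
; metadata_file specifies the location of the metadata file to generate
metadata_file = test_metadata.xml

[influxdb]
; supported schemes include https+influxdb:// and udp+influxdb://
; user:pass in this dsn is used for generating metadata
dsn = influxdb://user:pass@localhost:8086
max_items_per_query = 50
; authentication_required will pass through http basic auth username
; and password to influxdb
authentication_required = no
```
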
29 | Start your OData endpoint server with `python server.py`.
30 | 
31 | Point an OData browser to `http://hostname:8080/`.
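
If you don't have an OData browser handy, plain HTTP works too. A minimal sketch using the `requests` library (not a project dependency; `mydb__cpu` is a hypothetical entity set name):

```
import requests

# the service document lists the available entity sets (one per measurement)
print(requests.get('http://localhost:8080/').text)

# entity sets are named "<database>__<measurement>", with spaces and dashes
# mangled as described in influxdbmeta.py
print(requests.get('http://localhost:8080/mydb__cpu', params={'$top': '10'}).text)
```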
32 |
33 | ## Production:
34 |
35 | The recommended production deployment is as follows:
36 |
37 | ![Production deployment diagram](img/production_deployment_diagram.png)
38 |
39 | The odata-influxdb service is stateless/sessionless. An XML file is generated upon starting the
40 | server to describe your InfluxDB metadata structure in a way that pyslet can understand. You
41 | can decrease server startup time drastically by disabling this feature in your `.conf` file
42 | (`[metadata] -> autogenerate=no`, see below) after the file has been generated once. You'll need
43 | to re-enable it if your InfluxDB structure changes. You can also keep this feature disabled if
44 | you need to hand-edit your .xml file to limit or change what is browsable to OData clients.
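
In other words, once a good metadata file has been generated, your `.conf` can carry:

```
[metadata]
autogenerate = no
```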
45 |
46 | It is recommended that you run InfluxDB with auth enabled. Odata-influxdb passes
47 | HTTP basic auth credentials through to your InfluxDB server. You can specify a user in your
48 | .conf file dsn settings. Example: `[influxdb] dsn=influxdb://user:pass@localhost:8086`
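
With `authentication_required = yes`, each client request must carry HTTP basic auth credentials, which odata-influxdb forwards to InfluxDB rather than verifying itself. A minimal sketch with the `requests` library (hypothetical entity set name):

```
import requests

# credentials are forwarded to InfluxDB; a bad user/pass fails at query time
resp = requests.get('http://localhost:8080/mydb__cpu', auth=('user', 'pass'))
resp.raise_for_status()
print(resp.text)
```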
49 |
50 | The default setting `[influxdb] max_items_per_query=50` is extremely conservative.
51 | It is recommended to increase this value, up to around 1000, based on your testing of
52 | response times.
53 |
54 | ## Tests:
55 |
56 | Run unit tests with `python tests.py`. The tests additionally require the `responses` library (`pip install responses`).
57 |
58 | ## OData layout:
59 |
60 | Upon startup, the server pulls the metadata from your InfluxDB server
61 | (database names, measurement names, field keys, and tag keys).
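
Under the hood, the metadata pull issues the standard InfluxQL introspection queries (see `influxdbmeta.py`):

```
SHOW DATABASES
SHOW MEASUREMENTS
SHOW FIELD KEYS
SHOW TAG KEYS
```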
62 |
63 | Each measurement is exposed as an OData entity set (a "table"). All field keys and tag
64 | keys from the InfluxDB database are included as columns, but many values
65 | may be null depending on your InfluxDB setup. You can use the OData $select
66 | query option to limit which columns are returned.
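
For example, to return only the time column and a single field (field names here are illustrative):

```
/db?$select=timestamp,float_field&$top=100
```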
67 |
68 | ## Filters
69 |
70 | The OData `$filter` query option is supported, but has some limitations.
71 |
72 | Supported operators are:
73 |
74 | * gt (greater than, >)
75 | * ge (greater than or equal to, >=)
76 | * lt (less than, <)
77 | * le (less than or equal to, <=)
78 | * eq (equals, =)
79 | * ne (not equal to, !=)
80 | * and (boolean and)
81 |
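For example, the filter below (`timestamp` maps to InfluxDB's `time` field, as described under Grouping):

```
/db?$filter=timestamp ge datetime'2016-01-01T00:00:00' and float_field gt 0.5
```

translates to the InfluxDB clause:

```
WHERE time >= '2016-01-01 00:00:00' AND float_field > 0.5
```
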
82 | ## Grouping
83 |
84 | This project depends on pyslet, which currently gives us OData 2
85 | support. OData 2 does not include grouping, so this project provides
86 | a non-standard implementation of grouping operations. Because of this,
87 | you cannot use GUI tools to form the grouping queries.
88 | 
89 | * InfluxDB requires a WHERE clause on the time field when grouping by time.
90 | 
91 | * The release version of pyslet had a [bug](https://github.com/swl10/pyslet/issues/71)
92 | (now fixed) where you could not use a field called "time", so use "timestamp" to refer
93 | to InfluxDB's "time" field.
94 | 
95 | * When using aggregate functions without grouping by '*', only influxdb fields
96 | will be populated in the result, not tags. It is recommended to use
97 | `influxgroupby=*` in your queries, as it is not very expensive and
98 | allows flexibility in your OData client processing.
99 |
100 | ### Example queries:
101 |
102 | #### Group by day. Aggregate the mean of each field.
103 | 
104 | Query URL:
105 | ```
106 | /db?$filter=timestamp ge datetime'2017-01-01T00:00:00' and timestamp le datetime'2017-03-01T00:00:00'&$top=1000&groupByTime=1d&aggregate=mean
107 | ```
108 | 
109 | Resulting InfluxDB query:
110 | ```
111 | SELECT mean(*) FROM measurement
112 | WHERE time >= '2017-01-01' AND time <= '2017-03-01'
113 | GROUP BY time(1d)
114 | ```
115 | 
116 |
117 | #### Group by day. Aggregate the mean of each field. Also group by all tag keys.
118 | 
119 | Query URL:
120 | ```
121 | /db?$filter=timestamp ge datetime'2017-01-01T00:00:00' and timestamp le datetime'2017-03-01T00:00:00'&$top=1000&groupByTime=1d&aggregate=mean&influxgroupby=*
122 | ```
123 | 
124 | Resulting InfluxDB query:
125 | ```
126 | SELECT mean(*) FROM measurement
127 | WHERE time >= '2017-01-01' AND time <= '2017-03-01'
128 | GROUP BY *,time(1d)
129 | ```
130 | 
131 |
132 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Synergetic-Engineering/odata-influxdb/8cfad5c44e31d7b05b9e115ee0328422cce05157/__init__.py
--------------------------------------------------------------------------------
/img/production_deployment_diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Synergetic-Engineering/odata-influxdb/8cfad5c44e31d7b05b9e115ee0328422cce05157/img/production_deployment_diagram.png
--------------------------------------------------------------------------------
/influxdbds.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import numbers
3 | import logging
4 | import sys
5 | import influxdb
6 | from functools32 import lru_cache
7 | from pyslet.iso8601 import TimePoint
8 | import pyslet.rfc2396 as uri
9 | from pyslet.odata2.core import EntityCollection, CommonExpression, PropertyExpression, BinaryExpression, \
10 | LiteralExpression, Operator, SystemQueryOption, format_expand, format_select, ODataURI
11 | from pyslet.py2 import to_text
12 |
13 | from local import request
14 |
15 | logger = logging.getLogger("odata-influxdb")
16 |
17 | operator_symbols = {
18 | Operator.lt: ' < ',
19 | Operator.le: ' <= ',
20 | Operator.gt: ' > ',
21 | Operator.ge: ' >= ',
22 | Operator.eq: ' = ',
23 | Operator.ne: ' != ',
24 |     getattr(Operator, 'and'): ' AND '  # 'and' is a Python keyword, so Operator.and is a SyntaxError
25 | }
26 |
27 | @lru_cache()
28 | def get_tags_and_field_keys(client, measurement_name, db_name):
29 | client.switch_database(db_name)
30 | field_keys = tuple(f['fieldKey'] for f in client.query('SHOW FIELD KEYS')[measurement_name])
31 | tag_keys = tuple(t['tagKey'] for t in client.query('SHOW TAG KEYS')[measurement_name])
32 | return tuple(field_keys + tag_keys)
33 |
34 |
35 | class InfluxDBEntityContainer(object):
36 | """Object used to represent an Entity Container (influxdb database)
37 |
38 | modelled after the SQLEntityContainer in pyslet (sqlds.py)
39 |
40 | container
41 | pyslet.odata2.csdl.EntityContainer
42 |
43 | dsn
44 | data source name in the format: influxdb://user:pass@host:port/
45 | supported schemes include https+influxdb:// and udp+influxdb://
46 | """
47 | def __init__(self, container, dsn, topmax, **kwargs):
48 | self.container = container
49 | self.dsn = dsn
50 | self.client = influxdb.InfluxDBClient.from_dsn(self.dsn)
51 | self._topmax = topmax
52 | for es in self.container.EntitySet:
53 | self.bind_entity_set(es)
54 |
55 | def bind_entity_set(self, entity_set):
56 | entity_set.bind(self.get_collection_class(), container=self)
57 |
58 | def get_collection_class(self):
59 | return InfluxDBMeasurement
60 |
61 |
62 | def unmangle_db_name(db_name):
63 | """corresponds to mangle_db_name in influxdbmeta.py"""
64 | if db_name == u'internal':
65 | db_name = u'_internal'
66 | db_name = db_name.replace('_dsh_', '-')
67 | return db_name
68 |
69 |
70 | # noinspection SqlDialectInspection
71 | def unmangle_measurement_name(measurement_name):
72 | """corresponds to mangle_measurement_name in influxdbmeta.py"""
73 | measurement_name = measurement_name.replace('_sp_', ' ')
74 | measurement_name = measurement_name.replace('_dsh_', '-')
75 | return measurement_name
76 |
77 |
78 | def unmangle_entity_set_name(name):
79 | db_name, m_name = name.split('__', 1)
80 | db_name = unmangle_db_name(db_name)
81 | m_name = unmangle_measurement_name(m_name)
82 | return db_name, m_name
83 |
84 |
85 | def parse_influxdb_time(t_str):
86 | """
87 | returns a `datetime` object (some precision from influxdb may be lost)
88 | :type t_str: str
89 | :param t_str: a string representing the time from influxdb (ex. '2017-01-01T23:01:41.123456789Z')
90 | """
91 | try:
92 | return datetime.datetime.strptime(t_str[:26].rstrip('Z'), '%Y-%m-%dT%H:%M:%S.%f')
93 | except ValueError:
94 | return datetime.datetime.strptime(t_str[:19], '%Y-%m-%dT%H:%M:%S')
95 |
96 |
97 | class InfluxDBMeasurement(EntityCollection):
98 | """represents a measurement query, containing points
99 |
100 | name should be "database.measurement"
101 | """
102 | def __init__(self, container, **kwargs):
103 | super(InfluxDBMeasurement, self).__init__(**kwargs)
104 | self.container = container
105 | self.db_name, self.measurement_name = unmangle_entity_set_name(self.entity_set.name)
106 | self.topmax = getattr(self.container, '_topmax', 50)
107 | self.default_user = self.container.client._username
108 | self.default_pass = self.container.client._password
109 |
110 | #@lru_cache()
111 | def _query_len(self):
112 | """influxdb only counts non-null values, so we return the count of the field with maximum non-null values"""
113 | q = u'SELECT COUNT(*) FROM "{}" {} {}'.format(
114 | self.measurement_name,
115 | self._where_expression(),
116 | self._groupby_expression()
117 | ).strip()
118 | self.container.client.switch_database(self.db_name)
119 | logger.info('Querying InfluxDB: {}'.format(q))
120 | rs = self.container.client.query(q)
121 | interval_list = list(rs.get_points())
122 | if request and request.args.get('aggregate'):
123 | max_count = len(interval_list)
124 | else:
125 | max_count = max(val for val in rs.get_points().next().values() if isinstance(val, numbers.Number))
126 | self._influxdb_len = max_count
127 | return max_count
128 |
129 | def __len__(self):
130 | return self._query_len()
131 |
132 | def set_expand(self, expand, select=None):
133 | """Sets the expand and select query options for this collection.
134 |
135 | The expand query option causes the named navigation properties
136 | to be expanded and the associated entities to be loaded in to
137 | the entity instances before they are returned by this collection.
138 |
139 | *expand* is a dictionary of expand rules. Expansions can be chained,
140 | represented by the dictionary entry also being a dictionary::
141 |
142 | # expand the Customer navigation property...
143 | { 'Customer': None }
144 | # expand the Customer and Invoice navigation properties
145 | { 'Customer':None, 'Invoice':None }
146 | # expand the Customer property and then the Orders property within Customer
147 | { 'Customer': {'Orders':None} }
148 |
149 | The select query option restricts the properties that are set in
150 | returned entities. The *select* option is a similar dictionary
151 | structure, the main difference being that it can contain the
152 | single key '*' indicating that all *data* properties are
153 | selected."""
154 | self.entity_set.entityType.ValidateExpansion(expand, select)
155 | self.expand = expand
156 | # in influxdb, you must always query at LEAST the time field
157 | if select is not None and 'timestamp' not in select:
158 | select['timestamp'] = None
159 | self.select = select
160 | self.lastEntity = None
161 |
162 | def expand_entities(self, entityIterable):
163 | """Utility method for data providers.
164 |
165 | Given an object that iterates over all entities in the
166 | collection, returns a generator function that returns expanded
167 | entities with select rules applied according to
168 | :py:attr:`expand` and :py:attr:`select` rules.
169 |
170 | Data providers should use a better method of expanded entities
171 | if possible as this implementation simply iterates through the
172 | entities and calls :py:meth:`Entity.Expand` on each one."""
173 | for e in entityIterable:
174 | if self.expand or self.select:
175 | e.Expand(self.expand, self.select)
176 | yield e
177 |
178 | def itervalues(self):
179 | return self.expand_entities(
180 | self._generate_entities())
181 |
182 | def non_aggregate_field_name(self, f):
183 | agg = request.args.get('aggregate').lower()
184 | parts = f.split('_', 1)
185 | if parts[0] == agg:
186 | return parts[1]
187 | else:
188 | raise KeyError('invalid field received from influxdb: {}'.format(f))
189 |
190 | def _generate_entities(self):
191 | # SELECT_clause [INTO_clause] FROM_clause [WHERE_clause]
192 | # [GROUP_BY_clause] [ORDER_BY_clause] LIMIT_clause OFFSET [SLIMIT_clause]
193 | if request:
194 | auth = getattr(request, 'authorization', None)
195 | else:
196 | auth = None
197 | if auth is not None:
198 | self.container.client.switch_user(auth.username, auth.password)
199 | else:
200 | self.container.client.switch_user(self.default_user, self.default_pass)
201 | q = u'SELECT {} FROM "{}" {} {} {} {}'.format(
202 | self._select_expression(),
203 | self.measurement_name,
204 | self._where_expression(),
205 | self._groupby_expression(),
206 | self._orderby_expression(),
207 | self._limit_expression(),
208 | ).strip()
209 | logger.info('Querying InfluxDB: {}'.format(q))
210 |
211 | result = self.container.client.query(q, database=self.db_name)
212 | #fields = get_tags_and_field_keys(self.container.client, self.measurement_name, self.db_name)
213 |
214 | for measurement_name, tag_set in result.keys():
215 | for row in result[measurement_name, tag_set]:
216 | e = self.new_entity()
217 | t = parse_influxdb_time(row['time'])
218 | e['timestamp'].set_from_value(t)
219 | if self.select is None or '*' in self.select:
220 | for influxdb_field_name, influxdb_field_value in row.items():
221 | if influxdb_field_name == 'time':
222 | continue # time has already been set
223 | try:
224 | entity_property = e[influxdb_field_name]
225 | except KeyError:
226 | # assume aggregated field
227 | entity_property = e[self.non_aggregate_field_name(influxdb_field_name)]
228 | entity_property.set_from_value(influxdb_field_value)
229 | if tag_set is not None:
230 | for tag, value in tag_set.items():
231 | e[tag].set_from_value(value)
232 | else:
233 | for odata_field_name in self.select:
234 | if odata_field_name == 'timestamp':
235 | continue # time has already been set
236 |                         if request and request.args.get('aggregate'):
237 |                             # aggregated columns come back from influxdb named "<func>_<field>"
238 |                             e[odata_field_name].set_from_value(
239 |                                 row[request.args.get('aggregate') + '_' + odata_field_name])
240 |                         else:
241 |                             e[odata_field_name].set_from_value(row[odata_field_name])
240 | e.exists = True
241 | self.lastEntity = e
242 | yield e
243 |
244 | def _select_expression(self):
245 | """formats the list of fields for the SQL SELECT statement, with aggregation functions if specified
246 | with &aggregate=func in the querystring"""
247 | field_format = u'{}'
248 | if request:
249 | aggregate_func = request.args.get('aggregate', None)
250 | if aggregate_func is not None:
251 | field_format = u'{}({{0}}) as {{0}}'.format(aggregate_func)
252 |
253 | def select_key(spec_key):
254 | if spec_key == u'*':
255 | tmp = field_format.format(spec_key)
256 | if u"as *" in tmp:
257 | return tmp[:tmp.find(u"as *")] # ... inelegant
258 | return field_format.format(spec_key.strip())
259 |
260 | if self.select is None or '*' in self.select:
261 | return select_key(u'*')
262 | else:
263 | # join the selected fields
264 | # if specified, format aggregate: func(field) as field
265 | # influxdb always returns the time field, and doesn't like it if you ask when there's a groupby anyway
266 | return u','.join((select_key(k)
267 | for k in self.select.keys() if k != u'timestamp'))
268 |
269 | def _where_expression(self):
270 | """generates a valid InfluxDB "WHERE" query part from the parsed filter (set with self.set_filter)"""
271 | if self.filter is None:
272 | return u''
273 | return u'WHERE {}'.format(self._sql_where_expression(self.filter))
274 |
275 | def _sql_where_expression(self, filter_expression):
276 | if filter_expression is None:
277 | return ''
278 | elif isinstance(filter_expression, BinaryExpression):
279 | expressions = (filter_expression.operands[0],
280 | filter_expression.operands[1])
281 | symbol = operator_symbols[filter_expression.operator]
282 | return symbol.join(self._sql_expression(o) for o in expressions)
283 | else:
284 | raise NotImplementedError
285 |
286 | def _groupby_expression(self):
287 | group_by = []
288 | if request:
289 | group_by_raw = request.args.get(u'influxgroupby', None)
290 | if group_by_raw is not None and self.filter is not None:
291 | group_by_raw = group_by_raw.strip().split(',')
292 | for g in group_by_raw:
293 | if g == u'*':
294 | group_by.append(g)
295 | else:
296 | group_by.append(u'"{}"'.format(g))
297 | group_by_time_raw = request.args.get('groupByTime', None)
298 | if group_by_time_raw is not None:
299 | group_by.append('time({})'.format(group_by_time_raw))
300 | if len(group_by) == 0:
301 | return ''
302 | else:
303 | return 'GROUP BY {}'.format(','.join(group_by))
304 |
305 | def _orderby_expression(self):
306 | """generates a valid InfluxDB "ORDER BY" query part from the parsed order by clause (set with self.set_orderby)"""
307 | return ''
308 |
309 | def _limit_expression(self):
310 | if not self.paging:
311 | return ''
312 | if not self.skip:
313 | return 'LIMIT {}'.format(str(self.top))
314 | return 'LIMIT {} OFFSET {}'.format(str(self.top), str(self.skip))
315 |
316 | def _sql_expression(self, expression):
317 | if isinstance(expression, PropertyExpression):
318 | if expression.name == 'timestamp':
319 | return 'time'
320 | return expression.name
321 | elif isinstance(expression, LiteralExpression):
322 | return self._format_literal(expression.value.value)
323 | elif isinstance(expression, BinaryExpression):
324 | return self._sql_where_expression(expression)
325 |
326 | def _format_literal(self, val):
327 | if isinstance(val, unicode):
328 | return u"'{}'".format(val)
329 | elif isinstance(val, TimePoint):
330 | return u"'{0.date} {0.time}'".format(val)
331 | else:
332 | return str(val)
333 |
334 | def __getitem__(self, key):
335 | raise NotImplementedError
336 |
337 | def set_page(self, top, skip=0, skiptoken=None):
338 | self.top = int(top or 0) or self.topmax # a None value for top causes the default iterpage method to set a skiptoken
339 | self.skip = skip
340 | self.skiptoken = int(skiptoken or 0)
341 | self.nextSkiptoken = None
342 |
343 | def iterpage(self, set_next=False):
344 | """returns iterable subset of entities, defined by parameters to self.set_page"""
345 | if self.top == 0: # invalid, return nothing
346 | return
347 | if self.skiptoken >= len(self):
348 | self.nextSkiptoken = None
349 | self.skip = None
350 | self.skiptoken = None
351 | return
352 | if self.skip is None:
353 | if self.skiptoken is not None:
354 | self.skip = int(self.skiptoken)
355 | else:
356 | self.skip = 0
357 | self.paging = True
358 | if set_next:
359 | # yield all pages
360 | done = False
361 | while self.skiptoken <= len(self):
362 | self.nextSkiptoken = (self.skiptoken or 0) + self.top
363 | for e in self.itervalues():
364 | yield e
365 | self.skiptoken = self.nextSkiptoken
366 | self.paging = False
367 | self.top = self.skip = 0
368 | self.skiptoken = self.nextSkiptoken = None
369 | else:
370 | # yield one page
371 | self.nextSkiptoken = (self.skiptoken or 0) + min(len(self), self.top)
372 | for e in self.itervalues():
373 | yield e
374 | self.paging = False
375 |
376 | def get_next_page_location(self):
377 | """Returns the location of this page of the collection
378 |
379 | The result is a :py:class:`rfc2396.URI` instance."""
380 | token = self.next_skiptoken()
381 | if token is not None:
382 | baseURL = self.get_location()
383 | sysQueryOptions = {}
384 | if self.filter is not None:
385 | sysQueryOptions[
386 | SystemQueryOption.filter] = unicode(self.filter)
387 | if self.expand is not None:
388 | sysQueryOptions[
389 | SystemQueryOption.expand] = format_expand(self.expand)
390 | if self.select is not None:
391 | sysQueryOptions[
392 | SystemQueryOption.select] = format_select(self.select)
393 | if self.orderby is not None:
394 | sysQueryOptions[
395 | SystemQueryOption.orderby] = CommonExpression.OrderByToString(
396 | self.orderby)
397 | sysQueryOptions[SystemQueryOption.skiptoken] = unicode(token)
398 | extraOptions = ''
399 | if request:
400 | extraOptions = u'&' + u'&'.join([
401 | u'{0}={1}'.format(k, v) for k, v in request.args.items() if k[0] != u'$'])
402 | return uri.URI.from_octets(
403 | str(baseURL) +
404 | "?" +
405 | ODataURI.format_sys_query_options(sysQueryOptions) +
406 | extraOptions
407 | )
408 | else:
409 | return None
410 |
--------------------------------------------------------------------------------
/influxdbmeta.py:
--------------------------------------------------------------------------------
1 | from itertools import chain
2 |
3 | from influxdb import InfluxDBClient
4 |
5 | # EDMX wrapper for the generated OData 2 $metadata document
6 | xml_head = """<?xml version="1.0" encoding="utf-8" standalone="yes"?>
7 | <edmx:Edmx Version="1.0" xmlns:edmx="http://schemas.microsoft.com/ado/2007/06/edmx">
8 | <edmx:DataServices xmlns:m="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata" m:DataServiceVersion="2.0">
9 | <Schema Namespace="InfluxDBSchema" xmlns="http://schemas.microsoft.com/ado/2008/09/edm">
10 | """
11 | 
12 | xml_foot = """</Schema>
13 | </edmx:DataServices>
14 | </edmx:Edmx>
15 | """
16 | 
19 |
20 | influx_type_to_edm_type = {
21 | 'float': 'Edm.Double', # influxdb stores floats in a float64 format
22 | 'integer': 'Edm.Int64', # influxdb stores integers as 64-bit signed
23 | 'string': 'Edm.String'
24 | }
25 |
26 |
27 | def get_edm_type(influx_type):
28 | if influx_type is None:
29 | return 'Edm.String'
30 | else:
31 | return influx_type_to_edm_type[influx_type]
32 |
33 |
34 | def mangle_measurement_name(m_name):
35 | """corresponds to unmangle_measurement_name in influxdbds.py"""
36 | m_name = m_name.replace(' ', '_sp_')
37 | m_name = m_name.replace('-', '_dsh_')
38 | return m_name
39 |
40 |
41 | def mangle_db_name(db_name):
42 | """corresponds to unmangle_db_name in influxdbds.py"""
43 | db_name = db_name.strip('_') # edmx names cannot begin with '_'
44 | db_name = db_name.replace('-', '_dsh_')
45 | return db_name
46 |
47 |
48 | def db_name__measurement_name(db_name, m_name):
49 | return '{}__{}'.format(
50 | mangle_db_name(db_name),
51 | mangle_measurement_name(m_name)
52 | )
53 |
54 |
55 | class InfluxDB(object):
56 | def __init__(self, dsn):
57 | self.client = InfluxDBClient.from_dsn(dsn)
58 |
59 | def fields(self, db_name):
60 | """returns a tuple of dicts where each dict has attributes (name, type, edm_type)"""
61 | fields_rs = self.client.query('SHOW FIELD KEYS', database=db_name)
62 | tags_rs = self.client.query('SHOW TAG KEYS', database=db_name)
63 | # expand and deduplicate
64 | fields = set(tuple(f.items()) for f in chain(*chain(fields_rs, tags_rs)))
65 | fields = (dict(
66 | name=f[0][1],
67 | type='string' if len(f)==1 else f[1][1],
68 | edm_type=get_edm_type('string' if len(f)==1 else f[1][1])
69 | ) for f in fields)
70 | return tuple(fields)
71 |
72 | @property
73 | def measurements(self):
74 | measurements = []
75 | for db in self.databases:
76 | q = 'SHOW MEASUREMENTS'
77 | rs = self.client.query(q, database=db[u'name'])
78 |
79 | def m_dict(m):
80 | d = dict(m)
81 | d['db_name'] = db['name']
82 | d['mangled_db'] = mangle_db_name(db['name'])
83 | d['mangled_measurement'] = mangle_measurement_name(m['name'])
84 | d['mangled_path'] = db_name__measurement_name(db['name'], m['name'])
85 | d['fields'] = self.fields(db['name'])
86 | return d
87 | measurements.extend(m_dict(m) for m in rs.get_points())
88 | return measurements
89 |
90 | @property
91 | def databases(self):
92 | rs = self.client.get_list_database()
93 | return iter(rs)
94 |
95 |
96 | def gen_entity_set_xml(m):
97 |     return '<EntitySet Name="{}" EntityType="InfluxDBSchema.{}"/>'.format(m['mangled_path'], m['mangled_path'])
98 |
99 |
100 | def generate_properties_xml(m):
101 |     return '\n'.join(
102 |         '<Property Name="{}" Type="{}" Nullable="true"/>'.format(f['name'], f['edm_type']) for f in m['fields']
103 |     )
104 |
105 |
106 | def generate_key_xml(m):
107 |     """influxdb has no concept of a key, so we use the time value (NOT guaranteed to be unique)"""
108 |     return ('<Key><PropertyRef Name="timestamp"/></Key>\n'
109 |             '<Property Name="timestamp" Type="Edm.DateTime" Nullable="false"/>')
109 |
110 |
111 | def gen_entity_type_xml(m):
112 |     return '<EntityType Name="{}">\n{}\n{}\n</EntityType>'.format(
113 |         m['mangled_path'],
114 |         generate_key_xml(m),
115 |         generate_properties_xml(m))
116 |
117 |
118 | def entity_sets_and_types(db):
119 | """generate xml entries for entity sets (containers) and entity types (with properties)"""
120 | entity_sets = []
121 | entity_types = []
122 | for m in db.measurements:
123 | entity_sets.append(gen_entity_set_xml(m))
124 | entity_types.append(gen_entity_type_xml(m))
125 | return entity_sets, entity_types
126 |
127 |
128 | def generate_metadata(dsn):
129 | """connect to influxdb, read the structure, and return an edmx xml file string"""
130 | i = InfluxDB(dsn)
131 | entity_sets, entity_types = entity_sets_and_types(i)
132 |     output = """{}
133 | <EntityContainer Name="InfluxDB" m:IsDefaultEntityContainer="true">
134 | {}
135 | </EntityContainer>
136 | {}
137 | {}""".format(xml_head, '\n'.join(entity_sets), '\n'.join(entity_types), xml_foot)
138 | return output
139 |
--------------------------------------------------------------------------------
/local.py:
--------------------------------------------------------------------------------
1 | from werkzeug.local import Local, LocalManager
2 |
3 | local = Local()
4 | local_manager = LocalManager()
5 |
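# `request` is a werkzeug Local proxy bound per request by HTTPAuthPassThrough
# (server.py); influxdbds.py reads query args and basic-auth credentials from it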
6 | request = local('request')
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | pyslet
2 | influxdb
3 | functools32
4 | werkzeug
--------------------------------------------------------------------------------
/server.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import logging
3 | import os
4 | import sys
5 | from urlparse import urlparse
6 | from ConfigParser import ConfigParser, NoOptionError, NoSectionError
7 | from wsgiref.simple_server import make_server
8 | from werkzeug.wrappers import AuthorizationMixin, BaseRequest, Response
9 | from local import local, local_manager
10 |
11 |
12 | import pyslet.odata2.metadata as edmx
13 | from pyslet.odata2.server import ReadOnlyServer
14 |
15 | from influxdbmeta import generate_metadata
16 | from influxdbds import InfluxDBEntityContainer
17 |
18 | cache_app = None #: our Server instance
19 |
20 | #logging.basicConfig()
21 | logHandler = logging.StreamHandler(sys.stdout)
22 | logFormatter = logging.Formatter(fmt='%(levelname)s:%(name)s:%(message)s')
23 | #logHandler.formatter = logFormatter
24 | logger = logging.getLogger("odata-influxdb")
25 | logger.addHandler(logHandler)
26 | logger.setLevel(logging.DEBUG)
27 |
28 |
29 | class Request(BaseRequest, AuthorizationMixin):
30 | pass
31 |
32 |
33 | class HTTPAuthPassThrough(object):
34 | def __init__(self, app):
35 | self.wrapped = app
36 | self.realm = 'influxdb'
37 |
38 | def __call__(self, environ, start_response):
39 | local.request = req = Request(environ)
40 | if req.authorization is None:
41 |             resp = Response('Unauthorized. Please supply authorization.',
42 |                             status=401,
43 |                             headers=[
44 |                                 ('WWW-Authenticate', 'Basic realm="{}"'.format(self.realm)),
45 |                             ]
46 |                             )
47 | return resp(environ, start_response)
48 | return self.wrapped(environ, start_response)
49 |
50 |
51 | class FileExistsError(IOError):
52 | def __init__(self, path):
53 | self.__path = path
54 |
55 | def __str__(self):
56 | return 'file already exists: {}'.format(self.__path)
57 |
58 |
59 | def load_metadata(config):
60 |     """Generates (if configured) and loads the metadata file, then connects the InfluxDBEntityContainer."""
61 | metadata_filename = config.get('metadata', 'metadata_file')
62 | dsn = config.get('influxdb', 'dsn')
63 |
64 | if config.getboolean('metadata', 'autogenerate'):
65 | logger.info("Generating OData metadata xml file from InfluxDB metadata")
66 | metadata = generate_metadata(dsn)
67 | with open(metadata_filename, 'wb') as f:
68 | f.write(metadata)
69 |
70 | doc = edmx.Document()
71 | with open(metadata_filename, 'rb') as f:
72 | doc.ReadFromStream(f)
73 | container = doc.root.DataServices['InfluxDBSchema.InfluxDB']
74 |     try:
75 |         topmax = config.getint('influxdb', 'max_items_per_query')
76 |     except (NoSectionError, NoOptionError, ValueError):
77 |         topmax = 50
78 | InfluxDBEntityContainer(container=container, dsn=dsn, topmax=topmax)
79 | return doc
80 |
81 |
82 | def configure_app(c, doc):
83 | service_root = c.get('server', 'service_advertise_root')
84 | logger.info("Advertising service at %s" % service_root)
85 | app = ReadOnlyServer(serviceRoot=service_root)
86 | app.SetModel(doc)
87 | return app
88 |
89 |
90 | def start_server(c, doc):
91 | app = configure_app(c, doc)
92 | if c.getboolean('influxdb', 'authentication_required'):
93 | app = HTTPAuthPassThrough(app)
94 | app = local_manager.make_middleware(app)
95 | from werkzeug.serving import run_simple
96 | listen_interface = c.get('server', 'server_listen_interface')
97 | listen_port = int(c.get('server', 'server_listen_port'))
98 | logger.info("Starting HTTP server on: interface: %s, port: %i..." % (listen_interface, listen_port))
99 | run_simple(listen_interface, listen_port, application=app)
100 |
101 |
102 | def get_sample_config():
103 | config = ConfigParser(allow_no_value=True)
104 | config.add_section('server')
105 | config.set('server', 'service_advertise_root', 'http://localhost:8080')
106 | config.set('server', 'server_listen_interface', '127.0.0.1')
107 | config.set('server', 'server_listen_port', '8080')
108 | config.add_section('metadata')
109 | config.set('metadata', '; set autogenerate to "no" for quicker startup of the server if you know your influxdb structure has not changed')
110 | config.set('metadata', 'autogenerate', 'yes')
111 | config.set('metadata', '; metadata_file specifies the location of the metadata file to generate')
112 | config.set('metadata', 'metadata_file', 'test_metadata.xml')
113 | config.add_section('influxdb')
114 | config.set('influxdb', '; supported schemes include https+influxdb:// and udp+influxdb://')
115 | config.set('influxdb', '; user:pass in this dsn is used for generating metadata')
116 | config.set('influxdb', 'dsn', 'influxdb://user:pass@localhost:8086')
117 | config.set('influxdb', 'max_items_per_query', '50')
118 | config.set('influxdb', '; authentication_required will pass through http basic auth username')
119 | config.set('influxdb', '; and password to influxdb')
120 | config.set('influxdb', 'authentication_required', 'no')
121 | return config
122 |
123 |
124 | def make_sample_config():
125 | config = get_sample_config()
126 | sample_name = 'sample.conf'
127 | if os.path.exists(sample_name):
128 | raise FileExistsError(sample_name)
129 | with open(sample_name, 'w') as cf:
130 | config.write(cf)
131 | print('generated sample conf at: {}'.format(os.path.join(os.getcwd(), sample_name)))
132 |
133 |
134 | def get_config(config):
135 | with open(config, 'r') as fp:
136 | c = get_sample_config()
137 | c.readfp(fp)
138 | return c
139 |
140 |
141 | def main():
142 | """read config and start odata api server"""
143 | # parse arguments
144 | p = argparse.ArgumentParser()
145 | p.add_argument('-c', '--config',
146 | help='specify a conf file (default=production.conf)',
147 | default='production.conf')
148 | p.add_argument('-m', '--makeSampleConfig',
149 | help='generates sample.conf in your current directory (does not start server)',
150 | action="store_true")
151 | args = p.parse_args()
152 |
153 | if args.makeSampleConfig:
154 | make_sample_config()
155 | sys.exit()
156 |
157 | # parse config file
158 | c = get_config(args.config)
159 |
160 | # generate and load metadata
161 | doc = load_metadata(c)
162 |
163 | # start server
164 | start_server(c, doc)
165 |
166 |
167 | if __name__ == '__main__':
168 | main()
169 |
--------------------------------------------------------------------------------
/test_data/test_metadata.xml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Synergetic-Engineering/odata-influxdb/8cfad5c44e31d7b05b9e115ee0328422cce05157/test_data/test_metadata.xml
--------------------------------------------------------------------------------
/tests.py:
--------------------------------------------------------------------------------
1 | import random
2 | import re
3 | import unittest
4 | import os
5 | try:
6 | from responses import RequestsMock
7 | except ImportError as e:
8 | print('unit tests require responses library: try `pip install responses`')
9 | raise e
10 | from server import generate_metadata, get_sample_config, load_metadata
11 | from influxdbmeta import db_name__measurement_name, mangle_db_name, mangle_measurement_name
12 | from influxdbds import unmangle_measurement_name, unmangle_db_name, unmangle_entity_set_name
13 | from pyslet.odata2 import core
14 |
15 | NUM_TEST_POINTS = 100
16 |
17 | json_database_list = {
18 | "results": [{
19 | "statement_id": 0,
20 | "series": [{
21 | "name": "databases",
22 | "columns": ["name"],
23 | "values": [
24 | ["_internal"],
25 | ["database1"]]}]}]}
26 |
27 | json_measurement_list = {
28 | "results": [{
29 | "statement_id": 0,
30 | "series": [{
31 | "name": "measurements",
32 | "columns": ["name"],
33 | "values": [
34 | ["measurement1"],
35 | ["measurement with spaces"]]}]}]}
36 |
37 | json_tag_keys = {
38 | "results": [{
39 | "statement_id": 0,
40 | "series": [{
41 | "name": "measurement1",
42 | "columns": ["tagKey"],
43 | "values": [["tag1"],
44 | ["tag2"]]}]}]}
45 |
46 | json_field_keys = {
47 | "results": [{
48 | "statement_id": 0,
49 | "series": [{
50 | "name": "measurement1",
51 | "columns": ["fieldKey", "fieldType"],
52 | "values": [["float_field", "float"],
53 | ["int_field", "integer"]]}]}]}
54 |
55 |
56 | def json_points_list(measurement_name, page_size=None):
57 | num_values = page_size or NUM_TEST_POINTS
58 | tag1_values = ["foo", "bar"]
59 | tag2_values = ["one", "zero"]
60 | value_list = [
61 | ["2017-01-01T00:00:00Z",
62 | random.choice(tag1_values), random.choice(tag2_values),
63 | random.random(), random.randint(-40,40)]
64 | for i in range(num_values)
65 | ]
66 | return {
67 | "results": [{
68 | "statement_id": 0,
69 | "series": [{
70 | "name": measurement_name,
71 | "columns": ["time", 'tag1', 'tag2', 'float_field', 'int_field'],
72 | "values": value_list}]}]}
73 |
74 |
75 | def json_count(measurement_name):
76 | return {
77 | "results": [{
78 | "statement_id": 0,
79 | "series": [{
80 | "name": measurement_name,
81 | "columns": [
82 | "time",
83 | "float_field",
84 | "int_field"],
85 | "values": [[
86 | "1970-01-01T00:00:00Z", NUM_TEST_POINTS, NUM_TEST_POINTS]]}]}]}
87 |
88 |
89 | class TestInfluxOData(unittest.TestCase):
90 | def setUp(self):
91 | self._config = get_sample_config()
92 | self._config.set('influxdb', 'dsn', 'influxdb://localhost:8086')
93 | self._config.set('metadata', 'autogenerate', 'no')
94 | self._config.set('metadata', 'metadata_file', os.path.join('test_data', 'test_metadata.xml'))
95 | self._doc = load_metadata(self._config)
96 | self._container = self._doc.root.DataServices['InfluxDBSchema.InfluxDB']
97 |
98 | def test_generate_metadata(self):
99 | with RequestsMock() as rsp:
100 | rsp.add(rsp.GET, re.compile('.*SHOW\+DATABASES.*'),
101 | json=json_database_list, match_querystring=True)
102 | rsp.add(rsp.GET, re.compile('.*SHOW\+MEASUREMENTS.*'),
103 | json=json_measurement_list, match_querystring=True)
104 | rsp.add(rsp.GET, re.compile('.*SHOW\+MEASUREMENTS.*'),
105 | json=json_measurement_list, match_querystring=True)
106 | rsp.add(rsp.GET, re.compile('.*SHOW\+FIELD\+KEYS.*'),
107 | json=json_field_keys, match_querystring=True)
108 | rsp.add(rsp.GET, re.compile('.*SHOW\+TAG\+KEYS.*'),
109 | json=json_tag_keys, match_querystring=True)
110 | rsp.add(rsp.GET, re.compile('.*SHOW\+FIELD\+KEYS.*'),
111 | json=json_field_keys, match_querystring=True)
112 | rsp.add(rsp.GET, re.compile('.*SHOW\+TAG\+KEYS.*'),
113 | json=json_tag_keys, match_querystring=True)
114 | rsp.add(rsp.GET, re.compile('.*SHOW\+FIELD\+KEYS.*'),
115 | json=json_field_keys, match_querystring=True)
116 | rsp.add(rsp.GET, re.compile('.*SHOW\+TAG\+KEYS.*'),
117 | json=json_tag_keys, match_querystring=True)
118 | rsp.add(rsp.GET, re.compile('.*SHOW\+FIELD\+KEYS.*'),
119 | json=json_field_keys, match_querystring=True)
120 | rsp.add(rsp.GET, re.compile('.*SHOW\+TAG\+KEYS.*'),
121 | json=json_tag_keys, match_querystring=True)
122 |
123 | metadata = generate_metadata('influxdb://localhost:8086')
124 | file1 = open(os.path.join('test_data', 'test_metadata.xml'), 'r').read()
125 | open(os.path.join('test_data', 'tmp_metadata.xml'), 'wb').write(metadata)
126 |             self.assertEqual(metadata, file1)
127 |
128 | def test_where_clause(self):
129 | first_feed = next(self._container.itervalues())
130 | collection = first_feed.OpenCollection()
131 |
132 | def where_clause_from_string(filter_str):
133 | e = core.CommonExpression.from_str(filter_str)
134 | collection.set_filter(e)
135 | where = collection._where_expression()
136 | return where
137 |
138 | where = where_clause_from_string(u"prop eq 'test'")
139 | self.assertEqual(where, u"WHERE prop = 'test'", msg="Correct where clause for eq operator")
140 | where = where_clause_from_string(u"prop gt 0")
141 | self.assertEqual(where, u"WHERE prop > 0", msg="Correct where clause for gt operator (Int)")
142 | where = where_clause_from_string(u"prop ge 0")
143 | self.assertEqual(where, u"WHERE prop >= 0", msg="Correct where clause for ge operator (Int)")
144 | where = where_clause_from_string(u"prop lt 0")
145 | self.assertEqual(where, u"WHERE prop < 0", msg="Correct where clause for lt operator (Int)")
146 | where = where_clause_from_string(u"prop le 0")
147 | self.assertEqual(where, u"WHERE prop <= 0", msg="Correct where clause for le operator (Int)")
148 | where = where_clause_from_string(u"prop gt -32.53425D")
149 | self.assertEqual(where, u"WHERE prop > -32.53425", msg="Correct where clause for eq operator (Float)")
150 | where = where_clause_from_string(u"timestamp ge datetime'2016-01-01T00:00:00' and timestamp le datetime'2016-12-31T00:00:00'")
151 | self.assertEqual(where, u"WHERE time >= '2016-01-01 00:00:00' AND time <= '2016-12-31 00:00:00'")
152 | collection.close()
153 |
154 | def test_groupby_expression(self):
155 | first_feed = next(self._container.itervalues())
156 | collection = first_feed.OpenCollection()
157 | self.assertEqual(collection._groupby_expression(), '')
158 |
159 | def test_limit_expression(self):
160 | first_feed = next(self._container.itervalues())
161 | collection = first_feed.OpenCollection()
162 | expr = collection._limit_expression()
163 | self.assertEqual(expr, '')
164 | collection.set_page(top=100)
165 | collection.paging = True
166 | expr = collection._limit_expression()
167 | self.assertEqual(expr, 'LIMIT 100')
168 | collection.set_page(top=10, skip=10)
169 | collection.paging = True
170 | expr = collection._limit_expression()
171 | self.assertEqual(expr, 'LIMIT 10 OFFSET 10')
172 |
173 | def test_len_collection(self):
174 | first_feed = next(self._container.itervalues())
175 | collection = first_feed.OpenCollection()
176 |
177 | with RequestsMock() as rsp:
178 | rsp.add(rsp.GET, re.compile('.*SELECT\+COUNT.*'),
179 | json=json_count(collection.name), match_querystring=True)
180 |
181 | len_collection = len(collection)
182 | self.assertEqual(len_collection, NUM_TEST_POINTS)
183 |
184 | def test_iterpage(self):
185 | first_feed = next(self._container.itervalues())
186 | collection = first_feed.OpenCollection()
187 |
188 | page_size = 200
189 | collection.set_page(top=page_size, skip=0)
190 |
191 | with RequestsMock() as rsp:
192 | re_limit = re.compile('.*q=SELECT\+%2A\+FROM\+%22measurement1.*LIMIT\+200&')
193 | re_limit_offset = re.compile('.*q=SELECT\+%2A\+FROM\+%22measurement1.*LIMIT\+200\+OFFSET\+200&')
194 | rsp.add(rsp.GET, re.compile('.*SELECT\+COUNT.*'),
195 | json=json_count(collection.name), match_querystring=True)
196 | rsp.add(rsp.GET, re.compile('.*SELECT\+COUNT.*'),
197 | json=json_count(collection.name), match_querystring=True)
198 | rsp.add(rsp.GET, re_limit,
199 | json=json_points_list('measurement1', page_size=page_size), match_querystring=True)
200 | rsp.add(rsp.GET, re.compile('.*SELECT\+COUNT.*'),
201 | json=json_count(collection.name), match_querystring=True)
202 | rsp.add(rsp.GET, re.compile('.*SELECT\+COUNT.*'),
203 | json=json_count(collection.name), match_querystring=True)
204 | rsp.add(rsp.GET, re_limit_offset,
205 | json=json_points_list('measurement1', page_size=page_size), match_querystring=True)
206 |
207 | first_page = list(collection.iterpage())
208 | collection.set_page(top=page_size, skip=page_size)
209 |             second_page = list(collection.iterpage())
210 |             self.assertEqual(len(first_page), page_size)
211 |             self.assertEqual(len(second_page), page_size)
210 | collection.close()
211 |
212 | def test_generate_entities(self):
213 | first_feed = next(self._container.itervalues())
214 | with first_feed.OpenCollection() as collection:
215 | with RequestsMock() as rsp:
216 | rsp.add(rsp.GET, re.compile('.*q=SELECT\+%2A\+FROM\+%22measurement1%22&'),
217 | json=json_points_list(collection.name), match_querystring=True)
218 |
219 | for e in collection._generate_entities():
220 | self.assertIsInstance(e, core.Entity)
221 |
222 |
223 | class TestUtilFunctions(unittest.TestCase):
224 | def test_name_mangling(self):
225 | mangled = mangle_db_name('test')
226 | unmangled = unmangle_db_name(mangled)
227 | self.assertEqual('test', unmangled)
228 |
229 | mangled = mangle_db_name('_internal')
230 | unmangled = unmangle_db_name(mangled)
231 | self.assertNotEqual(mangled[0], '_')
232 | self.assertEqual('_internal', unmangled)
233 |
234 | mangled = mangle_measurement_name('test with spaces')
235 | unmangled = unmangle_measurement_name(mangled)
236 | self.assertNotIn(' ', mangled)
237 | self.assertEqual('test with spaces', unmangled)
238 |
239 | mangled = db_name__measurement_name('testdb', 'Testing 123')
240 | db, unmangled = unmangle_entity_set_name(mangled)
241 | self.assertNotIn(' ', mangled)
242 | self.assertEqual('Testing 123', unmangled)
243 |
244 |
245 | if __name__ == '__main__':
246 | unittest.main()
247 |
--------------------------------------------------------------------------------