├── azure_storage_logging
│   ├── __init__.py
│   └── handlers.py
├── MANIFEST.in
├── .gitignore
├── setup.py
├── LICENSE
├── README.rst
└── tests
    └── tests.py

/azure_storage_logging/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE
2 | include MANIFEST.in
3 | include README.rst
4 | recursive-include azure_storage_logging *.py
5 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.py[co]
2 | *.sw[a-z]
3 | *.orig
4 | *.egg-info
5 | *~
6 | .DS_Store
7 | Thumbs.db
8 | 
9 | #pycharm
10 | .idea/*
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | try:
2 |     from setuptools import setup
3 | except ImportError:
4 |     from distutils.core import setup
5 | 
6 | CLASSIFIERS = [
7 |     'Development Status :: 4 - Beta',
8 |     'License :: OSI Approved :: Apache Software License',
9 |     'Programming Language :: Python',
10 |     'Programming Language :: Python :: 2',
11 |     'Programming Language :: Python :: 2.7',
12 |     'Programming Language :: Python :: 3',
13 |     'Programming Language :: Python :: 3.3',
14 |     'Programming Language :: Python :: 3.4',
15 |     'Programming Language :: Python :: 3.5',
16 |     'Topic :: System :: Logging',
17 | ]
18 | 
19 | setup(
20 |     name='azure-storage-logging',
21 |     version='0.5.1',
22 |     description='Logging handlers to send logs to Microsoft Azure Storage',
23 |     long_description=open('README.rst').read(),
24 |     author='Michiya Takahashi',
25 |     author_email='michiya.takahashi@gmail.com',
26 |     url='https://github.com/michiya/azure-storage-logging',
27 |     license='Apache License 2.0',
28 |     packages=['azure_storage_logging'],
29 |     install_requires=[
30 |         'azure-storage>=0.33.0',
31 |     ],
32 |     classifiers=CLASSIFIERS,
33 |     keywords='azure logging',
34 | )
35 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | 
2 |                               Apache License
3 |                         Version 2.0, January 2004
4 |                      http://www.apache.org/licenses/
5 | 
6 |    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 | 
8 |    1. Definitions.
9 | 
10 |       "License" shall mean the terms and conditions for use, reproduction,
11 |       and distribution as defined by Sections 1 through 9 of this document.
12 | 
13 |       "Licensor" shall mean the copyright owner or entity authorized by
14 |       the copyright owner that is granting the License.
15 | 
16 |       "Legal Entity" shall mean the union of the acting entity and all
17 |       other entities that control, are controlled by, or are under common
18 |       control with that entity. For the purposes of this definition,
19 |       "control" means (i) the power, direct or indirect, to cause the
20 |       direction or management of such entity, whether by contract or
21 |       otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 |       outstanding shares, or (iii) beneficial ownership of such entity.
23 | 
24 |       "You" (or "Your") shall mean an individual or Legal Entity
25 |       exercising permissions granted by this License.
26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. 
If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 
203 | -------------------------------------------------------------------------------- /azure_storage_logging/handlers.py: -------------------------------------------------------------------------------- 1 | # Copyright 2013-2015 Michiya Takahashi 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | import logging 15 | import os 16 | import string 17 | import sys 18 | from base64 import b64encode 19 | from datetime import datetime 20 | from logging.handlers import RotatingFileHandler, TimedRotatingFileHandler 21 | from socket import gethostname 22 | from tempfile import mkstemp 23 | from zipfile import ZIP_DEFLATED, ZipFile 24 | 25 | from azure.storage.blob import BlockBlobService 26 | from azure.storage.blob.models import ContentSettings 27 | from azure.storage.queue import QueueService 28 | from azure.storage.table import TableBatch, TableService 29 | 30 | _PY3 = sys.version_info[0] == 3 31 | 32 | 33 | def _formatName(name, params): 34 | if _PY3: 35 | # try all possible formattings 36 | name = string.Template(name).substitute(**params) 37 | name = name.format(**params) 38 | return name % params 39 | 40 | 41 | class _BlobStorageFileHandler(object): 42 | 43 | def __init__(self, 44 | account_name=None, 45 | account_key=None, 46 | protocol='https', 47 | container='logs', 48 | zip_compression=False, 49 | max_connections=1, 50 | max_retries=5, 51 | retry_wait=1.0, 52 | is_emulated=False): 53 | self.service = BlockBlobService(account_name=account_name, 54 | account_key=account_key, 55 | is_emulated=is_emulated, 56 | protocol=protocol) 57 | self.container_created = False 58 | hostname = gethostname() 59 | self.meta = {'hostname': hostname.replace('_', '-'), 60 | 'process': os.getpid()} 61 | self.container = (container % self.meta).lower() 62 | self.meta['hostname'] = hostname 63 | self.zip_compression = zip_compression 64 | self.max_connections = max_connections 65 | self.max_retries = max_retries 66 | self.retry_wait = retry_wait 67 | 68 | def put_file_into_storage(self, dirName, fileName): 69 | """ 70 | Ship the outdated log file to the specified blob container. 
71 | """ 72 | if not self.container_created: 73 | self.service.create_container(self.container) 74 | self.container_created = True 75 | fd, tmpfile_path = None, '' 76 | try: 77 | file_path = os.path.join(dirName, fileName) 78 | if self.zip_compression: 79 | suffix, content_type = '.zip', 'application/zip' 80 | fd, tmpfile_path = mkstemp(suffix=suffix) 81 | with os.fdopen(fd, 'wb') as f: 82 | with ZipFile(f, 'w', ZIP_DEFLATED) as z: 83 | z.write(file_path, arcname=fileName) 84 | file_path = tmpfile_path 85 | else: 86 | suffix, content_type = '', 'text/plain' 87 | self.service.create_blob_from_path(container_name=self.container, 88 | blob_name=fileName+suffix, 89 | file_path=file_path, 90 | content_settings=ContentSettings(content_type=content_type), 91 | max_connections=self.max_connections 92 | ) # max_retries and retry_wait no longer arguments in azure 0.33 93 | finally: 94 | if self.zip_compression and fd: 95 | os.remove(tmpfile_path) 96 | 97 | 98 | class BlobStorageRotatingFileHandler(RotatingFileHandler, 99 | _BlobStorageFileHandler): 100 | """ 101 | Handler for logging to a file, which switches from one file 102 | to the next when the current file reaches a certain size. 103 | 104 | The outdated log file is shipped to the specified Azure Storage 105 | blob container and removed from the local file system immediately. 106 | """ 107 | def __init__(self, 108 | filename, 109 | mode='a', 110 | maxBytes=0, 111 | encoding=None, 112 | delay=False, 113 | account_name=None, 114 | account_key=None, 115 | protocol='https', 116 | container='logs', 117 | zip_compression=False, 118 | max_connections=1, 119 | max_retries=5, 120 | retry_wait=1.0, 121 | is_emulated=False): 122 | meta = {'hostname': gethostname(), 'process': os.getpid()} 123 | RotatingFileHandler.__init__(self, 124 | filename % meta, 125 | mode=mode, 126 | maxBytes=maxBytes, 127 | backupCount=1, 128 | encoding=encoding, 129 | delay=delay) 130 | _BlobStorageFileHandler.__init__(self, 131 | account_name=account_name, 132 | account_key=account_key, 133 | protocol=protocol, 134 | container=container, 135 | zip_compression=zip_compression, 136 | max_connections=max_connections, 137 | max_retries=max_retries, 138 | retry_wait=retry_wait, 139 | is_emulated=is_emulated) 140 | 141 | def doRollover(self): 142 | """ 143 | Do a rollover, as described in __init__(). 144 | """ 145 | if self.stream: 146 | self.stream.close() 147 | self.stream = None 148 | dfn = "%s.%s" % (self.baseFilename, 149 | datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')) 150 | if os.path.exists(self.baseFilename): 151 | os.rename(self.baseFilename, dfn) 152 | self.put_file_into_storage(*os.path.split(dfn)) 153 | os.remove(dfn) 154 | if not self.delay: 155 | self.stream = self._open() 156 | 157 | 158 | class BlobStorageTimedRotatingFileHandler(TimedRotatingFileHandler, 159 | _BlobStorageFileHandler): 160 | """ 161 | Handler for logging to a file, rotating the log file at certain timed 162 | intervals. 163 | 164 | The outdated log file is shipped to the specified Azure Storage 165 | blob container and removed from the local file system immediately. 
166 | """ 167 | def __init__(self, 168 | filename, 169 | when='h', 170 | interval=1, 171 | encoding=None, 172 | delay=False, 173 | utc=False, 174 | account_name=None, 175 | account_key=None, 176 | protocol='https', 177 | container='logs', 178 | zip_compression=False, 179 | max_connections=1, 180 | max_retries=5, 181 | retry_wait=1.0, 182 | is_emulated=False): 183 | meta = {'hostname': gethostname(), 'process': os.getpid()} 184 | TimedRotatingFileHandler.__init__(self, 185 | filename % meta, 186 | when=when, 187 | interval=interval, 188 | backupCount=1, 189 | encoding=encoding, 190 | delay=delay, 191 | utc=utc) 192 | _BlobStorageFileHandler.__init__(self, 193 | account_name=account_name, 194 | account_key=account_key, 195 | protocol=protocol, 196 | container=container, 197 | zip_compression=zip_compression, 198 | max_connections=max_connections, 199 | max_retries=max_retries, 200 | retry_wait=retry_wait, 201 | is_emulated=is_emulated) 202 | 203 | def emit(self, record): 204 | """ 205 | Emit a record. 206 | 207 | Output the record to the file, catering for rollover as described 208 | in doRollover(). 209 | """ 210 | record.hostname = self.meta['hostname'] 211 | super(BlobStorageTimedRotatingFileHandler, self).emit(record) 212 | 213 | def getFilesToDelete(self): 214 | """ 215 | Determine the files to delete when rolling over. 216 | """ 217 | dirName, baseName = os.path.split(self.baseFilename) 218 | fileNames = os.listdir(dirName) 219 | result = [] 220 | prefix = baseName + "." 221 | plen = len(prefix) 222 | for fileName in fileNames: 223 | if fileName[:plen] == prefix: 224 | suffix = fileName[plen:] 225 | if self.extMatch.match(suffix): 226 | self.put_file_into_storage(dirName, fileName) 227 | result.append(os.path.join(dirName, fileName)) 228 | # delete the stored log file from the local file system immediately 229 | return result 230 | 231 | 232 | class QueueStorageHandler(logging.Handler): 233 | """ 234 | Handler class which sends log messages to a Azure Storage queue. 235 | """ 236 | def __init__(self, 237 | account_name=None, 238 | account_key=None, 239 | protocol='https', 240 | queue='logs', 241 | message_ttl=None, 242 | visibility_timeout=None, 243 | base64_encoding=False, 244 | is_emulated=False, 245 | ): 246 | """ 247 | Initialize the handler. 248 | """ 249 | logging.Handler.__init__(self) 250 | self.service = QueueService(account_name=account_name, 251 | account_key=account_key, 252 | is_emulated=is_emulated, 253 | protocol=protocol) 254 | self.meta = {'hostname': gethostname(), 'process': os.getpid()} 255 | self.queue = _formatName(queue, self.meta) 256 | self.queue_created = False 257 | self.message_ttl = message_ttl 258 | self.visibility_timeout = visibility_timeout 259 | self.base64_encoding = base64_encoding 260 | 261 | def emit(self, record): 262 | """ 263 | Emit a record. 264 | 265 | Format the record and send it to the specified queue. 
266 | """ 267 | try: 268 | if not self.queue_created: 269 | self.service.create_queue(self.queue) 270 | self.queue_created = True 271 | record.hostname = self.meta['hostname'] 272 | msg = self._encode_text(self.format(record)) 273 | self.service.put_message(self.queue, 274 | msg, 275 | self.visibility_timeout, 276 | self.message_ttl) 277 | except (KeyboardInterrupt, SystemExit): 278 | raise 279 | except: 280 | self.handleError(record) 281 | 282 | def _encode_text(self, text): 283 | if self.base64_encoding: 284 | text = b64encode(text.encode('utf-8')).decode('ascii') 285 | # fallback for the breaking change in azure-storage 0.33 286 | elif sys.version_info < (3,): 287 | if not isinstance(text, unicode): 288 | text = text.decode('utf-8') 289 | return text 290 | 291 | 292 | class TableStorageHandler(logging.Handler): 293 | """ 294 | Handler class which writes log messages to a Azure Storage table. 295 | """ 296 | MAX_BATCH_SIZE = 100 297 | 298 | def __init__(self, 299 | account_name=None, 300 | account_key=None, 301 | protocol='https', 302 | table='logs', 303 | batch_size=0, 304 | extra_properties=None, 305 | partition_key_formatter=None, 306 | row_key_formatter=None, 307 | is_emulated=False, 308 | ): 309 | """ 310 | Initialize the handler. 311 | """ 312 | logging.Handler.__init__(self) 313 | self.service = TableService(account_name=account_name, 314 | account_key=account_key, 315 | is_emulated=is_emulated, 316 | protocol=protocol) 317 | self.meta = {'hostname': gethostname(), 'process': os.getpid()} 318 | self.table = _formatName(table, self.meta) 319 | self.ready = False 320 | self.rowno = 0 321 | if not partition_key_formatter: 322 | # default format for partition keys 323 | fmt = '%(asctime)s' 324 | datefmt = '%Y%m%d%H%M' 325 | partition_key_formatter = logging.Formatter(fmt, datefmt) 326 | self.partition_key_formatter = partition_key_formatter 327 | if not row_key_formatter: 328 | # default format for row keys 329 | fmt = '%(asctime)s%(msecs)03d-%(hostname)s-%(process)d-%(rowno)02d' 330 | datefmt = '%Y%m%d%H%M%S' 331 | row_key_formatter = logging.Formatter(fmt, datefmt) 332 | self.row_key_formatter = row_key_formatter 333 | # extra properties and formatters for them 334 | self.extra_properties = extra_properties 335 | if extra_properties: 336 | self.extra_property_formatters = {} 337 | self.extra_property_names = {} 338 | for extra in extra_properties: 339 | if _PY3: 340 | f = logging.Formatter(fmt=extra, style=extra[0]) 341 | else: 342 | f = logging.Formatter(fmt=extra) 343 | self.extra_property_formatters[extra] = f 344 | self.extra_property_names[extra] = self._getFormatName(extra) 345 | # the storage emulator doesn't support batch operations 346 | if batch_size <= 1 or is_emulated: 347 | self.batch = None 348 | else: 349 | self.batch = TableBatch() 350 | if batch_size > TableStorageHandler.MAX_BATCH_SIZE: 351 | self.batch_size = TableStorageHandler.MAX_BATCH_SIZE 352 | else: 353 | self.batch_size = batch_size 354 | if self.batch: 355 | self.current_partition_key = None 356 | 357 | def _copyLogRecord(self, record): 358 | copy = logging.makeLogRecord(record.__dict__) 359 | copy.exc_info = None 360 | copy.exc_text = None 361 | if _PY3: 362 | copy.stack_info = None 363 | return copy 364 | 365 | def _getFormatName(self, extra): 366 | name = extra 367 | style = extra[0] 368 | if style == '%': 369 | name = extra[2:extra.index(')')] 370 | elif _PY3: 371 | if style == '{': 372 | name = next(string.Formatter().parse(extra))[1] 373 | elif style == '$': 374 | name = extra[1:] 375 | if 
name.startswith('{'): 376 | name = name[1:-1] 377 | return name 378 | 379 | def emit(self, record): 380 | """ 381 | Emit a record. 382 | 383 | Format the record and send it to the specified table. 384 | """ 385 | try: 386 | if not self.ready: 387 | self.service.create_table(self.table) 388 | self.ready = True 389 | # generate partition key for the entity 390 | record.hostname = self.meta['hostname'] 391 | copy = self._copyLogRecord(record) 392 | partition_key = self.partition_key_formatter.format(copy) 393 | # ensure entities in the batch all have the same patition key 394 | if self.batch: 395 | if self.current_partition_key is not None: 396 | if partition_key != self.current_partition_key: 397 | self.flush() 398 | self.current_partition_key = partition_key 399 | # add log message and extra properties to the entity 400 | entity = {} 401 | if self.extra_properties: 402 | for extra in self.extra_properties: 403 | formatter = self.extra_property_formatters[extra] 404 | name = self.extra_property_names[extra] 405 | entity[name] = formatter.format(copy) 406 | entity['message'] = self.format(record) 407 | # generate row key for the entity 408 | copy.rowno = self.rowno 409 | row_key = self.row_key_formatter.format(copy) 410 | # add entitiy to the table 411 | entity['PartitionKey'] = partition_key 412 | entity['RowKey'] = row_key 413 | if not self.batch: 414 | self.service.insert_or_replace_entity(self.table, entity) 415 | else: 416 | self.batch.insert_or_replace_entity(entity) 417 | # commit the ongoing batch if it reaches the high mark 418 | self.rowno += 1 419 | if self.rowno >= self.batch_size: 420 | self.flush() 421 | except (KeyboardInterrupt, SystemExit): 422 | raise 423 | except: 424 | self.handleError(record) 425 | 426 | def flush(self): 427 | """ 428 | Ensure all logging output has been flushed. 429 | """ 430 | if self.batch and self.rowno > 0: 431 | try: 432 | self.service.commit_batch(self.table, self.batch) 433 | finally: 434 | self.rowno = 0 435 | self.batch = TableBatch() 436 | 437 | def setFormatter(self, fmt): 438 | """ 439 | Set the message formatter. 440 | """ 441 | super(TableStorageHandler, self).setFormatter(fmt) 442 | if self.extra_properties: 443 | logging._acquireLock() 444 | try: 445 | for extra in self.extra_property_formatters.values(): 446 | extra.converter = fmt.converter 447 | extra.datefmt = fmt.datefmt 448 | if _PY3: 449 | extra.default_time_format = fmt.default_time_format 450 | extra.default_msec_format = fmt.default_msec_format 451 | finally: 452 | logging._releaseLock() 453 | 454 | def setPartitionKeyFormatter(self, fmt): 455 | """ 456 | Set the partition key formatter. 457 | """ 458 | self.partition_key_formatter = fmt 459 | 460 | def setRowKeyFormatter(self, fmt): 461 | """ 462 | Set the row key formatter. 463 | """ 464 | self.row_key_formatter = fmt 465 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | azure-storage-logging 2 | ===================== 3 | 4 | .. image:: http://img.shields.io/pypi/v/azure-storage-logging.svg?style=flat 5 | :target: https://pypi.python.org/pypi/azure-storage-logging 6 | 7 | .. image:: http://img.shields.io/pypi/l/azure-storage-logging.svg?style=flat 8 | :target: http://www.apache.org/licenses/LICENSE-2.0.html 9 | 10 | *azure-storage-logging* provides functionality to send output from 11 | the standard Python logging APIs to Microsoft Azure Storage. 
12 | 
13 | Dependencies
14 | ------------
15 | 
16 | * azure-storage 0.33 or newer
17 | 
18 | Installation
19 | ------------
20 | 
21 | Install the package via pip: ::
22 | 
23 |     pip install azure-storage-logging
24 | 
25 | Usage
26 | -----
27 | 
28 | The module **azure_storage_logging.handlers** in the package contains
29 | the following logging handler classes. Each of them uses a different
30 | type of Microsoft Azure Storage to send its output to. They are all
31 | subclasses of the standard Python logging handler classes, so you can
32 | use them in the standard ways of configuring Python logging.
33 | 
34 | In addition to
35 | `the standard formats for logging <https://docs.python.org/3/library/logging.html#logrecord-attributes>`_,
36 | the special format ``%(hostname)s`` is also available in your message
37 | formatter for the handlers. The format makes it easy to identify the source
38 | of log messages that come from many computers and go to the same storage.
39 | 
40 | TableStorageHandler
41 | ~~~~~~~~~~~~~~~~~~~
42 | The **TableStorageHandler** class is a subclass of the **logging.Handler**
43 | class, and it sends log messages to Azure table storage and stores them
44 | as entities in the specified table.
45 | 
46 | The handler puts a formatted log message from applications in the *message*
47 | property of a table entity along with some system-defined properties
48 | (*PartitionKey*, *RowKey*, and *Timestamp*) like this:
49 | 
50 | +--------------+-----------+----------------+-------------+
51 | | PartitionKey | RowKey    | Timestamp      | message     |
52 | +==============+===========+================+=============+
53 | | XXXXX        | XXXXXXXXX | YYYY-MM-DD ... | log message |
54 | +--------------+-----------+----------------+-------------+
55 | | XXXXX        | XXXXXXXXX | YYYY-MM-DD ... | log message |
56 | +--------------+-----------+----------------+-------------+
57 | | XXXXX        | XXXXXXXXX | YYYY-MM-DD ... | log message |
58 | +--------------+-----------+----------------+-------------+
59 | 
60 | * *class* azure_storage_logging.handlers.TableStorageHandler(*account_name=None, account_key=None, protocol='https', table='logs', batch_size=0, extra_properties=None, partition_key_formatter=None, row_key_formatter=None, is_emulated=False*)
61 | 
62 |     Returns a new instance of the **TableStorageHandler** class.
63 |     The instance is initialized with the name and the key of your
64 |     Azure Storage account and some optional parameters.
65 | 
66 |     The *table* specifies the name of the table that stores log messages.
67 |     A new table will be created if it doesn't exist. The table name must
68 |     conform to the naming convention for Azure Storage tables; see
69 |     `the naming convention for tables <https://learn.microsoft.com/rest/api/storageservices/understanding-the-table-service-data-model>`_
70 |     for more details.
71 | 
72 |     The *protocol* specifies the protocol to transfer data between
73 |     Azure Storage and your application; ``http`` and ``https``
74 |     are supported.
75 | 
76 |     You can specify the *batch_size* as an integer if you want to use
77 |     batch transactions when creating new log entities. If the *batch_size*
78 |     is greater than 1, all new log entities will be transferred to the
79 |     table in a single batch when the number of new log messages reaches the
80 |     *batch_size*. Otherwise, a new log entity will be transferred to
81 |     the table every time a log record is emitted. The *batch_size* must be
82 |     100 or less (the maximum number of entities in a batch transaction
83 |     for Azure Storage tables).
84 | 
85 |     The *extra_properties* option accepts a sequence of
86 |     `the formats for logging <https://docs.python.org/3/library/logging.html#logrecord-attributes>`_.
87 |     The handler-specific one ``%(hostname)s`` is also acceptable.
88 | The handler assigns an entity property for every format specified in
89 | *extra_properties*. Here is an example of using extra properties:
90 | 
91 | ::
92 | 
93 |     import logging
94 |     from azure_storage_logging.handlers import TableStorageHandler
95 | 
96 |     # configure the handler and add it to the logger
97 |     logger = logging.getLogger('example')
98 |     handler = TableStorageHandler(account_name='mystorageaccountname',
99 |                                   account_key='mystorageaccountkey',
100 |                                   extra_properties=('%(hostname)s',
101 |                                                     '%(levelname)s'))
102 |     logger.addHandler(handler)
103 | 
104 |     # output log messages
105 |     logger.info('info message')
106 |     logger.warning('warning message')
107 |     logger.error('error message')
108 | 
109 | This will create log entities that have the extra properties in addition
110 | to the regular *message* property in the table, like this:
111 | 
112 | +--------------+-----------+----------------+----------+-----------+-----------------+
113 | | PartitionKey | RowKey    | Timestamp      | hostname | levelname | message         |
114 | +==============+===========+================+==========+===========+=================+
115 | | XXXXX        | XXXXXXXXX | YYYY-MM-DD ... | myhost   | INFO      | info message    |
116 | +--------------+-----------+----------------+----------+-----------+-----------------+
117 | | XXXXX        | XXXXXXXXX | YYYY-MM-DD ... | myhost   | WARNING   | warning message |
118 | +--------------+-----------+----------------+----------+-----------+-----------------+
119 | | XXXXX        | XXXXXXXXX | YYYY-MM-DD ... | myhost   | ERROR     | error message   |
120 | +--------------+-----------+----------------+----------+-----------+-----------------+
121 | 
122 | You can specify an instance of a custom **logging.Formatter** for the
123 | *partition_key_formatter* or the *row_key_formatter*
124 | if you want to implement your own keys for the table.
125 | The default formatters will be used for partition keys and row keys
126 | if no custom formatter for them is given to the handler.
127 | The default values for partition keys are provided by the format
128 | ``%(asctime)s`` and the date format ``%Y%m%d%H%M`` (provides a unique
129 | value per minute). The default values for row keys are provided by the
130 | format ``%(asctime)s%(msecs)03d-%(hostname)s-%(process)d-%(rowno)02d``
131 | and the date format ``%Y%m%d%H%M%S``.
132 | 
133 | Note that the format ``%(rowno)d`` is a handler-specific one only
134 | available for row keys. It is formatted to a sequential and
135 | unique number in a batch that starts from 0. The format is introduced
136 | to avoid collisions between row keys generated in a batch, and it is
137 | always formatted to 0 if you don't use batch transactions for logging
138 | to the table.
139 | 
140 | * setPartitionKeyFormatter(*fmt*)
141 | 
142 |     Sets the handler's formatter for partition keys to *fmt*.
143 | 
144 | * setRowKeyFormatter(*fmt*)
145 | 
146 |     Sets the handler's formatter for row keys to *fmt*.
147 | 
148 | QueueStorageHandler
149 | ~~~~~~~~~~~~~~~~~~~
150 | 
151 | The **QueueStorageHandler** class is a subclass of the **logging.Handler**
152 | class, and it pushes log messages to the specified Azure Storage queue.
153 | 
154 | You can pop log messages from the queue in other applications
155 | using Azure Storage client libraries.
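
For example, here is a minimal sketch of draining the logged messages from
another application with the **QueueService** client from the *azure-storage*
package (the account credentials below are placeholders):

::

    import logging
    from azure.storage.queue import QueueService
    from azure_storage_logging.handlers import QueueStorageHandler

    # producer: send a log message to the queue
    logger = logging.getLogger('example')
    logger.addHandler(QueueStorageHandler(account_name='mystorageaccountname',
                                          account_key='mystorageaccountkey',
                                          queue='logs'))
    logger.error('error message')

    # consumer: pop the log messages from the queue and delete them
    queue_service = QueueService(account_name='mystorageaccountname',
                                 account_key='mystorageaccountkey')
    for message in queue_service.get_messages('logs'):
        print(message.content)
        queue_service.delete_message('logs', message.id, message.pop_receipt)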
156 | 
157 | * *class* azure_storage_logging.handlers.QueueStorageHandler(*account_name=None, account_key=None, protocol='https', queue='logs', message_ttl=None, visibility_timeout=None, base64_encoding=False, is_emulated=False*)
158 | 
159 |     Returns a new instance of the **QueueStorageHandler** class.
160 |     The instance is initialized with the name and the key of your
161 |     Azure Storage account and some optional parameters.
162 | 
163 |     The *queue* specifies the name of the queue to which log messages
164 |     are added. A new queue will be created if it doesn't exist. The queue
165 |     name must conform to the naming convention for Azure Storage queues; see
166 |     `the naming convention for queues <https://learn.microsoft.com/rest/api/storageservices/naming-queues-and-metadata>`_
167 |     for more details.
168 | 
169 |     The *protocol* specifies the protocol to transfer data between
170 |     Azure Storage and your application; ``http`` and ``https``
171 |     are supported.
172 | 
173 |     The *message_ttl* specifies the time-to-live interval for the message,
174 |     in seconds. The maximum time-to-live allowed is 7 days. If this
175 |     parameter is omitted, the default time-to-live is 7 days.
176 | 
177 |     The *visibility_timeout* specifies the visibility timeout value,
178 |     in seconds, relative to server time. If not specified, the default
179 |     value is 0 (which makes the message visible immediately). The value
180 |     must be larger than or equal to 0, and cannot be larger than 7 days.
181 |     The *visibility_timeout* cannot be set to a value later than the
182 |     *message_ttl*, and should be set to a value smaller than the
183 |     *message_ttl*.
184 | 
185 |     The *base64_encoding* specifies whether log text should be
186 |     encoded in Base64. If you set this to ``True``, Unicode log text
187 |     in a message is encoded in UTF-8 first and then in Base64.
188 |     Some Azure Storage client libraries and tools assume that
189 |     text messages in an Azure Storage queue are encoded in Base64,
190 |     so you can set this to ``True`` to receive log messages correctly
191 |     with those libraries or tools.
192 | 
193 | BlobStorageRotatingFileHandler
194 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
195 | 
196 | The **BlobStorageRotatingFileHandler** class is a subclass of the
197 | **logging.handlers.RotatingFileHandler** class. It performs
198 | log file rotation and stores the outdated log file in an Azure
199 | blob storage container when the current file reaches a certain size.
200 | 
201 | * *class* azure_storage_logging.handlers.BlobStorageRotatingFileHandler(*filename, mode='a', maxBytes=0, encoding=None, delay=False, account_name=None, account_key=None, protocol='https', container='logs', zip_compression=False, max_connections=1, max_retries=5, retry_wait=1.0, is_emulated=False*)
202 | 
203 |     Returns a new instance of the **BlobStorageRotatingFileHandler**
204 |     class. The instance is initialized with the name and the key of your
205 |     Azure Storage account and some optional parameters.
206 | 
207 |     See `RotatingFileHandler <https://docs.python.org/3/library/logging.handlers.html#rotatingfilehandler>`_
208 |     for its basic usage. The handler keeps the latest log file on the
209 |     local file system. Meanwhile, the handler sends the outdated log file
210 |     to the blob container immediately and then removes it from the local
211 |     file system.
212 | 
213 |     The *container* specifies the name of the blob container that stores
214 |     outdated log files. A new container will be created if it doesn't exist.
215 |     The container name must conform to the naming convention for
216 |     Azure Storage blob containers; see
217 |     `the naming convention for blob containers <https://learn.microsoft.com/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata>`_
218 |     for more details.
219 | 
220 |     The *protocol* specifies the protocol to transfer data between
221 |     Azure Storage and your application; ``http`` and ``https``
222 |     are supported.
223 | 
224 |     The *zip_compression* specifies whether every outdated log file
225 |     should be compressed in zip format before it is put in
226 |     the container.
227 | 
228 |     The *max_connections* specifies the maximum number of parallel
229 |     connections to use when the blob size exceeds 64MB.
230 |     Set it to 1 to upload the blob chunks sequentially.
231 |     Set it to 2 or more to upload the blob chunks in parallel,
232 |     which uses more system resources but uploads faster.
233 | 
234 |     The *max_retries* specifies the number of times to retry
235 |     uploading a blob chunk if an error occurs.
236 | 
237 |     The *retry_wait* specifies the sleep time in seconds between retries.
238 | 
239 |     Only the two formats ``%(hostname)s`` and ``%(process)d`` are
240 |     accepted as a part of the *filename* or the *container*. You can save
241 |     log files in a blob container dedicated to each host or process by
242 |     naming containers with these formats, and you can also store log
243 |     files from multiple hosts or processes in one blob container by
244 |     naming log files with them.
245 | 
246 |     Be careful when using the ``%(process)d`` format in the *filename*:
247 |     a different PID is assigned to your application every time it starts,
248 |     and the PID becomes a part of the log file names searched for
249 |     at rotation. You should use the format in the *filename* only when
250 |     the log file is generated by a long-running application process.
251 | 
252 |     Note that the handler class doesn't take the *backupCount* parameter,
253 |     unlike RotatingFileHandler. The number of outdated log files
254 |     that the handler stores in the container is unlimited, and the files
255 |     are saved with an extension that indicates the UTC time at which
256 |     they were replaced with a new one. If you want to keep the number of
257 |     outdated log files in the container below a certain limit, you will
258 |     need to do that using the Azure management portal or other tools.
259 | 
260 | BlobStorageTimedRotatingFileHandler
261 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
262 | 
263 | The **BlobStorageTimedRotatingFileHandler** class is a subclass of the
264 | **logging.handlers.TimedRotatingFileHandler** class. It performs
265 | log file rotation and stores the outdated log file in an Azure
266 | blob storage container at certain timed intervals.
267 | 
268 | * *class* azure_storage_logging.handlers.BlobStorageTimedRotatingFileHandler(*filename, when='h', interval=1, encoding=None, delay=False, utc=False, account_name=None, account_key=None, protocol='https', container='logs', zip_compression=False, max_connections=1, max_retries=5, retry_wait=1.0, is_emulated=False*)
269 | 
270 |     Returns a new instance of the **BlobStorageTimedRotatingFileHandler**
271 |     class. The instance is initialized with the name and the key of your
272 |     Azure Storage account and some optional parameters.
273 | 
274 |     See `TimedRotatingFileHandler <https://docs.python.org/3/library/logging.handlers.html#timedrotatingfilehandler>`_
275 |     for its basic usage. The handler keeps the latest log file on the
276 |     local file system. Meanwhile, the handler sends the outdated log file
277 |     to the blob container immediately and then removes it from the local
278 |     file system.
279 | 
280 |     The *container* specifies the name of the blob container that stores
281 |     outdated log files. A new container will be created if it doesn't exist.
282 |     The container name must conform to the naming convention for
283 |     Azure Storage blob containers; see
284 |     `the naming convention for blob containers <https://learn.microsoft.com/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata>`_
285 |     for more details.
286 | 
287 |     The *protocol* specifies the protocol to transfer data between
288 |     Azure Storage and your application; ``http`` and ``https``
289 |     are supported.
290 | 
291 |     The *zip_compression* specifies whether every outdated log file
292 |     should be compressed in zip format before it is put in
293 |     the container.
294 | 
295 |     The *max_connections* specifies the maximum number of parallel
296 |     connections to use when the blob size exceeds 64MB.
297 |     Set it to 1 to upload the blob chunks sequentially.
298 |     Set it to 2 or more to upload the blob chunks in parallel,
299 |     which uses more system resources but uploads faster.
300 | 
301 |     The *max_retries* specifies the number of times to retry
302 |     uploading a blob chunk if an error occurs.
303 | 
304 |     The *retry_wait* specifies the sleep time in seconds between retries.
305 | 
306 |     Only the two formats ``%(hostname)s`` and ``%(process)d`` are
307 |     accepted as a part of the *filename* or the *container*. You can save
308 |     log files in a blob container dedicated to each host or process by
309 |     naming containers with these formats, and you can also store log
310 |     files from multiple hosts or processes in one blob container by
311 |     naming log files with them.
312 | 
313 |     Be careful when using the ``%(process)d`` format in the *filename*:
314 |     a different PID is assigned to your application every time it starts,
315 |     and the PID becomes a part of the log file names searched for
316 |     at rotation. You should use the format in the *filename* only when
317 |     the log file is generated by a long-running application process.
318 | 
319 |     Note that the handler class doesn't take the *backupCount* parameter,
320 |     unlike TimedRotatingFileHandler. The number of outdated log files
321 |     that the handler stores in the container is unlimited.
322 |     If you want to keep the number of outdated log files in the container
323 |     below a certain limit, you will need to do that using the Azure
324 |     management portal or other tools.
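
As a quick illustration, here is a minimal sketch of attaching the size-based
handler to a logger in code rather than through ``dictConfig`` (the account
credentials and the container name below are placeholders):

::

    import logging
    from azure_storage_logging.handlers import BlobStorageRotatingFileHandler

    # rotate after about 1MB; each rotated file is zipped, uploaded to
    # the 'logs' container, and removed from the local file system
    handler = BlobStorageRotatingFileHandler('example.log',
                                             maxBytes=1024 * 1024,
                                             account_name='mystorageaccountname',
                                             account_key='mystorageaccountkey',
                                             container='logs',
                                             zip_compression=True)
    logger = logging.getLogger('example')
    logger.setLevel(logging.INFO)
    logger.addHandler(handler)
    logger.info('info message')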
325 | 
326 | Example
327 | -------
328 | 
329 | Here is an example of a logging configuration that sends output to
330 | three different types of storage from a single logger:
331 | 
332 | ::
333 | 
334 |     LOGGING = {
335 |         'version': 1,
336 |         'formatters': {
337 |             'simple': {
338 |                 'format': '%(asctime)s %(message)s',
339 |             },
340 |             'verbose': {
341 |                 'format': '%(asctime)s %(levelname)s %(hostname)s %(process)d %(message)s',
342 |             },
343 |             # this is the same as the default, so you can skip configuring it
344 |             'partition_key': {
345 |                 'format': '%(asctime)s',
346 |                 'datefmt': '%Y%m%d%H%M',
347 |             },
348 |             # this is the same as the default, so you can skip configuring it
349 |             'row_key': {
350 |                 'format': '%(asctime)s%(msecs)03d-%(hostname)s-%(process)d-%(rowno)02d',
351 |                 'datefmt': '%Y%m%d%H%M%S',
352 |             },
353 |         },
354 |         'handlers': {
355 |             'file': {
356 |                 'account_name': 'mystorageaccountname',
357 |                 'account_key': 'mystorageaccountkey',
358 |                 'protocol': 'https',
359 |                 'level': 'DEBUG',
360 |                 'class': 'azure_storage_logging.handlers.BlobStorageTimedRotatingFileHandler',
361 |                 'formatter': 'verbose',
362 |                 'filename': 'example.log',
363 |                 'when': 'D',
364 |                 'interval': 1,
365 |                 'container': 'logs-%(hostname)s',
366 |                 'zip_compression': False,
367 |             },
368 |             'queue': {
369 |                 'account_name': 'mystorageaccountname',
370 |                 'account_key': 'mystorageaccountkey',
371 |                 'protocol': 'https',
372 |                 'queue': 'logs',
373 |                 'level': 'CRITICAL',
374 |                 'class': 'azure_storage_logging.handlers.QueueStorageHandler',
375 |                 'formatter': 'verbose',
376 |             },
377 |             'table': {
378 |                 'account_name': 'mystorageaccountname',
379 |                 'account_key': 'mystorageaccountkey',
380 |                 'protocol': 'https',
381 |                 'table': 'logs',
382 |                 'level': 'INFO',
383 |                 'class': 'azure_storage_logging.handlers.TableStorageHandler',
384 |                 'formatter': 'simple',
385 |                 'batch_size': 20,
386 |                 'extra_properties': ['%(hostname)s', '%(levelname)s'],
387 |                 'partition_key_formatter': 'cfg://formatters.partition_key',
388 |                 'row_key_formatter': 'cfg://formatters.row_key',
389 |             },
390 |         },
391 |         'loggers': {
392 |             'example': {
393 |                 'handlers': ['file', 'queue', 'table'],
394 |                 'level': 'DEBUG',
395 |             },
396 |         }
397 |     }
398 | 
399 |     import logging
400 |     from logging.config import dictConfig
401 | 
402 |     dictConfig(LOGGING)
403 |     logger = logging.getLogger('example')
404 |     logger.debug('debug message')
405 |     logger.info('info message')
406 |     logger.warning('warning message')
407 |     logger.error('error message')
408 |     logger.critical('critical message')
409 | 
410 | Notice
411 | ------
412 | 
413 | * Set *is_emulated* to ``True`` at initialization of the logging handlers
414 |   if you want to use this package with the Azure storage emulator.
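
For example, a minimal sketch of logging to the local storage emulator
(no account name or key is needed in that case, just as in the test suite
below):

::

    import logging
    from azure_storage_logging.handlers import TableStorageHandler

    # the handler talks to the local storage emulator
    # instead of a real storage account
    handler = TableStorageHandler(table='logs', is_emulated=True)
    logger = logging.getLogger('example')
    logger.setLevel(logging.INFO)
    logger.addHandler(handler)
    logger.info('info message')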
415 | 
416 | License
417 | -------
418 | 
419 | Apache License 2.0
420 | 
421 | Credits
422 | -------
423 | 
424 | - `Michiya Takahashi <https://github.com/michiya>`__
425 | 
--------------------------------------------------------------------------------
/tests/tests.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import logging
3 | import os
4 | import sys
5 | import time
6 | import unittest
7 | import zipfile
8 | 
9 | from base64 import b64encode
10 | from datetime import datetime
11 | from logging.config import dictConfig
12 | from shutil import rmtree
13 | from socket import gethostname
14 | from threading import current_thread
15 | from tempfile import mkdtemp
16 | 
17 | from azure.storage.blob import BlockBlobService
18 | from azure.storage.queue import QueueService
19 | from azure.storage.table import TableService
20 | 
21 | 
22 | # put your Azure Storage account name and key here;
23 | # leave them blank if you want to run the tests on the Azure Storage emulator
24 | ACCOUNT_NAME = ''
25 | ACCOUNT_KEY = ''
26 | 
27 | 
28 | _PY3 = sys.version_info[0] == 3
29 | 
30 | _LOGFILE_TMPDIR = mkdtemp()
31 | 
32 | _EMULATED = not ACCOUNT_NAME and not ACCOUNT_KEY
33 | if _EMULATED:
34 |     ACCOUNT_NAME = None
35 |     ACCOUNT_KEY = None
36 | 
37 | LOGGING = {
38 |     'version': 1,
39 |     'formatters': {
40 |         'simple': {
41 |             'format': '%(levelname)s %(message)s',
42 |         },
43 |         'verbose': {
44 |             'format': '%(asctime)s %(levelname)s %(hostname)s %(process)d %(message)s',
45 |         },
46 |         'batch_test_partition_key': {
47 |             # fix the partition key to avoid an unexpected batch commit during the test
48 |             'format': 'batch-%(hostname)s',
49 |         },
50 |         'custom_partition_key': {
51 |             'format': 'mycustompartitionkey-%(hostname)s-%(asctime)s',
52 |             'datefmt': '%Y%m%d',
53 |         },
54 |         'custom_row_key': {
55 |             'format': 'mycustomrowkey-%(hostname)s-%(asctime)s',
56 |             'datefmt': '%Y%m%d%H%M',
57 |         },
58 |     },
59 |     'handlers': {
60 |         # BlobStorageRotatingFileHandlerTest
61 |         'rotation': {
62 |             'account_name': ACCOUNT_NAME,
63 |             'account_key': ACCOUNT_KEY,
64 |             'is_emulated': _EMULATED,
65 |             'level': 'DEBUG',
66 |             'class': 'azure_storage_logging.handlers.BlobStorageRotatingFileHandler',
67 |             'filename': os.path.join(_LOGFILE_TMPDIR, 'rotation.log'),
68 |             'maxBytes': 1024 * 1024,
69 |             'delay': True,
70 |             'container': 'logs-%s' % gethostname().replace('_', '-'),
71 |         },
72 |         'rotation_with_parallel_upload': {
73 |             'account_name': ACCOUNT_NAME,
74 |             'account_key': ACCOUNT_KEY,
75 |             'is_emulated': _EMULATED,
76 |             'level': 'DEBUG',
77 |             'class': 'azure_storage_logging.handlers.BlobStorageRotatingFileHandler',
78 |             'filename': os.path.join(_LOGFILE_TMPDIR, 'rotation_with_parallel_upload.log'),
79 |             'maxBytes': 1024 * 1024 * 96,
80 |             'delay': True,
81 |             'container': 'logs-%s' % gethostname().replace('_', '-'),
82 |             'max_connections': 4,
83 |         },
84 |         'rotation_with_zip_compression': {
85 |             'account_name': ACCOUNT_NAME,
86 |             'account_key': ACCOUNT_KEY,
87 |             'is_emulated': _EMULATED,
88 |             'level': 'DEBUG',
89 |             'class': 'azure_storage_logging.handlers.BlobStorageRotatingFileHandler',
90 |             'filename': os.path.join(_LOGFILE_TMPDIR, 'zip_compression_at_rotation.log'),
91 |             'maxBytes': 1024 * 1024,
92 |             'delay': True,
93 |             'container': 'logs-%s' % gethostname().replace('_', '-'),
94 |             'zip_compression': True,
95 |         },
96 |         # BlobStorageTimedRotatingFileHandlerTest
97 |         'timed_rotation': {
98 |             'account_name': ACCOUNT_NAME,
99 |             'account_key': ACCOUNT_KEY,
100 |             'is_emulated': _EMULATED,
101 |             'level': 'DEBUG',
102 |             'class':
'azure_storage_logging.handlers.BlobStorageTimedRotatingFileHandler', 103 | 'formatter': 'verbose', 104 | 'filename': os.path.join(_LOGFILE_TMPDIR, 'timed_rotation.log'), 105 | 'when': 'S', 106 | 'interval': 10, 107 | 'delay': True, 108 | 'container': 'logs-%s' % gethostname().replace('_', '-'), 109 | }, 110 | 'timed_rotation_with_zip_compression': { 111 | 'account_name': ACCOUNT_NAME, 112 | 'account_key': ACCOUNT_KEY, 113 | 'is_emulated': _EMULATED, 114 | 'level': 'DEBUG', 115 | 'class': 'azure_storage_logging.handlers.BlobStorageTimedRotatingFileHandler', 116 | 'formatter': 'verbose', 117 | 'filename': os.path.join(_LOGFILE_TMPDIR, 'zip_compression.log'), 118 | 'when': 'S', 119 | 'interval': 10, 120 | 'delay': True, 121 | 'container': 'logs-%s' % gethostname().replace('_', '-'), 122 | 'zip_compression': True, 123 | }, 124 | # QueueStorageHandlerTest 125 | 'queue': { 126 | 'account_name': ACCOUNT_NAME, 127 | 'account_key': ACCOUNT_KEY, 128 | 'is_emulated': _EMULATED, 129 | 'queue': 'queue-storage-handler-test', 130 | 'level': 'INFO', 131 | 'class': 'azure_storage_logging.handlers.QueueStorageHandler', 132 | 'formatter': 'simple', 133 | }, 134 | 'message_ttl': { 135 | 'account_name': ACCOUNT_NAME, 136 | 'account_key': ACCOUNT_KEY, 137 | 'is_emulated': _EMULATED, 138 | 'queue': 'queue-storage-handler-test', 139 | 'level': 'INFO', 140 | 'class': 'azure_storage_logging.handlers.QueueStorageHandler', 141 | 'formatter': 'simple', 142 | 'message_ttl': 10, 143 | }, 144 | 'visibility_timeout': { 145 | 'account_name': ACCOUNT_NAME, 146 | 'account_key': ACCOUNT_KEY, 147 | 'is_emulated': _EMULATED, 148 | 'queue': 'queue-storage-handler-test', 149 | 'level': 'INFO', 150 | 'class': 'azure_storage_logging.handlers.QueueStorageHandler', 151 | 'formatter': 'simple', 152 | 'visibility_timeout': 10, 153 | }, 154 | 'base64_encoding': { 155 | 'account_name': ACCOUNT_NAME, 156 | 'account_key': ACCOUNT_KEY, 157 | 'is_emulated': _EMULATED, 158 | 'queue': 'queue-storage-handler-test', 159 | 'level': 'INFO', 160 | 'class': 'azure_storage_logging.handlers.QueueStorageHandler', 161 | 'formatter': 'simple', 162 | 'base64_encoding': True, 163 | }, 164 | # TableStorageHandlerTest 165 | 'table': { 166 | 'account_name': ACCOUNT_NAME, 167 | 'account_key': ACCOUNT_KEY, 168 | 'is_emulated': _EMULATED, 169 | 'table': 'TableStorageHandlerTest', 170 | 'level': 'INFO', 171 | 'class': 'azure_storage_logging.handlers.TableStorageHandler', 172 | 'formatter': 'simple', 173 | }, 174 | 'batch': { 175 | 'account_name': ACCOUNT_NAME, 176 | 'account_key': ACCOUNT_KEY, 177 | 'is_emulated': _EMULATED, 178 | 'table': 'TableStorageHandlerTest', 179 | 'level': 'INFO', 180 | 'class': 'azure_storage_logging.handlers.TableStorageHandler', 181 | 'formatter': 'simple', 182 | 'batch_size': 10, 183 | 'partition_key_formatter': 'cfg://formatters.batch_test_partition_key', 184 | }, 185 | 'extra_properties': { 186 | 'account_name': ACCOUNT_NAME, 187 | 'account_key': ACCOUNT_KEY, 188 | 'is_emulated': _EMULATED, 189 | 'table': 'TableStorageHandlerTest', 190 | 'level': 'INFO', 191 | 'class': 'azure_storage_logging.handlers.TableStorageHandler', 192 | 'formatter': 'simple', 193 | 'extra_properties': [ 194 | '%(hostname)s', 195 | '%(levelname)s', 196 | '%(levelno)s', 197 | '%(module)s', 198 | '%(name)s', 199 | '%(process)d', 200 | '%(thread)d', 201 | ], 202 | }, 203 | 'custom_keys': { 204 | 'account_name': ACCOUNT_NAME, 205 | 'account_key': ACCOUNT_KEY, 206 | 'is_emulated': _EMULATED, 207 | 'table': 'TableStorageHandlerTest', 208 | 'level': 'INFO', 209 | 
'class': 'azure_storage_logging.handlers.TableStorageHandler', 210 | 'formatter': 'simple', 211 | 'partition_key_formatter': 'cfg://formatters.custom_partition_key', 212 | 'row_key_formatter': 'cfg://formatters.custom_row_key', 213 | }, 214 | }, 215 | 'loggers': { 216 | # BlobStorageRotatingFileHandlerTest 217 | 'rotation': { 218 | 'handlers': ['rotation'], 219 | 'level': 'DEBUG', 220 | }, 221 | 'rotation_with_parallel_upload': { 222 | 'handlers': ['rotation_with_parallel_upload'], 223 | 'level': 'DEBUG', 224 | }, 225 | 'rotation_with_zip_compression': { 226 | 'handlers': ['rotation_with_zip_compression'], 227 | 'level': 'DEBUG', 228 | }, 229 | # BlobStorageTimedRotatingFileHandlerTest 230 | 'timed_rotation': { 231 | 'handlers': ['timed_rotation'], 232 | 'level': 'DEBUG', 233 | }, 234 | 'timed_rotation_with_zip_compression': { 235 | 'handlers': ['timed_rotation_with_zip_compression'], 236 | 'level': 'DEBUG', 237 | }, 238 | # QueueStorageHandlerTest 239 | 'queue': { 240 | 'handlers': ['queue'], 241 | 'level': 'DEBUG', 242 | }, 243 | 'message_ttl': { 244 | 'handlers': ['message_ttl'], 245 | 'level': 'DEBUG', 246 | }, 247 | 'visibility_timeout': { 248 | 'handlers': ['visibility_timeout'], 249 | 'level': 'DEBUG', 250 | }, 251 | 'base64_encoding': { 252 | 'handlers': ['base64_encoding'], 253 | 'level': 'DEBUG', 254 | }, 255 | # TableStorageHandlerTest 256 | 'table': { 257 | 'handlers': ['table'], 258 | 'level': 'DEBUG', 259 | }, 260 | 'batch': { 261 | 'handlers': ['batch'], 262 | 'level': 'DEBUG', 263 | }, 264 | 'extra_properties': { 265 | 'handlers': ['extra_properties'], 266 | 'level': 'DEBUG', 267 | }, 268 | 'custom_keys': { 269 | 'handlers': ['custom_keys'], 270 | 'level': 'DEBUG', 271 | }, 272 | } 273 | } 274 | 275 | def _base64_encode(text): 276 | return b64encode(text.encode('utf-8')).decode('ascii') 277 | 278 | def _get_formatter_config_value(formatter_name, key): 279 | return _get_logging_config_value('formatters', formatter_name, key) 280 | 281 | def _get_handler_config_value(handler_name, key): 282 | return _get_logging_config_value('handlers', handler_name, key) 283 | 284 | def _get_handler_name(logger_name): 285 | return next(iter(_get_logger_config_value(logger_name, 'handlers'))) 286 | 287 | def _get_logger_config_value(logger_name, key): 288 | return _get_logging_config_value('loggers', logger_name, key) 289 | 290 | def _get_logging_config_value(kind, name, key): 291 | try: 292 | value = LOGGING[kind][name][key] 293 | except KeyError: 294 | value = None 295 | return value 296 | 297 | 298 | class _TestCase(unittest.TestCase): 299 | 300 | if not _PY3: 301 | def assertRegex(self, text, regex, msg=None): 302 | return self.assertRegexpMatches(text, regex, msg) 303 | 304 | 305 | class _BlobStorageTestCase(_TestCase): 306 | 307 | def _get_container_name(self, handler_name): 308 | container = _get_handler_config_value(handler_name, 'container') 309 | if container: 310 | container = container.replace('_', '-').lower() 311 | return container 312 | 313 | def setUp(self): 314 | self.service = BlockBlobService(account_name=ACCOUNT_NAME, 315 | account_key=ACCOUNT_KEY, 316 | is_emulated=_EMULATED) 317 | # ensure that there's no log file in the container before each test 318 | containers = [c.name for c in self.service.list_containers()] 319 | for handler in LOGGING['handlers']: 320 | container = self._get_container_name(handler) 321 | if container in containers: 322 | filename = _get_handler_config_value(handler, 'filename') 323 | basename = os.path.basename(filename) 324 | for blob in 
def _base64_encode(text):
    return b64encode(text.encode('utf-8')).decode('ascii')

def _get_formatter_config_value(formatter_name, key):
    return _get_logging_config_value('formatters', formatter_name, key)

def _get_handler_config_value(handler_name, key):
    return _get_logging_config_value('handlers', handler_name, key)

def _get_handler_name(logger_name):
    return next(iter(_get_logger_config_value(logger_name, 'handlers')))

def _get_logger_config_value(logger_name, key):
    return _get_logging_config_value('loggers', logger_name, key)

def _get_logging_config_value(kind, name, key):
    try:
        value = LOGGING[kind][name][key]
    except KeyError:
        value = None
    return value


class _TestCase(unittest.TestCase):

    if not _PY3:
        # Python 2's unittest only has assertRegexpMatches; alias it so the
        # tests can call assertRegex on both versions.
        def assertRegex(self, text, regex, msg=None):
            return self.assertRegexpMatches(text, regex, msg)


class _BlobStorageTestCase(_TestCase):

    def _get_container_name(self, handler_name):
        container = _get_handler_config_value(handler_name, 'container')
        if container:
            container = container.replace('_', '-').lower()
        return container

    def setUp(self):
        self.service = BlockBlobService(account_name=ACCOUNT_NAME,
                                        account_key=ACCOUNT_KEY,
                                        is_emulated=_EMULATED)
        # ensure that there's no log file in the container before each test
        containers = [c.name for c in self.service.list_containers()]
        for handler in LOGGING['handlers']:
            container = self._get_container_name(handler)
            if container in containers:
                filename = _get_handler_config_value(handler, 'filename')
                basename = os.path.basename(filename)
                for blob in self.service.list_blobs(container, prefix=basename):
                    self.service.delete_blob(container, blob.name)


class BlobStorageRotatingFileHandlerTest(_BlobStorageTestCase):

    def _test_rotation(self, logger_name):
        # get the logger for the test
        logger = logging.getLogger(logger_name)
        handler_name = _get_handler_name(logger_name)

        # perform logging; each record occupies length_per_line bytes on disk,
        # so writing maxBytes // length_per_line + 1 lines is just enough to
        # trigger a single rotation
        started_at = datetime.utcnow()
        log_text = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit.'
        length_per_line = len(log_text) + len(os.linesep)
        max_bytes = _get_handler_config_value(handler_name, 'maxBytes')
        for _ in range(max_bytes // length_per_line + 1):
            logger.info(log_text)

        # confirm that the outdated log file is saved in the container
        container = self._get_container_name(handler_name)
        filename = _get_handler_config_value(handler_name, 'filename')
        basename = os.path.basename(filename)
        blobs = iter(self.service.list_blobs(container, prefix=basename))
        blob = next(blobs)
        self.assertTrue(blob.name.startswith(basename))
        rotated_at = datetime.strptime(blob.name.rpartition('.')[2],
                                       '%Y-%m-%d_%H-%M-%S')
        self.assertGreater(rotated_at, started_at)
        self.assertLessEqual(rotated_at, datetime.utcnow())
        self.assertEqual(blob.properties.content_settings.content_type, 'text/plain')
        self.assertAlmostEqual(blob.properties.content_length,
                               max_bytes,
                               delta=1000)
        self.assertAlmostEqual(os.path.getsize(filename), 0, delta=1000)

        # confirm that there's no more blob in the container
        with self.assertRaises(StopIteration):
            next(blobs)

    def test_rotation(self):
        self._test_rotation('rotation')

    def test_rotation_with_parallel_upload(self):
        self._test_rotation('rotation_with_parallel_upload')
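
    # The zip-compression variant repeats the size-based rotation scenario and
    # additionally verifies that the uploaded blob is a genuine zip archive
    # whose single member inflates back to roughly maxBytes of log text.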
    def test_rotation_with_zip_compression(self):
        # get the logger for the test
        logger_name = 'rotation_with_zip_compression'
        logger = logging.getLogger(logger_name)
        handler_name = _get_handler_name(logger_name)

        # perform logging
        started_at = datetime.utcnow()
        log_text = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit.'
        length_per_line = len(log_text) + len(os.linesep)
        max_bytes = _get_handler_config_value(handler_name, 'maxBytes')
        for _ in range(max_bytes // length_per_line + 1):
            logger.info(log_text)

        # confirm that the outdated log file is saved in the container
        container = self._get_container_name(handler_name)
        filename = _get_handler_config_value(handler_name, 'filename')
        basename = os.path.basename(filename)
        blobs = iter(self.service.list_blobs(container, prefix=basename))
        blob = next(blobs)
        self.assertTrue(blob.name.startswith(basename))
        self.assertTrue(blob.name.endswith('.zip'))
        blob_name = blob.name.rpartition('.')[0]
        rotated_at = datetime.strptime(blob_name.rpartition('.')[2],
                                       '%Y-%m-%d_%H-%M-%S')
        self.assertGreater(rotated_at, started_at)
        self.assertLessEqual(rotated_at, datetime.utcnow())
        self.assertEqual(blob.properties.content_settings.content_type, 'application/zip')
        self.assertLess(blob.properties.content_length, max_bytes // 2)

        # confirm that the blob is a zip file
        zipfile_path = os.path.join(_LOGFILE_TMPDIR, blob.name)
        self.service.get_blob_to_path(container, blob.name, zipfile_path)
        self.assertTrue(zipfile.is_zipfile(zipfile_path))

        # confirm that the zip file only has the rotated log file
        extract_dir = mkdtemp(dir=_LOGFILE_TMPDIR)
        with zipfile.ZipFile(zipfile_path, 'r') as z:
            files = z.namelist()
            self.assertEqual(len(files), 1)
            self.assertEqual(files[0], blob.name.rpartition('.zip')[0])
            z.extractall(path=extract_dir)
        extracted_file = os.path.join(extract_dir, files[0])
        self.assertAlmostEqual(os.path.getsize(extracted_file),
                               max_bytes,
                               delta=1000)

        # confirm that there's no more blob in the container
        with self.assertRaises(StopIteration):
            next(blobs)
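

# The timed-rotation tests below log once, sleep past the configured interval
# (plus a small safety margin), then log again: the first record must end up
# in the rotated blob and the second in the fresh local log file.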
class BlobStorageTimedRotatingFileHandlerTest(_BlobStorageTestCase):

    def _get_interval_in_seconds(self, handler_name):
        options = {'S': 1, 'M': 60, 'H': 3600, 'D': 86400}
        seconds = options[_get_handler_config_value(handler_name, 'when')]
        return int(_get_handler_config_value(handler_name, 'interval')) * seconds

    def test_timed_rotation(self):
        # get the logger for the test
        logger_name = 'timed_rotation'
        logger = logging.getLogger(logger_name)
        handler_name = _get_handler_name(logger_name)

        # perform logging
        log_text_1 = 'this will be the last line in the rotated log file.'
        logger.info(log_text_1)

        # perform logging again after the interval
        time.sleep(self._get_interval_in_seconds(handler_name) + 5)
        log_text_2 = 'this will be the first line in the new log file.'
        logger.info(log_text_2)

        # confirm that the outdated log file is saved in the container
        container = self._get_container_name(handler_name)
        filename = _get_handler_config_value(handler_name, 'filename')
        basename = os.path.basename(filename)
        blobs = iter(self.service.list_blobs(container, prefix=basename))
        blob = next(blobs)
        self.assertTrue(blob.name.startswith(basename))
        self.assertEqual(blob.properties.content_settings.content_type, 'text/plain')
        blob_text = self.service.get_blob_to_text(container, blob.name)
        self.assertRegex(blob_text.content, log_text_1)

        # confirm that there's no more blob in the container
        with self.assertRaises(StopIteration):
            next(blobs)

        # confirm that the current log file has correct logs
        with open(filename, 'r') as f:
            self.assertRegex(f.readline(), log_text_2)

    def test_timed_rotation_with_zip_compression(self):
        # get the logger for the test
        logger_name = 'timed_rotation_with_zip_compression'
        logger = logging.getLogger(logger_name)
        handler_name = _get_handler_name(logger_name)

        # perform logging
        log_text_1 = 'this will be the last line in the compressed log file.'
        logger.info(log_text_1)

        # perform logging again after the interval
        time.sleep(self._get_interval_in_seconds(handler_name) + 5)
        log_text_2 = 'this will be the first line in the new log file.'
        logger.info(log_text_2)

        # confirm that the outdated log file is saved in the container
        container = self._get_container_name(handler_name)
        filename = _get_handler_config_value(handler_name, 'filename')
        basename = os.path.basename(filename)
        blobs = iter(self.service.list_blobs(container, prefix=basename))
        blob = next(blobs)
        self.assertTrue(blob.name.startswith(basename))
        self.assertTrue(blob.name.endswith('.zip'))
        self.assertEqual(blob.properties.content_settings.content_type, 'application/zip')

        # confirm that the blob is a zip file
        zipfile_path = os.path.join(_LOGFILE_TMPDIR, blob.name)
        self.service.get_blob_to_path(container, blob.name, zipfile_path)
        self.assertTrue(zipfile.is_zipfile(zipfile_path))

        # confirm that the zip file only has the rotated log file
        extract_dir = mkdtemp(dir=_LOGFILE_TMPDIR)
        with zipfile.ZipFile(zipfile_path, 'r') as z:
            files = z.namelist()
            self.assertEqual(len(files), 1)
            self.assertEqual(files[0], blob.name.rpartition('.zip')[0])
            z.extractall(path=extract_dir)
        with open(os.path.join(extract_dir, files[0]), 'r') as f:
            self.assertRegex(f.readline(), log_text_1)

        # confirm that there's no more blob in the container
        with self.assertRaises(StopIteration):
            next(blobs)

        # confirm that the current log file has correct logs
        with open(filename, 'r') as f:
            self.assertRegex(f.readline(), log_text_2)
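

# The QueueStorageHandler tests read messages back with get_messages(); the
# message_ttl and visibility_timeout options are passed through to the queue
# service when each log record is enqueued, so the tests simply wait out the
# configured time and poll the queue again.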
class QueueStorageHandlerTest(_TestCase):

    def setUp(self):
        self.service = QueueService(account_name=ACCOUNT_NAME,
                                    account_key=ACCOUNT_KEY,
                                    is_emulated=_EMULATED)
        # ensure that there's no message on the queue before each test
        queues = set()
        for cfg in LOGGING['handlers'].values():
            if 'queue' in cfg:
                queues.add(cfg['queue'])
        for queue in self.service.list_queues():
            if queue.name in queues:
                self.service.clear_messages(queue.name)

    def test_logging(self):
        # get the logger for the test
        logger_name = 'queue'
        logger = logging.getLogger(logger_name)
        handler_name = _get_handler_name(logger_name)

        # perform logging
        log_text = 'logging test'
        logger.info(log_text)

        # confirm that the message has correct log text
        queue = _get_handler_config_value(handler_name, 'queue')
        messages = iter(self.service.get_messages(queue))
        message = next(messages)
        text_expected = 'INFO %s' % log_text
        if _get_handler_config_value(handler_name, 'base64_encoding'):
            text_expected = _base64_encode(text_expected)
        self.assertEqual(message.content, text_expected)

        # confirm that there's no more message in the queue
        with self.assertRaises(StopIteration):
            next(messages)

    def test_message_ttl(self):
        # get the logger for the test
        logger_name = 'message_ttl'
        logger = logging.getLogger(logger_name)
        handler_name = _get_handler_name(logger_name)

        # perform logging
        log_text = 'time-to-live test'
        logger.info(log_text)

        # confirm that the new message is visible till the ttl expires
        queue = _get_handler_config_value(handler_name, 'queue')
        messages = iter(self.service.get_messages(queue))
        message = next(messages)
        text_expected = 'INFO %s' % log_text
        if _get_handler_config_value(handler_name, 'base64_encoding'):
            text_expected = _base64_encode(text_expected)
        self.assertEqual(message.content, text_expected)

        # confirm that there's no more message in the queue
        with self.assertRaises(StopIteration):
            next(messages)

        # confirm that the message is gone after the ttl expires
        ttl = _get_handler_config_value(handler_name, 'message_ttl')
        time.sleep(int(ttl) + 5)
        messages = iter(self.service.get_messages(queue))
        with self.assertRaises(StopIteration):
            next(messages)

    def test_visibility_timeout(self):
        # get the logger for the test
        logger_name = 'visibility_timeout'
        logger = logging.getLogger(logger_name)
        handler_name = _get_handler_name(logger_name)

        # perform logging
        log_text = 'visibility test'
        logger.info(log_text)

        # confirm that the new message is invisible till the timeout expires
        queue = _get_handler_config_value(handler_name, 'queue')
        messages = iter(self.service.get_messages(queue))
        with self.assertRaises(StopIteration):
            next(messages)

        # confirm that the new message is visible after the timeout expires
        timeout = _get_handler_config_value(handler_name, 'visibility_timeout')
        time.sleep(int(timeout) + 5)
        messages = iter(self.service.get_messages(queue))
        message = next(messages)
        text_expected = 'INFO %s' % log_text
        if _get_handler_config_value(handler_name, 'base64_encoding'):
            text_expected = _base64_encode(text_expected)
        self.assertEqual(message.content, text_expected)

        # confirm that there's no more message in the queue
        with self.assertRaises(StopIteration):
            next(messages)
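
    # With base64_encoding enabled, the handler Base64-encodes the formatted
    # record before enqueueing it, so the expected text must be encoded the
    # same way before comparing.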
    def test_base64_encoding(self):
        # get the logger for the test
        logger_name = 'base64_encoding'
        logger = logging.getLogger(logger_name)
        handler_name = _get_handler_name(logger_name)

        # perform logging
        log_text = 'Base64 encoding test'
        logger.info(log_text)

        # confirm that the log message is encoded in Base64
        queue = _get_handler_config_value(handler_name, 'queue')
        messages = iter(self.service.get_messages(queue))
        message = next(messages)
        text_expected = 'INFO %s' % log_text
        if _get_handler_config_value(handler_name, 'base64_encoding'):
            text_expected = _base64_encode(text_expected)
        self.assertEqual(message.content, text_expected)

        # confirm that there's no more message in the queue
        with self.assertRaises(StopIteration):
            next(messages)


class TableStorageHandlerTest(_TestCase):

    def _divide_key(self, key):
        # split a partition/row key on '-' while keeping the hostname in one
        # piece, since the hostname itself may contain '-' characters
        divided = []
        hostname = gethostname()
        if key.find(hostname) >= 0:
            preceding, hostname, remaining = key.rpartition(hostname)
            preceding = preceding[:-1] if preceding.endswith('-') else preceding
            divided.extend(preceding.split('-'))
            divided.append(hostname)
            remaining = remaining[1:] if remaining.startswith('-') else remaining
            divided.extend(remaining.split('-'))
        else:
            divided.extend(key.split('-'))
        return iter(divided)

    def _get_formatter_name(self, handler_name, formatter_type):
        name = _get_handler_config_value(handler_name, formatter_type)
        if name:
            if name.startswith('cfg://formatters.'):
                name = name.split('.')[1]
        return name

    def _get_partition_key_formatter_name(self, handler_name):
        return self._get_formatter_name(handler_name, 'partition_key_formatter')

    def _get_row_key_formatter_name(self, handler_name):
        return self._get_formatter_name(handler_name, 'row_key_formatter')

    def setUp(self):
        self.service = TableService(account_name=ACCOUNT_NAME,
                                    account_key=ACCOUNT_KEY,
                                    is_emulated=_EMULATED)
        # ensure that there's no entity in the table before each test
        tables = set()
        for cfg in LOGGING['handlers'].values():
            if 'table' in cfg:
                tables.add(cfg['table'])
        for table in self.service.list_tables():
            if table.name in tables:
                for entity in self.service.query_entities(table.name):
                    self.service.delete_entity(table.name,
                                               entity.PartitionKey,
                                               entity.RowKey)
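
    # The default partition key is the log timestamp truncated to the minute
    # (%Y%m%d%H%M); the default row key is a millisecond-resolution timestamp
    # followed by the hostname, the process id, and a two-digit row number,
    # which the assertions below take apart via _divide_key().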
    def test_logging(self):
        # get the logger for the test
        logger_name = 'table'
        logger = logging.getLogger(logger_name)
        handler_name = _get_handler_name(logger_name)

        # perform logging
        log_text = 'logging test'
        logging_started = datetime.now()
        logger.info(log_text)
        logging_finished = datetime.now()

        # confirm that the entity has correct log text
        table = _get_handler_config_value(handler_name, 'table')
        entities = iter(self.service.query_entities(table))
        entity = next(entities)
        self.assertEqual(entity.message, 'INFO %s' % log_text)

        # confirm that the entity has the default partition key
        fmt = '%Y%m%d%H%M'
        try:
            self.assertEqual(entity.PartitionKey, logging_started.strftime(fmt))
        except AssertionError:
            if logging_started == logging_finished:
                raise
            self.assertEqual(entity.PartitionKey, logging_finished.strftime(fmt))

        # confirm that the entity has the default row key
        divided = self._divide_key(entity.RowKey)
        timestamp = next(divided)
        fmt = '%Y%m%d%H%M%S'
        self.assertGreaterEqual(timestamp[:-3], logging_started.strftime(fmt))
        self.assertLessEqual(timestamp[:-3], logging_finished.strftime(fmt))
        self.assertRegex(timestamp[-3:], '^[0-9]{3}$')
        self.assertEqual(next(divided), gethostname())
        self.assertEqual(int(next(divided)), os.getpid())
        self.assertEqual(next(divided), '00')
        with self.assertRaises(StopIteration):
            next(divided)

        # confirm that there's no more entity in the table
        with self.assertRaises(StopIteration):
            next(entities)

    @unittest.skipIf(_EMULATED, "Azure Storage Emulator doesn't support batch operation.")
    def test_batch(self):
        # get the logger for the test
        logger_name = 'batch'
        logger = logging.getLogger(logger_name)
        handler_name = _get_handler_name(logger_name)

        # perform logging; 1.5 * batch_size records are emitted, so only the
        # first full batch should be committed immediately
        batch_size = _get_handler_config_value(handler_name, 'batch_size')
        log_text = 'batch logging test'
        for i in range(batch_size + int(batch_size / 2)):
            logger.info('%s#%02d' % (log_text, i))

        # confirm that only batch_size entities are committed at this point
        table = _get_handler_config_value(handler_name, 'table')
        entities = list(self.service.query_entities(table))
        self.assertEqual(len(entities), batch_size)
        rowno_found = set()
        seq_found = set()
        for entity in entities:
            # partition key
            self.assertEqual(entity.PartitionKey, 'batch-%s' % gethostname())
            # row key
            rowno = entity.RowKey.split('-')[-1]
            self.assertLess(int(rowno), batch_size)
            self.assertNotIn(rowno, rowno_found)
            rowno_found.add(rowno)
            # message
            message, seq = entity.message.split('#')
            self.assertEqual(message, 'INFO %s' % log_text)
            self.assertLess(int(seq), batch_size)
            self.assertNotIn(seq, seq_found)
            seq_found.add(seq)

        # remove the entities created so far before the next batch
        for entity in entities:
            self.service.delete_entity(table,
                                       entity.PartitionKey,
                                       entity.RowKey)

        # perform logging again to fill up the next batch
        # (i is still bound to the last index from the loop above)
        for j in range(i + 1, int(batch_size / 2) + i + 1):
            logger.info('%s#%02d' % (log_text, j))

        # confirm that the remaining entities are committed in the next batch
        entities = list(self.service.query_entities(table))
        self.assertEqual(len(entities), batch_size)
        rowno_found.clear()
        for entity in entities:
            # partition key
            self.assertEqual(entity.PartitionKey, 'batch-%s' % gethostname())
            # row key
            rowno = entity.RowKey.split('-')[-1]
            self.assertLess(int(rowno), batch_size)
            self.assertNotIn(rowno, rowno_found)
            rowno_found.add(rowno)
            # message
            message, seq = entity.message.split('#')
            self.assertEqual(message, 'INFO %s' % log_text)
            self.assertGreaterEqual(int(seq), batch_size)
            self.assertLess(int(seq), batch_size * 2)
            self.assertNotIn(seq, seq_found)
            seq_found.add(seq)
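
    # extra_properties adds one entity property per configured LogRecord
    # attribute; each value is checked against the live process below.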
    def test_extra_properties(self):
        # get the logger for the test
        logger_name = 'extra_properties'
        logger = logging.getLogger(logger_name)
        handler_name = _get_handler_name(logger_name)

        # perform logging
        log_text = 'extra properties test'
        logger.info(log_text)

        # confirm that the entity has correct log text
        table = _get_handler_config_value(handler_name, 'table')
        entities = iter(self.service.query_entities(table))
        entity = next(entities)
        self.assertEqual(entity.message, 'INFO %s' % log_text)

        # confirm that the extra properties have correct values
        entity = next(iter(self.service.query_entities(table)))
        self.assertEqual(entity.hostname, gethostname())
        self.assertEqual(entity.levelname, 'INFO')
        self.assertEqual(int(entity.levelno), logging.INFO)
        self.assertEqual(entity.module, os.path.basename(__file__).rpartition('.')[0])
        self.assertEqual(entity.name, logger_name)
        self.assertEqual(int(entity.process), os.getpid())
        self.assertEqual(int(entity.thread), current_thread().ident)

        # confirm that there's no more entity in the table
        with self.assertRaises(StopIteration):
            next(entities)

    def test_custom_key_formatters(self):
        # get the logger for the test
        logger_name = 'custom_keys'
        logger = logging.getLogger(logger_name)
        handler_name = _get_handler_name(logger_name)

        # perform logging
        log_text = 'custom key formatters test'
        logging_started = datetime.now()
        logger.info(log_text)
        logging_finished = datetime.now()

        # confirm that the entity has correct log text
        table = _get_handler_config_value(handler_name, 'table')
        entities = iter(self.service.query_entities(table))
        entity = next(entities)
        self.assertEqual(entity.message, 'INFO %s' % log_text)

        # confirm that the entity has a custom partition key
        divided = self._divide_key(entity.PartitionKey)
        self.assertEqual(next(divided), 'mycustompartitionkey')
        self.assertEqual(next(divided), gethostname())
        formatter_name = self._get_partition_key_formatter_name(handler_name)
        fmt = _get_formatter_config_value(formatter_name, 'datefmt')
        asctime = next(divided)
        try:
            self.assertEqual(asctime, logging_started.strftime(fmt))
        except AssertionError:
            if logging_started == logging_finished:
                raise
            self.assertEqual(asctime, logging_finished.strftime(fmt))
        with self.assertRaises(StopIteration):
            next(divided)

        # confirm that the entity has a custom row key
        divided = self._divide_key(entity.RowKey)
        self.assertEqual(next(divided), 'mycustomrowkey')
        self.assertEqual(next(divided), gethostname())
        formatter_name = self._get_row_key_formatter_name(handler_name)
        fmt = _get_formatter_config_value(formatter_name, 'datefmt')
        asctime = next(divided)
        try:
            self.assertEqual(asctime, logging_started.strftime(fmt))
        except AssertionError:
            if logging_started == logging_finished:
                raise
            self.assertEqual(asctime, logging_finished.strftime(fmt))
        with self.assertRaises(StopIteration):
            next(divided)

        # confirm that there's no more entity in the table
        with self.assertRaises(StopIteration):
            next(entities)


if __name__ == '__main__':
    try:
        dictConfig(LOGGING)
        unittest.main()
    finally:
        logging.shutdown()
        rmtree(_LOGFILE_TMPDIR, ignore_errors=True)
--------------------------------------------------------------------------------