├── setup.cfg ├── MANIFEST.in ├── azure ├── __init__.py ├── eventprocessorhost │ ├── cancellation_token.py │ ├── __init__.py │ ├── checkpoint.py │ ├── lease.py │ ├── azure_blob_lease.py │ ├── abstract_event_processor.py │ ├── eh_config.py │ ├── abstract_checkpoint_manager.py │ ├── eph.py │ ├── abstract_lease_manager.py │ ├── partition_pump.py │ ├── eh_partition_pump.py │ └── partition_context.py └── eventhub │ └── __init__.py ├── .vscode └── settings.json ├── dev_requirements.txt ├── README.rst ├── tests ├── asynctests │ ├── test_partition_manager.py │ ├── test_eh_partition_pump.py │ ├── test_partition_pump.py │ ├── test_iothub_receive_async.py │ ├── __init__.py │ ├── test_reconnect_async.py │ ├── test_longrunning_receive_async.py │ ├── test_longrunning_send_async.py │ ├── test_checkpoint_manager.py │ ├── test_negative_async.py │ ├── test_send_async.py │ ├── test_longrunning_eph.py │ └── test_longrunning_eph_with_context.py ├── test_iothub_receive.py ├── test_iothub_send.py ├── __init__.py ├── test_reconnect.py ├── test_longrunning_send.py ├── test_longrunning_receive.py ├── test_negative.py ├── test_send.py └── test_receive.py ├── examples ├── __init__.py ├── iothub_recv.py ├── send.py ├── batch_send.py ├── transfer.py ├── batch_transfer.py ├── send_async.py ├── recv_batch.py ├── recv.py ├── recv_epoch.py ├── recv_async.py └── eph.py ├── .travis.yml ├── features ├── eph.feature ├── steps │ ├── eventhub.py │ └── test_utils.py └── eventhub.feature ├── LICENSE ├── pylintrc ├── .gitignore ├── setup.py ├── HISTORY.rst └── conftest.py /setup.cfg: -------------------------------------------------------------------------------- 1 | [bdist_wheel] 2 | universal=1 -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include *.rst 2 | include azure/__init__.py -------------------------------------------------------------------------------- 
/azure/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | __path__ = __import__('pkgutil').extend_path(__path__, __name__) 3 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "python.pythonPath": "${workspaceFolder}/env36/bin/python", 3 | "python.linting.enabled": false 4 | } -------------------------------------------------------------------------------- /dev_requirements.txt: -------------------------------------------------------------------------------- 1 | pytest>=3.4.1 2 | pytest-asyncio>=0.8.0; python_version > '3.4' 3 | azure-servicebus==0.50.0 4 | docutils>=0.14 5 | pygments>=2.2.0 6 | pylint==2.3.0; python_version >= '3.4' 7 | pylint==1.8.4; python_version < '3.4' 8 | behave==1.2.6 9 | wheel -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Microsoft Azure SDK for Event Hubs 2 | ================================== 3 | 4 | This project has been moved to the `azure-sdk-for-python `__ repository, under: 5 | 6 | `azure-sdk-for-python/sdk/eventhub/azure-eventhubs `__. 7 | 8 | This repository is no longer maintained. 9 | 10 | For new issues and pull requests, please go to the `azure-sdk-for-python `__ repository. 11 | -------------------------------------------------------------------------------- /tests/asynctests/test_partition_manager.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 
4 | # ----------------------------------------------------------------------------------- 5 | 6 | import asyncio 7 | 8 | 9 | def test_get_partition_ids(partition_manager): 10 | """ 11 | Test that partition manger returns all the partitions for an event hub 12 | """ 13 | loop = asyncio.get_event_loop() 14 | pids = loop.run_until_complete(partition_manager.get_partition_ids_async()) 15 | assert pids == ["0", "1"] 16 | -------------------------------------------------------------------------------- /azure/eventprocessorhost/cancellation_token.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 4 | # ----------------------------------------------------------------------------------- 5 | 6 | """ 7 | Based on https://stackoverflow.com/questions/43229939/how-to-pass-a-boolean-by-reference-across-threads-and-modules 8 | """ 9 | class CancellationToken: 10 | """ 11 | Thread Safe Mutable Cancellation Token. 12 | """ 13 | def __init__(self): 14 | self.is_cancelled = False 15 | 16 | def cancel(self): 17 | """ 18 | Cancel the token. 19 | """ 20 | self.is_cancelled = True 21 | -------------------------------------------------------------------------------- /azure/eventhub/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 
4 | # -------------------------------------------------------------------------------------------- 5 | 6 | __version__ = "1.3.1" 7 | 8 | from azure.eventhub.common import EventData, EventHubError, Offset 9 | from azure.eventhub.client import EventHubClient 10 | from azure.eventhub.sender import Sender 11 | from azure.eventhub.receiver import Receiver 12 | 13 | try: 14 | from azure.eventhub.async_ops import ( 15 | EventHubClientAsync, 16 | AsyncSender, 17 | AsyncReceiver) 18 | except (ImportError, SyntaxError): 19 | pass # Python 3 async features not supported 20 | -------------------------------------------------------------------------------- /examples/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 
4 | # -------------------------------------------------------------------------------------------- 5 | 6 | import sys 7 | import logging 8 | 9 | def get_logger(level): 10 | azure_logger = logging.getLogger("azure.eventhub") 11 | azure_logger.setLevel(level) 12 | handler = logging.StreamHandler(stream=sys.stdout) 13 | handler.setFormatter(logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')) 14 | if not azure_logger.handlers: 15 | azure_logger.addHandler(handler) 16 | 17 | uamqp_logger = logging.getLogger("uamqp") 18 | uamqp_logger.setLevel(logging.INFO) 19 | if not uamqp_logger.handlers: 20 | uamqp_logger.addHandler(handler) 21 | return azure_logger 22 | -------------------------------------------------------------------------------- /tests/test_iothub_receive.py: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for 4 | # license information. 
5 | #-------------------------------------------------------------------------- 6 | 7 | import os 8 | import pytest 9 | import time 10 | 11 | from azure import eventhub 12 | from azure.eventhub import EventData, EventHubClient, Offset 13 | 14 | def test_iothub_receive_sync(iot_connection_str, device_id): 15 | client = EventHubClient.from_iothub_connection_string(iot_connection_str, debug=True) 16 | receiver = client.add_receiver("$default", "0", operation='/messages/events') 17 | try: 18 | client.run() 19 | partitions = client.get_eventhub_info() 20 | assert partitions["partition_ids"] == ["0", "1", "2", "3"] 21 | received = receiver.receive(timeout=5) 22 | assert len(received) == 0 23 | finally: 24 | client.stop() -------------------------------------------------------------------------------- /tests/test_iothub_send.py: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for 4 | # license information. 
5 | #-------------------------------------------------------------------------- 6 | 7 | import os 8 | import pytest 9 | import time 10 | import uuid 11 | 12 | from uamqp.message import MessageProperties 13 | 14 | from azure import eventhub 15 | from azure.eventhub import EventData, EventHubClient 16 | 17 | 18 | def test_iothub_send_single_event(iot_connection_str, device_id): 19 | client = EventHubClient.from_iothub_connection_string(iot_connection_str, debug=True) 20 | sender = client.add_sender(operation='/messages/devicebound') 21 | try: 22 | client.run() 23 | outcome = sender.send(EventData(b"A single event", to_device=device_id)) 24 | assert outcome.value == 0 25 | except: 26 | raise 27 | finally: 28 | client.stop() 29 | -------------------------------------------------------------------------------- /tests/asynctests/test_eh_partition_pump.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 4 | # ----------------------------------------------------------------------------------- 5 | 6 | import unittest 7 | import asyncio 8 | import logging 9 | import pytest 10 | 11 | 12 | async def wait_and_close(host): 13 | """ 14 | Run EventProcessorHost for 2 minutes then shutdown. 
15 | """ 16 | await asyncio.sleep(60) 17 | await host.close_async() 18 | 19 | 20 | def test_partition_pump_async(eh_partition_pump): 21 | """ 22 | Test that event hub partition pump opens and processess messages sucessfully then closes 23 | """ 24 | pytest.skip("Not working yet") 25 | loop = asyncio.get_event_loop() 26 | tasks = asyncio.gather( 27 | eh_partition_pump.open_async(), 28 | wait_and_close(eh_partition_pump)) 29 | loop.run_until_complete(tasks) 30 | -------------------------------------------------------------------------------- /examples/iothub_recv.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 4 | # -------------------------------------------------------------------------------------------- 5 | 6 | """ 7 | An example to show receiving events from an IoT Hub partition. 
8 | """ 9 | from azure import eventhub 10 | from azure.eventhub import EventData, EventHubClient, Offset 11 | 12 | import logging 13 | import os 14 | logger = logging.getLogger('azure.eventhub') 15 | 16 | iot_connection_str = os.environ['IOTHUB_CONNECTION_STR'] 17 | 18 | client = EventHubClient.from_iothub_connection_string(iot_connection_str, debug=True) 19 | receiver = client.add_receiver("$default", "0", operation='/messages/events') 20 | try: 21 | client.run() 22 | eh_info = client.get_eventhub_info() 23 | print(eh_info) 24 | 25 | received = receiver.receive(timeout=5) 26 | print(received) 27 | finally: 28 | client.stop() 29 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | cache: pip 3 | dist: xenial 4 | sudo: required 5 | matrix: 6 | include: 7 | - os: linux 8 | python: "2.7" 9 | dist: trusty 10 | script: 11 | - pytest 12 | - python ./setup.py check -r -s 13 | - pylint --ignore=async_ops azure.eventhub 14 | - os: linux 15 | python: "3.4" 16 | dist: trusty 17 | script: 18 | - pytest 19 | - python ./setup.py check -r -s 20 | - os: linux 21 | python: "3.5" 22 | script: 23 | - pytest 24 | - python ./setup.py check -r -s 25 | - pylint azure.eventhub 26 | - pylint azure.eventprocessorhost 27 | - os: linux 28 | python: "3.6" 29 | script: 30 | - pytest 31 | - python ./setup.py check -r -s 32 | - pylint azure.eventhub 33 | - pylint azure.eventprocessorhost 34 | - os: linux 35 | python: "3.7" 36 | script: 37 | - pytest 38 | - python ./setup.py check -r -s 39 | - pylint azure.eventhub 40 | - pylint azure.eventprocessorhost 41 | install: 42 | - pip install -r dev_requirements.txt 43 | - pip install -e . 
44 | -------------------------------------------------------------------------------- /features/eph.feature: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 4 | # -------------------------------------------------------------------------------------------- 5 | 6 | Feature: Exercising Event Processor Host 7 | 8 | # Scenario: EPH single host, generic scenario. 9 | 10 | # Scenario: EPH runs with listen only claims. 11 | 12 | # Scenario: Host runs idle for a while by managing sender to send in intervals. 13 | 14 | # Scenario: No sends at all, hosts will stay idle. 15 | 16 | # Scenario: Spawns multiple test processes consuming from the same event hub. 17 | 18 | # Scenario: Registers and unregisters hosts as part of the regular iteration to introduce excessive partition moves. 19 | 20 | Scenario: Registers and unregisters hosts as part of the regular iteration to introduce excessive partition moves. No sends in this scenario. 21 | 22 | Scenario: Runs EPH on 256 partition entity. 23 | 24 | # Scenario: Runs EPH on multiple consumer groups. 25 | 26 | # Scenario: Runs EPH with web sockets enabled. -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Microsoft Corporation. All rights reserved. 
4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE 22 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 
4 | # ----------------------------------------------------------------------------------- 5 | 6 | import sys 7 | import logging 8 | from logging.handlers import RotatingFileHandler 9 | 10 | 11 | def get_logger(filename, level=logging.INFO): 12 | azure_logger = logging.getLogger("azure.eventhub") 13 | azure_logger.setLevel(level) 14 | uamqp_logger = logging.getLogger("uamqp") 15 | uamqp_logger.setLevel(logging.INFO) 16 | 17 | formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s') 18 | console_handler = logging.StreamHandler(stream=sys.stdout) 19 | console_handler.setFormatter(formatter) 20 | if not azure_logger.handlers: 21 | azure_logger.addHandler(console_handler) 22 | if not uamqp_logger.handlers: 23 | uamqp_logger.addHandler(console_handler) 24 | 25 | if filename: 26 | file_handler = RotatingFileHandler(filename, maxBytes=5*1024*1024, backupCount=2) 27 | file_handler.setFormatter(formatter) 28 | azure_logger.addHandler(file_handler) 29 | 30 | return azure_logger 31 | -------------------------------------------------------------------------------- /azure/eventprocessorhost/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 4 | # -------------------------------------------------------------------------------------------- 5 | 6 | """ 7 | The module provides a means to process Azure Event Hubs events at scale. 
8 | """ 9 | try: 10 | from azure.eventprocessorhost.abstract_event_processor import AbstractEventProcessor 11 | from azure.eventprocessorhost.azure_storage_checkpoint_manager import AzureStorageCheckpointLeaseManager 12 | from azure.eventprocessorhost.azure_blob_lease import AzureBlobLease 13 | from azure.eventprocessorhost.checkpoint import Checkpoint 14 | from azure.eventprocessorhost.eh_config import EventHubConfig 15 | from azure.eventprocessorhost.eh_partition_pump import EventHubPartitionPump, PartitionReceiver 16 | from azure.eventprocessorhost.eph import EventProcessorHost, EPHOptions 17 | from azure.eventprocessorhost.partition_manager import PartitionManager 18 | from azure.eventprocessorhost.partition_context import PartitionContext 19 | from azure.eventprocessorhost.partition_pump import PartitionPump 20 | except (SyntaxError, ImportError): 21 | raise ImportError("EventProcessHost is only compatible with Python 3.5 and above.") 22 | -------------------------------------------------------------------------------- /azure/eventprocessorhost/checkpoint.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 4 | # ----------------------------------------------------------------------------------- 5 | 6 | class Checkpoint: 7 | """ 8 | Contains checkpoint metadata. 9 | """ 10 | 11 | def __init__(self, partition_id, offset="-1", sequence_number="0"): 12 | """Initialize Checkpoint. 13 | 14 | :param partition_id: The parition ID of the checkpoint. 15 | :type partition_id: str 16 | :param offset: The receive offset of the checkpoint. 17 | :type offset: str 18 | :param sequence_number: The sequence number of the checkpoint. 
19 | :type sequence_number: str 20 | """ 21 | self.partition_id = partition_id 22 | self.offset = offset 23 | self.sequence_number = sequence_number 24 | 25 | def from_source(self, checkpoint): 26 | """ 27 | Creates a new Checkpoint from an existing checkpoint. 28 | 29 | :param checkpoint: Existing checkpoint. 30 | :type checkpoint: ~azure.eventprocessorhost.checkpoint.Checkpoint 31 | """ 32 | self.partition_id = checkpoint.partition_id 33 | self.offset = checkpoint.offset 34 | self.sequence_number = checkpoint.sequence_number 35 | -------------------------------------------------------------------------------- /examples/send.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | An example to show sending events to an Event Hub partition. 5 | """ 6 | 7 | # pylint: disable=C0111 8 | 9 | import sys 10 | import logging 11 | import datetime 12 | import time 13 | import os 14 | 15 | from azure.eventhub import EventHubClient, Sender, EventData 16 | 17 | import examples 18 | logger = examples.get_logger(logging.INFO) 19 | 20 | 21 | # Address can be in either of these formats: 22 | # "amqps://:@.servicebus.windows.net/myeventhub" 23 | # "amqps://.servicebus.windows.net/myeventhub" 24 | ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') 25 | 26 | # SAS policy and key are not required if they are encoded in the URL 27 | USER = os.environ.get('EVENT_HUB_SAS_POLICY') 28 | KEY = os.environ.get('EVENT_HUB_SAS_KEY') 29 | 30 | try: 31 | if not ADDRESS: 32 | raise ValueError("No EventHubs URL supplied.") 33 | 34 | client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY) 35 | sender = client.add_sender(partition="0") 36 | client.run() 37 | try: 38 | start_time = time.time() 39 | for i in range(100): 40 | logger.info("Sending message: {}".format(i)) 41 | sender.send(EventData(str(i))) 42 | except: 43 | raise 44 | finally: 45 | end_time = time.time() 46 | client.stop() 47 | run_time = end_time - 
start_time 48 | logger.info("Runtime: {} seconds".format(run_time)) 49 | 50 | except KeyboardInterrupt: 51 | pass 52 | -------------------------------------------------------------------------------- /tests/asynctests/test_partition_pump.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 4 | # ----------------------------------------------------------------------------------- 5 | 6 | import asyncio 7 | 8 | 9 | def test_open_async(partition_pump): 10 | """ 11 | Test that partition pump opens sucessfully 12 | """ 13 | loop = asyncio.get_event_loop() 14 | loop.run_until_complete(partition_pump.open_async()) # Simulate Open 15 | 16 | def test_process_events_async(partition_pump): 17 | """ 18 | Test that the partition pump processes a list of mock events (["event1", "event2"]) 19 | properly 20 | """ 21 | loop = asyncio.get_event_loop() 22 | loop.run_until_complete(partition_pump.open_async()) # Simulate Open 23 | _mock_events = ["event1", "event2"] # Mock Events 24 | loop.run_until_complete(partition_pump.process_events_async(_mock_events)) # Simulate Process 25 | 26 | def test_close_async(partition_pump): 27 | """ 28 | Test that partition pump closes 29 | """ 30 | loop = asyncio.get_event_loop() 31 | loop.run_until_complete(partition_pump.open_async()) # Simulate Open 32 | _mock_events = ["event1", "event2"] # Mock Events 33 | loop.run_until_complete(partition_pump.process_events_async(_mock_events)) # Simulate Process 34 | loop.run_until_complete(partition_pump.close_async("Finished")) # Simulate Close 35 | -------------------------------------------------------------------------------- /examples/batch_send.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | An example to show batch sending events to an Event Hub. 5 | """ 6 | 7 | # pylint: disable=C0111 8 | 9 | import sys 10 | import logging 11 | import datetime 12 | import time 13 | import os 14 | 15 | from azure.eventhub import EventHubClient, Sender, EventData 16 | 17 | import examples 18 | logger = examples.get_logger(logging.INFO) 19 | 20 | # Address can be in either of these formats: 21 | # "amqps://:@.servicebus.windows.net/myeventhub" 22 | # "amqps://.servicebus.windows.net/myeventhub" 23 | ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') 24 | 25 | # SAS policy and key are not required if they are encoded in the URL 26 | USER = os.environ.get('EVENT_HUB_SAS_POLICY') 27 | KEY = os.environ.get('EVENT_HUB_SAS_KEY') 28 | 29 | 30 | def data_generator(): 31 | for i in range(1500): 32 | logger.info("Yielding message {}".format(i)) 33 | yield b"Hello world" 34 | 35 | 36 | try: 37 | if not ADDRESS: 38 | raise ValueError("No EventHubs URL supplied.") 39 | 40 | client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY) 41 | sender = client.add_sender(partition="1") 42 | client.run() 43 | try: 44 | start_time = time.time() 45 | data = EventData(batch=data_generator()) 46 | sender.send(data) 47 | except: 48 | raise 49 | finally: 50 | end_time = time.time() 51 | client.stop() 52 | run_time = end_time - start_time 53 | logger.info("Runtime: {} seconds".format(run_time)) 54 | 55 | except KeyboardInterrupt: 56 | pass 57 | -------------------------------------------------------------------------------- /examples/transfer.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | An example to show sending events to an Event Hub. 
5 | """ 6 | 7 | # pylint: disable=C0111 8 | 9 | import sys 10 | import logging 11 | import datetime 12 | import time 13 | import os 14 | 15 | from azure.eventhub import EventHubClient, Sender, EventData 16 | 17 | import examples 18 | logger = examples.get_logger(logging.INFO) 19 | 20 | 21 | # Address can be in either of these formats: 22 | # "amqps://:@.servicebus.windows.net/myeventhub" 23 | # "amqps://.servicebus.windows.net/myeventhub" 24 | ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') 25 | 26 | # SAS policy and key are not required if they are encoded in the URL 27 | USER = os.environ.get('EVENT_HUB_SAS_POLICY') 28 | KEY = os.environ.get('EVENT_HUB_SAS_KEY') 29 | 30 | 31 | def callback(outcome, condition): 32 | logger.info("Message sent. Outcome: {}, Condition: {}".format( 33 | outcome, condition)) 34 | 35 | 36 | try: 37 | if not ADDRESS: 38 | raise ValueError("No EventHubs URL supplied.") 39 | 40 | client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY) 41 | sender = client.add_sender(partition="1") 42 | client.run() 43 | try: 44 | start_time = time.time() 45 | for i in range(100): 46 | sender.transfer(EventData(str(i)), callback=callback) 47 | logger.info("Queued 100 messages.") 48 | sender.wait() 49 | logger.info("Finished processing queue.") 50 | except: 51 | raise 52 | finally: 53 | end_time = time.time() 54 | client.stop() 55 | run_time = end_time - start_time 56 | logger.info("Runtime: {} seconds".format(run_time)) 57 | 58 | except KeyboardInterrupt: 59 | pass 60 | -------------------------------------------------------------------------------- /pylintrc: -------------------------------------------------------------------------------- 1 | [MASTER] 2 | ignore-patterns=test_* 3 | reports=no 4 | 5 | [MESSAGES CONTROL] 6 | # For all codes, run 'pylint --list-msgs' or go to 'https://pylint.readthedocs.io/en/latest/reference_guide/features.html' 7 | # locally-disabled: Warning locally suppressed using disable-msg 8 | # cyclic-import: because 
of https://github.com/PyCQA/pylint/issues/850 9 | disable=useless-object-inheritance,raising-bad-type,missing-docstring,locally-disabled,fixme,cyclic-import,too-many-arguments,invalid-name,duplicate-code,logging-format-interpolation,too-many-instance-attributes,too-few-public-methods 10 | 11 | [FORMAT] 12 | max-line-length=120 13 | 14 | [VARIABLES] 15 | # Tells whether we should check for unused import in __init__ files. 16 | init-import=no 17 | 18 | [DESIGN] 19 | # Maximum number of locals for function / method body 20 | max-locals=25 21 | # Maximum number of branch for function / method body 22 | max-branches=20 23 | 24 | [SIMILARITIES] 25 | min-similarity-lines=10 26 | 27 | [BASIC] 28 | # Naming hints based on PEP 8 (https://www.python.org/dev/peps/pep-0008/#naming-conventions). 29 | # Consider these guidelines and not hard rules. Read PEP 8 for more details. 30 | 31 | # The invalid-name checker must be **enabled** for these hints to be used. 32 | include-naming-hint=yes 33 | 34 | module-name-hint=lowercase (keep short; underscores are discouraged) 35 | const-name-hint=UPPER_CASE_WITH_UNDERSCORES 36 | class-name-hint=CapitalizedWords 37 | class-attribute-name-hint=lower_case_with_underscores 38 | attr-name-hint=lower_case_with_underscores 39 | method-name-hint=lower_case_with_underscores 40 | function-name-hint=lower_case_with_underscores 41 | argument-name-hint=lower_case_with_underscores 42 | variable-name-hint=lower_case_with_underscores 43 | inlinevar-name-hint=lower_case_with_underscores (short is OK) 44 | 45 | extension-pkg-whitelist=c_uamqp 46 | -------------------------------------------------------------------------------- /examples/batch_transfer.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | An example to show batch sending events to an Event Hub. 
5 | """ 6 | 7 | # pylint: disable=C0111 8 | 9 | import sys 10 | import logging 11 | import datetime 12 | import time 13 | import os 14 | 15 | from azure.eventhub import EventHubClient, Sender, EventData 16 | 17 | import examples 18 | logger = examples.get_logger(logging.INFO) 19 | 20 | # Address can be in either of these formats: 21 | # "amqps://:@.servicebus.windows.net/myeventhub" 22 | # "amqps://.servicebus.windows.net/myeventhub" 23 | ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') 24 | 25 | # SAS policy and key are not required if they are encoded in the URL 26 | USER = os.environ.get('EVENT_HUB_SAS_POLICY') 27 | KEY = os.environ.get('EVENT_HUB_SAS_KEY') 28 | 29 | 30 | def callback(outcome, condition): 31 | logger.info("Message sent. Outcome: {}, Condition: {}".format( 32 | outcome, condition)) 33 | 34 | 35 | def data_generator(): 36 | for i in range(1500): 37 | logger.info("Yielding message {}".format(i)) 38 | yield b"Hello world" 39 | 40 | 41 | try: 42 | if not ADDRESS: 43 | raise ValueError("No EventHubs URL supplied.") 44 | 45 | client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY) 46 | sender = client.add_sender() 47 | client.run() 48 | try: 49 | start_time = time.time() 50 | data = EventData(batch=data_generator()) 51 | sender.transfer(data, callback=callback) 52 | sender.wait() 53 | except: 54 | raise 55 | finally: 56 | end_time = time.time() 57 | client.stop() 58 | run_time = end_time - start_time 59 | logger.info("Runtime: {} seconds".format(run_time)) 60 | 61 | except KeyboardInterrupt: 62 | pass 63 | -------------------------------------------------------------------------------- /examples/send_async.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | An example to show sending events asynchronously to an Event Hub with partition keys. 
5 | """ 6 | 7 | # pylint: disable=C0111 8 | 9 | import sys 10 | import logging 11 | import time 12 | import asyncio 13 | import os 14 | 15 | from azure.eventhub import EventData, EventHubClientAsync, AsyncSender 16 | 17 | import examples 18 | logger = examples.get_logger(logging.INFO) 19 | 20 | # Address can be in either of these formats: 21 | # "amqps://:@.servicebus.windows.net/myeventhub" 22 | # "amqps://.servicebus.windows.net/myeventhub" 23 | ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') 24 | 25 | # SAS policy and key are not required if they are encoded in the URL 26 | USER = os.environ.get('EVENT_HUB_SAS_POLICY') 27 | KEY = os.environ.get('EVENT_HUB_SAS_KEY') 28 | 29 | 30 | async def run(client): 31 | sender = client.add_async_sender() 32 | await client.run_async() 33 | await send(sender, 4) 34 | 35 | 36 | async def send(snd, count): 37 | for i in range(count): 38 | logger.info("Sending message: {}".format(i)) 39 | data = EventData(str(i)) 40 | data.partition_key = b'SamplePartitionKey' 41 | await snd.send(data) 42 | 43 | try: 44 | if not ADDRESS: 45 | raise ValueError("No EventHubs URL supplied.") 46 | 47 | loop = asyncio.get_event_loop() 48 | client = EventHubClientAsync(ADDRESS, debug=True, username=USER, password=KEY) 49 | tasks = asyncio.gather( 50 | run(client), 51 | run(client)) 52 | start_time = time.time() 53 | loop.run_until_complete(tasks) 54 | loop.run_until_complete(client.stop_async()) 55 | end_time = time.time() 56 | run_time = end_time - start_time 57 | logger.info("Runtime: {} seconds".format(run_time)) 58 | loop.close() 59 | 60 | except KeyboardInterrupt: 61 | pass 62 | -------------------------------------------------------------------------------- /examples/recv_batch.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # -------------------------------------------------------------------------------------------- 4 | # Copyright (c) Microsoft Corporation. 
All rights reserved. 5 | # Licensed under the MIT License. See License.txt in the project root for license information. 6 | # -------------------------------------------------------------------------------------------- 7 | 8 | """ 9 | An example to show receiving events from an Event Hub partition and processing 10 | the event in on_event_data callback. 11 | 12 | """ 13 | import os 14 | import sys 15 | import logging 16 | from azure.eventhub import EventHubClient, Receiver, Offset 17 | 18 | import examples 19 | logger = examples.get_logger(logging.INFO) 20 | 21 | # Address can be in either of these formats: 22 | # "amqps://:@.servicebus.windows.net/myeventhub" 23 | # "amqps://.servicebus.windows.net/myeventhub" 24 | ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') 25 | 26 | # SAS policy and key are not required if they are encoded in the URL 27 | USER = os.environ.get('EVENT_HUB_SAS_POLICY') 28 | KEY = os.environ.get('EVENT_HUB_SAS_KEY') 29 | CONSUMER_GROUP = "$default" 30 | OFFSET = Offset("-1") 31 | PARTITION = "0" 32 | 33 | 34 | total = 0 35 | last_sn = -1 36 | last_offset = "-1" 37 | client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY) 38 | try: 39 | receiver = client.add_receiver(CONSUMER_GROUP, PARTITION, prefetch=100, offset=OFFSET) 40 | client.run() 41 | batched_events = receiver.receive(max_batch_size=10) 42 | for event_data in batched_events: 43 | last_offset = event_data.offset.value 44 | last_sn = event_data.sequence_number 45 | total += 1 46 | print("Partition {}, Received {}, sn={} offset={}".format( 47 | PARTITION, 48 | total, 49 | last_sn, 50 | last_offset)) 51 | 52 | except KeyboardInterrupt: 53 | pass 54 | finally: 55 | client.stop() -------------------------------------------------------------------------------- /tests/asynctests/test_iothub_receive_async.py: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------- 2 | # 
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------

import os
import asyncio
import pytest
import time

from azure import eventhub
from azure.eventhub import EventData, Offset, EventHubError, EventHubClientAsync


async def pump(receiver, sleep=None):
    """
    Drain one batch from the receiver and return the number of events read.

    :param receiver: An async receiver exposing a ``receive`` coroutine.
    :param sleep: Optional number of seconds to wait before receiving.
    :rtype: int
    """
    messages = 0
    if sleep:
        await asyncio.sleep(sleep)
    batch = await receiver.receive(timeout=1)
    messages += len(batch)
    return messages


async def get_partitions(iot_connection_str):
    """
    Return the partition IDs of the Event Hub behind the given IoT Hub.

    :param iot_connection_str: The IoT Hub connection string.
    :rtype: list[str]
    """
    # Construct the client *before* entering the try block: if construction
    # itself raises, the finally clause below must not reference an unbound
    # name (the original code would have raised NameError here).
    client = EventHubClientAsync.from_iothub_connection_string(iot_connection_str, debug=True)
    try:
        client.add_async_receiver("$default", "0", prefetch=1000, operation='/messages/events')
        await client.run_async()
        partitions = await client.get_eventhub_info_async()
        return partitions["partition_ids"]
    finally:
        await client.stop_async()


@pytest.mark.asyncio
async def test_iothub_receive_multiple_async(iot_connection_str):
    """
    Receive concurrently from every partition of an IoT Hub-backed Event Hub
    and verify each pump returns at most its prefetch worth of events.
    """
    partitions = await get_partitions(iot_connection_str)
    client = EventHubClientAsync.from_iothub_connection_string(iot_connection_str, debug=True)
    try:
        receivers = []
        for p in partitions:
            receivers.append(client.add_async_receiver("$default", p, prefetch=10, operation='/messages/events'))
        await client.run_async()
        outputs = await asyncio.gather(*[pump(r) for r in receivers])

        assert isinstance(outputs[0], int) and outputs[0] <= 10
        assert isinstance(outputs[1], int) and outputs[1] <= 10
    finally:
        await client.stop_async()
files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env*/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | azure/storage/ 40 | azure/common/ 41 | azure/profiles/ 42 | *.log.1 43 | *.log.2 44 | *.log.3 45 | 46 | htmlcov/ 47 | .tox/ 48 | .coverage 49 | .coverage.* 50 | .cache 51 | nosetests.xml 52 | coverage.xml 53 | *.cover 54 | .hypothesis/ 55 | 56 | # Translations 57 | *.mo 58 | *.pot 59 | 60 | # Django stuff: 61 | *.log 62 | local_settings.py 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # pyenv 81 | .python-version 82 | 83 | # celery beat schedule file 84 | celerybeat-schedule 85 | 86 | # SageMath parsed files 87 | *.sage.py 88 | 89 | # dotenv 90 | .env 91 | 92 | # virtualenv 93 | .venv 94 | venv/ 95 | ENV/ 96 | 97 | # Spyder project settings 98 | .spyderproject 99 | .spyproject 100 | 101 | # Rope project settings 102 | .ropeproject 103 | 104 | # mkdocs documentation 105 | /site 106 | 107 | # mypy 108 | .mypy_cache/ 109 | .pytest_cache/v/cache/lastfailed 110 | .pytest_cache/v/cache/nodeids 111 | 112 | # EventHub 113 | azure/mgmt/ 114 | azure/common/ 115 | azure/profiles/ 116 | azure/servicebus/ 117 | features/steps/mgmt_settings_real.py 118 | 
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# -----------------------------------------------------------------------------------


class Lease:
    """
    Lease contains partition processing state metadata used to manage partition state.
    """

    def __init__(self):
        # Identity and ownership state for the partition this lease covers.
        self.partition_id = None
        self.sequence_number = None
        self.owner = None
        self.token = None
        self.epoch = 0
        self.event_processor_context = None

    def with_partition_id(self, partition_id):
        """
        Init with partition Id.

        :param partition_id: ID of a given partition.
        :type partition_id: str
        """
        self.partition_id = partition_id
        self.owner = None
        self.token = None
        self.epoch = 0
        # Also clear any sequence number carried over from a previous
        # partition, so a re-used Lease object starts from a clean state
        # (the original reset every other field but left this one stale).
        self.sequence_number = None
        self.event_processor_context = None

    def with_source(self, lease):
        """
        Init with existing lease.

        :param lease: An existing Lease.
        :type lease: ~azure.eventprocessorhost.lease.Lease
        """
        self.partition_id = lease.partition_id
        self.epoch = lease.epoch
        self.owner = lease.owner
        self.token = lease.token
        self.event_processor_context = lease.event_processor_context

    async def is_expired(self):
        """
        Determines whether the lease is expired. By default lease never expires.
        Deriving class implements the lease expiry logic.

        :rtype: bool
        """
        return False

    def increment_epoch(self):
        """
        Increment lease epoch.

        :rtype: int
        """
        self.epoch += 1
        return self.epoch
receiver.receive(timeout=5000) 50 | 51 | end_time = time.time() 52 | client.stop() 53 | run_time = end_time - start_time 54 | print("Received {} messages in {} seconds".format(total, run_time)) 55 | 56 | except KeyboardInterrupt: 57 | pass 58 | finally: 59 | client.stop() -------------------------------------------------------------------------------- /examples/recv_epoch.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # -------------------------------------------------------------------------------------------- 4 | # Copyright (c) Microsoft Corporation. All rights reserved. 5 | # Licensed under the MIT License. See License.txt in the project root for license information. 6 | # -------------------------------------------------------------------------------------------- 7 | 8 | """ 9 | An example to show receiving events from an Event Hub partition as an epoch receiver. 10 | """ 11 | 12 | import os 13 | import sys 14 | import time 15 | import logging 16 | import asyncio 17 | from azure.eventhub import Offset, EventHubClientAsync, AsyncReceiver 18 | 19 | import examples 20 | logger = examples.get_logger(logging.INFO) 21 | 22 | # Address can be in either of these formats: 23 | # "amqps://:@.servicebus.windows.net/myeventhub" 24 | # "amqps://.servicebus.windows.net/myeventhub" 25 | ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') 26 | 27 | # SAS policy and key are not required if they are encoded in the URL 28 | USER = os.environ.get('EVENT_HUB_SAS_POLICY') 29 | KEY = os.environ.get('EVENT_HUB_SAS_KEY') 30 | CONSUMER_GROUP = "$default" 31 | EPOCH = 42 32 | PARTITION = "0" 33 | 34 | 35 | async def pump(client, epoch): 36 | receiver = client.add_async_epoch_receiver(CONSUMER_GROUP, PARTITION, epoch=epoch) 37 | await client.run_async() 38 | total = 0 39 | start_time = time.time() 40 | for event_data in await receiver.receive(timeout=5): 41 | last_offset = event_data.offset 42 | last_sn = 
event_data.sequence_number 43 | total += 1 44 | end_time = time.time() 45 | run_time = end_time - start_time 46 | await client.stop_async() 47 | print("Received {} messages in {} seconds".format(total, run_time)) 48 | 49 | try: 50 | if not ADDRESS: 51 | raise ValueError("No EventHubs URL supplied.") 52 | 53 | loop = asyncio.get_event_loop() 54 | client = EventHubClientAsync(ADDRESS, debug=False, username=USER, password=KEY) 55 | loop.run_until_complete(pump(client, 20)) 56 | loop.close() 57 | 58 | except KeyboardInterrupt: 59 | pass 60 | -------------------------------------------------------------------------------- /examples/recv_async.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # -------------------------------------------------------------------------------------------- 4 | # Copyright (c) Microsoft Corporation. All rights reserved. 5 | # Licensed under the MIT License. See License.txt in the project root for license information. 6 | # -------------------------------------------------------------------------------------------- 7 | 8 | """ 9 | An example to show running concurrent receivers. 
10 | """ 11 | 12 | import os 13 | import sys 14 | import time 15 | import logging 16 | import asyncio 17 | from azure.eventhub import Offset, EventHubClientAsync, AsyncReceiver 18 | 19 | import examples 20 | logger = examples.get_logger(logging.INFO) 21 | 22 | # Address can be in either of these formats: 23 | # "amqps://:@.servicebus.windows.net/myeventhub" 24 | # "amqps://.servicebus.windows.net/myeventhub" 25 | ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') 26 | 27 | # SAS policy and key are not required if they are encoded in the URL 28 | USER = os.environ.get('EVENT_HUB_SAS_POLICY') 29 | KEY = os.environ.get('EVENT_HUB_SAS_KEY') 30 | CONSUMER_GROUP = "$default" 31 | OFFSET = Offset("-1") 32 | 33 | 34 | async def pump(client, partition): 35 | receiver = client.add_async_receiver(CONSUMER_GROUP, partition, OFFSET, prefetch=5) 36 | await client.run_async() 37 | total = 0 38 | start_time = time.time() 39 | for event_data in await receiver.receive(timeout=10): 40 | last_offset = event_data.offset 41 | last_sn = event_data.sequence_number 42 | print("Received: {}, {}".format(last_offset.value, last_sn)) 43 | total += 1 44 | end_time = time.time() 45 | run_time = end_time - start_time 46 | print("Received {} messages in {} seconds".format(total, run_time)) 47 | 48 | try: 49 | if not ADDRESS: 50 | raise ValueError("No EventHubs URL supplied.") 51 | 52 | loop = asyncio.get_event_loop() 53 | client = EventHubClientAsync(ADDRESS, debug=False, username=USER, password=KEY) 54 | tasks = [ 55 | asyncio.ensure_future(pump(client, "0")), 56 | asyncio.ensure_future(pump(client, "1"))] 57 | loop.run_until_complete(asyncio.wait(tasks)) 58 | loop.run_until_complete(client.stop_async()) 59 | loop.close() 60 | 61 | except KeyboardInterrupt: 62 | pass 63 | -------------------------------------------------------------------------------- /azure/eventprocessorhost/azure_blob_lease.py: -------------------------------------------------------------------------------- 1 | # 
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# -----------------------------------------------------------------------------------

import asyncio
import json

from azure.eventprocessorhost.lease import Lease


class AzureBlobLease(Lease):
    """
    An Event Processor Host lease backed by an Azure Storage blob.
    """

    def __init__(self):
        """
        Init Azure Blob Lease.
        """
        # The original evaluated a bare ``super()`` (a no-op expression) and
        # then called ``Lease.__init__(self)`` explicitly; a single
        # cooperative super call is the correct idiom.
        super().__init__()
        self.offset = None
        # Callable (optionally a coroutine function) returning the current
        # blob lease state; replaced by the lease manager at runtime.
        self.state = lambda: None

    def serializable(self):
        """
        Returns a serializable copy of ``__dict__`` with the non-serializable
        ``state`` callable stripped out.
        """
        serial = self.__dict__.copy()
        del serial['state']
        return serial

    def with_lease(self, lease):
        """
        Init with existing lease.
        """
        super().with_source(lease)

    def with_blob(self, blob):
        """
        Init Azure Blob Lease with existing blob.
        """
        content = json.loads(blob.content)
        self.partition_id = content["partition_id"]
        self.owner = content["owner"]
        self.token = content["token"]
        self.epoch = content["epoch"]
        self.offset = content["offset"]
        self.sequence_number = content["sequence_number"]
        # Older blobs may predate this field, hence .get() rather than [].
        self.event_processor_context = content.get("event_processor_context")

    def with_source(self, lease):
        """
        Init Azure Blob Lease from existing.
        """
        super().with_source(lease)
        self.offset = lease.offset
        self.sequence_number = lease.sequence_number

    async def is_expired(self):
        """
        Check and return Azure Blob Lease state using Storage API.
        """
        if asyncio.iscoroutinefunction(self.state):
            current_state = await self.state()
        else:
            current_state = self.state()
        if current_state:
            # Any state other than "leased" means the blob lease has lapsed.
            return current_state != "leased"
        return False
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# -----------------------------------------------------------------------------------

from abc import ABC, abstractmethod


class AbstractEventProcessor(ABC):
    """
    Abstract class that must be extended by event processor classes.
    """
    def __init__(self, params=None):
        pass

    @abstractmethod
    async def open_async(self, context):
        """
        Called by processor host to initialize the event processor.

        :param context: Information about the partition
        :type context: ~azure.eventprocessorhost.partition_context.PartitionContext
        """

    @abstractmethod
    async def close_async(self, context, reason):
        """
        Called by processor host to indicate that the event processor is being stopped.

        :param context: Information about the partition
        :type context: ~azure.eventprocessorhost.partition_context.PartitionContext
        :param reason: The reason for closing.
        :type reason: str
        """

    @abstractmethod
    async def process_events_async(self, context, messages):
        """
        Called by the processor host when a batch of events has arrived.
        This is where the real work of the event processor is done.

        :param context: Information about the partition
        :type context: ~azure.eventprocessorhost.partition_context.PartitionContext
        :param messages: The events to be processed.
        :type messages: list[~azure.eventhub.common.EventData]
        """

    @abstractmethod
    async def process_error_async(self, context, error):
        """
        Called when the underlying client experiences an error while receiving.
        EventProcessorHost will take care of recovering from the error and
        continuing to pump messages.

        :param context: Information about the partition
        :type context: ~azure.eventprocessorhost.partition_context.PartitionContext
        :param error: The error that occurred.
        """
7 | #-------------------------------------------------------------------------- 8 | 9 | import re 10 | import os.path 11 | from io import open 12 | from setuptools import find_packages, setup 13 | 14 | 15 | # Change the PACKAGE_NAME only to change folder and different name 16 | PACKAGE_NAME = "azure-eventhub" 17 | PACKAGE_PPRINT_NAME = "Event Hubs" 18 | 19 | # a-b-c => a/b/c 20 | package_folder_path = PACKAGE_NAME.replace('-', '/') 21 | # a-b-c => a.b.c 22 | namespace_name = PACKAGE_NAME.replace('-', '.') 23 | 24 | # Version extraction inspired from 'requests' 25 | with open(os.path.join(package_folder_path, '__init__.py'), 'r') as fd: 26 | version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', 27 | fd.read(), re.MULTILINE).group(1) 28 | 29 | if not version: 30 | raise RuntimeError('Cannot find version information') 31 | 32 | with open('README.rst') as f: 33 | readme = f.read() 34 | with open('HISTORY.rst') as f: 35 | history = f.read() 36 | 37 | setup( 38 | name=PACKAGE_NAME, 39 | version=version, 40 | description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME), 41 | long_description=readme + '\n\n' + history, 42 | license='MIT License', 43 | author='Microsoft Corporation', 44 | author_email='azpysdkhelp@microsoft.com', 45 | url='https://github.com/Azure/azure-event-hubs-python', 46 | classifiers=[ 47 | 'Development Status :: 5 - Production/Stable', 48 | 'Programming Language :: Python', 49 | 'Programming Language :: Python :: 2', 50 | 'Programming Language :: Python :: 2.7', 51 | 'Programming Language :: Python :: 3', 52 | 'Programming Language :: Python :: 3.4', 53 | 'Programming Language :: Python :: 3.5', 54 | 'Programming Language :: Python :: 3.6', 55 | 'Programming Language :: Python :: 3.7', 56 | 'License :: OSI Approved :: MIT License', 57 | ], 58 | zip_safe=False, 59 | packages=find_packages(exclude=[ 60 | "azure", 61 | "examples", 62 | "tests", 63 | "tests.asynctests"]), 64 | install_requires=[ 65 | 
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

import asyncio
import uuid
import functools

from behave import *

import test_utils

@given('the EventHub SDK is installed')
def step_impl(context):
    from azure import eventhub

@given('an EventHub is created with credentials retrieved')
def step_impl(context):
    #from mgmt_settings_real import get_credentials, SUBSCRIPTION_ID
    #rg, mgmt_client = test_utils.create_mgmt_client(get_credentials(), SUBSCRIPTION_ID)
    context.eh_config = test_utils.get_eventhub_config()

@given('an EventHub with {properties} is created with credentials retrieved')
def step_impl(context, properties):
    #from mgmt_settings_real import get_credentials, SUBSCRIPTION_ID
    #rg, mgmt_client = test_utils.create_mgmt_client(get_credentials(), SUBSCRIPTION_ID)
    _, prop = properties.split(' ')
    if prop == '100TU':
        context.eh_config = test_utils.get_eventhub_100TU_config()
    else:
        raise ValueError("Unrecognised property: {}".format(prop))

@When('I start a message sender')
def step_impl(context):
    from azure.eventhub import EventHubClient
    address = "sb://{}/{}".format(context.eh_config['hostname'], context.eh_config['event_hub'])
    context.client = EventHubClient(
        address,
        username=context.eh_config['key_name'],
        password=context.eh_config['access_key'])
    # Bug fix: the sender must be created from the client stored on the
    # behave context; the bare name ``client`` was never defined in this
    # step and raised NameError at runtime.
    context.sender = context.client.add_sender()
    context.client.run()

@when('I {clients} messages for {hours} hours')
def step_impl(context, clients, hours):
    assert True is not False

@when('I {clients} messages {destination} for {hours} hours')
def step_impl(context, clients, destination, hours):
    assert True is not False

@then('I should receive no errors')
def step_impl(context):
    assert context.failed is False

@then('I can shutdown the {clients} cleanly')
def step_impl(context, clients):
    assert context.failed is False

@then('I should achieve throughput of greater than {total} messages')
def step_impl(context, total):
    assert context.failed is False

@then('I should achieve throughput of greater than {total} messages from {source}')
def step_impl(context, total, source):
    assert context.failed is False

@then('I remove the EventHub')
def step_impl(context):
    assert context.failed is False
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------

import os
import time
import pytest

from azure import eventhub
from azure.eventhub import (
    EventData,
    Offset,
    EventHubError,
    EventHubClient)


def test_send_with_long_interval_sync(connstr_receivers):
    """Send one event immediately and two more after 5-minute idle gaps,
    then verify all three arrive."""
    connection_str, receivers = connstr_receivers
    client = EventHubClient.from_connection_string(connection_str, debug=True)
    sender = client.add_sender()
    try:
        client.run()
        sender.send(EventData(b"A single event"))
        for _ in (1, 2):
            time.sleep(300)
            sender.send(EventData(b"A single event"))
    finally:
        client.stop()

    received = [event for r in receivers for event in r.receive(timeout=1)]

    assert len(received) == 3
    assert list(received[0].body)[0] == b"A single event"


def test_send_with_forced_conn_close_sync(connstr_receivers):
    """Destroy the underlying AMQP sender between sends to force
    reconnects, then verify all five events arrive."""
    connection_str, receivers = connstr_receivers
    client = EventHubClient.from_connection_string(connection_str, debug=True)
    sender = client.add_sender()
    try:
        client.run()
        sender.send(EventData(b"A single event"))
        for _ in (1, 2):
            # Tear down the message sender and wait out the idle period so
            # the next send has to re-establish the link.
            sender._handler._message_sender.destroy()
            time.sleep(300)
            sender.send(EventData(b"A single event"))
            sender.send(EventData(b"A single event"))
    finally:
        client.stop()

    received = []
    for r in receivers:
        received.extend(r.receive(timeout=1))
    assert len(received) == 5
    assert list(received[0].body)[0] == b"A single event"


# def test_send_with_forced_link_detach(connstr_receivers):
#     connection_str, receivers = connstr_receivers
#     client = EventHubClient.from_connection_string(connection_str, debug=True)
#     sender = client.add_sender()
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# -----------------------------------------------------------------------------------

import time
# Import the submodule explicitly: ``import urllib`` alone does not
# guarantee that ``urllib.parse`` is available as an attribute.
import urllib.parse
import hmac
import hashlib
import base64

class EventHubConfig:
    """
    A container class for Event Hub properties.

    :param sb_name: The EventHub (ServiceBus) namespace.
    :type sb_name: str
    :param eh_name: The EventHub name.
    :type eh_name: str
    :param policy: The SAS policy name.
    :type policy: str
    :param sas_key: The SAS access key.
    :type sas_key: str
    :param consumer_group: The EventHub consumer group to receive from. The
     default value is '$default'.
    :type consumer_group: str
    :param namespace_suffix: The ServiceBus namespace URL suffix.
     The default value is 'servicebus.windows.net'.
    :type namespace_suffix: str
    """
    def __init__(self, sb_name, eh_name, policy, sas_key,
                 consumer_group="$default",
                 namespace_suffix="servicebus.windows.net"):
        self.sb_name = sb_name
        self.eh_name = eh_name
        self.policy = policy
        self.sas_key = sas_key
        self.namespace_suffix = namespace_suffix
        self.consumer_group = consumer_group
        self.client_address = self.get_client_address()
        self.rest_token = self.get_rest_token()

    def get_client_address(self):
        """
        Returns the AMQP endpoint address for the Event Hub, with the SAS
        policy and key URL-encoded into the credentials section.
        (The original docstring incorrectly described this as an auth
        token dictionary.)

        :rtype: str
        """
        return "amqps://{}:{}@{}.{}:5671/{}".format(
            urllib.parse.quote_plus(self.policy),
            urllib.parse.quote_plus(self.sas_key),
            self.sb_name,
            self.namespace_suffix,
            self.eh_name)

    def get_rest_token(self):
        """
        Returns an auth token for making calls to eventhub REST API.

        :rtype: str
        """
        uri = urllib.parse.quote_plus(
            "https://{}.{}/{}".format(self.sb_name, self.namespace_suffix, self.eh_name))
        sas = self.sas_key.encode('utf-8')
        # Token is valid for 10000 seconds from the moment of creation.
        expiry = str(int(time.time() + 10000))
        string_to_sign = ('{}\n{}'.format(uri, expiry)).encode('utf-8')
        signed_hmac_sha256 = hmac.HMAC(sas, string_to_sign, hashlib.sha256)
        signature = urllib.parse.quote(base64.b64encode(signed_hmac_sha256.digest()))
        return 'SharedAccessSignature sr={}&sig={}&se={}&skn={}' \
            .format(uri, signature, expiry, self.policy)
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# -----------------------------------------------------------------------------------

"""
Author: Aaron (Ari) Bornstien
"""
from abc import ABC, abstractmethod

class AbstractCheckpointManager(ABC):
    """
    Abstract base for checkpoint stores. Subclass this to persist
    EventProcessorHost checkpoints somewhere other than Azure Storage.
    """
    def __init__(self):
        pass

    @abstractmethod
    async def create_checkpoint_store_if_not_exists_async(self):
        """
        Ensure the backing checkpoint store exists, creating it on first use.

        :return: `True` if the store already existed or was created successfully,
         `False` on failure.
        :rtype: bool
        """

    @abstractmethod
    async def get_checkpoint_async(self, partition_id):
        """
        Fetch the stored checkpoint for one partition.

        :param partition_id: The ID of a given parition.
        :type partition_id: str
        :return: The partition's checkpoint info, or `None` if none was ever stored.
        :rtype: ~azure.eventprocessorhost.checkpoint.Checkpoint
        """

    @abstractmethod
    async def create_checkpoint_if_not_exists_async(self, partition_id):
        """
        Ensure a checkpoint record exists for the partition, creating one if
        needed. A freshly-created checkpoint should carry offset/sequenceNumber
        of StartOfStream/0.

        :param partition_id: The ID of a given parition.
        :type partition_id: str
        :return: The partition's checkpoint, newly created or pre-existing.
        :rtype: ~azure.eventprocessorhost.checkpoint.Checkpoint
        """

    @abstractmethod
    async def update_checkpoint_async(self, lease, checkpoint):
        """
        Persist the offset/sequenceNumber carried by `checkpoint` into the store.

        :param lease: The lease to be updated.
        :type lease: ~azure.eventprocessorhost.lease.Lease
        :param checkpoint: offset/sequeceNumber to update the store with.
        :type checkpoint: ~azure.eventprocessorhost.checkpoint.Checkpoint
        """

    @abstractmethod
    async def delete_checkpoint_async(self, partition_id):
        """
        Remove the partition's stored checkpoint. Deleting a checkpoint that
        does not exist counts as success.

        :param partition_id: The ID of a given parition.
        :type partition_id: str
        """
19 | 20 | # Scenario: Sends on partition senders. 21 | 22 | # Scenario: Send and receive to/from a multiple consumer group entity. 23 | 24 | # Scenario: Sends and receives 246KB size messages. 25 | 26 | @long-running 27 | Scenario: Runs on a 100TU namespace and saturates ingress. 28 | Given the EventHub SDK is installed 29 | And an EventHub with 100TU is created with credentials retrieved 30 | When I send messages for 2 hours 31 | Then I should achieve throughput of greater than 3600000 messages 32 | And I should receive no errors 33 | And I can shutdown the sender cleanly 34 | And I remove the EventHub 35 | 36 | @long-running 37 | Scenario: Runs on a 100TU namespace and saturates ingress with partition senders for 3 days. 38 | Given the EventHub SDK is installed 39 | And an EventHub with 100TU is created with credentials retrieved 40 | When I send messages to partitions for 2 hours 41 | Then I should achieve throughput of greater than 1800000 messages from each partition 42 | And I should receive no errors 43 | And I can shutdown the sender cleanly 44 | And I remove the EventHub 45 | 46 | # Scenario: Sends and receives 1 byte size messages. 47 | 48 | # Scenario: Single clients parks 500 async sends. 49 | 50 | # Scenario: Sends a set of messages and keeps receiving same set of messages again and again. 51 | 52 | # Scenario: Receives with 60 minutes of receive timeout. 53 | 54 | # Scenario: Receives with 3 seconds of receive timeout. 55 | 56 | # Scenario: Recreates receivers at the beginning of each iteration. 57 | 58 | # Scenario: Recreates receivers with the last known sequence number at the beginning of each iteration. 59 | 60 | # Scenario: Uses epoch receivers. 61 | 62 | # Scenario: Introduces a short idle time after each receive attempt. We use 50 seconds of sleep here. 63 | 64 | # Scenario: Uses pump receivers to receive messages. 65 | 66 | # Scenario: Sends messages with partition key set. 
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------

import os
import time
import asyncio
import pytest

from azure import eventhub
from azure.eventhub import (
    EventHubClientAsync,
    EventData,
    Offset,
    EventHubError)


@pytest.mark.asyncio
async def test_send_with_long_interval_async(connstr_receivers):
    # Send one event up front, then two more separated by 5-minute idle gaps,
    # verifying the connection survives long idle periods.
    connection_str, receivers = connstr_receivers
    client = EventHubClientAsync.from_connection_string(connection_str, debug=True)
    sender = client.add_async_sender()
    try:
        await client.run_async()
        await sender.send(EventData(b"A single event"))
        for _ in range(2):
            await asyncio.sleep(300)
            await sender.send(EventData(b"A single event"))
    finally:
        await client.stop_async()

    received = []
    for partition_receiver in receivers:
        received.extend(partition_receiver.receive(timeout=1))
    assert len(received) == 3
    assert list(received[0].body)[0] == b"A single event"


def pump(receiver):
    """Drain *receiver*, collecting events until an empty batch comes back."""
    drained = []
    while True:
        batch = receiver.receive(timeout=1)
        drained.extend(batch)
        if not batch:
            return drained


@pytest.mark.asyncio
async def test_send_with_forced_conn_close_async(connstr_receivers):
    # Destroy the underlying AMQP sender link twice and verify the client
    # transparently reconnects, delivering all five events.
    connection_str, receivers = connstr_receivers
    client = EventHubClientAsync.from_connection_string(connection_str, debug=True)
    sender = client.add_async_sender()
    try:
        await client.run_async()
        await sender.send(EventData(b"A single event"))
        sender._handler._message_sender.destroy()
        await asyncio.sleep(300)
        await sender.send(EventData(b"A single event"))
        await sender.send(EventData(b"A single event"))
        sender._handler._message_sender.destroy()
        await asyncio.sleep(300)
        await sender.send(EventData(b"A single event"))
        await sender.send(EventData(b"A single event"))
    finally:
        await client.stop_async()

    received = []
    for partition_receiver in receivers:
        received.extend(pump(partition_receiver))
    assert len(received) == 5
    assert list(received[0].body)[0] == b"A single event"


# def test_send_with_forced_link_detach(connstr_receivers):
#     connection_str, receivers = connstr_receivers
#     client = EventHubClient.from_connection_string(connection_str, debug=True)
#     sender = client.add_sender()
#     size = 20 * 1024
#     try:
#         client.run()
#         for i in range(1000):
#             sender.transfer(EventData([b"A"*size, b"B"*size, b"C"*size, b"D"*size] * 3))
#             sender.wait()
#     finally:
#         client.stop()

#     received = []
#     for r in receivers:
#         received.extend(r.receive(timeout=10))
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

"""
Shared helpers for the behave feature tests: resource provisioning,
environment configuration, and send/receive pumps.
"""

import os    # fixed: used by the config helpers but was never imported
import json  # fixed: used by receive_pump but was never imported
import uuid
import time
import asyncio


def create_mgmt_client(credentials, subscription, location='westus'):
    """
    Provision a fresh resource group and an EventHub namespace for a test run.

    :param credentials: Azure credentials object for the management clients.
    :param subscription: Azure subscription ID.
    :param location: Azure region for the resource group.
    :return: Tuple of (resource_group, EventHubManagementClient).
    """
    # Imported locally so this module can be loaded without azure-mgmt installed.
    from azure.mgmt.resource import ResourceManagementClient
    from azure.mgmt.eventhub import EventHubManagementClient

    resource_client = ResourceManagementClient(credentials, subscription)
    rg_name = 'pytest-{}'.format(uuid.uuid4())
    resource_group = resource_client.resource_groups.create_or_update(
        rg_name, {'location': location})

    eh_client = EventHubManagementClient(credentials, subscription)
    namespace = 'pytest-{}'.format(uuid.uuid4())
    creator = eh_client.namespaces.create_or_update(
        resource_group.name,
        namespace)
    creator.wait()  # fixed: was `create.wait()` — a NameError
    return resource_group, eh_client


def get_eventhub_config():
    """Read the standard EventHub test settings from environment variables."""
    config = {}
    config['hostname'] = os.environ['EVENT_HUB_HOSTNAME']
    config['event_hub'] = os.environ['EVENT_HUB_NAME']
    config['key_name'] = os.environ['EVENT_HUB_SAS_POLICY']
    config['access_key'] = os.environ['EVENT_HUB_SAS_KEY']
    config['consumer_group'] = "$Default"
    config['partition'] = "0"
    return config


def get_eventhub_100TU_config():
    """Read the 100-throughput-unit EventHub test settings from environment variables."""
    config = {}
    config['hostname'] = os.environ['EVENT_HUB_100TU_HOSTNAME']
    config['event_hub'] = os.environ['EVENT_HUB_100TU_NAME']
    config['key_name'] = os.environ['EVENT_HUB_100TU_SAS_POLICY']
    config['access_key'] = os.environ['EVENT_HUB_100TU_SAS_KEY']
    config['consumer_group'] = "$Default"
    config['partition'] = "0"
    return config


def send_constant_messages(sender, timeout, payload=1024):
    """
    Send fixed-size events synchronously until `timeout` seconds elapse.

    :return: Number of events sent.
    """
    from azure.eventhub import EventData  # fixed: EventData was never imported

    deadline = time.time() + timeout  # fixed: was `time.time()`, so the loop never ran
    total = 0
    while time.time() < deadline:
        data = EventData(body=b"D" * payload)
        sender.send(data)
        total += 1
    return total


def send_constant_async_messages(sender, timeout, batch_size=1, payload=1024):
    """
    Queue fixed-size events on an async sender until `timeout` seconds elapse.

    Merges the two previous (duplicate) definitions of this function: the first
    had working transfer/wait logic (but a NameError on `args.payload`), while
    the second — which shadowed it — had the intended signature but built
    events without ever sending them.

    :return: Number of events queued.
    """
    from azure.eventhub import EventData  # fixed: EventData was never imported

    def data_generator():
        # One batch worth of constant payloads.
        for _ in range(batch_size):
            yield b"D" * payload

    deadline = time.time() + timeout  # fixed: was `time.time()`
    total = 0
    while time.time() < deadline:
        if batch_size > 1:
            data = EventData(batch=data_generator())
        else:
            data = EventData(body=b"D" * payload)
        sender.transfer(data)  # fixed: shadowing copy never sent the event
        total += batch_size
        if total % 10000 == 0:
            sender.wait()
    return total


async def receive_pump(receiver, timeout, validation=True):
    """
    Receive events for `timeout` seconds, optionally validating that sequence
    numbers are contiguous and that each payload is valid JSON.

    :raises AssertionError: if validation fails.
    """
    total = 0
    deadline = time.time() + timeout
    sequence = 0
    offset = None
    while time.time() < deadline:
        batch = await receiver.receive(timeout=5)
        total += len(batch)
        if validation:
            assert receiver.offset
            for event in batch:
                next_sequence = event.sequence_number
                assert next_sequence > sequence, "Received Event with lower sequence number than previous."
                assert (next_sequence - sequence) == 1, "Sequence number skipped by a value greater than 1."
                sequence = next_sequence
                msg_data = b"".join([b for b in event.body]).decode('UTF-8')
                assert json.loads(msg_data), "Unable to deserialize Event data."
#!/usr/bin/env python

# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

"""
send test
"""

import argparse
import time
import os
import sys
import logging
from logging.handlers import RotatingFileHandler

from azure.eventhub import EventHubClient, Sender, EventData


def get_logger(filename, level=logging.INFO):
    """Wire console (and optional rotating-file) handlers onto the
    azure.eventhub and uamqp loggers; return the azure.eventhub logger."""
    azure_logger = logging.getLogger("azure.eventhub")
    azure_logger.setLevel(level)
    uamqp_logger = logging.getLogger("uamqp")
    uamqp_logger.setLevel(logging.INFO)

    log_format = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
    console = logging.StreamHandler(stream=sys.stdout)
    console.setFormatter(log_format)
    for target in (azure_logger, uamqp_logger):
        if not target.handlers:
            target.addHandler(console)

    if filename:
        rotating = RotatingFileHandler(filename, maxBytes=20*1024*1024, backupCount=3)
        rotating.setFormatter(log_format)
        azure_logger.addHandler(rotating)
        uamqp_logger.addHandler(rotating)

    return azure_logger


logger = get_logger("send_test.log", logging.INFO)


def check_send_successful(outcome, condition):
    """transfer() callback: report any non-zero send outcome."""
    if outcome.value != 0:
        print("Send failed {}".format(condition))


def main(client, args):
    """Send events (singly or batched) until args.duration seconds elapse."""
    sender = client.add_sender()
    client.run()
    deadline = time.time() + args.duration
    total = 0
    chunk = b"D" * args.payload  # payload is constant — build it once

    def data_generator():
        for _ in range(args.batch):
            yield chunk

    print("Sending batched messages" if args.batch > 1 else "Sending single messages")

    try:
        while time.time() < deadline:
            if args.batch > 1:
                data = EventData(batch=data_generator())
            else:
                data = EventData(body=chunk)
            sender.transfer(data, callback=check_send_successful)
            total += args.batch
            if total % 10000 == 0:
                sender.wait()
                print("Send total {}".format(total))
    except Exception as err:
        print("Send failed {}".format(err))
    finally:
        client.stop()
        print("Sent total {}".format(total))


def test_long_running_send(connection_str):
    """Parse CLI options, build the client and run the long-running send."""
    if sys.platform.startswith('darwin'):
        import pytest
        pytest.skip("Skipping on OSX")
    parser = argparse.ArgumentParser()
    parser.add_argument("--duration", help="Duration in seconds of the test", type=int, default=30)
    parser.add_argument("--payload", help="payload size", type=int, default=512)
    parser.add_argument("--batch", help="Number of events to send and wait", type=int, default=1)
    parser.add_argument("--conn-str", help="EventHub connection string", default=connection_str)
    parser.add_argument("--eventhub", help="Name of EventHub")
    parser.add_argument("--address", help="Address URI to the EventHub entity")
    parser.add_argument("--sas-policy", help="Name of the shared access policy to authenticate with")
    parser.add_argument("--sas-key", help="Shared access key")

    args, _ = parser.parse_known_args()
    if args.conn_str:
        client = EventHubClient.from_connection_string(
            args.conn_str,
            eventhub=args.eventhub)
    elif args.address:
        client = EventHubClient(
            args.address,
            username=args.sas_policy,
            password=args.sas_key)
    else:
        try:
            import pytest
            pytest.skip("Must specify either '--conn-str' or '--address'")
        except ImportError:
            raise ValueError("Must specify either '--conn-str' or '--address'")

    try:
        main(client, args)
    except KeyboardInterrupt:
        pass


if __name__ == '__main__':
    test_long_running_send(os.environ.get('EVENT_HUB_CONNECTION_STR'))
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# -----------------------------------------------------------------------------------

import logging
import asyncio
import sys
import os
import signal
import functools

from azure.eventprocessorhost import (
    AbstractEventProcessor,
    AzureStorageCheckpointLeaseManager,
    EventHubConfig,
    EventProcessorHost,
    EPHOptions)

import examples
logger = examples.get_logger(logging.INFO)


class EventProcessor(AbstractEventProcessor):
    """
    Example Implementation of AbstractEventProcessor
    """

    def __init__(self, params=None):
        """
        Init Event processor

        :param params: Optional initialization parameters forwarded by the host.
        """
        super().__init__(params)
        self._msg_counter = 0

    async def open_async(self, context):
        """
        Called by processor host to initialize the event processor.

        :param context: Information about the partition
        :type context: ~azure.eventprocessorhost.PartitionContext
        """
        logger.info("Connection established {}".format(context.partition_id))

    async def close_async(self, context, reason):
        """
        Called by processor host to indicate that the event processor is being stopped.

        :param context: Information about the partition
        :type context: ~azure.eventprocessorhost.PartitionContext
        :param reason: Why the pump is being shut down.
        """
        logger.info("Connection closed (reason {}, id {}, offset {}, sq_number {})".format(
            reason,
            context.partition_id,
            context.offset,
            context.sequence_number))

    async def process_events_async(self, context, messages):
        """
        Called by the processor host when a batch of events has arrived.
        This is where the real work of the event processor is done.

        :param context: Information about the partition
        :type context: ~azure.eventprocessorhost.PartitionContext
        :param messages: The events to be processed.
        :type messages: list[~azure.eventhub.common.EventData]
        """
        logger.info("Events processed {}".format(context.sequence_number))
        # Persist progress so a restarted host resumes after this batch.
        await context.checkpoint_async()

    async def process_error_async(self, context, error):
        """
        Called when the underlying client experiences an error while receiving.
        EventProcessorHost will take care of recovering from the error and
        continuing to pump messages, so no action is required here beyond
        logging. (Fixed: the original docstring sentence was truncated.)

        :param context: Information about the partition
        :type context: ~azure.eventprocessorhost.PartitionContext
        :param error: The error that occured.
        """
        logger.error("Event Processor Error {!r}".format(error))


async def wait_and_close(host):
    """
    Run EventProcessorHost for 1 minute then shutdown.
    (Fixed: docstring previously claimed 2 minutes, but the sleep is 60 seconds.)
    """
    await asyncio.sleep(60)
    await host.close_async()


try:
    loop = asyncio.get_event_loop()

    # Storage Account Credentials
    STORAGE_ACCOUNT_NAME = os.environ.get('AZURE_STORAGE_ACCOUNT')
    STORAGE_KEY = os.environ.get('AZURE_STORAGE_ACCESS_KEY')
    LEASE_CONTAINER_NAME = "leases"

    NAMESPACE = os.environ.get('EVENT_HUB_NAMESPACE')
    EVENTHUB = os.environ.get('EVENT_HUB_NAME')
    USER = os.environ.get('EVENT_HUB_SAS_POLICY')
    KEY = os.environ.get('EVENT_HUB_SAS_KEY')

    # Eventhub config and storage manager
    eh_config = EventHubConfig(NAMESPACE, EVENTHUB, USER, KEY, consumer_group="$default")
    eh_options = EPHOptions()
    eh_options.release_pump_on_timeout = True
    eh_options.debug_trace = False
    storage_manager = AzureStorageCheckpointLeaseManager(
        STORAGE_ACCOUNT_NAME, STORAGE_KEY, LEASE_CONTAINER_NAME)

    # Event loop and host
    host = EventProcessorHost(
        EventProcessor,
        eh_config,
        storage_manager,
        ep_params=["param1", "param2"],
        eph_options=eh_options,
        loop=loop)

    tasks = asyncio.gather(
        host.open_async(),
        wait_and_close(host))
    loop.run_until_complete(tasks)

except KeyboardInterrupt:
    # Canceling pending tasks and stopping the loop
    for task in asyncio.Task.all_tasks():
        task.cancel()
    loop.run_forever()
    tasks.exception()

finally:
    loop.stop()
#!/usr/bin/env python

# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

"""
receive test.
"""

import logging
import argparse
import time
import os
import sys

from logging.handlers import RotatingFileHandler

from azure.eventhub import Offset
from azure.eventhub import EventHubClient


def get_logger(filename, level=logging.INFO):
    """Wire console (and optional rotating-file) handlers onto the
    azure.eventhub and uamqp loggers; return the azure.eventhub logger."""
    azure_logger = logging.getLogger("azure.eventhub")
    azure_logger.setLevel(level)
    uamqp_logger = logging.getLogger("uamqp")
    uamqp_logger.setLevel(logging.INFO)

    log_format = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
    console = logging.StreamHandler(stream=sys.stdout)
    console.setFormatter(log_format)
    for target in (azure_logger, uamqp_logger):
        if not target.handlers:
            target.addHandler(console)

    if filename:
        rotating = RotatingFileHandler(filename, maxBytes=20*1024*1024, backupCount=3)
        rotating.setFormatter(log_format)
        azure_logger.addHandler(rotating)
        uamqp_logger.addHandler(rotating)

    return azure_logger


logger = get_logger("recv_test.log", logging.INFO)


def get_partitions(args):
    """Return the Event Hub's partition ID list.

    NOTE(review): despite the name, `args` is the EventHubClient instance —
    callers pass the client positionally.
    """
    eh_data = args.get_eventhub_info()
    return eh_data["partition_ids"]


def pump(receivers, duration):
    """Round-robin over `receivers` (dict of partition id -> receiver) for
    `duration` seconds, printing periodic progress."""
    received_total = 0
    ticks = 0
    deadline = time.time() + duration
    try:
        while time.time() < deadline:
            for pid, partition_receiver in receivers.items():
                batch = partition_receiver.receive(timeout=5)
                batch_count = len(batch)
                received_total += batch_count
                ticks += 1
                if batch_count == 0:
                    print("{}: No events received, queue size {}, delivered {}".format(
                        pid,
                        partition_receiver.queue_size,
                        received_total))
                elif ticks >= 50:
                    ticks = 0
                    print("{}: total received {}, last sn={}, last offset={}".format(
                        pid,
                        received_total,
                        batch[-1].sequence_number,
                        batch[-1].offset.value))
        print("Total received {}".format(received_total))
    except Exception as err:
        print("Receiver failed: {}".format(err))
        raise


def test_long_running_receive(connection_str):
    """Parse CLI options, build the client and run the receive pump."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--duration", help="Duration in seconds of the test", type=int, default=30)
    parser.add_argument("--consumer", help="Consumer group name", default="$default")
    parser.add_argument("--partitions", help="Comma seperated partition IDs")
    parser.add_argument("--offset", help="Starting offset", default="-1")
    parser.add_argument("--conn-str", help="EventHub connection string", default=connection_str)
    parser.add_argument("--eventhub", help="Name of EventHub")
    parser.add_argument("--address", help="Address URI to the EventHub entity")
    parser.add_argument("--sas-policy", help="Name of the shared access policy to authenticate with")
    parser.add_argument("--sas-key", help="Shared access key")

    args, _ = parser.parse_known_args()
    if args.conn_str:
        client = EventHubClient.from_connection_string(
            args.conn_str,
            eventhub=args.eventhub, debug=False)
    elif args.address:
        client = EventHubClient(
            args.address,
            username=args.sas_policy,
            password=args.sas_key)
    else:
        try:
            import pytest
            pytest.skip("Must specify either '--conn-str' or '--address'")
        except ImportError:
            raise ValueError("Must specify either '--conn-str' or '--address'")

    try:
        partitions = args.partitions.split(",") if args.partitions else get_partitions(client)
        pumps = {}
        for pid in partitions:
            pumps[pid] = client.add_receiver(
                consumer_group=args.consumer,
                partition=pid,
                offset=Offset(args.offset),
                prefetch=50)
        client.run()
        pump(pumps, args.duration)
    finally:
        client.stop()


if __name__ == '__main__':
    test_long_running_receive(os.environ.get('EVENT_HUB_CONNECTION_STR'))
test_long_running_receive(os.environ.get('EVENT_HUB_CONNECTION_STR')) 131 | -------------------------------------------------------------------------------- /HISTORY.rst: -------------------------------------------------------------------------------- 1 | .. :changelog: 2 | 3 | Release History 4 | =============== 5 | 6 | 1.3.1 (2019-02-28) 7 | ++++++++++++++++++ 8 | 9 | **BugFixes** 10 | 11 | - Fixed bug where datetime offset filter was using a local timestamp rather than UTC. 12 | - Fixed stackoverflow error in continuous connection reconnect attempts. 13 | 14 | 15 | 1.3.0 (2019-01-29) 16 | ++++++++++++++++++ 17 | 18 | **Bugfixes** 19 | 20 | - Added support for auto reconnect on token expiration and other auth errors (issue #89). 21 | 22 | **Features** 23 | 24 | - Added ability to create ServiceBusClient from an existing SAS auth token, including 25 | provding a function to auto-renew that token on expiry. 26 | - Added support for storing a custom EPH context value in checkpoint (PR #84, thanks @konstantinmiller) 27 | 28 | 29 | 1.2.0 (2018-11-29) 30 | ++++++++++++++++++ 31 | 32 | - Support for Python 2.7 in azure.eventhub module (azure.eventprocessorhost will not support Python 2.7). 33 | - Parse EventData.enqueued_time as a UTC timestamp (issue #72, thanks @vjrantal) 34 | 35 | 36 | 1.1.1 (2018-10-03) 37 | ++++++++++++++++++ 38 | 39 | - Fixed bug in Azure namespace package. 40 | 41 | 42 | 1.1.0 (2018-09-21) 43 | ++++++++++++++++++ 44 | 45 | - Changes to `AzureStorageCheckpointLeaseManager` parameters to support other connection options (issue #61): 46 | 47 | - The `storage_account_name`, `storage_account_key` and `lease_container_name` arguments are now optional keyword arguments. 48 | - Added a `sas_token` argument that must be specified with `storage_account_name` in place of `storage_account_key`. 49 | - Added an `endpoint_suffix` argument to support storage endpoints in National Clouds. 
50 | - Added a `connection_string` argument that, if specified, overrides all other endpoint arguments. 51 | - The `lease_container_name` argument now defaults to `"eph-leases"` if not specified. 52 | 53 | - Fix for clients failing to start if run called multiple times (issue #64). 54 | - Added convenience methods `body_as_str` and `body_as_json` to EventData object for easier processing of message data. 55 | 56 | 57 | 1.0.0 (2018-08-22) 58 | ++++++++++++++++++ 59 | 60 | - API stable. 61 | - Renamed internal `_async` module to `async_ops` for docs generation. 62 | - Added optional `auth_timeout` parameter to `EventHubClient` and `EventHubClientAsync` to configure how long to allow for token 63 | negotiation to complete. Default is 60 seconds. 64 | - Added optional `send_timeout` parameter to `EventHubClient.add_sender` and `EventHubClientAsync.add_async_sender` to determine the 65 | timeout for Events to be successfully sent. Default value is 60 seconds. 66 | - Reformatted logging for performance. 67 | 68 | 69 | 0.2.0 (2018-08-06) 70 | ++++++++++++++++++ 71 | 72 | - Stability improvements for EPH. 73 | - Updated uAMQP version. 74 | - Added new configuration options for Sender and Receiver; `keep_alive` and `auto_reconnect`. 75 | These flags have been added to the following: 76 | 77 | - `EventHubClient.add_receiver` 78 | - `EventHubClient.add_sender` 79 | - `EventHubClientAsync.add_async_receiver` 80 | - `EventHubClientAsync.add_async_sender` 81 | - `EPHOptions.keep_alive_interval` 82 | - `EPHOptions.auto_reconnect_on_error` 83 | 84 | 85 | 0.2.0rc2 (2018-07-29) 86 | +++++++++++++++++++++ 87 | 88 | - **Breaking change** `EventData.offset` will now return an object of type `~uamqp.common.Offset` rather than str. 89 | The original string value can be retrieved from `~uamqp.common.Offset.value`. 90 | - Each sender/receiver will now run in its own independent connection.
91 | - Updated uAMQP dependency to 0.2.0 92 | - Fixed issue with IoTHub clients not being able to retrieve partition information. 93 | - Added support for HTTP proxy settings to both EventHubClient and EPH. 94 | - Added error handling policy to automatically reconnect on retryable error. 95 | - Added keep-alive thread for maintaining an unused connection. 96 | 97 | 98 | 0.2.0rc1 (2018-07-06) 99 | +++++++++++++++++++++ 100 | 101 | - **Breaking change** Restructured library to support Python 3.7. Submodule `async` has been renamed and all classes from 102 | this module can now be imported from azure.eventhub directly. 103 | - **Breaking change** Removed optional `callback` argument from `Receiver.receive` and `AsyncReceiver.receive`. 104 | - **Breaking change** `EventData.properties` has been renamed to `EventData.application_properties`. 105 | This removes the potential for messages to be processed via callback but not yet returned 106 | in the batch. 107 | - Updated uAMQP dependency to v0.1.0 108 | - Added support for constructing IoTHub connections. 109 | - Fixed memory leak in receive operations. 110 | - Dropped Python 2.7 wheel support. 111 | 112 | 113 | 0.2.0b2 (2018-05-29) 114 | ++++++++++++++++++++ 115 | 116 | - Added `namespace_suffix` to EventHubConfig() to support national clouds. 117 | - Added `device_id` attribute to EventData to support IoT Hub use cases. 118 | - Added message header to workaround service bug for PartitionKey support. 119 | - Updated uAMQP dependency to vRC1. 120 | 121 | 122 | 0.2.0b1 (2018-04-20) 123 | ++++++++++++++++++++ 124 | 125 | - Updated uAMQP to latest version. 126 | - Further testing and minor bug fixes. 127 | 128 | 129 | 0.2.0a2 (2018-04-02) 130 | ++++++++++++++++++++ 131 | 132 | - Updated uAMQP dependency. 133 | 134 | 135 | 0.2.0a1 (unreleased) 136 | ++++++++++++++++++++ 137 | 138 | - Swapped out Proton dependency for uAMQP.
-------------------------------------------------------------------------------- /tests/asynctests/test_longrunning_receive_async.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # -------------------------------------------------------------------------------------------- 4 | # Copyright (c) Microsoft Corporation. All rights reserved. 5 | # Licensed under the MIT License. See License.txt in the project root for license information. 6 | # -------------------------------------------------------------------------------------------- 7 | 8 | """ 9 | receive test. 10 | """ 11 | 12 | import logging 13 | import asyncio 14 | import argparse 15 | import time 16 | import os 17 | import sys 18 | from logging.handlers import RotatingFileHandler 19 | 20 | from azure.eventhub import Offset 21 | from azure.eventhub import EventHubClientAsync 22 | 23 | 24 | def get_logger(filename, level=logging.INFO): 25 | azure_logger = logging.getLogger("azure.eventhub") 26 | azure_logger.setLevel(level) 27 | uamqp_logger = logging.getLogger("uamqp") 28 | uamqp_logger.setLevel(logging.INFO) 29 | 30 | formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s') 31 | console_handler = logging.StreamHandler(stream=sys.stdout) 32 | console_handler.setFormatter(formatter) 33 | if not azure_logger.handlers: 34 | azure_logger.addHandler(console_handler) 35 | if not uamqp_logger.handlers: 36 | uamqp_logger.addHandler(console_handler) 37 | 38 | if filename: 39 | file_handler = RotatingFileHandler(filename, maxBytes=20*1024*1024, backupCount=3) 40 | file_handler.setFormatter(formatter) 41 | azure_logger.addHandler(file_handler) 42 | uamqp_logger.addHandler(file_handler) 43 | 44 | return azure_logger 45 | 46 | logger = get_logger("recv_test_async.log", logging.INFO) 47 | 48 | 49 | async def get_partitions(client): 50 | eh_data = await client.get_eventhub_info_async() 51 | return eh_data["partition_ids"] 52 | 53 | 
54 | async def pump(_pid, receiver, _args, _dl): 55 | total = 0 56 | iteration = 0 57 | deadline = time.time() + _dl 58 | try: 59 | while time.time() < deadline: 60 | batch = await receiver.receive(timeout=1) 61 | size = len(batch) 62 | total += size 63 | iteration += 1 64 | if size == 0: 65 | print("{}: No events received, queue size {}, delivered {}".format( 66 | _pid, 67 | receiver.queue_size, 68 | total)) 69 | elif iteration >= 5: 70 | iteration = 0 71 | print("{}: total received {}, last sn={}, last offset={}".format( 72 | _pid, 73 | total, 74 | batch[-1].sequence_number, 75 | batch[-1].offset.value)) 76 | print("{}: total received {}".format( 77 | _pid, 78 | total)) 79 | except Exception as e: 80 | print("Partition {} receiver failed: {}".format(_pid, e)) 81 | raise 82 | 83 | 84 | def test_long_running_receive_async(connection_str): 85 | parser = argparse.ArgumentParser() 86 | parser.add_argument("--duration", help="Duration in seconds of the test", type=int, default=30) 87 | parser.add_argument("--consumer", help="Consumer group name", default="$default") 88 | parser.add_argument("--partitions", help="Comma seperated partition IDs") 89 | parser.add_argument("--offset", help="Starting offset", default="-1") 90 | parser.add_argument("--conn-str", help="EventHub connection string", default=connection_str) 91 | parser.add_argument("--eventhub", help="Name of EventHub") 92 | parser.add_argument("--address", help="Address URI to the EventHub entity") 93 | parser.add_argument("--sas-policy", help="Name of the shared access policy to authenticate with") 94 | parser.add_argument("--sas-key", help="Shared access key") 95 | 96 | loop = asyncio.get_event_loop() 97 | args, _ = parser.parse_known_args() 98 | if args.conn_str: 99 | client = EventHubClientAsync.from_connection_string( 100 | args.conn_str, 101 | eventhub=args.eventhub, auth_timeout=240, debug=False) 102 | elif args.address: 103 | client = EventHubClientAsync( 104 | args.address, 105 | auth_timeout=240, 106 | 
username=args.sas_policy, 107 | password=args.sas_key) 108 | else: 109 | try: 110 | import pytest 111 | pytest.skip("Must specify either '--conn-str' or '--address'") 112 | except ImportError: 113 | raise ValueError("Must specify either '--conn-str' or '--address'") 114 | 115 | try: 116 | if not args.partitions: 117 | partitions = loop.run_until_complete(get_partitions(client)) 118 | else: 119 | partitions = args.partitions.split(",") 120 | pumps = [] 121 | for pid in partitions: 122 | receiver = client.add_async_receiver( 123 | consumer_group=args.consumer, 124 | partition=pid, 125 | offset=Offset(args.offset), 126 | prefetch=50) 127 | pumps.append(pump(pid, receiver, args, args.duration)) 128 | loop.run_until_complete(client.run_async()) 129 | loop.run_until_complete(asyncio.gather(*pumps)) 130 | finally: 131 | loop.run_until_complete(client.stop_async()) 132 | 133 | 134 | if __name__ == '__main__': 135 | test_long_running_receive_async(os.environ.get('EVENT_HUB_CONNECTION_STR')) 136 | -------------------------------------------------------------------------------- /azure/eventprocessorhost/eph.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 4 | # ----------------------------------------------------------------------------------- 5 | 6 | import uuid 7 | import asyncio 8 | from azure.eventprocessorhost.partition_manager import PartitionManager 9 | 10 | 11 | class EventProcessorHost: 12 | """ 13 | Represents a host for processing Event Hubs event data at scale. 14 | Takes in an event hub, a event processor class definition, a config object, 15 | as well as a storage manager and optional event processor params (ep_params). 
16 | """ 17 | 18 | def __init__(self, event_processor, eh_config, storage_manager, ep_params=None, eph_options=None, loop=None): 19 | """ 20 | Initialize EventProcessorHost. 21 | 22 | :param event_processor: The event processing handler. 23 | :type event_processor: ~azure.eventprocessorhost.abstract_event_processor.AbstractEventProcessor 24 | :param eh_config: The EPH connection configuration. 25 | :type eh_config: ~azure.eventprocessorhost.eh_config.EventHubConfig 26 | :param storage_manager: The Azure storage manager for persisting lease and 27 | checkpoint information. 28 | :type storage_manager: 29 | ~azure.eventprocessorhost.azure_storage_checkpoint_manager.AzureStorageCheckpointLeaseManager 30 | :param ep_params: Optional arbitrary parameters to be passed into the event_processor 31 | on initialization. 32 | :type ep_params: list 33 | :param eph_options: EPH configuration options. 34 | :type eph_options: ~azure.eventprocessorhost.eph.EPHOptions 35 | :param loop: An eventloop. If not provided the default asyncio event loop will be used. 36 | """ 37 | self.event_processor = event_processor 38 | self.event_processor_params = ep_params 39 | self.eh_config = eh_config 40 | self.guid = str(uuid.uuid4()) 41 | self.host_name = "host" + str(self.guid) 42 | self.loop = loop or asyncio.get_event_loop() 43 | self.eph_options = eph_options or EPHOptions() 44 | self.partition_manager = PartitionManager(self) 45 | self.storage_manager = storage_manager 46 | if self.storage_manager: 47 | self.storage_manager.initialize(self) 48 | 49 | async def open_async(self): 50 | """ 51 | Starts the host. 52 | """ 53 | if not self.loop: 54 | self.loop = asyncio.get_event_loop() 55 | await self.partition_manager.start_async() 56 | 57 | async def close_async(self): 58 | """ 59 | Stops the host. 60 | """ 61 | await self.partition_manager.stop_async() 62 | 63 | 64 | class EPHOptions: 65 | """ 66 | Class that contains default and overidable EPH option. 
67 | 68 | :ivar max_batch_size: The maximum number of events retrieved for processing 69 | at a time. This value must be less than or equal to the prefetch count. The actual 70 | number of events returned for processing may be any number up to the maximum. 71 | The default value is 10. 72 | :vartype max_batch_size: int 73 | :ivar prefetch_count: The number of events to fetch from the service in advance of 74 | processing. The default value is 300. 75 | :vartype prefetch_count: int 76 | :ivar receive_timeout: The length of time a single partition receiver will wait in 77 | order to receive a batch of events. Default is 60 seconds. 78 | :vartype receive_timeout: int 79 | :ivar release_pump_on_timeout: Whether to shutdown an individual partition receiver if 80 | no events were received in the specified timeout. Shutting down the pump will release 81 | the lease to allow it to be picked up by another host. Default is False. 82 | :vartype release_pump_on_timeout: bool 83 | :ivar initial_offset_provider: The initial event offset to receive from if no persisted 84 | offset is found. Default is "-1" (i.e. from the first event available). 85 | :vartype initial_offset_provider: str 86 | :ivar debug_trace: Whether to emit the network traffic in the logs. In order to view 87 | these events the logger must be configured to track "uamqp". Default is False. 88 | :vartype debug_trace: bool 89 | :ivar http_proxy: HTTP proxy configuration. This should be a dictionary with 90 | the following keys present: 'proxy_hostname' and 'proxy_port'. Additional optional 91 | keys are 'username' and 'password'. 92 | :vartype http_proxy: dict 93 | :ivar keep_alive_interval: The time in seconds between asynchronously pinging a receiver 94 | connection to keep it alive during inactivity. Default is None - i.e. no connection pinging. 
95 | :vartype keep_alive_interval: int 96 | :ivar auto_reconnect_on_error: Whether to automatically attempt to reconnect a receiver 97 | connection if it is detach from the service with a retryable error. Default is True. 98 | :vartype auto_reconnect_on_error: bool 99 | """ 100 | 101 | def __init__(self): 102 | self.max_batch_size = 10 103 | self.prefetch_count = 300 104 | self.receive_timeout = 60 105 | self.release_pump_on_timeout = False 106 | self.initial_offset_provider = "-1" 107 | self.debug_trace = False 108 | self.http_proxy = None 109 | self.keep_alive_interval = None 110 | self.auto_reconnect_on_error = True 111 | -------------------------------------------------------------------------------- /tests/asynctests/test_longrunning_send_async.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | send test 5 | """ 6 | 7 | import logging 8 | import argparse 9 | import time 10 | import os 11 | import asyncio 12 | import sys 13 | from logging.handlers import RotatingFileHandler 14 | 15 | from azure.eventhub import EventHubClientAsync, EventData 16 | 17 | 18 | def get_logger(filename, level=logging.INFO): 19 | azure_logger = logging.getLogger("azure.eventhub") 20 | azure_logger.setLevel(level) 21 | uamqp_logger = logging.getLogger("uamqp") 22 | uamqp_logger.setLevel(logging.INFO) 23 | 24 | formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s') 25 | console_handler = logging.StreamHandler(stream=sys.stdout) 26 | console_handler.setFormatter(formatter) 27 | if not azure_logger.handlers: 28 | azure_logger.addHandler(console_handler) 29 | if not uamqp_logger.handlers: 30 | uamqp_logger.addHandler(console_handler) 31 | 32 | if filename: 33 | file_handler = RotatingFileHandler(filename, maxBytes=20*1024*1024, backupCount=3) 34 | file_handler.setFormatter(formatter) 35 | azure_logger.addHandler(file_handler) 36 | uamqp_logger.addHandler(file_handler) 37 | 38 | return 
azure_logger 39 | 40 | logger = get_logger("send_test_async.log", logging.INFO) 41 | 42 | 43 | def check_send_successful(outcome, condition): 44 | if outcome.value != 0: 45 | print("Send failed {}".format(condition)) 46 | 47 | 48 | async def get_partitions(args): 49 | eh_data = await args.get_eventhub_info_async() 50 | return eh_data["partition_ids"] 51 | 52 | 53 | async def pump(pid, sender, args, duration): 54 | deadline = time.time() + duration 55 | total = 0 56 | 57 | def data_generator(): 58 | for i in range(args.batch): 59 | yield b"D" * args.payload 60 | 61 | if args.batch > 1: 62 | logger.info("{}: Sending batched messages".format(pid)) 63 | else: 64 | logger.info("{}: Sending single messages".format(pid)) 65 | 66 | try: 67 | while time.time() < deadline: 68 | if args.batch > 1: 69 | data = EventData(batch=data_generator()) 70 | else: 71 | data = EventData(body=b"D" * args.payload) 72 | sender.transfer(data, callback=check_send_successful) 73 | total += args.batch 74 | if total % 100 == 0: 75 | await sender.wait_async() 76 | logger.info("{}: Send total {}".format(pid, total)) 77 | except Exception as err: 78 | logger.error("{}: Send failed {}".format(pid, err)) 79 | raise 80 | print("{}: Final Sent total {}".format(pid, total)) 81 | 82 | 83 | def test_long_running_partition_send_async(connection_str): 84 | parser = argparse.ArgumentParser() 85 | parser.add_argument("--duration", help="Duration in seconds of the test", type=int, default=30) 86 | parser.add_argument("--payload", help="payload size", type=int, default=1024) 87 | parser.add_argument("--batch", help="Number of events to send and wait", type=int, default=200) 88 | parser.add_argument("--partitions", help="Comma seperated partition IDs") 89 | parser.add_argument("--conn-str", help="EventHub connection string", default=connection_str) 90 | parser.add_argument("--eventhub", help="Name of EventHub") 91 | parser.add_argument("--address", help="Address URI to the EventHub entity") 92 | 
parser.add_argument("--sas-policy", help="Name of the shared access policy to authenticate with") 93 | parser.add_argument("--sas-key", help="Shared access key") 94 | parser.add_argument("--logger-name", help="Unique log file ID") 95 | 96 | loop = asyncio.get_event_loop() 97 | args, _ = parser.parse_known_args() 98 | 99 | if args.conn_str: 100 | client = EventHubClientAsync.from_connection_string( 101 | args.conn_str, 102 | eventhub=args.eventhub, debug=True) 103 | elif args.address: 104 | client = EventHubClientAsync( 105 | args.address, 106 | username=args.sas_policy, 107 | password=args.sas_key, 108 | auth_timeout=500) 109 | else: 110 | try: 111 | import pytest 112 | pytest.skip("Must specify either '--conn-str' or '--address'") 113 | except ImportError: 114 | raise ValueError("Must specify either '--conn-str' or '--address'") 115 | 116 | try: 117 | if not args.partitions: 118 | partitions = loop.run_until_complete(get_partitions(client)) 119 | else: 120 | pid_range = args.partitions.split("-") 121 | if len(pid_range) > 1: 122 | partitions = [str(i) for i in range(int(pid_range[0]), int(pid_range[1]) + 1)] 123 | else: 124 | partitions = args.partitions.split(",") 125 | pumps = [] 126 | for pid in partitions: 127 | sender = client.add_async_sender(partition=pid, send_timeout=0, keep_alive=False) 128 | pumps.append(pump(pid, sender, args, args.duration)) 129 | loop.run_until_complete(client.run_async()) 130 | results = loop.run_until_complete(asyncio.gather(*pumps, return_exceptions=True)) 131 | assert not results 132 | except Exception as e: 133 | logger.error("Sender failed: {}".format(e)) 134 | finally: 135 | logger.info("Shutting down sender") 136 | loop.run_until_complete(client.stop_async()) 137 | 138 | if __name__ == '__main__': 139 | test_long_running_partition_send_async(os.environ.get('EVENT_HUB_CONNECTION_STR')) 140 | -------------------------------------------------------------------------------- /tests/asynctests/test_checkpoint_manager.py: 
-------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 4 | # ----------------------------------------------------------------------------------- 5 | 6 | import asyncio 7 | import base64 8 | import pytest 9 | import time 10 | import json 11 | from azure.common import AzureException 12 | 13 | 14 | def test_create_store(storage_clm): 15 | """ 16 | Test the store is created correctly if not exists 17 | """ 18 | loop = asyncio.get_event_loop() 19 | loop.run_until_complete(storage_clm.create_checkpoint_store_if_not_exists_async()) 20 | 21 | 22 | def test_create_lease(storage_clm): 23 | """ 24 | Test lease creation 25 | """ 26 | 27 | loop = asyncio.get_event_loop() 28 | loop.run_until_complete(storage_clm.create_checkpoint_store_if_not_exists_async()) 29 | loop.run_until_complete(storage_clm.create_lease_if_not_exists_async("1")) 30 | 31 | 32 | def test_get_lease(storage_clm): 33 | """ 34 | Test get lease 35 | """ 36 | loop = asyncio.get_event_loop() 37 | loop.run_until_complete(storage_clm.get_lease_async("1")) 38 | 39 | 40 | def test_aquire_renew_release_lease(storage_clm): 41 | """ 42 | Test aquire lease 43 | """ 44 | loop = asyncio.get_event_loop() 45 | lease = loop.run_until_complete(storage_clm.get_lease_async("1")) 46 | assert lease is None 47 | loop.run_until_complete(storage_clm.create_lease_if_not_exists_async("1")) 48 | lease = loop.run_until_complete(storage_clm.get_lease_async("1")) 49 | loop.run_until_complete(storage_clm.acquire_lease_async(lease)) 50 | loop.run_until_complete(storage_clm.renew_lease_async(lease)) 51 | loop.run_until_complete(storage_clm.release_lease_async(lease)) 52 | assert lease.partition_id == "1" 53 | assert lease.epoch == 1 54 | assert 
loop.run_until_complete(lease.state()) == "available" 55 | 56 | 57 | def test_delete_lease(storage_clm): 58 | """ 59 | Test delete lease 60 | """ 61 | loop = asyncio.get_event_loop() 62 | lease = loop.run_until_complete(storage_clm.get_lease_async("1")) 63 | assert lease is None 64 | loop.run_until_complete(storage_clm.create_lease_if_not_exists_async("1")) 65 | lease = loop.run_until_complete(storage_clm.get_lease_async("1")) 66 | loop.run_until_complete(storage_clm.delete_lease_async(lease)) 67 | lease = loop.run_until_complete(storage_clm.get_lease_async("1")) 68 | assert lease == None 69 | 70 | 71 | def test_checkpointing(storage_clm): 72 | """ 73 | Test checkpointing 74 | """ 75 | loop = asyncio.get_event_loop() 76 | local_checkpoint = loop.run_until_complete(storage_clm.create_checkpoint_if_not_exists_async("1")) 77 | assert local_checkpoint.partition_id == "1" 78 | assert local_checkpoint.offset == "-1" 79 | lease = loop.run_until_complete(storage_clm.get_lease_async("1")) 80 | loop.run_until_complete(storage_clm.acquire_lease_async(lease)) 81 | 82 | # Test EPH context encoded as bytes 83 | event_processor_context = {'some_string_data': 'abc', 'some_int_data': 123, 'a_list': [42]} 84 | cloud_event_processor_context_asbytes = json.dumps(event_processor_context).encode('utf-8') 85 | lease.event_processor_context = base64.b64encode(cloud_event_processor_context_asbytes).decode('ascii') 86 | loop.run_until_complete(storage_clm.update_checkpoint_async(lease, local_checkpoint)) 87 | 88 | cloud_lease = loop.run_until_complete(storage_clm.get_lease_async("1")) 89 | cloud_event_processor_context_asbytes = cloud_lease.event_processor_context.encode('ascii') 90 | event_processor_context_decoded = base64.b64decode(cloud_event_processor_context_asbytes).decode('utf-8') 91 | cloud_event_processor_context = json.loads(event_processor_context_decoded) 92 | assert cloud_event_processor_context['some_string_data'] == 'abc' 93 | assert 
cloud_event_processor_context['some_int_data'] == 123 94 | assert cloud_event_processor_context['a_list'] == [42] 95 | 96 | # Test EPH context as JSON object 97 | lease.event_processor_context = {'some_string_data': 'abc', 'some_int_data': 123, 'a_list': [42]} 98 | loop.run_until_complete(storage_clm.update_checkpoint_async(lease, local_checkpoint)) 99 | 100 | cloud_lease = loop.run_until_complete(storage_clm.get_lease_async("1")) 101 | assert cloud_lease.event_processor_context['some_string_data'] == 'abc' 102 | assert cloud_lease.event_processor_context['some_int_data'] == 123 103 | assert cloud_lease.event_processor_context['a_list'] == [42] 104 | 105 | cloud_checkpoint = loop.run_until_complete(storage_clm.get_checkpoint_async("1")) 106 | lease.offset = cloud_checkpoint.offset 107 | lease.sequence_number = cloud_checkpoint.sequence_number 108 | lease.event_processor_context = None 109 | assert cloud_checkpoint.partition_id == "1" 110 | assert cloud_checkpoint.offset == "-1" 111 | modify_checkpoint = cloud_checkpoint 112 | modify_checkpoint.offset = "512" 113 | modify_checkpoint.sequence_number = "32" 114 | time.sleep(35) 115 | loop.run_until_complete(storage_clm.update_checkpoint_async(lease, modify_checkpoint)) 116 | cloud_lease = loop.run_until_complete(storage_clm.get_lease_async("1")) 117 | assert cloud_lease.event_processor_context is None 118 | 119 | cloud_checkpoint = loop.run_until_complete(storage_clm.get_checkpoint_async("1")) 120 | assert cloud_checkpoint.partition_id == "1" 121 | assert cloud_checkpoint.offset == "512" 122 | loop.run_until_complete(storage_clm.release_lease_async(lease)) 123 | -------------------------------------------------------------------------------- /azure/eventprocessorhost/abstract_lease_manager.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. 
All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 4 | # ----------------------------------------------------------------------------------- 5 | 6 | """ 7 | Author: Aaron (Ari) Bornstien 8 | """ 9 | from abc import ABC, abstractmethod 10 | 11 | class AbstractLeaseManager(ABC): 12 | """ 13 | If you wish to have EventProcessorHost store leases somewhere other than Azure Storage, 14 | you can write your own lease manager using this abstract class. The Azure Storage managers 15 | use the same storage for both lease and checkpoints, so both interfaces are implemented by 16 | the same class.You are free to do the same thing if you have a unified store for both 17 | types of data. 18 | """ 19 | 20 | def __init__(self, lease_renew_interval, lease_duration): 21 | self.lease_renew_interval = lease_renew_interval 22 | self.lease_duration = lease_duration 23 | 24 | @abstractmethod 25 | async def create_lease_store_if_not_exists_async(self): 26 | """ 27 | Create the lease store if it does not exist, do nothing if it does exist. 28 | 29 | :return: `True` if the lease store already exists or was created successfully, `False` if not. 30 | :rtype: bool 31 | """ 32 | 33 | @abstractmethod 34 | async def delete_lease_store_async(self): 35 | """ 36 | Not used by EventProcessorHost, but a convenient function to have for testing. 37 | 38 | :return: `True` if the lease store was deleted successfully, `False` if not. 39 | :rtype: bool 40 | """ 41 | 42 | async def get_lease_async(self, partition_id): 43 | """ 44 | Return the lease info for the specified partition. 45 | Can return null if no lease has been created in the store for the specified partition. 46 | 47 | :param partition_id: The ID of a given partition. 48 | :type parition_id: str 49 | :return: Lease info for the partition, or `None`. 
50 | :rtype: 51 | """ 52 | 53 | @abstractmethod 54 | def get_all_leases(self): 55 | """ 56 | Return the lease info for all partitions. 57 | A typical implementation could just call get_lease_async() on all partitions. 58 | 59 | :return: A list of lease info. 60 | :rtype: 61 | """ 62 | 63 | @abstractmethod 64 | async def create_lease_if_not_exists_async(self, partition_id): 65 | """ 66 | Create in the store the lease info for the given partition, if it does not exist. 67 | Do nothing if it does exist in the store already. 68 | 69 | :param partition_id: The ID of a given partition. 70 | :type parition_id: str 71 | :return: The existing or newly-created lease info for the partition. 72 | """ 73 | 74 | @abstractmethod 75 | async def delete_lease_async(self, lease): 76 | """ 77 | Delete the lease info for the given partition from the store. 78 | If there is no stored lease for the given partition, that is treated as success. 79 | 80 | :param lease: The lease to be deleted. 81 | :type lease: ~azure.eventprocessorhost.lease.Lease 82 | """ 83 | 84 | @abstractmethod 85 | async def acquire_lease_async(self, lease): 86 | """ 87 | Acquire the lease on the desired partition for this EventProcessorHost. 88 | Note that it is legal to acquire a lease that is already owned by another host. 89 | Lease-stealing is how partitions are redistributed when additional hosts are started. 90 | 91 | :param lease: The lease to be acquired. 92 | :type lease: ~azure.eventprocessorhost.lease.Lease 93 | :return: `True` if the lease was acquired successfully, `False` if not. 94 | :rtype: bool 95 | """ 96 | 97 | @abstractmethod 98 | async def renew_lease_async(self, lease): 99 | """ 100 | Renew a lease currently held by this host. 101 | If the lease has been stolen, or expired, or released, it is not possible to renew it. 102 | You will have to call get_lease_async() and then acquire_lease_async() again. 103 | 104 | :param lease: The lease to be renewed. 
105 | :type lease: ~azure.eventprocessorhost.lease.Lease 106 | :return: `True` if the lease was renewed successfully, `False` if not. 107 | :rtype: bool 108 | """ 109 | 110 | @abstractmethod 111 | async def release_lease_async(self, lease): 112 | """ 113 | Give up a lease currently held by this host. If the lease has been stolen, or expired, 114 | releasing it is unnecessary, and will fail if attempted. 115 | 116 | :param lease: The lease to be released. 117 | :type lease: ~azure.eventprocessorhost.lease.Lease 118 | :return: `True` if the lease was released successfully, `False` if not. 119 | :rtype: bool 120 | """ 121 | 122 | @abstractmethod 123 | async def update_lease_async(self, lease): 124 | """ 125 | Update the store with the information in the provided lease. It is necessary to currently 126 | hold a lease in order to update it. If the lease has been stolen, or expired, or released, 127 | it cannot be updated. Updating should renew the lease before performing the update to 128 | avoid lease expiration during the process. 129 | 130 | :param lease: The lease to be updated. 131 | :type lease: ~azure.eventprocessorhost.lease.Lease 132 | :return: `True` if the updated was performed successfully, `False` if not. 133 | :rtype: bool 134 | """ 135 | -------------------------------------------------------------------------------- /azure/eventprocessorhost/partition_pump.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 
4 | # --------------------- 5 | 6 | from abc import abstractmethod 7 | import logging 8 | import asyncio 9 | from azure.eventprocessorhost.partition_context import PartitionContext 10 | 11 | 12 | _logger = logging.getLogger(__name__) 13 | 14 | 15 | class PartitionPump(): 16 | """ 17 | Manages individual connection to a given partition. 18 | """ 19 | 20 | def __init__(self, host, lease): 21 | self.host = host 22 | self.lease = lease 23 | self.pump_status = "Uninitialized" 24 | self.partition_context = None 25 | self.processor = None 26 | self.loop = None 27 | 28 | def run(self): 29 | """ 30 | Makes pump sync so that it can be run in a thread. 31 | """ 32 | self.loop = asyncio.new_event_loop() 33 | self.loop.run_until_complete(self.open_async()) 34 | 35 | def set_pump_status(self, status): 36 | """ 37 | Updates pump status and logs update to console. 38 | """ 39 | self.pump_status = status 40 | _logger.info("%r partition %r", status, self.lease.partition_id) 41 | 42 | def set_lease(self, new_lease): 43 | """ 44 | Sets a new partition lease to be processed by the pump. 45 | 46 | :param lease: The lease to set. 47 | :type lease: ~azure.eventprocessorhost.lease.Lease 48 | """ 49 | if self.partition_context: 50 | self.partition_context.lease = new_lease 51 | self.partition_context.event_processor_context = new_lease.event_processor_context 52 | 53 | async def open_async(self): 54 | """ 55 | Opens partition pump. 
56 | """ 57 | self.set_pump_status("Opening") 58 | self.partition_context = PartitionContext(self.host, self.lease.partition_id, 59 | self.host.eh_config.client_address, 60 | self.host.eh_config.consumer_group, 61 | self.loop) 62 | self.partition_context.lease = self.lease 63 | self.partition_context.event_processor_context = self.lease.event_processor_context 64 | self.processor = self.host.event_processor(self.host.event_processor_params) 65 | try: 66 | await self.processor.open_async(self.partition_context) 67 | except Exception as err: # pylint: disable=broad-except 68 | # If the processor won't create or open, only thing we can do here is pass the buck. 69 | # Null it out so we don't try to operate on it further. 70 | await self.process_error_async(err) 71 | self.processor = None 72 | self.set_pump_status("OpenFailed") 73 | 74 | # If Open Async Didn't Fail call OnOpenAsync 75 | if self.pump_status == "Opening": 76 | await self.on_open_async() 77 | 78 | @abstractmethod 79 | async def on_open_async(self): 80 | """ 81 | Event handler for on open event. 82 | """ 83 | 84 | def is_closing(self): 85 | """ 86 | Returns whether pump is closing. 87 | 88 | :rtype: bool 89 | """ 90 | return self.pump_status == "Closing" or self.pump_status == "Closed" 91 | 92 | async def close_async(self, reason): 93 | """ 94 | Safely closes the pump. 95 | 96 | :param reason: The reason for the shutdown. 
97 | :type reason: str 98 | """ 99 | self.set_pump_status("Closing") 100 | try: 101 | await self.on_closing_async(reason) 102 | if self.processor: 103 | _logger.info("PartitionPumpInvokeProcessorCloseStart %r %r %r", 104 | self.host.guid, self.partition_context.partition_id, reason) 105 | await self.processor.close_async(self.partition_context, reason) 106 | _logger.info("PartitionPumpInvokeProcessorCloseStart %r %r", 107 | self.host.guid, self.partition_context.partition_id) 108 | except Exception as err: # pylint: disable=broad-except 109 | await self.process_error_async(err) 110 | _logger.error("%r %r %r", self.host.guid, self.partition_context.partition_id, err) 111 | raise err 112 | 113 | if reason == "LeaseLost": 114 | try: 115 | _logger.info("Lease Lost releasing ownership") 116 | await self.host.storage_manager.release_lease_async(self.partition_context.lease) 117 | except Exception as err: # pylint: disable=broad-except 118 | _logger.error("%r %r %r", self.host.guid, self.partition_context.partition_id, err) 119 | raise err 120 | 121 | self.set_pump_status("Closed") 122 | 123 | @abstractmethod 124 | async def on_closing_async(self, reason): 125 | """ 126 | Event handler for on closing event. 127 | 128 | :param reason: The reason for the shutdown. 129 | :type reason: str 130 | """ 131 | 132 | async def process_events_async(self, events): 133 | """ 134 | Process pump events. 135 | 136 | :param events: List of events to be processed. 137 | :type events: list[~azure.eventhub.common.EventData] 138 | """ 139 | if events: 140 | # Synchronize to serialize calls to the processor. The handler is not installed until 141 | # after OpenAsync returns, so ProcessEventsAsync cannot conflict with OpenAsync. There 142 | # could be a conflict between ProcessEventsAsync and CloseAsync, however. All calls to 143 | # CloseAsync are protected by synchronizing too. 
144 | try: 145 | last = events[-1] 146 | if last is not None: 147 | self.partition_context.set_offset_and_sequence_number(last) 148 | await self.processor.process_events_async(self.partition_context, events) 149 | except Exception as err: # pylint: disable=broad-except 150 | await self.process_error_async(err) 151 | 152 | async def process_error_async(self, error): 153 | """ 154 | Passes error to the event processor for processing. 155 | 156 | :param error: An error that occurred. 157 | :type error: Exception 158 | """ 159 | await self.processor.process_error_async(self.partition_context, error) 160 | -------------------------------------------------------------------------------- /tests/asynctests/test_negative_async.py: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for 4 | # license information.
@pytest.mark.asyncio
async def test_send_with_invalid_hostname_async(invalid_hostname, connstr_receivers):
    """run_async() against an unreachable hostname must raise EventHubError."""
    _, receivers = connstr_receivers
    client = EventHubClientAsync.from_connection_string(invalid_hostname, debug=True)
    sender = client.add_async_sender()
    with pytest.raises(EventHubError):
        await client.run_async()


@pytest.mark.asyncio
async def test_receive_with_invalid_hostname_async(invalid_hostname):
    """run_async() of a receiver against an unreachable hostname must raise EventHubError."""
    client = EventHubClientAsync.from_connection_string(invalid_hostname, debug=True)
    # fix: the handler is a receiver — was misleadingly bound to a variable named `sender`
    receiver = client.add_async_receiver("$default", "0")
    with pytest.raises(EventHubError):
        await client.run_async()


@pytest.mark.asyncio
async def test_send_with_invalid_key_async(invalid_key, connstr_receivers):
    """run_async() with a bad SAS key must raise EventHubError."""
    _, receivers = connstr_receivers
    client = EventHubClientAsync.from_connection_string(invalid_key, debug=False)
    sender = client.add_async_sender()
    with pytest.raises(EventHubError):
        await client.run_async()


@pytest.mark.asyncio
async def test_receive_with_invalid_key_async(invalid_key):
    """run_async() of a receiver with a bad SAS key must raise EventHubError."""
    client = EventHubClientAsync.from_connection_string(invalid_key, debug=True)
    # fix: the handler is a receiver — was misleadingly bound to a variable named `sender`
    receiver = client.add_async_receiver("$default", "0")
    with pytest.raises(EventHubError):
        await client.run_async()
@pytest.mark.asyncio
async def test_receive_with_invalid_policy_async(invalid_policy):
    """run_async() of a receiver with a bad SAS policy must raise EventHubError."""
    client = EventHubClientAsync.from_connection_string(invalid_policy, debug=True)
    # fix: the handler is a receiver — was misleadingly bound to a variable named `sender`
    receiver = client.add_async_receiver("$default", "0")
    with pytest.raises(EventHubError):
        await client.run_async()


@pytest.mark.asyncio
async def test_send_partition_key_with_partition_async(connection_str):
    """Setting a partition key on a partition-bound sender must raise ValueError."""
    client = EventHubClientAsync.from_connection_string(connection_str, debug=True)
    sender = client.add_async_sender(partition="1")
    try:
        await client.run_async()
        data = EventData(b"Data")
        data.partition_key = b"PKey"
        with pytest.raises(ValueError):
            await sender.send(data)
    finally:
        await client.stop_async()


@pytest.mark.asyncio
async def test_non_existing_entity_sender_async(connection_str):
    """Opening a sender on a non-existent Event Hub must raise EventHubError."""
    client = EventHubClientAsync.from_connection_string(connection_str, eventhub="nemo", debug=False)
    sender = client.add_async_sender(partition="1")
    with pytest.raises(EventHubError):
        await client.run_async()


@pytest.mark.asyncio
async def test_non_existing_entity_receiver_async(connection_str):
    """Opening a receiver on a non-existent Event Hub must raise EventHubError."""
    client = EventHubClientAsync.from_connection_string(connection_str, eventhub="nemo", debug=False)
    receiver = client.add_async_receiver("$default", "0")
    with pytest.raises(EventHubError):
        await client.run_async()


@pytest.mark.asyncio
async def test_receive_from_invalid_partitions_async(connection_str):
    """Receiving from syntactically invalid partition IDs must raise EventHubError."""
    partitions = ["XYZ", "-1", "1000", "-"]
    for p in partitions:
        client = EventHubClientAsync.from_connection_string(connection_str, debug=True)
        receiver = client.add_async_receiver("$default", p)
        try:
            with pytest.raises(EventHubError):
                # run_async is expected to fail; receive() is unreachable if it does,
                # and would otherwise trip the same error.
                await client.run_async()
                await receiver.receive(timeout=10)
        finally:
            await client.stop_async()
async def pump(receiver):
    """Drain up to six non-empty batches from *receiver* and return the total event count.

    Stops as soon as a receive times out (empty batch) or the iteration cap is hit.
    """
    total = 0
    iterations = 0
    batch = await receiver.receive(timeout=10)
    while batch and iterations <= 5:
        iterations += 1
        total += len(batch)
        batch = await receiver.receive(timeout=10)
    return total
receivers.append(client.add_async_receiver("$default", "0", prefetch=1000, offset=Offset('@latest'))) 175 | try: 176 | await client.run_async() 177 | outputs = await asyncio.gather( 178 | pump(receivers[0]), 179 | pump(receivers[1]), 180 | pump(receivers[2]), 181 | pump(receivers[3]), 182 | pump(receivers[4]), 183 | pump(receivers[5]), 184 | return_exceptions=True) 185 | print(outputs) 186 | failed = [o for o in outputs if isinstance(o, EventHubError)] 187 | assert len(failed) == 1 188 | print(failed[0].message) 189 | finally: 190 | await client.stop_async() 191 | -------------------------------------------------------------------------------- /tests/asynctests/test_send_async.py: -------------------------------------------------------------------------------- 1 | # -- coding: utf-8 -- 2 | #------------------------------------------------------------------------- 3 | # Copyright (c) Microsoft Corporation. All rights reserved. 4 | # Licensed under the MIT License. See License.txt in the project root for 5 | # license information. 
@pytest.mark.asyncio
async def test_send_with_partition_key_async(connstr_receivers):
    """Events sharing a partition key must all land on the same partition."""
    connection_str, receivers = connstr_receivers
    client = EventHubClientAsync.from_connection_string(connection_str, debug=False)
    sender = client.add_async_sender()
    # fix: wrap in try/finally so the client is always stopped, matching every
    # other test in this module — previously a failed send leaked the client.
    try:
        await client.run_async()

        data_val = 0
        for partition in [b"a", b"b", b"c", b"d", b"e", b"f"]:
            partition_key = b"test_partition_" + partition
            for i in range(50):
                data = EventData(str(data_val))
                data.partition_key = partition_key
                data_val += 1
                await sender.send(data)
    finally:
        await client.stop_async()

    found_partition_keys = {}
    for index, partition in enumerate(receivers):
        received = partition.receive(timeout=5)
        for message in received:
            try:
                # Every occurrence of a key must map to the partition where it
                # was first seen.
                existing = found_partition_keys[message.partition_key]
                assert existing == index
            except KeyError:
                found_partition_keys[message.partition_key] = index


@pytest.mark.asyncio
async def test_send_and_receive_zero_length_body_async(connstr_receivers):
    """A zero-length body round-trips as a single empty-bytes event."""
    connection_str, receivers = connstr_receivers
    client = EventHubClientAsync.from_connection_string(connection_str, debug=False)
    sender = client.add_async_sender()
    try:
        await client.run_async()
        await sender.send(EventData(""))
    except:
        raise
    finally:
        await client.stop_async()

    received = []
    for r in receivers:
        received.extend(r.receive(timeout=1))

    assert len(received) == 1
    assert list(received[0].body)[0] == b""
@pytest.mark.asyncio
async def test_send_batch_async(connstr_receivers):
    """A 10-event batch send is received as 10 ordered individual events."""
    connection_str, receivers = connstr_receivers
    def batched():
        # Generator feeding EventData(batch=...); events keep generation order.
        for i in range(10):
            yield "Event number {}".format(i)

    client = EventHubClientAsync.from_connection_string(connection_str, debug=False)
    sender = client.add_async_sender()
    try:
        await client.run_async()
        await sender.send(EventData(batch=batched()))
    except:
        raise
    finally:
        await client.stop_async()

    # Brief pause so the service finishes distributing the batch.
    time.sleep(1)
    received = []
    for r in receivers:
        received.extend(r.receive(timeout=3))

    assert len(received) == 10
    for index, message in enumerate(received):
        assert list(message.body)[0] == "Event number {}".format(index).encode('utf-8')


@pytest.mark.asyncio
async def test_send_partition_async(connstr_receivers):
    """A partition-bound sender delivers only to that partition."""
    connection_str, receivers = connstr_receivers
    client = EventHubClientAsync.from_connection_string(connection_str, debug=False)
    sender = client.add_async_sender(partition="1")
    try:
        await client.run_async()
        await sender.send(EventData(b"Data"))
    except:
        raise
    finally:
        await client.stop_async()

    partition_0 = receivers[0].receive(timeout=2)
    assert len(partition_0) == 0
    partition_1 = receivers[1].receive(timeout=2)
    assert len(partition_1) == 1
async def test_send_non_ascii_async(connstr_receivers):
    """Non-ASCII text and JSON with CJK characters round-trip intact."""
    connection_str, receivers = connstr_receivers
    client = EventHubClientAsync.from_connection_string(connection_str, debug=False)
    sender = client.add_async_sender(partition="0")
    try:
        await client.run_async()
        await sender.send(EventData("é,è,à,ù,â,ê,î,ô,û"))
        await sender.send(EventData(json.dumps({"foo": "漢字"})))
    except:
        raise
    finally:
        await client.stop_async()

    partition_0 = receivers[0].receive(timeout=2)
    assert len(partition_0) == 2
    assert partition_0[0].body_as_str() == "é,è,à,ù,â,ê,î,ô,û"
    assert partition_0[1].body_as_json() == {"foo": "漢字"}


@pytest.mark.asyncio
async def test_send_partition_batch_async(connstr_receivers):
    """A batch sent by a partition-bound sender lands wholly on that partition."""
    connection_str, receivers = connstr_receivers
    def batched():
        # Generator feeding EventData(batch=...).
        for i in range(10):
            yield "Event number {}".format(i)

    client = EventHubClientAsync.from_connection_string(connection_str, debug=False)
    sender = client.add_async_sender(partition="1")
    try:
        await client.run_async()
        await sender.send(EventData(batch=batched()))
    except:
        raise
    finally:
        await client.stop_async()

    partition_0 = receivers[0].receive(timeout=2)
    assert len(partition_0) == 0
    partition_1 = receivers[1].receive(timeout=2)
    assert len(partition_1) == 10
@pytest.mark.asyncio
async def test_send_multiple_clients_async(connstr_receivers):
    """Two senders on one client each deliver to their bound partition."""
    connection_str, receivers = connstr_receivers
    client = EventHubClientAsync.from_connection_string(connection_str, debug=False)
    sender_0 = client.add_async_sender(partition="0")
    sender_1 = client.add_async_sender(partition="1")
    try:
        await client.run_async()
        await sender_0.send(EventData(b"Message 0"))
        await sender_1.send(EventData(b"Message 1"))
    except:
        raise
    finally:
        await client.stop_async()

    # One event per partition, routed by the sender it was sent through.
    partition_0 = receivers[0].receive(timeout=2)
    assert len(partition_0) == 1
    partition_1 = receivers[1].receive(timeout=2)
    assert len(partition_1) == 1
def test_send_with_invalid_hostname(invalid_hostname, connstr_receivers):
    # run() against an unreachable hostname must surface an EventHubError.
    _, receivers = connstr_receivers
    client = EventHubClient.from_connection_string(invalid_hostname, debug=False)
    sender = client.add_sender()
    with pytest.raises(EventHubError):
        client.run()


def test_receive_with_invalid_hostname_sync(invalid_hostname):
    # Receiver variant of the invalid-hostname check.
    client = EventHubClient.from_connection_string(invalid_hostname, debug=True)
    receiver = client.add_receiver("$default", "0")
    with pytest.raises(EventHubError):
        client.run()


def test_send_with_invalid_key(invalid_key, connstr_receivers):
    # run() with a bad SAS key must surface an EventHubError.
    _, receivers = connstr_receivers
    client = EventHubClient.from_connection_string(invalid_key, debug=False)
    sender = client.add_sender()
    with pytest.raises(EventHubError):
        client.run()


def test_receive_with_invalid_key_sync(invalid_key):
    # Receiver variant of the invalid-key check.
    client = EventHubClient.from_connection_string(invalid_key, debug=True)
    receiver = client.add_receiver("$default", "0")
    with pytest.raises(EventHubError):
        client.run()


def test_send_with_invalid_policy(invalid_policy, connstr_receivers):
    # run() with a bad SAS policy must surface an EventHubError.
    _, receivers = connstr_receivers
    client = EventHubClient.from_connection_string(invalid_policy, debug=False)
    sender = client.add_sender()
    with pytest.raises(EventHubError):
        client.run()


def test_receive_with_invalid_policy_sync(invalid_policy):
    # Receiver variant of the invalid-policy check.
    client = EventHubClient.from_connection_string(invalid_policy, debug=True)
    receiver = client.add_receiver("$default", "0")
    with pytest.raises(EventHubError):
        client.run()
def test_send_partition_key_with_partition_sync(connection_str):
    # Setting a partition key on a partition-bound sender must raise ValueError.
    client = EventHubClient.from_connection_string(connection_str, debug=True)
    sender = client.add_sender(partition="1")
    try:
        client.run()
        data = EventData(b"Data")
        data.partition_key = b"PKey"
        with pytest.raises(ValueError):
            sender.send(data)
    finally:
        client.stop()


def test_non_existing_entity_sender(connection_str):
    # Opening a sender on a non-existent Event Hub must raise EventHubError.
    client = EventHubClient.from_connection_string(connection_str, eventhub="nemo", debug=False)
    sender = client.add_sender(partition="1")
    with pytest.raises(EventHubError):
        client.run()


def test_non_existing_entity_receiver(connection_str):
    # Opening a receiver on a non-existent Event Hub must raise EventHubError.
    client = EventHubClient.from_connection_string(connection_str, eventhub="nemo", debug=False)
    receiver = client.add_receiver("$default", "0")
    with pytest.raises(EventHubError):
        client.run()


def test_receive_from_invalid_partitions_sync(connection_str):
    # Receiving from syntactically invalid partition IDs must raise EventHubError.
    partitions = ["XYZ", "-1", "1000", "-" ]
    for p in partitions:
        client = EventHubClient.from_connection_string(connection_str, debug=True)
        receiver = client.add_receiver("$default", p)
        try:
            with pytest.raises(EventHubError):
                # run() is expected to fail; receive() is unreachable if it does.
                client.run()
                receiver.receive(timeout=10)
        finally:
            client.stop()


def test_send_to_invalid_partitions(connection_str):
    # Sending to syntactically invalid partition IDs must raise EventHubError.
    partitions = ["XYZ", "-1", "1000", "-" ]
    for p in partitions:
        client = EventHubClient.from_connection_string(connection_str, debug=False)
        sender = client.add_sender(partition=p)
        try:
            with pytest.raises(EventHubError):
                client.run()
        finally:
            client.stop()
def test_send_null_body(connection_str):
    """Constructing EventData(None) must raise ValueError before any send."""
    # fix: removed dead local `partitions = ["XYZ", "-1", "1000", "-"]` —
    # a copy-paste leftover from the invalid-partition tests, never used here.
    client = EventHubClient.from_connection_string(connection_str, debug=False)
    sender = client.add_sender()
    try:
        client.run()
        with pytest.raises(ValueError):
            data = EventData(None)
            sender.send(data)
    finally:
        client.stop()
"key2": 42}' 177 | assert received[0].body_as_json() == {"test_value": "JSON bytes data", "key1": True, "key2": 42} 178 | 179 | senders[0].send(EventData('{"test_value": "JSON str data", "key1": true, "key2": 42}')) 180 | time.sleep(1) 181 | received = receiver.receive(timeout=5) 182 | assert len(received) == 1 183 | assert list(received[0].body) == [b'{"test_value": "JSON str data", "key1": true, "key2": 42}'] 184 | assert received[0].body_as_str() == '{"test_value": "JSON str data", "key1": true, "key2": 42}' 185 | assert received[0].body_as_json() == {"test_value": "JSON str data", "key1": True, "key2": 42} 186 | 187 | senders[0].send(EventData(42)) 188 | time.sleep(1) 189 | received = receiver.receive(timeout=5) 190 | assert len(received) == 1 191 | assert received[0].body_as_str() == "42" 192 | assert received[0].body == 42 193 | except: 194 | raise 195 | finally: 196 | client.stop() -------------------------------------------------------------------------------- /azure/eventprocessorhost/eh_partition_pump.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 4 | # ----------------------------------------------------------------------------------- 5 | 6 | import logging 7 | import asyncio 8 | from azure.eventhub import Offset, EventHubClientAsync 9 | from azure.eventprocessorhost.partition_pump import PartitionPump 10 | 11 | 12 | _logger = logging.getLogger(__name__) 13 | 14 | 15 | class EventHubPartitionPump(PartitionPump): 16 | """ 17 | Pulls and messages from lease partition from eventhub and sends them to processor. 
18 | """ 19 | 20 | def __init__(self, host, lease): 21 | PartitionPump.__init__(self, host, lease) 22 | self.eh_client = None 23 | self.partition_receiver = None 24 | self.partition_receive_handler = None 25 | self.running = None 26 | 27 | async def on_open_async(self): 28 | """ 29 | Eventhub Override for on_open_async. 30 | """ 31 | _opened_ok = False 32 | _retry_count = 0 33 | while (not _opened_ok) and (_retry_count < 5): 34 | try: 35 | await self.open_clients_async() 36 | _opened_ok = True 37 | except Exception as err: # pylint: disable=broad-except 38 | _logger.warning( 39 | "%r,%r PartitionPumpWarning: Failure creating client or receiver, retrying: %r", 40 | self.host.guid, self.partition_context.partition_id, err) 41 | last_exception = err 42 | _retry_count += 1 43 | 44 | if not _opened_ok: 45 | await self.processor.process_error_async(self.partition_context, last_exception) 46 | self.set_pump_status("OpenFailed") 47 | 48 | if self.pump_status == "Opening": 49 | loop = asyncio.get_event_loop() 50 | self.set_pump_status("Running") 51 | await self.eh_client.run_async() 52 | self.running = loop.create_task(self.partition_receiver.run()) 53 | 54 | if self.pump_status in ["OpenFailed", "Errored"]: 55 | self.set_pump_status("Closing") 56 | await self.clean_up_clients_async() 57 | self.set_pump_status("Closed") 58 | 59 | 60 | async def open_clients_async(self): 61 | """ 62 | Responsible for establishing connection to event hub client 63 | throws EventHubsException, IOException, InterruptedException, ExecutionException. 
    async def clean_up_clients_async(self):
        """
        Resets the pump; swallows all exceptions.

        Stops the underlying client (if any) and drops all client/receiver
        references so the pump can be reopened or discarded.
        """
        if self.partition_receiver:
            if self.eh_client:
                await self.eh_client.stop_async()
                self.partition_receiver = None
                self.partition_receive_handler = None
                self.eh_client = None

    async def on_closing_async(self, reason):
        """
        Overrides partition pump on closing.

        Marks the pump "Errored" so the receive loop exits, waits for the
        running receiver task to finish, then tears down the clients.

        :param reason: The reason for the shutdown.
        :type reason: str
        """
        self.partition_receiver.eh_partition_pump.set_pump_status("Errored")
        try:
            # self.running may be None if the pump never started; awaiting
            # None raises TypeError, handled below.
            await self.running
        except TypeError:
            _logger.debug("No partition pump running.")
        except Exception as err:  # pylint: disable=broad-except
            _logger.info("Error on closing partition pump: %r", err)
        await self.clean_up_clients_async()
    def __init__(self, eh_partition_pump):
        # The pump this receiver feeds; provides options, status and handlers.
        self.eh_partition_pump = eh_partition_pump
        self.max_batch_size = self.eh_partition_pump.host.eph_options.max_batch_size
        # NOTE(review): attribute name keeps the original (misspelled) spelling
        # for backward compatibility with any external references.
        self.recieve_timeout = self.eh_partition_pump.host.eph_options.receive_timeout

    async def run(self):
        """
        Runs the async partition receiver event loop to retrieve messages from the event queue.

        Loops until the pump is marked "Errored" or begins closing. Receive
        failures are forwarded to process_error_async, which marks the pump
        "Errored" and thereby ends the loop.
        """
        # Implement pull max batch from queue instead of one message at a time
        while self.eh_partition_pump.pump_status != "Errored" and not self.eh_partition_pump.is_closing():
            if self.eh_partition_pump.partition_receive_handler:
                try:
                    msgs = await self.eh_partition_pump.partition_receive_handler.receive(
                        max_batch_size=self.max_batch_size,
                        timeout=self.recieve_timeout)
                except Exception as e:  # pylint: disable=broad-except
                    _logger.info("Error raised while attempting to receive messages: %r", e)
                    await self.process_error_async(e)
                else:
                    if not msgs:
                        _logger.info("No events received, queue size %r, release %r",
                                     self.eh_partition_pump.partition_receive_handler.queue_size,
                                     self.eh_partition_pump.host.eph_options.release_pump_on_timeout)
                        if self.eh_partition_pump.host.eph_options.release_pump_on_timeout:
                            # Treat a quiet partition as an error so the lease can
                            # be released to another host.
                            await self.process_error_async(TimeoutError("No events received"))
                    else:
                        await self.process_events_async(msgs)
    async def process_error_async(self, error):
        """
        Handles processing errors; this is never called since the python receive client doesn't
        have error handling implemented (TBD add fault pump handling).

        Always marks the pump "Errored" so the run() loop terminates, even if
        the pump's own error handler raises.

        :param error: An error the occurred.
        :type error: Exception
        """
        try:
            await self.eh_partition_pump.process_error_async(error)
        finally:
            self.eh_partition_pump.set_pump_status("Errored")
def test_send_with_partition_key(connstr_receivers):
    # Events sharing a partition key must all land on the same partition.
    connection_str, receivers = connstr_receivers
    client = EventHubClient.from_connection_string(connection_str, debug=False)
    sender = client.add_sender()
    try:
        client.run()

        data_val = 0
        for partition in [b"a", b"b", b"c", b"d", b"e", b"f"]:
            partition_key = b"test_partition_" + partition
            for i in range(50):
                data = EventData(str(data_val))
                data.partition_key = partition_key
                data_val += 1
                sender.send(data)
    except:
        raise
    finally:
        client.stop()

    found_partition_keys = {}
    for index, partition in enumerate(receivers):
        received = partition.receive(timeout=5)
        for message in received:
            try:
                # Every occurrence of a key must map to the partition where it
                # was first seen.
                existing = found_partition_keys[message.partition_key]
                assert existing == index
            except KeyError:
                found_partition_keys[message.partition_key] = index


def test_send_and_receive_large_body_size(connstr_receivers):
    # A 250 KB body must round-trip intact (skipped on OSX, known size issue).
    if sys.platform.startswith('darwin'):
        pytest.skip("Skipping on OSX - open issue regarding message size")
    connection_str, receivers = connstr_receivers
    client = EventHubClient.from_connection_string(connection_str, debug=False)
    sender = client.add_sender()
    try:
        client.run()
        payload = 250 * 1024
        sender.send(EventData("A" * payload))
    except:
        raise
    finally:
        client.stop()

    received = []
    for r in receivers:
        received.extend(r.receive(timeout=4))

    assert len(received) == 1
    assert len(list(received[0].body)[0]) == payload
def test_send_single_event(connstr_receivers):
    # A single event round-trips with its body intact.
    connection_str, receivers = connstr_receivers
    client = EventHubClient.from_connection_string(connection_str, debug=False)
    sender = client.add_sender()
    try:
        client.run()
        sender.send(EventData(b"A single event"))
    except:
        raise
    finally:
        client.stop()

    received = []
    for r in receivers:
        received.extend(r.receive(timeout=1))

    assert len(received) == 1
    assert list(received[0].body)[0] == b"A single event"


def test_send_batch_sync(connstr_receivers):
    # A 10-event batch send is received as 10 ordered individual events.
    connection_str, receivers = connstr_receivers
    def batched():
        # Generator feeding EventData(batch=...); events keep generation order.
        for i in range(10):
            yield "Event number {}".format(i)

    client = EventHubClient.from_connection_string(connection_str, debug=False)
    sender = client.add_sender()
    try:
        client.run()
        sender.send(EventData(batch=batched()))
    except:
        raise
    finally:
        client.stop()

    # Brief pause so the service finishes distributing the batch.
    time.sleep(1)
    received = []
    for r in receivers:
        received.extend(r.receive(timeout=3))

    assert len(received) == 10
    for index, message in enumerate(received):
        assert list(message.body)[0] == "Event number {}".format(index).encode('utf-8')
143 | client.run() 144 | sender.send(EventData(b"Data")) 145 | except: 146 | raise 147 | finally: 148 | client.stop() 149 | 150 | partition_0 = receivers[0].receive(timeout=2) 151 | assert len(partition_0) == 0 152 | partition_1 = receivers[1].receive(timeout=2) 153 | assert len(partition_1) == 1 154 | 155 | 156 | def test_send_non_ascii(connstr_receivers): 157 | connection_str, receivers = connstr_receivers 158 | client = EventHubClient.from_connection_string(connection_str, debug=False) 159 | sender = client.add_sender(partition="0") 160 | try: 161 | client.run() 162 | sender.send(EventData(u"é,è,à,ù,â,ê,î,ô,û")) 163 | sender.send(EventData(json.dumps({"foo": u"漢字"}))) 164 | except: 165 | raise 166 | finally: 167 | client.stop() 168 | 169 | partition_0 = receivers[0].receive(timeout=2) 170 | assert len(partition_0) == 2 171 | assert partition_0[0].body_as_str() == u"é,è,à,ù,â,ê,î,ô,û" 172 | assert partition_0[1].body_as_json() == {"foo": u"漢字"} 173 | 174 | 175 | def test_send_partition_batch(connstr_receivers): 176 | connection_str, receivers = connstr_receivers 177 | def batched(): 178 | for i in range(10): 179 | yield "Event number {}".format(i) 180 | 181 | client = EventHubClient.from_connection_string(connection_str, debug=False) 182 | sender = client.add_sender(partition="1") 183 | try: 184 | client.run() 185 | sender.send(EventData(batch=batched())) 186 | time.sleep(1) 187 | except: 188 | raise 189 | finally: 190 | client.stop() 191 | 192 | partition_0 = receivers[0].receive(timeout=2) 193 | assert len(partition_0) == 0 194 | partition_1 = receivers[1].receive(timeout=2) 195 | assert len(partition_1) == 10 196 | 197 | 198 | def test_send_array_sync(connstr_receivers): 199 | connection_str, receivers = connstr_receivers 200 | client = EventHubClient.from_connection_string(connection_str, debug=True) 201 | sender = client.add_sender() 202 | try: 203 | client.run() 204 | sender.send(EventData([b"A", b"B", b"C"])) 205 | except: 206 | raise 207 | finally: 208 | 
client.stop() 209 | 210 | received = [] 211 | for r in receivers: 212 | received.extend(r.receive(timeout=1)) 213 | 214 | assert len(received) == 1 215 | assert list(received[0].body) == [b"A", b"B", b"C"] 216 | 217 | 218 | def test_send_multiple_clients(connstr_receivers): 219 | connection_str, receivers = connstr_receivers 220 | client = EventHubClient.from_connection_string(connection_str, debug=False) 221 | sender_0 = client.add_sender(partition="0") 222 | sender_1 = client.add_sender(partition="1") 223 | try: 224 | client.run() 225 | sender_0.send(EventData(b"Message 0")) 226 | sender_1.send(EventData(b"Message 1")) 227 | except: 228 | raise 229 | finally: 230 | client.stop() 231 | 232 | partition_0 = receivers[0].receive(timeout=2) 233 | assert len(partition_0) == 1 234 | partition_1 = receivers[1].receive(timeout=2) 235 | assert len(partition_1) == 1 -------------------------------------------------------------------------------- /azure/eventprocessorhost/partition_context.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 4 | # ----------------------------------------------------------------------------------- 5 | import asyncio 6 | import logging 7 | from azure.eventprocessorhost.checkpoint import Checkpoint 8 | 9 | 10 | _logger = logging.getLogger(__name__) 11 | 12 | 13 | class PartitionContext: 14 | """ 15 | Encapsulates information related to an Event Hubs partition used by AbstractEventProcessor. 
16 | """ 17 | 18 | def __init__(self, host, partition_id, eh_path, consumer_group_name, pump_loop=None): 19 | self.host = host 20 | self.partition_id = partition_id 21 | self.eh_path = eh_path 22 | self.consumer_group_name = consumer_group_name 23 | self.offset = "-1" 24 | self.sequence_number = 0 25 | self.lease = None 26 | self.event_processor_context = None 27 | self.pump_loop = pump_loop or asyncio.get_event_loop() 28 | 29 | def set_offset_and_sequence_number(self, event_data): 30 | """ 31 | Updates offset based on event. 32 | 33 | :param event_data: A received EventData with valid offset and sequenceNumber. 34 | :type event_data: ~azure.eventhub.common.EventData 35 | """ 36 | if not event_data: 37 | raise Exception(event_data) 38 | self.offset = event_data.offset.value 39 | self.sequence_number = event_data.sequence_number 40 | 41 | async def get_initial_offset_async(self): # throws InterruptedException, ExecutionException 42 | """ 43 | Gets the initial offset for processing the partition. 44 | 45 | :rtype: str 46 | """ 47 | _logger.info("Calling user-provided initial offset provider %r %r", 48 | self.host.guid, self.partition_id) 49 | starting_checkpoint = await self.host.storage_manager.get_checkpoint_async(self.partition_id) 50 | if not starting_checkpoint: 51 | # No checkpoint was ever stored. Use the initialOffsetProvider instead 52 | # defaults to "-1" 53 | self.offset = self.host.eph_options.initial_offset_provider 54 | self.sequence_number = -1 55 | else: 56 | self.offset = starting_checkpoint.offset 57 | self.sequence_number = starting_checkpoint.sequence_number 58 | 59 | _logger.info("%r %r Initial offset/sequenceNumber provided %r/%r", 60 | self.host.guid, self.partition_id, self.offset, self.sequence_number) 61 | return self.offset 62 | 63 | async def checkpoint_async(self, event_processor_context=None): 64 | """ 65 | Generates a checkpoint for the partition using the curren offset and sequenceNumber for 66 | and persists to the checkpoint manager. 
67 | 68 | :param event_processor_context An optional custom state value for the Event Processor. 69 | This data must be in a JSON serializable format. 70 | :type event_processor_context: str or dict 71 | """ 72 | captured_checkpoint = Checkpoint(self.partition_id, self.offset, self.sequence_number) 73 | await self.persist_checkpoint_async(captured_checkpoint, event_processor_context) 74 | self.event_processor_context = event_processor_context 75 | 76 | async def checkpoint_async_event_data(self, event_data, event_processor_context=None): 77 | """ 78 | Stores the offset and sequenceNumber from the provided received EventData instance, 79 | then writes those values to the checkpoint store via the checkpoint manager. 80 | Optionally stores the state of the Event Processor along the checkpoint. 81 | 82 | :param event_data: A received EventData with valid offset and sequenceNumber. 83 | :type event_data: ~azure.eventhub.common.EventData 84 | :param event_processor_context An optional custom state value for the Event Processor. 85 | This data must be in a JSON serializable format. 86 | :type event_processor_context: str or dict 87 | :raises: ValueError if suplied event_data is None. 88 | :raises: ValueError if the sequenceNumber is less than the last checkpointed value. 
89 | """ 90 | if not event_data: 91 | raise ValueError("event_data") 92 | if event_data.sequence_number > self.sequence_number: 93 | #We have never seen this sequence number yet 94 | raise ValueError("Argument Out Of Range event_data x-opt-sequence-number") 95 | 96 | await self.persist_checkpoint_async(Checkpoint(self.partition_id, 97 | event_data.offset.value, 98 | event_data.sequence_number), 99 | event_processor_context) 100 | self.event_processor_context = event_processor_context 101 | 102 | def to_string(self): 103 | """ 104 | Returns the parition context in the following format: 105 | "PartitionContext({EventHubPath}{ConsumerGroupName}{PartitionId}{SequenceNumber})" 106 | 107 | :rtype: str 108 | """ 109 | return "PartitionContext({}{}{}{})".format(self.eh_path, 110 | self.consumer_group_name, 111 | self.partition_id, 112 | self.sequence_number) 113 | 114 | async def persist_checkpoint_async(self, checkpoint, event_processor_context=None): 115 | """ 116 | Persists the checkpoint, and - optionally - the state of the Event Processor. 117 | 118 | :param checkpoint: The checkpoint to persist. 119 | :type checkpoint: ~azure.eventprocessorhost.checkpoint.Checkpoint 120 | :param event_processor_context An optional custom state value for the Event Processor. 121 | This data must be in a JSON serializable format. 
122 | :type event_processor_context: str or dict 123 | """ 124 | _logger.debug("PartitionPumpCheckpointStart %r %r %r %r", 125 | self.host.guid, checkpoint.partition_id, checkpoint.offset, checkpoint.sequence_number) 126 | try: 127 | in_store_checkpoint = await self.host.storage_manager.get_checkpoint_async(checkpoint.partition_id) 128 | if not in_store_checkpoint or checkpoint.sequence_number >= in_store_checkpoint.sequence_number: 129 | if not in_store_checkpoint: 130 | _logger.info("persisting checkpoint %r", checkpoint.__dict__) 131 | await self.host.storage_manager.create_checkpoint_if_not_exists_async(checkpoint.partition_id) 132 | 133 | self.lease.event_processor_context = event_processor_context 134 | if not await self.host.storage_manager.update_checkpoint_async(self.lease, checkpoint): 135 | _logger.error("Failed to persist checkpoint for partition: %r", self.partition_id) 136 | raise Exception("failed to persist checkpoint") 137 | self.lease.offset = checkpoint.offset 138 | self.lease.sequence_number = checkpoint.sequence_number 139 | else: 140 | _logger.error( # pylint: disable=logging-not-lazy 141 | "Ignoring out of date checkpoint with offset %r/sequence number %r because " + 142 | "current persisted checkpoint has higher offset %r/sequence number %r", 143 | checkpoint.offset, 144 | checkpoint.sequence_number, 145 | in_store_checkpoint.offset, 146 | in_store_checkpoint.sequence_number) 147 | raise Exception("offset/sequenceNumber invalid") 148 | 149 | except Exception as err: 150 | _logger.error("PartitionPumpCheckpointError %r %r %r", 151 | self.host.guid, checkpoint.partition_id, err) 152 | raise 153 | finally: 154 | _logger.debug("PartitionPumpCheckpointStop %r %r", 155 | self.host.guid, checkpoint.partition_id) 156 | -------------------------------------------------------------------------------- /conftest.py: -------------------------------------------------------------------------------- 1 | 
#------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for 4 | # license information. 5 | #-------------------------------------------------------------------------- 6 | 7 | import os 8 | import pytest 9 | import logging 10 | import sys 11 | import uuid 12 | 13 | # Ignore async tests for Python < 3.5 14 | collect_ignore = [] 15 | if sys.version_info < (3, 5): 16 | collect_ignore.append("tests/asynctests") 17 | collect_ignore.append("features") 18 | else: 19 | from tests.asynctests import MockEventProcessor 20 | from azure.eventprocessorhost import EventProcessorHost 21 | from azure.eventprocessorhost import EventHubPartitionPump 22 | from azure.eventprocessorhost import AzureStorageCheckpointLeaseManager 23 | from azure.eventprocessorhost import AzureBlobLease 24 | from azure.eventprocessorhost import EventHubConfig 25 | from azure.eventprocessorhost.lease import Lease 26 | from azure.eventprocessorhost.partition_pump import PartitionPump 27 | from azure.eventprocessorhost.partition_manager import PartitionManager 28 | 29 | from tests import get_logger 30 | from azure import eventhub 31 | from azure.eventhub import EventHubClient, Receiver, Offset 32 | 33 | 34 | log = get_logger(None, logging.DEBUG) 35 | 36 | 37 | def create_eventhub(eventhub_config, client=None): 38 | from azure.servicebus.control_client import ServiceBusService, EventHub 39 | hub_name = str(uuid.uuid4()) 40 | hub_value = EventHub(partition_count=2) 41 | client = client or ServiceBusService( 42 | service_namespace=eventhub_config['namespace'], 43 | shared_access_key_name=eventhub_config['key_name'], 44 | shared_access_key_value=eventhub_config['access_key']) 45 | if client.create_event_hub(hub_name, hub=hub_value, fail_on_exist=True): 46 | return hub_name 47 | raise ValueError("EventHub creation failed.") 48 | 49 | 50 | def 
cleanup_eventhub(servicebus_config, hub_name, client=None): 51 | from azure.servicebus.control_client import ServiceBusService 52 | client = client or ServiceBusService( 53 | service_namespace=eventhub_config['namespace'], 54 | shared_access_key_name=eventhub_config['key_name'], 55 | shared_access_key_value=eventhub_config['access_key']) 56 | client.delete_event_hub(hub_name) 57 | 58 | 59 | @pytest.fixture() 60 | def live_eventhub_config(): 61 | try: 62 | config = {} 63 | config['hostname'] = os.environ['EVENT_HUB_HOSTNAME'] 64 | config['event_hub'] = os.environ['EVENT_HUB_NAME'] 65 | config['key_name'] = os.environ['EVENT_HUB_SAS_POLICY'] 66 | config['access_key'] = os.environ['EVENT_HUB_SAS_KEY'] 67 | config['namespace'] = os.environ['EVENT_HUB_NAMESPACE'] 68 | config['consumer_group'] = "$Default" 69 | config['partition'] = "0" 70 | except KeyError: 71 | pytest.skip("Live EventHub configuration not found.") 72 | else: 73 | return config 74 | 75 | 76 | @pytest.fixture() 77 | def live_eventhub(live_eventhub_config): # pylint: disable=redefined-outer-name 78 | from azure.servicebus.control_client import ServiceBusService 79 | client = ServiceBusService( 80 | service_namespace=live_eventhub_config['namespace'], 81 | shared_access_key_name=live_eventhub_config['key_name'], 82 | shared_access_key_value=live_eventhub_config['access_key']) 83 | try: 84 | hub_name = create_eventhub(live_eventhub_config, client=client) 85 | print("Created EventHub {}".format(hub_name)) 86 | live_eventhub_config['event_hub'] = hub_name 87 | yield live_eventhub_config 88 | finally: 89 | cleanup_eventhub(live_eventhub_config, hub_name, client=client) 90 | print("Deleted EventHub {}".format(hub_name)) 91 | 92 | 93 | @pytest.fixture() 94 | def connection_str(live_eventhub): 95 | return "Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey={};EntityPath={}".format( 96 | live_eventhub['hostname'], 97 | live_eventhub['key_name'], 98 | live_eventhub['access_key'], 99 | 
live_eventhub['event_hub']) 100 | 101 | 102 | @pytest.fixture() 103 | def invalid_hostname(live_eventhub_config): 104 | return "Endpoint=sb://invalid123.servicebus.windows.net/;SharedAccessKeyName={};SharedAccessKey={};EntityPath={}".format( 105 | live_eventhub_config['key_name'], 106 | live_eventhub_config['access_key'], 107 | live_eventhub_config['event_hub']) 108 | 109 | 110 | @pytest.fixture() 111 | def invalid_key(live_eventhub_config): 112 | return "Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey=invalid;EntityPath={}".format( 113 | live_eventhub_config['hostname'], 114 | live_eventhub_config['key_name'], 115 | live_eventhub_config['event_hub']) 116 | 117 | 118 | @pytest.fixture() 119 | def invalid_policy(live_eventhub_config): 120 | return "Endpoint=sb://{}/;SharedAccessKeyName=invalid;SharedAccessKey={};EntityPath={}".format( 121 | live_eventhub_config['hostname'], 122 | live_eventhub_config['access_key'], 123 | live_eventhub_config['event_hub']) 124 | 125 | 126 | @pytest.fixture() 127 | def iot_connection_str(): 128 | try: 129 | return os.environ['IOTHUB_CONNECTION_STR'] 130 | except KeyError: 131 | pytest.skip("No IotHub connection string found.") 132 | 133 | 134 | @pytest.fixture() 135 | def device_id(): 136 | try: 137 | return os.environ['IOTHUB_DEVICE'] 138 | except KeyError: 139 | pytest.skip("No Iothub device ID found.") 140 | 141 | 142 | @pytest.fixture() 143 | def connstr_receivers(connection_str): 144 | client = EventHubClient.from_connection_string(connection_str, debug=False) 145 | eh_hub_info = client.get_eventhub_info() 146 | partitions = eh_hub_info["partition_ids"] 147 | 148 | recv_offset = Offset("@latest") 149 | receivers = [] 150 | for p in partitions: 151 | receivers.append(client.add_receiver("$default", p, prefetch=500, offset=Offset("@latest"))) 152 | 153 | client.run() 154 | 155 | for r in receivers: 156 | r.receive(timeout=1) 157 | yield connection_str, receivers 158 | 159 | client.stop() 160 | 161 | 162 | @pytest.fixture() 
def connstr_senders(connection_str):
    """Run a client with one sender per partition and yield
    (connection_str, senders); stops the client on teardown."""
    client = EventHubClient.from_connection_string(connection_str, debug=True)
    hub_info = client.get_eventhub_info()

    senders = [client.add_sender(partition=pid) for pid in hub_info["partition_ids"]]

    client.run()
    yield connection_str, senders
    client.stop()


@pytest.fixture()
def storage_clm(eph):
    """Checkpoint/lease manager backed by a throwaway storage container."""
    try:
        container = str(uuid.uuid4())
        manager = AzureStorageCheckpointLeaseManager(
            os.environ['AZURE_STORAGE_ACCOUNT'],
            os.environ['AZURE_STORAGE_ACCESS_KEY'],
            container)
    except KeyError:
        pytest.skip("Live Storage configuration not found.")
    try:
        manager.initialize(eph)
        manager.storage_client.create_container(container)
        yield manager
    finally:
        manager.storage_client.delete_container(container)


@pytest.fixture()
def eph():
    """EventProcessorHost wired to a MockEventProcessor and live credentials."""
    try:
        checkpoint_manager = AzureStorageCheckpointLeaseManager(
            os.environ['AZURE_STORAGE_ACCOUNT'],
            os.environ['AZURE_STORAGE_ACCESS_KEY'],
            "lease")
        namespace = os.environ.get('EVENT_HUB_NAMESPACE')
        eventhub_name = os.environ.get('EVENT_HUB_NAME')
        sas_policy = os.environ.get('EVENT_HUB_SAS_POLICY')
        sas_key = os.environ.get('EVENT_HUB_SAS_KEY')

        eh_config = EventHubConfig(namespace, eventhub_name, sas_policy, sas_key, consumer_group="$default")
        host = EventProcessorHost(
            MockEventProcessor,
            eh_config,
            checkpoint_manager)
    except KeyError:
        pytest.skip("Live EventHub configuration not found.")
    return host


@pytest.fixture()
def eh_partition_pump(eph):
    """EventHubPartitionPump holding a blob lease on partition "1"."""
    lease = AzureBlobLease()
    lease.with_partition_id("1")
    return EventHubPartitionPump(eph, lease)


@pytest.fixture()
def partition_pump(eph):
    lease = Lease()
228 | lease.with_partition_id("1") 229 | partition_pump = PartitionPump(eph, lease) 230 | return partition_pump 231 | 232 | 233 | @pytest.fixture() 234 | def partition_manager(eph): 235 | partition_manager = PartitionManager(eph) 236 | return partition_manager 237 | -------------------------------------------------------------------------------- /tests/asynctests/test_longrunning_eph.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 4 | # ----------------------------------------------------------------------------------- 5 | 6 | import logging 7 | import asyncio 8 | import sys 9 | import os 10 | import argparse 11 | import time 12 | import json 13 | from logging.handlers import RotatingFileHandler 14 | 15 | from azure.eventhub import EventHubClientAsync, EventData 16 | from azure.eventprocessorhost import ( 17 | AbstractEventProcessor, 18 | AzureStorageCheckpointLeaseManager, 19 | EventHubConfig, 20 | EventProcessorHost, 21 | EPHOptions) 22 | 23 | 24 | def get_logger(filename, level=logging.INFO): 25 | azure_logger = logging.getLogger("azure.eventprocessorhost") 26 | azure_logger.setLevel(level) 27 | uamqp_logger = logging.getLogger("uamqp") 28 | uamqp_logger.setLevel(logging.INFO) 29 | 30 | formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s') 31 | console_handler = logging.StreamHandler(stream=sys.stdout) 32 | console_handler.setFormatter(formatter) 33 | if not azure_logger.handlers: 34 | azure_logger.addHandler(console_handler) 35 | if not uamqp_logger.handlers: 36 | uamqp_logger.addHandler(console_handler) 37 | 38 | if filename: 39 | file_handler = RotatingFileHandler(filename, maxBytes=20*1024*1024, backupCount=3) 40 | 
file_handler.setFormatter(formatter) 41 | azure_logger.addHandler(file_handler) 42 | uamqp_logger.addHandler(file_handler) 43 | 44 | return azure_logger 45 | 46 | logger = get_logger("eph_test_async.log", logging.INFO) 47 | 48 | 49 | class EventProcessor(AbstractEventProcessor): 50 | """ 51 | Example Implmentation of AbstractEventProcessor 52 | """ 53 | 54 | def __init__(self, params=None): 55 | """ 56 | Init Event processor 57 | """ 58 | super().__init__(params) 59 | self._msg_counter = 0 60 | 61 | async def open_async(self, context): 62 | """ 63 | Called by processor host to initialize the event processor. 64 | """ 65 | assert hasattr(context, 'event_processor_context') 66 | assert context.event_processor_context is None 67 | logger.info("Connection established {}. State {}".format( 68 | context.partition_id, context.event_processor_context)) 69 | 70 | async def close_async(self, context, reason): 71 | """ 72 | Called by processor host to indicate that the event processor is being stopped. 73 | :param context: Information about the partition 74 | :type context: ~azure.eventprocessorhost.PartitionContext 75 | """ 76 | logger.info("Connection closed (reason {}, id {}, offset {}, sq_number {}, state {})".format( 77 | reason, 78 | context.partition_id, 79 | context.offset, 80 | context.sequence_number, 81 | context.event_processor_context)) 82 | 83 | async def process_events_async(self, context, messages): 84 | """ 85 | Called by the processor host when a batch of events has arrived. 86 | This is where the real work of the event processor is done. 87 | :param context: Information about the partition 88 | :type context: ~azure.eventprocessorhost.PartitionContext 89 | :param messages: The events to be processed. 
90 | :type messages: list[~azure.eventhub.common.EventData] 91 | """ 92 | assert context.event_processor_context is None 93 | print("Processing id {}, offset {}, sq_number {}, state {})".format( 94 | context.partition_id, 95 | context.offset, 96 | context.sequence_number, 97 | context.event_processor_context)) 98 | await context.checkpoint_async() 99 | 100 | async def process_error_async(self, context, error): 101 | """ 102 | Called when the underlying client experiences an error while receiving. 103 | EventProcessorHost will take care of recovering from the error and 104 | continuing to pump messages,so no action is required from 105 | :param context: Information about the partition 106 | :type context: ~azure.eventprocessorhost.PartitionContext 107 | :param error: The error that occured. 108 | """ 109 | logger.info("Event Processor Error for partition {}, {!r}".format(context.partition_id, error)) 110 | 111 | 112 | async def wait_and_close(host, duration): 113 | """ 114 | Run EventProcessorHost for 30 seconds then shutdown. 
115 | """ 116 | await asyncio.sleep(duration) 117 | await host.close_async() 118 | 119 | 120 | async def pump(pid, sender, duration): 121 | deadline = time.time() + duration 122 | total = 0 123 | 124 | try: 125 | while time.time() < deadline: 126 | data = EventData(body=b"D" * 512) 127 | sender.transfer(data) 128 | total += 1 129 | if total % 100 == 0: 130 | await sender.wait_async() 131 | #logger.info("{}: Send total {}".format(pid, total)) 132 | except Exception as err: 133 | logger.error("{}: Send failed {}".format(pid, err)) 134 | raise 135 | print("{}: Final Sent total {}".format(pid, total)) 136 | 137 | 138 | def test_long_running_eph(live_eventhub): 139 | parser = argparse.ArgumentParser() 140 | parser.add_argument("--duration", help="Duration in seconds of the test", type=int, default=30) 141 | parser.add_argument("--storage-account", help="Storage account name", default=os.environ.get('AZURE_STORAGE_ACCOUNT')) 142 | parser.add_argument("--storage-key", help="Storage account access key", default=os.environ.get('AZURE_STORAGE_ACCESS_KEY')) 143 | parser.add_argument("--container", help="Lease container name", default="nocontextleases") 144 | parser.add_argument("--eventhub", help="Name of EventHub", default=live_eventhub['event_hub']) 145 | parser.add_argument("--namespace", help="Namespace of EventHub", default=live_eventhub['namespace']) 146 | parser.add_argument("--suffix", help="Namespace of EventHub", default="servicebus.windows.net") 147 | parser.add_argument("--sas-policy", help="Name of the shared access policy to authenticate with", default=live_eventhub['key_name']) 148 | parser.add_argument("--sas-key", help="Shared access key", default=live_eventhub['access_key']) 149 | 150 | loop = asyncio.get_event_loop() 151 | args, _ = parser.parse_known_args() 152 | if not args.namespace or not args.eventhub: 153 | try: 154 | import pytest 155 | pytest.skip("Must specify '--namespace' and '--eventhub'") 156 | except ImportError: 157 | raise ValueError("Must 
specify '--namespace' and '--eventhub'") 158 | 159 | # Queue up some events in the Eventhub 160 | conn_str = "Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey={};EntityPath={}".format( 161 | live_eventhub['hostname'], 162 | live_eventhub['key_name'], 163 | live_eventhub['access_key'], 164 | live_eventhub['event_hub']) 165 | send_client = EventHubClientAsync.from_connection_string(conn_str) 166 | pumps = [] 167 | for pid in ["0", "1"]: 168 | sender = send_client.add_async_sender(partition=pid, send_timeout=0, keep_alive=False) 169 | pumps.append(pump(pid, sender, 15)) 170 | loop.run_until_complete(send_client.run_async()) 171 | results = loop.run_until_complete(asyncio.gather(*pumps, return_exceptions=True)) 172 | loop.run_until_complete(send_client.stop_async()) 173 | assert not any(results) 174 | 175 | # Eventhub config and storage manager 176 | eh_config = EventHubConfig( 177 | args.namespace, 178 | args.eventhub, 179 | args.sas_policy, 180 | args.sas_key, 181 | consumer_group="$default", 182 | namespace_suffix=args.suffix) 183 | eh_options = EPHOptions() 184 | eh_options.release_pump_on_timeout = True 185 | eh_options.debug_trace = False 186 | eh_options.receive_timeout = 120 187 | storage_manager = AzureStorageCheckpointLeaseManager( 188 | storage_account_name=args.storage_account, 189 | storage_account_key=args.storage_key, 190 | lease_renew_interval=30, 191 | lease_container_name=args.container, 192 | lease_duration=60) 193 | 194 | # Event loop and host 195 | host = EventProcessorHost( 196 | EventProcessor, 197 | eh_config, 198 | storage_manager, 199 | ep_params=["param1","param2"], 200 | eph_options=eh_options, 201 | loop=loop) 202 | 203 | tasks = asyncio.gather( 204 | host.open_async(), 205 | wait_and_close(host, args.duration), return_exceptions=True) 206 | results = loop.run_until_complete(tasks) 207 | assert not any(results) 208 | 209 | 210 | if __name__ == '__main__': 211 | config = {} 212 | config['hostname'] = os.environ['EVENT_HUB_HOSTNAME'] 
213 | config['event_hub'] = os.environ['EVENT_HUB_NAME'] 214 | config['key_name'] = os.environ['EVENT_HUB_SAS_POLICY'] 215 | config['access_key'] = os.environ['EVENT_HUB_SAS_KEY'] 216 | config['namespace'] = os.environ['EVENT_HUB_NAMESPACE'] 217 | config['consumer_group'] = "$Default" 218 | config['partition'] = "0" 219 | test_long_running_eph(config) 220 | -------------------------------------------------------------------------------- /tests/asynctests/test_longrunning_eph_with_context.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for license information. 4 | # ----------------------------------------------------------------------------------- 5 | 6 | import logging 7 | import asyncio 8 | import sys 9 | import os 10 | import argparse 11 | import time 12 | import json 13 | from logging.handlers import RotatingFileHandler 14 | 15 | from azure.eventhub import EventHubClientAsync, EventData 16 | from azure.eventprocessorhost import ( 17 | AbstractEventProcessor, 18 | AzureStorageCheckpointLeaseManager, 19 | EventHubConfig, 20 | EventProcessorHost, 21 | EPHOptions) 22 | 23 | 24 | def get_logger(filename, level=logging.INFO): 25 | azure_logger = logging.getLogger("azure.eventprocessorhost") 26 | azure_logger.setLevel(level) 27 | uamqp_logger = logging.getLogger("uamqp") 28 | uamqp_logger.setLevel(logging.INFO) 29 | 30 | formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s') 31 | console_handler = logging.StreamHandler(stream=sys.stdout) 32 | console_handler.setFormatter(formatter) 33 | if not azure_logger.handlers: 34 | azure_logger.addHandler(console_handler) 35 | if not uamqp_logger.handlers: 36 | uamqp_logger.addHandler(console_handler) 37 | 38 | if filename: 39 | 
file_handler = RotatingFileHandler(filename, maxBytes=20*1024*1024, backupCount=3) 40 | file_handler.setFormatter(formatter) 41 | azure_logger.addHandler(file_handler) 42 | uamqp_logger.addHandler(file_handler) 43 | 44 | return azure_logger 45 | 46 | logger = get_logger("eph_test_async.log", logging.INFO) 47 | 48 | 49 | class EventProcessor(AbstractEventProcessor): 50 | """ 51 | Example Implmentation of AbstractEventProcessor 52 | """ 53 | 54 | def __init__(self, params=None): 55 | """ 56 | Init Event processor 57 | """ 58 | super().__init__(params) 59 | self._params = params 60 | self._msg_counter = 0 61 | 62 | async def open_async(self, context): 63 | """ 64 | Called by processor host to initialize the event processor. 65 | """ 66 | assert hasattr(context, 'event_processor_context') 67 | logger.info("Connection established {}. State {}".format( 68 | context.partition_id, context.event_processor_context)) 69 | 70 | async def close_async(self, context, reason): 71 | """ 72 | Called by processor host to indicate that the event processor is being stopped. 73 | :param context: Information about the partition 74 | :type context: ~azure.eventprocessorhost.PartitionContext 75 | """ 76 | logger.info("Connection closed (reason {}, id {}, offset {}, sq_number {}, state {})".format( 77 | reason, 78 | context.partition_id, 79 | context.offset, 80 | context.sequence_number, 81 | context.event_processor_context)) 82 | 83 | async def process_events_async(self, context, messages): 84 | """ 85 | Called by the processor host when a batch of events has arrived. 86 | This is where the real work of the event processor is done. 87 | :param context: Information about the partition 88 | :type context: ~azure.eventprocessorhost.PartitionContext 89 | :param messages: The events to be processed. 
90 | :type messages: list[~azure.eventhub.common.EventData] 91 | """ 92 | print("Processing id {}, offset {}, sq_number {}, state {})".format( 93 | context.partition_id, 94 | context.offset, 95 | context.sequence_number, 96 | context.event_processor_context)) 97 | assert hasattr(context, 'event_processor_context') 98 | if self._msg_counter > 1: 99 | assert context.event_processor_context == json.dumps( 100 | {"Sequence": self._msg_counter, "Data": self._params}) 101 | self._msg_counter += 1 102 | await context.checkpoint_async( 103 | json.dumps({"Sequence": self._msg_counter, "Data": self._params})) 104 | 105 | async def process_error_async(self, context, error): 106 | """ 107 | Called when the underlying client experiences an error while receiving. 108 | EventProcessorHost will take care of recovering from the error and 109 | continuing to pump messages,so no action is required from 110 | :param context: Information about the partition 111 | :type context: ~azure.eventprocessorhost.PartitionContext 112 | :param error: The error that occured. 113 | """ 114 | logger.info("Event Processor Error for partition {}, {!r}".format(context.partition_id, error)) 115 | 116 | 117 | async def wait_and_close(host, duration): 118 | """ 119 | Run EventProcessorHost for 30 seconds then shutdown. 
120 | """ 121 | await asyncio.sleep(duration) 122 | await host.close_async() 123 | 124 | 125 | async def pump(pid, sender, duration): 126 | deadline = time.time() + duration 127 | total = 0 128 | 129 | try: 130 | while time.time() < deadline: 131 | data = EventData(body=b"D" * 512) 132 | sender.transfer(data) 133 | total += 1 134 | if total % 100 == 0: 135 | await sender.wait_async() 136 | #logger.info("{}: Send total {}".format(pid, total)) 137 | except Exception as err: 138 | logger.error("{}: Send failed {}".format(pid, err)) 139 | raise 140 | print("{}: Final Sent total {}".format(pid, total)) 141 | 142 | 143 | def test_long_running_context_eph(live_eventhub): 144 | parser = argparse.ArgumentParser() 145 | parser.add_argument("--duration", help="Duration in seconds of the test", type=int, default=30) 146 | parser.add_argument("--storage-account", help="Storage account name", default=os.environ.get('AZURE_STORAGE_ACCOUNT')) 147 | parser.add_argument("--storage-key", help="Storage account access key", default=os.environ.get('AZURE_STORAGE_ACCESS_KEY')) 148 | parser.add_argument("--container", help="Lease container name", default="contextleases") 149 | parser.add_argument("--eventhub", help="Name of EventHub", default=live_eventhub['event_hub']) 150 | parser.add_argument("--namespace", help="Namespace of EventHub", default=live_eventhub['namespace']) 151 | parser.add_argument("--suffix", help="Namespace of EventHub", default="servicebus.windows.net") 152 | parser.add_argument("--sas-policy", help="Name of the shared access policy to authenticate with", default=live_eventhub['key_name']) 153 | parser.add_argument("--sas-key", help="Shared access key", default=live_eventhub['access_key']) 154 | 155 | loop = asyncio.get_event_loop() 156 | args, _ = parser.parse_known_args() 157 | if not args.namespace or not args.eventhub: 158 | try: 159 | import pytest 160 | pytest.skip("Must specify '--namespace' and '--eventhub'") 161 | except ImportError: 162 | raise 
ValueError("Must specify '--namespace' and '--eventhub'") 163 | 164 | # Queue up some events in the Eventhub 165 | conn_str = "Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey={};EntityPath={}".format( 166 | live_eventhub['hostname'], 167 | live_eventhub['key_name'], 168 | live_eventhub['access_key'], 169 | live_eventhub['event_hub']) 170 | send_client = EventHubClientAsync.from_connection_string(conn_str) 171 | pumps = [] 172 | for pid in ["0", "1"]: 173 | sender = send_client.add_async_sender(partition=pid, send_timeout=0, keep_alive=False) 174 | pumps.append(pump(pid, sender, 15)) 175 | loop.run_until_complete(send_client.run_async()) 176 | results = loop.run_until_complete(asyncio.gather(*pumps, return_exceptions=True)) 177 | loop.run_until_complete(send_client.stop_async()) 178 | assert not any(results) 179 | 180 | # Eventhub config and storage manager 181 | eh_config = EventHubConfig( 182 | args.namespace, 183 | args.eventhub, 184 | args.sas_policy, 185 | args.sas_key, 186 | consumer_group="$default", 187 | namespace_suffix=args.suffix) 188 | eh_options = EPHOptions() 189 | eh_options.release_pump_on_timeout = True 190 | eh_options.debug_trace = False 191 | eh_options.receive_timeout = 120 192 | storage_manager = AzureStorageCheckpointLeaseManager( 193 | storage_account_name=args.storage_account, 194 | storage_account_key=args.storage_key, 195 | lease_renew_interval=30, 196 | lease_container_name=args.container, 197 | lease_duration=60) 198 | 199 | # Event loop and host 200 | host = EventProcessorHost( 201 | EventProcessor, 202 | eh_config, 203 | storage_manager, 204 | ep_params=["param1","param2"], 205 | eph_options=eh_options, 206 | loop=loop) 207 | 208 | tasks = asyncio.gather( 209 | host.open_async(), 210 | wait_and_close(host, args.duration), return_exceptions=True) 211 | results = loop.run_until_complete(tasks) 212 | assert not any(results) 213 | 214 | 215 | if __name__ == '__main__': 216 | config = {} 217 | config['hostname'] = 
os.environ['EVENT_HUB_HOSTNAME'] 218 | config['event_hub'] = os.environ['EVENT_HUB_NAME'] 219 | config['key_name'] = os.environ['EVENT_HUB_SAS_POLICY'] 220 | config['access_key'] = os.environ['EVENT_HUB_SAS_KEY'] 221 | config['namespace'] = os.environ['EVENT_HUB_NAMESPACE'] 222 | config['consumer_group'] = "$Default" 223 | config['partition'] = "0" 224 | test_long_running_eph(config) 225 | -------------------------------------------------------------------------------- /tests/test_receive.py: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See License.txt in the project root for 4 | # license information. 5 | #-------------------------------------------------------------------------- 6 | 7 | import os 8 | import pytest 9 | import time 10 | import datetime 11 | 12 | from azure import eventhub 13 | from azure.eventhub import EventData, EventHubClient, Offset 14 | 15 | 16 | # def test_receive_without_events(connstr_senders): 17 | # connection_str, senders = connstr_senders 18 | # client = EventHubClient.from_connection_string(connection_str, debug=True) 19 | # receiver = client.add_receiver("$default", "0", offset=Offset('@latest')) 20 | # finish = datetime.datetime.now() + datetime.timedelta(seconds=240) 21 | # count = 0 22 | # try: 23 | # client.run() 24 | # while True: #datetime.datetime.now() < finish: 25 | # senders[0].send(EventData("Receiving an event {}".format(count))) 26 | # received = receiver.receive(timeout=1) 27 | # if received: 28 | # print(received[0].body_as_str()) 29 | # count += 1 30 | # time.sleep(1) 31 | # except: 32 | # raise 33 | # finally: 34 | # client.stop() 35 | 36 | 37 | def test_receive_end_of_stream(connstr_senders): 38 | connection_str, senders = connstr_senders 39 | client = 
EventHubClient.from_connection_string(connection_str, debug=False) 40 | receiver = client.add_receiver("$default", "0", offset=Offset('@latest')) 41 | try: 42 | client.run() 43 | 44 | received = receiver.receive(timeout=5) 45 | assert len(received) == 0 46 | senders[0].send(EventData(b"Receiving only a single event")) 47 | received = receiver.receive(timeout=5) 48 | assert len(received) == 1 49 | 50 | assert received[0].body_as_str() == "Receiving only a single event" 51 | assert list(received[-1].body)[0] == b"Receiving only a single event" 52 | except: 53 | raise 54 | finally: 55 | client.stop() 56 | 57 | 58 | def test_receive_with_offset_sync(connstr_senders): 59 | connection_str, senders = connstr_senders 60 | client = EventHubClient.from_connection_string(connection_str, debug=False) 61 | partitions = client.get_eventhub_info() 62 | assert partitions["partition_ids"] == ["0", "1"] 63 | receiver = client.add_receiver("$default", "0", offset=Offset('@latest')) 64 | try: 65 | client.run() 66 | more_partitions = client.get_eventhub_info() 67 | assert more_partitions["partition_ids"] == ["0", "1"] 68 | 69 | received = receiver.receive(timeout=5) 70 | assert len(received) == 0 71 | senders[0].send(EventData(b"Data")) 72 | received = receiver.receive(timeout=5) 73 | assert len(received) == 1 74 | offset = received[0].offset 75 | 76 | assert list(received[0].body) == [b'Data'] 77 | assert received[0].body_as_str() == "Data" 78 | 79 | offset_receiver = client.add_receiver("$default", "0", offset=offset) 80 | client.run() 81 | received = offset_receiver.receive(timeout=5) 82 | assert len(received) == 0 83 | senders[0].send(EventData(b"Message after offset")) 84 | received = offset_receiver.receive(timeout=5) 85 | assert len(received) == 1 86 | except: 87 | raise 88 | finally: 89 | client.stop() 90 | 91 | 92 | def test_receive_with_inclusive_offset(connstr_senders): 93 | connection_str, senders = connstr_senders 94 | client = 
EventHubClient.from_connection_string(connection_str, debug=False) 95 | receiver = client.add_receiver("$default", "0", offset=Offset('@latest')) 96 | try: 97 | client.run() 98 | 99 | received = receiver.receive(timeout=5) 100 | assert len(received) == 0 101 | senders[0].send(EventData(b"Data")) 102 | time.sleep(1) 103 | received = receiver.receive(timeout=5) 104 | assert len(received) == 1 105 | offset = received[0].offset 106 | 107 | assert list(received[0].body) == [b'Data'] 108 | assert received[0].body_as_str() == "Data" 109 | 110 | offset_receiver = client.add_receiver("$default", "0", offset=Offset(offset.value, inclusive=True)) 111 | client.run() 112 | received = offset_receiver.receive(timeout=5) 113 | assert len(received) == 1 114 | except: 115 | raise 116 | finally: 117 | client.stop() 118 | 119 | 120 | def test_receive_with_datetime_sync(connstr_senders): 121 | connection_str, senders = connstr_senders 122 | client = EventHubClient.from_connection_string(connection_str, debug=False) 123 | partitions = client.get_eventhub_info() 124 | assert partitions["partition_ids"] == ["0", "1"] 125 | receiver = client.add_receiver("$default", "0", offset=Offset('@latest')) 126 | try: 127 | client.run() 128 | more_partitions = client.get_eventhub_info() 129 | assert more_partitions["partition_ids"] == ["0", "1"] 130 | received = receiver.receive(timeout=5) 131 | assert len(received) == 0 132 | senders[0].send(EventData(b"Data")) 133 | received = receiver.receive(timeout=5) 134 | assert len(received) == 1 135 | offset = received[0].enqueued_time 136 | 137 | assert list(received[0].body) == [b'Data'] 138 | assert received[0].body_as_str() == "Data" 139 | 140 | offset_receiver = client.add_receiver("$default", "0", offset=Offset(offset)) 141 | client.run() 142 | received = offset_receiver.receive(timeout=5) 143 | assert len(received) == 0 144 | senders[0].send(EventData(b"Message after timestamp")) 145 | received = offset_receiver.receive(timeout=5) 146 | assert 
len(received) == 1 147 | except: 148 | raise 149 | finally: 150 | client.stop() 151 | 152 | 153 | def test_receive_with_custom_datetime_sync(connstr_senders): 154 | connection_str, senders = connstr_senders 155 | client = EventHubClient.from_connection_string(connection_str, debug=False) 156 | for i in range(5): 157 | senders[0].send(EventData(b"Message before timestamp")) 158 | time.sleep(60) 159 | 160 | now = datetime.datetime.utcnow() 161 | offset = datetime.datetime(now.year, now.month, now.day, now.hour, now.minute) 162 | for i in range(5): 163 | senders[0].send(EventData(b"Message after timestamp")) 164 | 165 | receiver = client.add_receiver("$default", "0", offset=Offset(offset)) 166 | try: 167 | client.run() 168 | all_received = [] 169 | received = receiver.receive(timeout=1) 170 | while received: 171 | all_received.extend(received) 172 | received = receiver.receive(timeout=1) 173 | 174 | assert len(all_received) == 5 175 | for received_event in all_received: 176 | assert received_event.body_as_str() == "Message after timestamp" 177 | assert received_event.enqueued_time > offset 178 | except: 179 | raise 180 | finally: 181 | client.stop() 182 | 183 | 184 | def test_receive_with_sequence_no(connstr_senders): 185 | connection_str, senders = connstr_senders 186 | client = EventHubClient.from_connection_string(connection_str, debug=False) 187 | receiver = client.add_receiver("$default", "0", offset=Offset('@latest')) 188 | try: 189 | client.run() 190 | 191 | received = receiver.receive(timeout=5) 192 | assert len(received) == 0 193 | senders[0].send(EventData(b"Data")) 194 | time.sleep(1) 195 | received = receiver.receive(timeout=5) 196 | assert len(received) == 1 197 | offset = received[0].sequence_number 198 | 199 | offset_receiver = client.add_receiver("$default", "0", offset=Offset(offset)) 200 | client.run() 201 | received = offset_receiver.receive(timeout=5) 202 | assert len(received) == 0 203 | senders[0].send(EventData(b"Message next in sequence")) 204 
| time.sleep(1) 205 | received = offset_receiver.receive(timeout=5) 206 | assert len(received) == 1 207 | except: 208 | raise 209 | finally: 210 | client.stop() 211 | 212 | 213 | def test_receive_with_inclusive_sequence_no(connstr_senders): 214 | connection_str, senders = connstr_senders 215 | client = EventHubClient.from_connection_string(connection_str, debug=False) 216 | receiver = client.add_receiver("$default", "0", offset=Offset('@latest')) 217 | try: 218 | client.run() 219 | 220 | received = receiver.receive(timeout=5) 221 | assert len(received) == 0 222 | senders[0].send(EventData(b"Data")) 223 | received = receiver.receive(timeout=5) 224 | assert len(received) == 1 225 | offset = received[0].sequence_number 226 | 227 | offset_receiver = client.add_receiver("$default", "0", offset=Offset(offset, inclusive=True)) 228 | client.run() 229 | received = offset_receiver.receive(timeout=5) 230 | assert len(received) == 1 231 | except: 232 | raise 233 | finally: 234 | client.stop() 235 | 236 | 237 | def test_receive_batch(connstr_senders): 238 | connection_str, senders = connstr_senders 239 | client = EventHubClient.from_connection_string(connection_str, debug=False) 240 | receiver = client.add_receiver("$default", "0", prefetch=500, offset=Offset('@latest')) 241 | try: 242 | client.run() 243 | 244 | received = receiver.receive(timeout=5) 245 | assert len(received) == 0 246 | for i in range(10): 247 | senders[0].send(EventData(b"Data")) 248 | received = receiver.receive(max_batch_size=5, timeout=5) 249 | assert len(received) == 5 250 | except: 251 | raise 252 | finally: 253 | client.stop() 254 | 255 | 256 | --------------------------------------------------------------------------------