├── __init__.py ├── emdr ├── conf │ ├── __init__.py │ └── default_settings.py ├── core │ ├── __init__.py │ ├── README.rst │ └── command_utils.py ├── daemons │ ├── __init__.py │ ├── gateway │ │ ├── __init__.py │ │ ├── tests │ │ │ ├── __init__.py │ │ │ └── tests_wsgi.py │ │ ├── exceptions.py │ │ ├── README.rst │ │ ├── order_pusher.py │ │ └── wsgi.py │ ├── relay │ │ ├── __init__.py │ │ ├── dedupers │ │ │ ├── util.py │ │ │ ├── __init__.py │ │ │ ├── memcached.py │ │ │ └── py_deque.py │ │ └── main.py │ ├── announcer │ │ ├── __init__.py │ │ └── main.py │ └── README.rst ├── __init__.py └── README.rst ├── MANIFEST.in ├── examples ├── __init__.py ├── python │ ├── __init__.py │ ├── greenlet_consumer │ │ ├── __init__.py │ │ ├── requirements.txt │ │ ├── README.rst │ │ └── gevent_consumer.py │ ├── cloudwatch_grapher │ │ ├── requirements.txt │ │ ├── README.rst │ │ └── cloudwatch_grapher.py │ └── README.rst └── README.rst ├── doc_src ├── images │ └── emdr-daemon-diagram.png ├── installation.rst ├── data_sources.rst ├── sites.rst ├── volunteering.rst ├── global.txt ├── uploading.rst ├── access.rst ├── index.rst ├── design_considerations.rst ├── Makefile ├── overview.rst ├── conf.py └── using.rst ├── .gitignore ├── requirements_rtd.txt ├── requirements.txt ├── LICENSE ├── bin ├── emdr-snooper ├── fake_unified_history.py ├── fake_unified_order.py ├── emdr-announcer ├── emdr-gateway └── emdr-relay ├── README.rst └── setup.py /__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /emdr/conf/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /emdr/core/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /emdr/daemons/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /emdr/daemons/gateway/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /emdr/daemons/relay/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.rst LICENSE -------------------------------------------------------------------------------- /emdr/daemons/announcer/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /emdr/daemons/gateway/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /emdr/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.1' 2 | -------------------------------------------------------------------------------- /examples/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'gtaylor' 2 | 
-------------------------------------------------------------------------------- /examples/python/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'gtaylor' 2 |
-------------------------------------------------------------------------------- /examples/python/greenlet_consumer/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'gtaylor' 2 |
-------------------------------------------------------------------------------- /examples/python/cloudwatch_grapher/requirements.txt: -------------------------------------------------------------------------------- 1 | gevent 2 | pyzmq 3 | boto
-------------------------------------------------------------------------------- /examples/python/greenlet_consumer/requirements.txt: -------------------------------------------------------------------------------- 1 | simplejson 2 | gevent 3 | pyzmq
-------------------------------------------------------------------------------- /doc_src/images/emdr-daemon-diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gtaylor/EVE-Market-Data-Relay/HEAD/doc_src/images/emdr-daemon-diagram.png
-------------------------------------------------------------------------------- /doc_src/installation.rst: -------------------------------------------------------------------------------- 1 | .. _installation: 2 | 3 | Installation 4 | ============ 5 | 6 | * Install ZeroMQ (and memcached, if you plan on running a relay with the default dedupe backend). 7 | * pip install -r requirements.txt 8 | * Run the daemon(s) you need: ``emdr-gateway``, ``emdr-announcer``, and/or ``emdr-relay``.
-------------------------------------------------------------------------------- /emdr/core/README.rst: -------------------------------------------------------------------------------- 1 | Core 2 | ==== 3 | 4 | Anything in here is generally useful throughout the entire codebase, and isn't 5 | tied to any single component.
-------------------------------------------------------------------------------- /emdr/daemons/README.rst: -------------------------------------------------------------------------------- 1 | Daemons 2 | ======= 3 | 4 | :gateway: Accepts market data from uploader clients over HTTP, and PUBlishes 5 | it to the Announcers as zlib-compressed JSON over ZeroMQ. 6 | :announcer: Re-PUBlishes messages from the Gateways to any SUBscribers 7 | (Relays and/or consumer applications). 8 | :relay: SUBscribes to an Announcer or another Relay, de-dupes the stream, and 9 | re-PUBlishes it to those downstream.
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | *.pyc 3 | *.swp 4 | *.tmp 5 | *.log 6 | .DS_Store 7 | *.pid 8 | settings.py 9 | build 10 | dist 11 | *.egg-info 12 | MANIFEST 13 | doc_src/_build 14 | emds 15 |
-------------------------------------------------------------------------------- /emdr/README.rst: -------------------------------------------------------------------------------- 1 | Top-level Module Layout Overview 2 | ================================ 3 | 4 | :core: Utilities that are used throughout the codebase. 5 | :daemons: The long-running gateway, announcer, and relay processes.
-------------------------------------------------------------------------------- /requirements_rtd.txt: -------------------------------------------------------------------------------- 1 | # Requirements for ReadTheDocs.
2 | emds 3 | bottle 4 | gevent 5 | requests 6 | python-dateutil<2.0 7 | pytz 8 | sphinx_rtd_theme>=0.2.4 9 | # Only required for developers 10 | sphinx
-------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | cython 2 | emds 3 | bottle 4 | ujson 5 | gevent 6 | requests 7 | python-dateutil<2.0 8 | git+http://github.com/zeromq/pyzmq.git 9 | pytz 10 | # Only required for developers 11 | nose 12 | sphinx 13 | # Only required for relays 14 | pylibmc 15 |
-------------------------------------------------------------------------------- /emdr/daemons/gateway/exceptions.py: -------------------------------------------------------------------------------- 1 | class MalformedUploadError(Exception): 2 | """ 3 | Raise this when an upload is structurally incorrect. This isn't so much 4 | to do with something like a bogus region ID; it's more like "You are 5 | missing a POST key/val, or a body". 6 | """ 7 | pass
-------------------------------------------------------------------------------- /examples/python/README.rst: -------------------------------------------------------------------------------- 1 | Python Examples 2 | =============== 3 | 4 | This directory contains EMDR examples written in Python. 5 | 6 | :greenlet_consumer: An example consumer written using greenlets. This consumer 7 | can handle a very large number of messages per second, and closely resembles 8 | the receive loops in EMDR's own daemons.
-------------------------------------------------------------------------------- /emdr/daemons/gateway/README.rst: -------------------------------------------------------------------------------- 1 | Gateway WSGI Application 2 | ======================== 3 | 4 | The gateway application is what the various market data uploaders toss their 5 | data at. It parses whatever custom format they're using into our standard 6 | Python representation of a market order (emds.data_structures.MarketOrder), and 7 | serializes it to JSON, to be sent off to an Announcer. 8 | 9 | From there, the Announcer PUBlishes the data to any SUBscribed Relays and 10 | consumer applications.
-------------------------------------------------------------------------------- /emdr/daemons/relay/dedupers/util.py: -------------------------------------------------------------------------------- 1 | """ 2 | Various utility functions that are common to all or many dedupers. 3 | """ 4 | 5 | def calc_hash_for_message(message): 6 | """ 7 | Given an EMDR message string, calculate the hash. 8 | 9 | :param basestring message: A compressed or uncompressed EMDR message string. 10 | :rtype: int 11 | :returns: The hash to use for deduping. 12 | """ 13 | # Use Python's naive 32bit integer hashing for now. It's fast and simple. 14 | return hash(message)
-------------------------------------------------------------------------------- /examples/python/greenlet_consumer/README.rst: -------------------------------------------------------------------------------- 1 | Example EMDR Greenlet Consumer 2 | ============================== 3 | 4 | This example uses a greenlet pool to accept incoming market data. Greenlets 5 | are micro-threads that are extremely lightweight, meaning we can spawn one 6 | for each incoming market message from EMDR.
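
In miniature, the pattern looks like this (a trimmed sketch of what
``gevent_consumer.py`` actually does; the relay URI is just one of the public
relays listed in the docs)::

    import zlib
    import simplejson
    from gevent.pool import Pool
    import zmq.green as zmq

    def worker(raw):
        # Each message arrives as a zlib-compressed JSON string.
        print simplejson.loads(zlib.decompress(raw))

    subscriber = zmq.Context().socket(zmq.SUB)
    subscriber.connect('tcp://relay-us-central-1.eve-emdr.com:8050')
    subscriber.setsockopt(zmq.SUBSCRIBE, "")

    pool = Pool(size=200)
    while True:
        pool.spawn(worker, subscriber.recv())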
7 | 8 | Before trying this example, make sure to install ZeroMQ, then the requirements:: 9 | 10 | pip install -r requirements.txt 11 | 12 | You may then run the example:: 13 | 14 | python gevent_consumer.py 15 | 16 | Suggested next steps 17 | -------------------- 18 | 19 | Tack on storage to the DB backend of your choice in the worker function.
-------------------------------------------------------------------------------- /doc_src/data_sources.rst: -------------------------------------------------------------------------------- 1 | .. _data-sources: 2 | 3 | .. include:: global.txt 4 | 5 | Data Sources 6 | ============ 7 | 8 | EMDR is 'fed' data by player uploads. There are a number of clients that can 9 | be used to chip in, outlined in more detail on :doc:`uploading`. 10 | 11 | Uploader applications monitor the EVE Online cache files on your machine, 12 | which are populated with market data as you browse around the in-game market 13 | dialogs. 14 | 15 | Individual market orders are uploaded, along with market history (if you 16 | switch to history tabs for items). 17 | 18 | Most well-designed uploader applications use small enough amounts of 19 | CPU and bandwidth to run unnoticed. 20 |
-------------------------------------------------------------------------------- /doc_src/sites.rst: -------------------------------------------------------------------------------- 1 | .. _sites: 2 | 3 | .. include:: global.txt 4 | 5 | Sites using EMDR 6 | ================ 7 | 8 | EMDR delivers nearly a million messages a day to a number of different 9 | projects around the world. Some of these projects are listed below. If we're 10 | missing a project, please file an issue in the `issue tracker`_ with the name 11 | and a URL to the project. Bonus points for describing what you're doing. 12 | 13 | Market sites 14 | ------------ 15 | 16 | * `Element 43`_ 17 | * `EVE Addicts`_ 18 | * `EVE Marketdata`_ 19 | * `Eveonomics`_ 20 | 21 | .. _Element 43: http://www.element-43.com/ 22 | .. _EVE Addicts: http://eve.addicts.nl/ 23 | .. _EVE Marketdata: http://eve-marketdata.com/ 24 | .. _Eveonomics: http://www.eveonomics.com/ 25 |
-------------------------------------------------------------------------------- /examples/README.rst: -------------------------------------------------------------------------------- 1 | EMDR Code Examples 2 | ================== 3 | 4 | The sub-directories contained within ``examples`` contain code examples for 5 | various languages. There is no particular rhyme or reason to the contents 6 | of these, and all submissions are welcome. 7 | 8 | If you write something cool, post to the issue tracker or submit a pull 9 | request and we'll get it added in here. 10 | 11 | Disclaimer 12 | ---------- 13 | 14 | The code contained within is not guaranteed to work, and it very well may 15 | eat your lunch. We will do our best to filter out bad code, but will largely 16 | rely on your help in policing the contents. 17 | 18 | License 19 | ------- 20 | 21 | All code submitted here is licensed under the BSD License, unless otherwise 22 | specified by the author.
-------------------------------------------------------------------------------- /emdr/daemons/relay/dedupers/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains a few simple message de-duplication backends. These are 3 | used to de-dupe messages, since relays will be connected to at least two 4 | upstream announcers/relays.
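
Whichever backend is selected, this package exports a single function,
is_message_duped(message), which returns True when the raw message string has
been seen recently. The relay's receive loop uses it roughly like so (see
relay/main.py)::

    from emdr.daemons.relay.dedupers import is_message_duped

    if not is_message_duped(message):
        sender.send(message)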
5 | """ 6 | from emdr.conf import default_settings as settings 7 | 8 | if settings.RELAY_DEDUPE_BACKEND == 'memcached': 9 | # Memcached backend. This is currently the fastest. 10 | #noinspection PyUnresolvedReferences 11 | from emdr.daemons.relay.dedupers.memcached import is_message_duped 12 | elif settings.RELAY_DEDUPE_BACKEND == 'deque': 13 | # The included pure-Python deque backend, best suited to development. 14 | #noinspection PyUnresolvedReferences 15 | from emdr.daemons.relay.dedupers.py_deque import is_message_duped 16 | else: 17 | raise Exception("Unknown deduplication backend: %s" % settings.RELAY_DEDUPE_BACKEND)
-------------------------------------------------------------------------------- /doc_src/volunteering.rst: -------------------------------------------------------------------------------- 1 | .. _volunteering: 2 | 3 | .. include:: global.txt 4 | 5 | Volunteering computing resources 6 | ================================ 7 | 8 | EMDR is run by volunteers who foot the cost so that everyone can get 9 | access to market data. While running pieces of EMDR is not something that will 10 | get you fame or notoriety, it is crucial to the continued survival and 11 | success of the network. 12 | 13 | If you have idle computing resources, or would like to share capacity on 14 | a machine, we'd love to have it. ``gtaylor`` is available to assist with setup 15 | and configuration. 16 | 17 | Current status 18 | -------------- 19 | 20 | We currently have everything that we need. Special thanks to everyone 21 | who volunteered to help make this happen. Should our needs change, this page 22 | will be updated, and an announcement will land on the `mailing list`_.
-------------------------------------------------------------------------------- /examples/python/cloudwatch_grapher/README.rst: -------------------------------------------------------------------------------- 1 | Example EMDR Cloudwatch Grapher 2 | =============================== 3 | 4 | A simple script that listens to EMDR, tracks the number of messages coming in, 5 | and reports it to Amazon CloudWatch_ as a custom metric. 6 | 7 | CloudWatch_ allows for nearly real-time graphing from within the AWS Management 8 | Console, and also allows programmatic access to all recorded data. If you 9 | keep the tracking frequency low enough to stay in the free tier (1,000,000 10 | requests per month), this script is free to run. 11 | 12 | .. _CloudWatch: http://aws.amazon.com/cloudwatch/ 13 | 14 | Before trying this example, make sure to install ZeroMQ, then the requirements:: 15 | 16 | pip install -r requirements.txt 17 | 18 | Then edit the ``AWS_*`` fields within ``cloudwatch_grapher.py`` to add your 19 | AWS API keys. 20 | 21 | You may then run the example:: 22 | 23 | python cloudwatch_grapher.py 24 | 25 | Suggested next steps 26 | -------------------- 27 | 28 | Customize to chart other metrics that interest you.
-------------------------------------------------------------------------------- /emdr/daemons/gateway/tests/tests_wsgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for the full wsgi.py module, and all of its submodules. 3 | """ 4 | import unittest 5 | import datetime 6 | from emds.data_structures import MarketOrder, MarketOrderList 7 | 8 | class GatewayWSGITests(unittest.TestCase): 9 | """ 10 | Various tests for the gateway WSGI application.
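
At the moment this module only builds fixtures in setUp(); once test methods
are added, one way to run them is the nose runner from requirements.txt (the
exact invocation is a suggestion, not project convention):

    nosetests emdr.daemons.gateway.tests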
11 | """ 12 | def setUp(self): 13 | self.order1 = MarketOrder( 14 | order_id=2413387906, 15 | is_bid=True, 16 | region_id=10000068, 17 | solar_system_id=30005316, 18 | station_id=60011521, 19 | type_id=10000068, 20 | price=52875.0, 21 | volume_entered=10, 22 | volume_remaining=4, 23 | minimum_volume=1, 24 | order_issue_date=datetime.datetime.utcnow(), 25 | order_duration=90, 26 | order_range=5, 27 | generated_at=datetime.datetime.utcnow(), 28 | ) 29 | self.order_list = MarketOrderList() 30 | self.order_list.add_order(self.order1) 31 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2012 Gregory Taylor 2 | 3 | Permission is hereby granted, free of charge, to any person 4 | obtaining a copy of this software and associated documentation 5 | files (the "Software"), to deal in the Software without 6 | restriction, including without limitation the rights to use, 7 | copy, modify, merge, publish, distribute, sublicense, and/or sell 8 | copies of the Software, and to permit persons to whom the 9 | Software is furnished to do so, subject to the following 10 | conditions: 11 | 12 | The above copyright notice and this permission notice shall be 13 | included in all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 16 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 17 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 18 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 19 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 20 | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 | OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /doc_src/global.txt: -------------------------------------------------------------------------------- 1 | .. _GitHub project: https://github.com/gtaylor/EVE-Market-Data-Relay 2 | .. _issue tracker: https://github.com/gtaylor/EVE-Market-Data-Relay/issues 3 | .. _mailing list: https://groups.google.com/forum/#!forum/eve-emdr 4 | .. _EMDR map: http://map.eve-emdr.com/ 5 | .. _EMDU: https://github.com/gtaylor/EVE-Market-Data-Uploader 6 | .. _@gctaylor Twitter: https://twitter.com/#!/gctaylor 7 | .. _EMDR monitor: http://monitor.eve-emdr.com 8 | 9 | .. _Unified Uploader Data Interchange Format: http://dev.eve-central.com/unifieduploader/start 10 | .. _Clients supporting UUDIF: http://dev.eve-central.com/unifieduploader/implementations 11 | 12 | .. _Python: http://python.org 13 | .. _nose: http://somethingaboutorange.com/mrl/projects/nose/ 14 | 15 | .. _virtualenv: http://pypi.python.org/pypi/virtualenv 16 | .. _virtualenvwrapper: http://www.doughellmann.com/projects/virtualenvwrapper/ 17 | .. _ZeroMQ: http://www.zeromq.org/ 18 | .. _gevent: http://www.gevent.org/ 19 | .. _bottle: http://bottlepy.org/docs/dev/ 20 | 21 | .. _git: http://git-scm.com/ 22 | .. _Supervisor: http://supervisord.org/ 23 | 24 | .. _GitHub: https://github.com/ 25 | 26 | .. 
_BSD License: http://opensource.org/licenses/bsd-license.php 27 |
-------------------------------------------------------------------------------- /bin/emdr-snooper: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | CLI util used to attach to ZMQ sockets to listen to whatever is coming down. 4 | Currently hard-wired to SUB. 5 | """ 6 | import argparse 7 | import zlib 8 | import zmq 9 | import ujson 10 | from pprint import pprint 11 | 12 | parser = argparse.ArgumentParser( 13 | description="Connects to a PUB ZMQ socket and prints whatever is " \ 14 | "coming out.", 15 | ) 16 | parser.add_argument('receiver', nargs=1, help="The ZMQ socket to connect to.") 17 | 18 | parsed = parser.parse_args() 19 | 20 | receiver_uri = parsed.receiver[0] 21 | 22 | context = zmq.Context() 23 | subscriber = context.socket(zmq.SUB) 24 | 25 | # Connect to the socket given on the command line. 26 | subscriber.connect(receiver_uri) 27 | # Disable filtering. 28 | subscriber.setsockopt(zmq.SUBSCRIBE, "") 29 | 30 | print("Connected to %s" % receiver_uri) 31 | 32 | while True: 33 | # Receive raw market JSON strings. 34 | market_json = zlib.decompress(subscriber.recv()) 35 | # Un-serialize the JSON data to a Python dict. 36 | market_data = ujson.loads(market_json) 37 | # Dump the market data to stdout. Or, you know, do more fun 38 | # things here. 39 | pprint(market_data)
-------------------------------------------------------------------------------- /emdr/core/command_utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Various utility functions for the included commands in the bin dir. 3 | """ 4 | import sys 5 | from emdr.conf import default_settings 6 | 7 | def set_logger_level(loglevel): 8 | """ 9 | Given a log level from a --loglevel arg, set the root logger's level. 10 | 11 | :param str loglevel: One of DEBUG, INFO, WARNING, or ERROR. 12 | :rtype: str 13 | :returns: The string representation of the log level being set. 14 | """ 15 | loglevel = loglevel.upper() 16 | if loglevel not in ['DEBUG', 'INFO', 'WARNING', 'ERROR']: 17 | print("Invalid log level. Must be one of: DEBUG, INFO, WARNING, ERROR") 18 | sys.exit(1) 19 | default_settings.LOGGING['loggers']['']['level'] = loglevel 20 | return loglevel 21 | 22 | def print_cmd_header(cmd_name): 23 | """ 24 | Prints a header for display during startup. 25 | 26 | :param str cmd_name: The name of the command's bin file. 27 | """ 28 | print("=" * 80) 29 | header_str = "## %s ##" % cmd_name 30 | print(header_str.center(80)) 31 | print("-" * 80) 32 | 33 | def print_cmd_footer(): 34 | """ 35 | Matching footer to go at the end of the start-up sequence. 36 | """ 37 | print("=" * 80)
-------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | EVE Market Data Relay 2 | ===================== 3 | 4 | :Author: Greg Taylor 5 | :License: BSD 6 | :Status: Unmaintained 7 | 8 | **With the introduction of native-to-EVE market APIs, the EMDR project has run its course. As of May 1, 2017, we have shuttered the network. This repo will remain in an archived state. Thanks to all who helped make EMDR a success!** 9 | 10 | This project is a super-scalable, affordable way to 11 | accept a large amount of user-submitted market data (via uploaders), and 12 | re-broadcast said data in real time to a number of subscribers.
13 | 14 | The end result is that those writing market-data driven applications can 15 | simply subscribe to a "firehose" of market data, and get going, without having 16 | to hassle with uploaders or data submission APIs. 17 | 18 | Additionally, the consumers may accept very large amounts of data without the 19 | overhead associated with a ton of HTTP connections. EMDR's ZeroMQ underpinnings 20 | are hugely more efficient. 21 | 22 | Documentation 23 | ------------- 24 | 25 | Make sure to read the Documentation_ for more details. 26 | 27 | .. _Documentation: http://readthedocs.org/docs/eve-market-data-relay/ 28 | 29 | License 30 | ------- 31 | 32 | This project, and all contributed code, are licensed under the BSD License. 33 | A copy of the BSD License may be found in the repository. 34 | -------------------------------------------------------------------------------- /emdr/daemons/relay/dedupers/memcached.py: -------------------------------------------------------------------------------- 1 | """ 2 | A memcached-backed deduper. This is much more efficient than the deque backend, 3 | and should be used in production. 4 | """ 5 | import pylibmc 6 | from emdr.conf import default_settings as settings 7 | from emdr.daemons.relay.dedupers.util import calc_hash_for_message 8 | 9 | # The connection to memcached. 10 | MC_CLIENT = pylibmc.Client( 11 | settings.RELAY_DEDUPE_BACKEND_CONN, 12 | binary=True, 13 | ) 14 | 15 | def is_message_duped(message): 16 | """ 17 | Given a raw EMDR message string, determine whether we have already recently 18 | seen the same exact message. 19 | 20 | :rtype: bool 21 | :returns: ``True`` if this message is a duplicate, ``False`` if not. 22 | """ 23 | global MC_CLIENT 24 | 25 | # Generate a hash for the incoming message. 26 | message_hash = str(calc_hash_for_message(message)) 27 | cache_key = '%s%s' % (settings.RELAY_DEDUPE_STORE_KEY_PREFIX, message_hash) 28 | # Look at our queue of hashes to figure out if we've seen this 29 | # message yet. 30 | was_already_seen = MC_CLIENT.get(cache_key) is not None 31 | # We always push the message on to the queue, even if it ends up being 32 | # a dupe, since it "refreshes" the hash. 33 | MC_CLIENT.set(cache_key, 1, time=settings.RELAY_DEDUPE_STORE_TIME) 34 | 35 | return was_already_seen -------------------------------------------------------------------------------- /emdr/daemons/relay/dedupers/py_deque.py: -------------------------------------------------------------------------------- 1 | """ 2 | A simple, inefficient de-duper using Python's included deque data structure. 3 | Seek time is pretty high, so this is probably only best for developers. 4 | """ 5 | from collections import deque 6 | from emdr.daemons.relay.dedupers.util import calc_hash_for_message 7 | 8 | # A simple Python deque. See the docs for details on how this works: 9 | # http://docs.python.org/library/collections.html#collections.deque 10 | # We hardcode this, because it's mostly meant for testing when memcached 11 | # isn't available. 12 | HASH_DEQUE = deque(maxlen=500) 13 | 14 | def is_message_duped(message): 15 | """ 16 | Given a raw EMDR message string, determine whether we have already recently 17 | seen the same exact message. 18 | 19 | :rtype: bool 20 | :returns: ``True`` if this message is a duplicate, ``False`` if not. 21 | """ 22 | global HASH_DEQUE 23 | 24 | # Generate a hash for the incoming message. 25 | message_hash = calc_hash_for_message(message) 26 | # Look at our queue of hashes to figure out if we've seen this 27 | # message yet. 
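
    # NOTE: membership tests against a deque are O(n); that linear scan is a
    # large part of why this backend is only suggested for development use.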
28 | was_already_seen = message_hash in HASH_DEQUE 29 | # We always push the message on to the queue, even if it ends up being 30 | # a dupe, since it "refreshes" the hash. 31 | HASH_DEQUE.append(message_hash) 32 | 33 | return was_already_seen
-------------------------------------------------------------------------------- /emdr/daemons/gateway/order_pusher.py: -------------------------------------------------------------------------------- 1 | """ 2 | Contains the necessary ZeroMQ socket and a helper function to publish 3 | market data to the Announcer daemons. 4 | """ 5 | import logging 6 | import zlib 7 | import zmq.green as zmq 8 | from emds.formats import unified 9 | from emdr.conf import default_settings as settings 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | # This socket is used to push market data out to the Announcers over ZeroMQ. 14 | context = zmq.Context() 15 | sender = context.socket(zmq.PUB) 16 | # Get the list of transports to connect to from settings. This allows us to PUB 17 | # messages to multiple announcers over a variety of socket types 18 | # (UNIX sockets and/or TCP sockets). 19 | for binding in settings.GATEWAY_SENDER_BINDINGS: 20 | sender.connect(binding) 21 | 22 | def push_message(parsed_message): 23 | """ 24 | Spawned as a greenlet to push parsed messages through ZeroMQ. 25 | """ 26 | try: 27 | # This will be the representation to send to the Announcers. 28 | json_str = unified.encode_to_json(parsed_message) 29 | except TypeError: 30 | logger.error('Unable to serialize a parsed message.') 31 | return 32 | 33 | # Push a zlib compressed JSON representation of the message to 34 | # announcers. 35 | compressed_msg = zlib.compress(json_str) 36 | sender.send(compressed_msg) 37 | 38 |
-------------------------------------------------------------------------------- /bin/fake_unified_history.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | A fake history upload script, used to manually test the whole stack.
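
Assumes a gateway is already running locally (bin/emdr-gateway listens on
port 8080 by default); with one up, just run:

    python fake_unified_history.py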
4 | """ 5 | import ujson 6 | import urllib 7 | import requests 8 | import zlib 9 | 10 | data = """ 11 | { 12 | "resultType" : "history", 13 | "version" : "0.1alpha", 14 | "uploadKeys" : [ 15 | { "name" : "emk", "key" : "abc" }, 16 | { "name" : "ec" , "key" : "def" } 17 | ], 18 | "generator" : { "name" : "Yapeal", "version" : "11.335.1737" }, 19 | "currentTime" : "2011-10-22T15:46:00+00:00", 20 | "columns" : ["date","orders","quantity","low","high","average"], 21 | "rowsets" : [ 22 | { 23 | "generatedAt" : "2011-10-22T15:42:00+00:00", 24 | "regionID" : 10000065, 25 | "typeID" : 11134, 26 | "rows" : [ 27 | ["2011-12-03T00:00:00+00:00",40,40,1999,499999.99,35223.50], 28 | ["2011-12-02T00:00:00+00:00",83,252,9999,11550,11550] 29 | ] 30 | } 31 | ] 32 | } 33 | """ 34 | 35 | message = ujson.dumps(ujson.loads(data)) 36 | 37 | headers = { 38 | #'Content-Encoding': 'deflate' 39 | } 40 | 41 | # POST non-form encoded 42 | data = message 43 | 44 | # POST form-encoded 45 | #data = urllib.urlencode({'data': message}) 46 | 47 | # Compressed request 48 | #data = zlib.compress(data)#[2:-4] 49 | 50 | r = requests.post( 51 | 'http://localhost:8080/upload/unified/', 52 | data=data, 53 | headers=headers, 54 | ) 55 | 56 | print "RESPONSE" 57 | print r.text
-------------------------------------------------------------------------------- /emdr/daemons/announcer/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | Gateways connect to Announcer daemons, sending zlib compressed JSON 3 | representations of market data. From here, the Announcer PUBs the messages 4 | out to anyone SUBscribing. This could be Relays, or end-users. 5 | """ 6 | import logging 7 | logger = logging.getLogger(__name__) 8 | 9 | import gevent 10 | import zmq.green as zmq 11 | from emdr.conf import default_settings as settings 12 | 13 | def run(): 14 | """ 15 | Fires up the announcer process. 16 | """ 17 | context = zmq.Context() 18 | 19 | receiver = context.socket(zmq.SUB) 20 | receiver.setsockopt(zmq.SUBSCRIBE, '') 21 | for binding in settings.ANNOUNCER_RECEIVER_BINDINGS: 22 | # Gateways connect to the Announcer to PUB messages. 23 | receiver.bind(binding) 24 | 25 | sender = context.socket(zmq.PUB) 26 | for binding in settings.ANNOUNCER_SENDER_BINDINGS: 27 | # Announcers offer up the data via PUB. 28 | sender.bind(binding) 29 | 30 | def relay_worker(message): 31 | """ 32 | This is the worker function that re-sends the incoming messages out 33 | to any subscribers. 34 | 35 | :param str message: A JSON string to re-broadcast. 36 | """ 37 | sender.send(message) 38 | logger.debug('Message announced.') 39 | 40 | logger.info("Announcer is now listening for order data.") 41 | 42 | while True: 43 | gevent.spawn(relay_worker, receiver.recv()) 44 |
-------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | try: 2 | from setuptools import setup, find_packages 3 | except ImportError: 4 | from distutils.core import setup 5 | 6 | def find_packages(exclude=None): 7 | """ 8 | Just stub this. If you're packaging EMDR, you need setuptools. If 9 | you're installing, not so much.
10 | """ 11 | return 12 | 13 | import emdr 14 | 15 | required = [ 16 | 'bottle', 17 | 'ujson', 18 | 'gevent', 19 | 'requests', 20 | 'python-dateutil<2.0', 21 | 'pyzmq', 22 | 'pytz', 23 | 'cython', 24 | 'emds', 25 | ] 26 | 27 | scripts = [ 28 | 'bin/emdr-announcer', 29 | 'bin/emdr-gateway', 30 | 'bin/emdr-relay', 31 | 'bin/emdr-snooper', 32 | ] 33 | 34 | setup( 35 | name='emdr', 36 | version=emdr.__version__, 37 | description='EVE Market Data Relay', 38 | long_description=open('README.rst').read(), 39 | author='Greg Taylor', 40 | author_email='gtaylor@gc-taylor.com', 41 | url='https://github.com/gtaylor/EVE-Market-Data-Relay', 42 | packages=find_packages(exclude=['tests', 'emds']), 43 | scripts=scripts, 44 | package_data={'': ['LICENSE']}, 45 | include_package_data=True, 46 | install_requires=required, 47 | license='BSD', 48 | classifiers=( 49 | 'Development Status :: 4 - Beta', 50 | 'Intended Audience :: Developers', 51 | 'Natural Language :: English', 52 | 'License :: OSI Approved :: BSD License', 53 | 'Programming Language :: Python', 54 | 'Programming Language :: Python :: 2.7', 55 | ), 56 | ) 57 |
-------------------------------------------------------------------------------- /doc_src/uploading.rst: -------------------------------------------------------------------------------- 1 | .. _uploading: 2 | 3 | .. include:: global.txt 4 | 5 | Uploading Market data to EMDR 6 | ============================= 7 | 8 | Uploading to EMDR contributes data for public use. Feeding the firehose 9 | benefits us all, so please do consider pointing your uploader at the network. 10 | 11 | With EVEMon 12 | ----------- 13 | 14 | To upload data with EVEMon_, you need only have it installed and running. 15 | Market data is uploaded to EMDR by default. 16 | 17 | .. _EVEMon: http://evemon.battleclinic.com/ 18 | 19 | With EMDU (Mac, Linux, and Windows) 20 | ----------------------------------- 21 | 22 | EMDU_ (EVE Market Data Uploader) is a cross-platform, console-based market 23 | uploader client. It uploads directly to EMDR. For those who are running 24 | Mac or Linux, this client should run beautifully for you. It runs on Windows, 25 | as well, but it's probably easier to install EVEMon. 26 | 27 | See the `install instructions`_ for how to get started. 28 | 29 | .. _install instructions: https://github.com/gtaylor/EVE-Market-Data-Uploader/blob/master/README.rst 30 | 31 | With other clients 32 | ------------------ 33 | 34 | While we prefer EVEMon, you can use any client that supports the 35 | `Unified Uploader Data Interchange Format`_. An up-to-date list is maintained 36 | here: `Clients supporting UUDIF`_. 37 | 38 | Steps vary from client to client, but here is the typical process: 39 | 40 | * Open the dialog that lets you specify where to send market data. 41 | * Create a new endpoint. Select Unified format if it asks. 42 | * Set the URL to: http://upload.eve-emdr.com/upload/ 43 | * Enter your upload key, if you feel like it. Otherwise, just make something 44 | up or leave it blank. 45 | * Hit save, and start uploading. 46 | 47 | You can then use any market service's auto-uploader pages. 48 |
-------------------------------------------------------------------------------- /emdr/daemons/relay/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | Relays sit below an announcer, or another relay, and simply repeat what 3 | they receive over PUB/SUB. 4 | """ 5 | # Logging has to be configured first before we do anything.
6 | import logging 7 | 8 | logger = logging.getLogger(__name__) 9 | import zlib 10 | 11 | import gevent 12 | import zmq.green as zmq 13 | from emdr.conf import default_settings as settings 14 | from emdr.daemons.relay.dedupers import is_message_duped 15 | 16 | def run(): 17 | """ 18 | Fires up the relay process. 19 | """ 20 | # These form the connection to the Gateway daemon(s) upstream. 21 | context = zmq.Context() 22 | 23 | receiver = context.socket(zmq.SUB) 24 | receiver.setsockopt(zmq.SUBSCRIBE, '') 25 | for binding in settings.RELAY_RECEIVER_BINDINGS: 26 | # Relays bind upstream to an Announcer, or another Relay. 27 | receiver.connect(binding) 28 | 29 | sender = context.socket(zmq.PUB) 30 | for binding in settings.RELAY_SENDER_BINDINGS: 31 | # End users, or other relays, may attach here. 32 | sender.bind(binding) 33 | 34 | def relay_worker(message): 35 | """ 36 | This is the worker function that re-sends the incoming messages out 37 | to any subscribers. 38 | 39 | :param str message: A JSON string to re-broadcast. 40 | """ 41 | if is_message_duped(message): 42 | # We've already seen this message recently. Discard it. 43 | return 44 | 45 | if settings.RELAY_DECOMPRESS_MESSAGES: 46 | message = zlib.decompress(message) 47 | 48 | sender.send(message) 49 | 50 | logger.info("Relay is now listening for order data.") 51 | 52 | while True: 53 | # For each incoming message, spawn a greenlet using the relay_worker 54 | # function. 55 | gevent.spawn(relay_worker, receiver.recv()) -------------------------------------------------------------------------------- /bin/fake_unified_order.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | A fake order upload script, used to manually test the whole stack. 
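
Like fake_unified_history.py, this assumes a gateway on localhost:8080, but
it exercises the form-encoded, deflate-compressed upload path instead:

    python fake_unified_order.py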
4 | """ 5 | import ujson 6 | import urllib 7 | import requests 8 | import zlib 9 | 10 | data = """ 11 | { 12 | "resultType" : "orders", 13 | "version" : "0.1alpha", 14 | "uploadKeys" : [ 15 | { "name" : "emk", "key" : "abc" }, 16 | { "name" : "ec" , "key" : "def" } 17 | ], 18 | "generator" : { "name" : "Yapeal", "version" : "11.335.1737" }, 19 | "currentTime" : "2011-10-22T15:46:00+00:00", 20 | "columns" : ["price","volRemaining","range","orderID","volEntered","minVolume","bid","issueDate","duration","stationID","solarSystemID"], 21 | "rowsets" : [ 22 | { 23 | "generatedAt" : "2011-10-22T15:43:00+00:00", 24 | "regionID" : 10000065, 25 | "typeID" : 11134, 26 | "rows" : [ 27 | [8999,1,32767,2363806077,1,1,false,"2011-12-03T08:10:59+00:00",90,60008692,30005038], 28 | [11499.99,10,32767,2363915657,10,1,false,"2011-12-03T10:53:26+00:00",90,60006970,null], 29 | [11500,48,32767,2363413004,50,1,false,"2011-12-02T22:44:01+00:00",90,60006967,30005039] 30 | ] 31 | }, 32 | { 33 | "generatedAt" : "2011-10-22T15:42:00+00:00", 34 | "regionID" : null, 35 | "typeID" : 11135, 36 | "rows" : [ 37 | [8999,1,32767,2363806077,1,1,false,"2011-12-03T08:10:59+00:00",90,60008692,30005038], 38 | [11499.99,10,32767,2363915657,10,1,false,"2011-12-03T10:53:26+00:00",90,60006970,null], 39 | [11500,48,32767,2363413004,50,1,false,"2011-12-02T22:44:01+00:00",90,60006967,30005039] 40 | ] 41 | } 42 | ] 43 | } 44 | """ 45 | message = ujson.dumps(ujson.loads(data)) 46 | 47 | headers = { 48 | 'Content-Encoding': 'deflate' 49 | } 50 | 51 | # POST non-form encoded 52 | #data = message 53 | 54 | # POST form-encoded 55 | data = urllib.urlencode({'data': message}) 56 | 57 | # Compressed request 58 | data = zlib.compress(data)#[2:-4] 59 | 60 | r = requests.post( 61 | 'http://localhost:8080/upload/', 62 | data=data, 63 | headers=headers, 64 | ) 65 | print r.status_code, r.text 66 |
-------------------------------------------------------------------------------- /bin/emdr-announcer: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Gateways connect to Announcers via PUB, and the Announcer distributes 4 | the message to those below it via PUB. 5 | """ 6 | from logging.config import dictConfig 7 | import argparse 8 | from emdr.conf import default_settings as settings 9 | from emdr.core.command_utils import set_logger_level, print_cmd_header, print_cmd_footer 10 | 11 | parser = argparse.ArgumentParser( 12 | description="The announcer accepts order data from gateways. " 13 | "The data is PUBlished to all SUBscribers, which are developer " 14 | "applications, and/or Relays.") 15 | parser.add_argument( 16 | '--listener', action='append', dest='listeners', 17 | help="Overrides default announcer receiver bindings. This determines how " 18 | "the Gateways connect to this Announcer to PUB messages.") 19 | parser.add_argument( 20 | '--sender', action='append', dest='senders', 21 | help="Override default Announcer sender bindings. 
This determines how " 22 | "relays or developer applications can connect to this announcer.") 23 | parser.add_argument( 24 | '--loglevel', action='store', dest='loglevel', default='INFO', 25 | help="Overrides default logger level (DEBUG, INFO, WARNING, ERROR) " 26 | "(default: %s)" % settings.LOGGING['loggers']['']['level']) 27 | 28 | parsed = parser.parse_args() 29 | 30 | print_cmd_header('emdr-announcer') 31 | 32 | if parsed.listeners: 33 | settings.ANNOUNCER_RECEIVER_BINDINGS = parsed.listeners 34 | if parsed.senders: 35 | settings.ANNOUNCER_SENDER_BINDINGS = parsed.senders 36 | 37 | log_level = set_logger_level(parsed.loglevel) 38 | print("* Logging level: %s" % log_level) 39 | 40 | print("* Accepting PUB connections on:") 41 | for binding in settings.ANNOUNCER_RECEIVER_BINDINGS: 42 | print(" - %s" % binding) 43 | 44 | print("* Accepting SUB connections on:") 45 | for binding in settings.ANNOUNCER_SENDER_BINDINGS: 46 | print(" - %s" % binding) 47 | 48 | print_cmd_footer() 49 | 50 | dictConfig(settings.LOGGING) 51 | 52 | # Get the announcer running. 53 | from emdr.daemons.announcer import main 54 | try: 55 | main.run() 56 | except KeyboardInterrupt: 57 | print('Announcer stopped by keyboard interrupt.') -------------------------------------------------------------------------------- /examples/python/greenlet_consumer/gevent_consumer.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | An example consumer that uses a greenlet pool to accept incoming market 4 | messages. This example offers a high degree of concurrency. 5 | """ 6 | import zlib 7 | # This can be replaced with the built-in json module, if desired. 8 | import simplejson 9 | 10 | import gevent 11 | from gevent.pool import Pool 12 | from gevent import monkey; gevent.monkey.patch_all() 13 | import zmq.green as zmq 14 | 15 | # The maximum number of greenlet workers in the greenlet pool. This is not one 16 | # per processor, a decent machine can support hundreds or thousands of greenlets. 17 | # I recommend setting this to the maximum number of connections your database 18 | # backend can accept, if you must open one connection per save op. 19 | MAX_NUM_POOL_WORKERS = 200 20 | 21 | def main(): 22 | """ 23 | The main flow of the application. 24 | """ 25 | context = zmq.Context() 26 | subscriber = context.socket(zmq.SUB) 27 | 28 | # Connect to the first publicly available relay. 29 | subscriber.connect('tcp://relay-us-central-1.eve-emdr.com:8050') 30 | # Disable filtering. 31 | subscriber.setsockopt(zmq.SUBSCRIBE, "") 32 | 33 | # We use a greenlet pool to cap the number of workers at a reasonable level. 34 | greenlet_pool = Pool(size=MAX_NUM_POOL_WORKERS) 35 | 36 | print("Consumer daemon started, waiting for jobs...") 37 | print("Worker pool size: %d" % greenlet_pool.size) 38 | 39 | while True: 40 | # Since subscriber.recv() blocks when no messages are available, 41 | # this loop stays under control. If something is available and the 42 | # greenlet pool has greenlets available for use, work gets done. 43 | greenlet_pool.spawn(worker, subscriber.recv()) 44 | 45 | def worker(job_json): 46 | """ 47 | For every incoming message, this worker function is called. Be extremely 48 | careful not to do anything CPU-intensive here, or you will see blocking. 49 | Sockets are async under gevent, so those are fair game. 50 | """ 51 | # Receive raw market JSON strings. 52 | market_json = zlib.decompress(job_json) 53 | # Un-serialize the JSON data to a Python dict. 
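
    # The result is a Unified Uploader Data Interchange Format dict; its
    # 'resultType' key distinguishes 'orders' messages from 'history' ones.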
54 | market_data = simplejson.loads(market_json) 55 | # Save to your choice of DB here. 56 | print market_data 57 | 58 | if __name__ == '__main__': 59 | main() 60 | 61 |
-------------------------------------------------------------------------------- /bin/emdr-gateway: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | WSGI gateway application. Runs using bottle+gevent. 4 | """ 5 | from gevent import monkey; monkey.patch_all() 6 | from logging.config import dictConfig 7 | import argparse 8 | from bottle import run 9 | from emdr.conf import default_settings as settings 10 | from emdr.core.command_utils import set_logger_level, print_cmd_header, print_cmd_footer 11 | 12 | parser = argparse.ArgumentParser( 13 | description="The gateway accepts uploads from player uploader clients " \ 14 | "over HTTP. Market data is then sent to the Announcers.", 15 | ) 16 | parser.add_argument( 17 | '--sender', action='append', dest='senders', 18 | help="Overrides default gateway sender bindings. This determines where " \ 19 | "the gateway sends its messages, typically an Announcer.") 20 | parser.add_argument( 21 | '--loglevel', action='store', dest='loglevel', default='INFO', 22 | help="Overrides default logger level (DEBUG, INFO, WARNING, ERROR) " 23 | "(default: %s)" % settings.LOGGING['loggers']['']['level']) 24 | parser.add_argument( 25 | '--webport', action='store', dest='web_port', 26 | help="Overrides default webserver port " 27 | "(default: %s)" % settings.GATEWAY_WEB_PORT) 28 | parser.add_argument( 29 | '--ip-key-salt', action='store', dest='ip_key_salt', 30 | help="If provided, set a salted hash of each uploader's IP address as " \ 31 | "the EMDR upload key. Used for consumers to track problematic uploaders.") 32 | 33 | parsed = parser.parse_args() 34 | 35 | print_cmd_header('emdr-gateway') 36 | 37 | if parsed.senders: 38 | settings.GATEWAY_SENDER_BINDINGS = parsed.senders 39 | if parsed.web_port: 40 | settings.GATEWAY_WEB_PORT = parsed.web_port 41 | if parsed.ip_key_salt: 42 | settings.GATEWAY_IP_KEY_SALT = str(parsed.ip_key_salt) 43 | 44 | log_level = set_logger_level(parsed.loglevel) 45 | print("* Logging level: %s" % log_level) 46 | 47 | print("* Listening for HTTP market data on port: %s" % settings.GATEWAY_WEB_PORT) 48 | 49 | print("* Sending market data over PUB to Announcers:") 50 | for binding in settings.GATEWAY_SENDER_BINDINGS: 51 | print(" - %s" % binding) 52 | 53 | print_cmd_footer() 54 | 55 | dictConfig(settings.LOGGING) 56 | 57 | #noinspection PyUnresolvedReferences 58 | from emdr.daemons.gateway import wsgi 59 | # Fire up a bottle+gevent process. 60 | run( 61 | host='localhost', 62 | server='gevent', 63 | port=settings.GATEWAY_WEB_PORT, 64 | # Use libevent HTTP server. 65 | fast=True 66 | )
-------------------------------------------------------------------------------- /doc_src/access.rst: -------------------------------------------------------------------------------- 1 | .. _access: 2 | 3 | .. include:: global.txt 4 | 5 | Getting access to the EMDR network 6 | ================================== 7 | 8 | In order to get access to the EMDR network, you will merely need to connect 9 | to a relay. For your convenience, we have listed the relays below that have 10 | available capacity. If you'd like to verify the health of a relay before 11 | using it, check out our `EMDR monitor`_. 
12 | 13 | ========================================== ================== ============== ================== ==================================================================== 14 | URI ISP Location Access Notes 15 | ========================================== ================== ============== ================== ==================================================================== 16 | tcp://relay-us-west-1.eve-emdr.com:8050 Cogent Sacramento, CA Open West coast US relay. Volunteered by Yann of EVE Central. 17 | tcp://relay-us-central-1.eve-emdr.com:8050 Ubiquity Hosting Chicago, IL Open Central US relay. Volunteered by udsaxman. 18 | tcp://relay-eu-germany-1.eve-emdr.com:8050 Hetzner Germany Open German relay. Volunteered by FuzzySteve. 19 | tcp://relay-eu-germany-2.eve-emdr.com:8050 Hetzner Germany Open German relay. Volunteered by Agedon Group, Inc. 20 | tcp://relay-eu-germany-3.eve-emdr.com:8050 Intergenia Germany Open German relay. Volunteered by EVE-HQ.com. 21 | tcp://relay-eu-germany-4.eve-emdr.com:8050 Hetzner Germany Open German relay. Volunteered by Karbowiak. 22 | tcp://relay-eu-denmark-1.eve-emdr.com:8050 ComX Denmark Open Volunteered by Karbowiak. 23 | ========================================== ================== ============== ================== ==================================================================== 24 | 25 | Once you have chosen a relay, simply adapt the sample in 26 | :doc:`using` to use the hostname/port of the relay of your choice. 27 | 28 | .. tip:: If reliability is a concern, you'll want to connect to multiple 29 | relays and handle the duplicate messages. This will keep you running even 30 | if one of the relays you're using dies. 31 | 32 | .. note:: Some relays, marked as *Restricted*, require that you request 33 | access to use them. You will need to get in touch with the relay via the 34 | contact info listed to work it out with the admin. 35 |
-------------------------------------------------------------------------------- /emdr/conf/default_settings.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains the default settings that stand unless overridden. 3 | """ 4 | 5 | # 6 | ## Gateway Daemon Settings 7 | # 8 | 9 | # Default port to listen for HTTP uploads on. 10 | GATEWAY_WEB_PORT = 8080 11 | # PUB - Connect 12 | GATEWAY_SENDER_BINDINGS = ["ipc:///tmp/announcer-receiver.sock"] 13 | # If set as a string, this value is used as the salt to create a hash of 14 | # each uploader's IP address. This in turn gets set as the EMDR upload key. 15 | GATEWAY_IP_KEY_SALT = None 16 | 17 | # 18 | ## ZeroMQ-based Gateway Daemon Settings 19 | # 20 | # PULL - Bind 21 | GATEWAY_ZMQ_RECEIVER_BINDINGS = ["ipc:///tmp/gateway-zmq-receiver.sock"] 22 | # By default, use the same as the HTTP gateway, for easy testing. 23 | # PUB - Connect 24 | GATEWAY_ZMQ_SENDER_BINDINGS = ["ipc:///tmp/announcer-receiver.sock"] 25 | # The number of worker greenlets to listen for data on. 26 | GATEWAY_ZMQ_NUM_WORKERS = 5 27 | 28 | # 29 | ## Announcer Daemon Settings 30 | # 31 | # SUB - Bind 32 | ANNOUNCER_RECEIVER_BINDINGS = ["ipc:///tmp/announcer-receiver.sock"] 33 | # PUB - Bind 34 | ANNOUNCER_SENDER_BINDINGS = ["ipc:///tmp/announcer-sender.sock"] 35 | 36 | # 37 | ## Relay Daemon Settings 38 | # 39 | # SUB - Connect 40 | RELAY_RECEIVER_BINDINGS = ["ipc:///tmp/announcer-sender.sock"] 41 | # PUB - Bind 42 | RELAY_SENDER_BINDINGS = ["ipc:///tmp/relay-sender.sock"] 43 | # If True, outbound messages to subscribers are decompressed.
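
# The bundled examples (and most consumers) expect the compressed form, so
# leave this False unless your subscribers can't zlib-decompress themselves.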
44 | RELAY_DECOMPRESS_MESSAGES = False 45 | # Default to memcached, as it's fast. 46 | RELAY_DEDUPE_BACKEND = "memcached" 47 | # For dedupe backends that require a connection string of some sort, store it 48 | # here. We'll default to localhost for now. Use a list of strings. 49 | RELAY_DEDUPE_BACKEND_CONN = ["127.0.0.1"] 50 | # For timeout based backends, this determines how long (in seconds) we store 51 | # the message hashes. 52 | RELAY_DEDUPE_STORE_TIME = 300 53 | # For memcached and other key/value stores, this is prefixed to the hash 54 | # to form the cache key. This is useful to avoid clashes for multi-tenant 55 | # situations. 56 | RELAY_DEDUPE_STORE_KEY_PREFIX = 'emdr-relay-dd' 57 | 58 | # 59 | ## Logging Settings 60 | # 61 | LOGGING = { 62 | 'version': 1, 63 | 'disable_existing_loggers': True, 64 | 'formatters': { 65 | 'verbose': { 66 | 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s' 67 | }, 68 | 'simple': { 69 | 'format': '%(name)s -- %(levelname)s -- %(asctime)s: %(message)s' 70 | }, 71 | }, 72 | 'handlers': { 73 | 'console': { 74 | 'level': 'DEBUG', 75 | 'class': 'logging.StreamHandler', 76 | 'formatter': 'simple' 77 | }, 78 | 'null': { 79 | 'level': 'DEBUG', 80 | 'class': 'logging.NullHandler', 81 | }, 82 | }, 83 | 'loggers': { 84 | '': { 85 | 'handlers': ['console'], 86 | 'level': 'INFO', 87 | 'propagate': True, 88 | }, 89 | }, 90 | } 91 |
-------------------------------------------------------------------------------- /examples/python/cloudwatch_grapher/cloudwatch_grapher.py: -------------------------------------------------------------------------------- 1 | """ 2 | A simple script that listens to EMDR, tracks the number of messages coming in, 3 | and reports it to Amazon CloudWatch as a custom metric. 4 | 5 | CloudWatch allows for nearly real-time graphing from within the AWS Management 6 | Console, and also allows programmatic access to all recorded data. If you 7 | keep the tracking frequency low enough to stay in the free tier (1,000,000 8 | requests per month), this script is free to run. 9 | 10 | http://aws.amazon.com/cloudwatch/ 11 | """ 12 | import gevent 13 | import gevent.monkey; gevent.monkey.patch_all() 14 | import boto 15 | import zmq.green as zmq 16 | 17 | # 18 | ## AWS and CloudWatch config. You MUST replace these. 19 | # 20 | 21 | AWS_ACCESS_KEY_ID = 'XXXXXXXXXXXXXXXXXXXX' 22 | AWS_SECRET_ACCESS_KEY = 'YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY' 23 | 24 | # 25 | ## Grapher config 26 | # 27 | 28 | # The interval to write to CloudWatch (in seconds). Keeping this high enough 29 | # (no lower than 3 seconds) to stay in the free tier is recommended. 30 | REPORT_INTERVAL = 30 31 | 32 | # 33 | ## Constants and globals 34 | # 35 | 36 | conn = boto.connect_cloudwatch( 37 | aws_access_key_id=AWS_ACCESS_KEY_ID, 38 | aws_secret_access_key=AWS_SECRET_ACCESS_KEY, 39 | ) 40 | 41 | context = zmq.Context() 42 | subscriber = context.socket(zmq.SUB) 43 | 44 | # Connect to the first publicly available relay. 45 | subscriber.connect('tcp://relay-us-central-1.eve-emdr.com:8050') 46 | # Disable filtering. 47 | subscriber.setsockopt(zmq.SUBSCRIBE, "") 48 | 49 | # This is a global used to track the number of messages we've seen since we 50 | # last reported to CloudWatch. 51 | MESSAGE_COUNTER = 0 52 | 53 | # 54 | ## The fun stuff 55 | # 56 | 57 | def data_sender(num_messages): 58 | """ 59 | Handles the sending of the message counter values to CloudWatch via boto. 60 | This is done in a greenlet, and won't block everything else.
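
    (boto's put_metric_data call blocks on HTTP, but since gevent monkey-patched
    the socket module above, other greenlets keep running while it waits.)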
61 | """ 62 | global conn 63 | 64 | conn.put_metric_data('EMDR', 'MessagesOut', value=num_messages, unit='Count') 65 | 66 | def heartbeat(): 67 | """ 68 | Every REPORT_INTERVAL number of seconds, this heartbeat greenlet 69 | fires off the data_sender greenlet to report to CloudWatch, and resets 70 | the message counter. 71 | """ 72 | global MESSAGE_COUNTER 73 | 74 | while True: 75 | # Only the greenlet sleeps, everything else (the counter) keeps going. 76 | gevent.sleep(REPORT_INTERVAL) 77 | # Fire off another greenlet to send the count to CloudWatch. This lets 78 | # us immediately get back to sleeping. 79 | gevent.spawn(data_sender, MESSAGE_COUNTER) 80 | # Reset the message counter. 81 | MESSAGE_COUNTER = 0 82 | 83 | # Spawns the heartbeat greenlet, which infinitely loops. 84 | gevent.spawn(heartbeat) 85 | 86 | def counter_greenlet(message): 87 | """ 88 | Increments the global message counter by one. This is run in a greenlet. 89 | """ 90 | global MESSAGE_COUNTER 91 | 92 | MESSAGE_COUNTER += 1 93 | 94 | while True: 95 | # This is the main driver of the whole script. For every incoming 96 | # message, hand it off to the counter greenlet, which increments 97 | # the message counter. We could also do this here, but doing it in a 98 | # greenlet is more fun. 99 | gevent.spawn(counter_greenlet, subscriber.recv())
-------------------------------------------------------------------------------- /doc_src/index.rst: -------------------------------------------------------------------------------- 1 | .. _index: 2 | 3 | .. include:: global.txt 4 | 5 | EVE Market Data Relay (EMDR) 6 | ============================ 7 | 8 | **With the introduction of native-to-EVE market APIs, the EMDR project has run its course. As of May 1, 2017, we have shuttered the network. This repo will remain in an archived state. Thanks to all who helped make EMDR a success!** 9 | 10 | EVE Market Data Relay (EMDR) is a super-scalable, highly available firehose of 11 | real-time market data. For those who wish to record price and history data 12 | as it comes in, EMDR will help you do so as efficiently and reliably as 13 | possible. EMDR's data feed is open to the public, and is developed as an 14 | open source project. 15 | 16 | EMDR may appeal to you if: 17 | 18 | * You need real-time access to market data, as soon as possible. Perhaps for 19 | sending out price/inventory level alerts, notification of lucrative 20 | trade routes, or real-time charts and graphs. 21 | * You want to record prices over time. 22 | * You want the most complete set of data that you can get. 23 | * The effort and overhead of getting large amounts of direct player uploads to 24 | your site is too much to bear. 25 | 26 | EMDR's primary goals are: 27 | 28 | * Ensuring that all market sites have access to player-uploaded market data. 29 | * Extremely high reliability. 30 | * Minimize expense to those running EMDR (shared burden). 31 | * Minimize expense to those consuming the feed (bandwidth). 32 | 33 | For a more complete run-down, see :doc:`overview`. 34 | 35 | **License:** EVE Market Data Relay is licensed under the `BSD License`_. 36 | 37 | Assorted Info 38 | ------------- 39 | * `Mailing list`_ - If you are consuming the feed, make sure 40 | to subscribe to this for important announcements. This is also one of the 41 | best places to ask questions or discuss EMDR stuff.
42 | * Slack Channel - `join #emdr on tweetfleet.slack.com `_, an excellent place for getting quick 43 | help, or hanging out with other developers and consumers. Get your account `here `_ if you don't have one! 44 | * `Issue tracker`_ - Report bugs here. 45 | * `GitHub project`_ - Source code and issue tracking. 46 | * `EMDR monitor`_ - EMDR relay/announcer monitor web app. 47 | * `EMDR map`_ - See the solar systems light up as 48 | market data arrives. 49 | * `@gctaylor Twitter`_ - Tweets from the maintainer. 50 | 51 | General Topics 52 | -------------- 53 | 54 | The following topics are higher-level overviews, and general documentation. 55 | If you are just curious, or wondering how to upload data to EMDR, this section 56 | is all you need. 57 | 58 | .. toctree:: 59 | :maxdepth: 2 60 | 61 | overview 62 | sites 63 | uploading 64 | 65 | Consumer Documentation 66 | ---------------------- 67 | 68 | The following topics are useful to those wishing to connect to and use 69 | EMDR's data feed. 70 | 71 | .. toctree:: 72 | :maxdepth: 3 73 | 74 | data_sources 75 | access 76 | using 77 | design_considerations 78 | 79 | EMDR Developer Documentation 80 | ---------------------------- 81 | 82 | The following topics will be useful to you if you would like to help improve 83 | EMDR, or volunteer additional computing resources to the network. 84 | 85 | .. toctree:: 86 | :maxdepth: 2 87 | 88 | installation 89 | volunteering 90 | 91 | Indices and tables 92 | ------------------ 93 | 94 | * :ref:`genindex` 95 | * :ref:`modindex` 96 | * :ref:`search` 97 | 98 | -------------------------------------------------------------------------------- /bin/emdr-relay: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Relays listen to Announcers or other Relays for messages over SUB. Any 4 | messages received are pushed out to anyone below the Relay via PUB. 5 | """ 6 | from logging.config import dictConfig 7 | import argparse 8 | from emdr.conf import default_settings as settings 9 | from emdr.core.command_utils import set_logger_level, print_cmd_header, print_cmd_footer 10 | 11 | parser = argparse.ArgumentParser( 12 | description="The relay accepts order data from announcers, or other relays. " 13 | "The data is PUBlished to all SUBscribers, which are developer " 14 | "applications, and/or other relays.", 15 | ) 16 | parser.add_argument( 17 | '--listener', action='append', dest='listeners', 18 | help="Overrides default relay receiver bindings. This determines how " 19 | "announcers (or other relays) connect to this relay to PUB " 20 | "messages.") 21 | parser.add_argument( 22 | '--sender', action='append', dest='senders', 23 | help="Overrides default relay sender bindings. This determines how " 24 | "other relays or developer applications can connect to this relay.") 25 | parser.add_argument( 26 | '--loglevel', action='store', dest='loglevel', default='INFO', 27 | help="Overrides default logger level (DEBUG, INFO, WARNING, ERROR) " 28 | "(default: %s)" % settings.LOGGING['loggers']['']['level']) 29 | parser.add_argument( 30 | '--enable-decompress', action='store_true', dest='enable_decompress', 31 | help="Decompress all outbound messages (default: %s)" % ( 32 | 'Enabled' if settings.RELAY_DECOMPRESS_MESSAGES else 'Disabled', 33 | )) 34 | 35 | dedupe_group = parser.add_argument_group( 36 | 'Message Deduplication', 37 | "Settings for message de-duplication."
) 39 | dedupe_group.add_argument( 40 | '--dedupe-backend', action='store', dest='dedupe_backend', 41 | help="The dedupe backend to use. One of: deque, memcached " 42 | "(default: %s)" % settings.RELAY_DEDUPE_BACKEND) 43 | dedupe_group.add_argument( 44 | '--dedupe-conn', action='store', dest='dedupe_conn', 45 | help="Backend-specific connection string, if needed " 46 | "(default: %s)" % settings.RELAY_DEDUPE_BACKEND_CONN[0]) 47 | dedupe_group.add_argument( 48 | '--dedupe-store-time', action='store', dest='dedupe_store_time', 49 | help="For backends that store message hashes, this determines " 50 | "how long to do so (in seconds) " 51 | "(default: %s)" % settings.RELAY_DEDUPE_STORE_TIME) 52 | dedupe_group.add_argument( 53 | '--dedupe-store-key-prefix', action='store', dest='dedupe_store_key_prefix', 54 | help="For multi-tenant installs, set this key prefix on all relay " 55 | "cache entries " 56 | "(default: %s)" % settings.RELAY_DEDUPE_STORE_KEY_PREFIX) 57 | 58 | parsed = parser.parse_args() 59 | 60 | print_cmd_header('emdr-relay') 61 | 62 | if parsed.listeners: 63 | settings.RELAY_RECEIVER_BINDINGS = parsed.listeners 64 | if parsed.senders: 65 | settings.RELAY_SENDER_BINDINGS = parsed.senders 66 | if parsed.dedupe_backend: 67 | settings.RELAY_DEDUPE_BACKEND = parsed.dedupe_backend 68 | if parsed.dedupe_store_time: 69 | settings.RELAY_DEDUPE_STORE_TIME = int(parsed.dedupe_store_time) 70 | if parsed.enable_decompress: 71 | settings.RELAY_DECOMPRESS_MESSAGES = True 72 | if parsed.dedupe_store_key_prefix: 73 | settings.RELAY_DEDUPE_STORE_KEY_PREFIX = parsed.dedupe_store_key_prefix 74 | if parsed.dedupe_conn: 75 | settings.RELAY_DEDUPE_BACKEND_CONN = [parsed.dedupe_conn]  # Settings expect a list. 76 | 77 | log_level = set_logger_level(parsed.loglevel) 78 | print("* Logging level: %s" % log_level) 79 | 80 | print("* Connect to Announcers via SUB:") 81 | for binding in settings.RELAY_RECEIVER_BINDINGS: 82 | print(" - %s" % binding) 83 | 84 | print("* Accepting SUB connections on:") 85 | for binding in settings.RELAY_SENDER_BINDINGS: 86 | print(" - %s" % binding) 87 | 88 | print("* De-dupe backend: %s" % settings.RELAY_DEDUPE_BACKEND) 89 | print("* De-dupe backend connection: %s" % settings.RELAY_DEDUPE_BACKEND_CONN) 90 | print("* De-dupe backend hash store time: %s" % settings.RELAY_DEDUPE_STORE_TIME) 91 | print("* De-dupe backend store key prefix: %s" % settings.RELAY_DEDUPE_STORE_KEY_PREFIX) 92 | 93 | if settings.RELAY_DECOMPRESS_MESSAGES: 94 | print("* Outbound message de-compression enabled.") 95 | 96 | print_cmd_footer() 97 | 98 | dictConfig(settings.LOGGING) 99 | 100 | # Get the relay running. 101 | from emdr.daemons.relay import main 102 | try: 103 | main.run() 104 | except KeyboardInterrupt: 105 | print('Relay stopped by keyboard interrupt.') -------------------------------------------------------------------------------- /doc_src/design_considerations.rst: -------------------------------------------------------------------------------- 1 | .. _design-considerations: 2 | 3 | .. include:: global.txt 4 | 5 | Design considerations for consumers 6 | =================================== 7 | 8 | This document outlines some useful tips for designing your consumer applications. 9 | If you have anything to add, post an entry on our `issue tracker`_. 10 | 11 | Keeping up with market data 12 | --------------------------- 13 | 14 | As EMDR grows, the volume of market data you see over a given span of time 15 | will continue to increase. This means that you'll need to design with 16 | concurrency in mind.
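The essential shape is a receive loop that does as little as possible, handing each raw message off to something else for storage and processing. Here is a minimal sketch of that shape, using pyzmq and the standard library's threads and queues (``save_to_db`` is a hypothetical stand-in for whatever your storage code looks like)::

    import zlib
    import threading
    import Queue

    import simplejson
    import zmq

    # Raw messages land here as fast as they arrive. The maxsize keeps a
    # runaway backlog from eating all of your RAM.
    work_queue = Queue.Queue(maxsize=50000)

    def worker():
        # Decompress, parse, and save. A slow DB write only blocks this
        # thread, never the receive loop below.
        while True:
            market_json = zlib.decompress(work_queue.get())
            market_data = simplejson.loads(market_json)
            save_to_db(market_data)  # Hypothetical: your storage layer here.

    worker_thread = threading.Thread(target=worker)
    worker_thread.daemon = True
    worker_thread.start()

    context = zmq.Context()
    subscriber = context.socket(zmq.SUB)
    subscriber.connect('tcp://relay-us-central-1.eve-emdr.com:8050')
    subscriber.setsockopt(zmq.SUBSCRIBE, "")

    while True:
        # Do as little as possible here: receive and enqueue, nothing more.
        work_queue.put(subscriber.recv())

If the queue fills faster than your worker drains it, you are falling behind, and will need more workers, faster storage, or both.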
17 | 18 | Ideally, you have a dedicated process that does just enough to connect to EMDR 19 | and save the data to your DB backend. We suggest doing any aggregation or 20 | additional processing in another process, to make sure you don't lose any 21 | data due to blocking or bugs introduced in your processing/aggregation code. 22 | If your consumer can't process the incoming data fast enough 23 | to keep up with the relay, we end up discarding pending messages 24 | on the relay to prevent buffer overflow. The end result is that you will lose 25 | messages. 26 | 27 | For an idea of what this looks like, see our greenlet_consumer_ code example. 28 | This is written in Python, using gevent_ to perform the DB saves using 29 | greenlets, which are micro-threads. Most languages have something similar 30 | available, so don't let the fact that this is in Python psych you out if you're 31 | using another language. 32 | 33 | .. _greenlet_consumer: https://github.com/gtaylor/EVE-Market-Data-Relay/blob/master/examples/python/greenlet_consumer/gevent_consumer.py 34 | 35 | Deal with duplicate data 36 | ------------------------ 37 | 38 | You will see some duplicate data coming down through EMDR. There are a few 39 | different kinds of duplication: 40 | 41 | * Multiple players are sitting in Jita, looking at Module X at about the same 42 | time. You'll see two individual messages, containing the same (or very similar) 43 | data. 44 | * Another market service uploads a message that has already been through EMDR. 45 | This is a duplicate in its purest sense. We will do our best to hunt this 46 | down and take care of it for you, but do design with it in mind. 47 | 48 | Some elect to store every individual data point for each item. This is a 49 | viable approach, and not prohibitively expensive. Your aggregator process can 50 | go through data as it's coming in to look for suspicious patterns. Duplicate 51 | data can be a valuable means of cross-checking incoming data, in this case. 52 | 53 | Others only store the current price for items, using the generatedAt values 54 | to determine whether the message contains newer data than they have. 55 | 56 | Don't be too trusting 57 | --------------------- 58 | 59 | The reality of player-upload-driven market data sites is that we are at the 60 | mercy of said players as far as the data goes. The vast majority of uploaders 61 | are going to send good data. However, there is a minority that does not play 62 | so nicely. 63 | 64 | In many cases, multiple players will upload the details for the same orders 65 | multiple times. This can be used to your advantage, in that you can cross-check 66 | things as they come in. If one message says Large Shield Extender I is going 67 | for 5 billion ISK in Jita, but another three are saying much lower than that, 68 | your outlier is probably fraudulent and is best ignored. 69 | 70 | You also have the option of cross-referencing the APIs of other sites that 71 | do not consume EMDR data. While this can defeat some of the purpose of using 72 | EMDR, the option is there to complement the feed. 73 | 74 | Drop the banhammer on vandals 75 | ----------------------------- 76 | 77 | Uploaders may be uniquely identified via the ``EMDR`` key/value pair in each 78 | message's ``uploadKeys`` list. The value of the ``EMDR`` key is a salted, 79 | hashed string unique to the uploader's IP address. While this may be spoofed, 80 | it will offer some ability to blacklist obviously malicious users.
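As a rough illustration, a consumer-side filter can be a few lines of Python. The ``BANNED_EMDR_KEYS`` set and ``uploader_is_banned`` function below are hypothetical; you would populate the set with key values you have flagged yourself::

    # Hypothetical blacklist of EMDR upload key values you have flagged.
    BANNED_EMDR_KEYS = set([
        # Placeholder entry; use real key values observed in your feed.
        'some-observed-salted-hash',
    ])

    def uploader_is_banned(market_data):
        # Returns True if the message's EMDR upload key is blacklisted.
        for upload_key in market_data.get('uploadKeys', []):
            if upload_key.get('name') == 'EMDR':
                return upload_key.get('key') in BANNED_EMDR_KEYS
        return False

Messages that fail this check can simply be dropped before they ever reach your storage or aggregation code.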
81 | 82 | Use EMDR's redundancy to your advantage 83 | --------------------------------------- 84 | 85 | EMDR is built with high availability in mind. Our only single point of failure 86 | is Amazon's Route 53 DNS service, which has an excellent reliability track 87 | record. 88 | 89 | While you can connect to only one relay, you may wish to connect your consumer 90 | to two. This will allow your consumer to keep functioning, even if one of 91 | the relays it is subscribed to dies a fiery death. The only complication is 92 | that you will need to de-dupe the data coming in, as you'll be receiving 93 | two copies of each message (one from each relay). 94 | 95 | Optionally, fire up a private EMDR relay within your infrastructure and 96 | consume from that. It'll do the de-duplication for you. 97 | -------------------------------------------------------------------------------- /doc_src/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 14 | # the i18n builder cannot share the environment and doctrees with the others 15 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 16 | 17 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 18 | 19 | help: 20 | @echo "Please use \`make <target>' where <target> is one of" 21 | @echo " html to make standalone HTML files" 22 | @echo " dirhtml to make HTML files named index.html in directories" 23 | @echo " singlehtml to make a single large HTML file" 24 | @echo " pickle to make pickle files" 25 | @echo " json to make JSON files" 26 | @echo " htmlhelp to make HTML files and a HTML help project" 27 | @echo " qthelp to make HTML files and a qthelp project" 28 | @echo " devhelp to make HTML files and a Devhelp project" 29 | @echo " epub to make an epub" 30 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 31 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 32 | @echo " text to make text files" 33 | @echo " man to make manual pages" 34 | @echo " texinfo to make Texinfo files" 35 | @echo " info to make Texinfo files and run them through makeinfo" 36 | @echo " gettext to make PO message catalogs" 37 | @echo " changes to make an overview of all changed/added/deprecated items" 38 | @echo " linkcheck to check all external links for integrity" 39 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 40 | 41 | clean: 42 | -rm -rf $(BUILDDIR)/* 43 | 44 | html: 45 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 46 | @echo 47 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 48 | 49 | dirhtml: 50 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 51 | @echo 52 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 53 | 54 | singlehtml: 55 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 56 | @echo 57 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
58 | 59 | pickle: 60 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 61 | @echo 62 | @echo "Build finished; now you can process the pickle files." 63 | 64 | json: 65 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 66 | @echo 67 | @echo "Build finished; now you can process the JSON files." 68 | 69 | htmlhelp: 70 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 71 | @echo 72 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 73 | ".hhp project file in $(BUILDDIR)/htmlhelp." 74 | 75 | qthelp: 76 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 77 | @echo 78 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 79 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 80 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/EVEMarketDataRelay.qhcp" 81 | @echo "To view the help file:" 82 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/EVEMarketDataRelay.qhc" 83 | 84 | devhelp: 85 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 86 | @echo 87 | @echo "Build finished." 88 | @echo "To view the help file:" 89 | @echo "# mkdir -p $$HOME/.local/share/devhelp/EVEMarketDataRelay" 90 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/EVEMarketDataRelay" 91 | @echo "# devhelp" 92 | 93 | epub: 94 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 95 | @echo 96 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 97 | 98 | latex: 99 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 100 | @echo 101 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 102 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 103 | "(use \`make latexpdf' here to do that automatically)." 104 | 105 | latexpdf: 106 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 107 | @echo "Running LaTeX files through pdflatex..." 108 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 109 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 110 | 111 | text: 112 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 113 | @echo 114 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 115 | 116 | man: 117 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 118 | @echo 119 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 120 | 121 | texinfo: 122 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 123 | @echo 124 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 125 | @echo "Run \`make' in that directory to run these through makeinfo" \ 126 | "(use \`make info' here to do that automatically)." 127 | 128 | info: 129 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 130 | @echo "Running Texinfo files through makeinfo..." 131 | make -C $(BUILDDIR)/texinfo info 132 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 133 | 134 | gettext: 135 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 136 | @echo 137 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 138 | 139 | changes: 140 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 141 | @echo 142 | @echo "The overview file is in $(BUILDDIR)/changes." 143 | 144 | linkcheck: 145 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 146 | @echo 147 | @echo "Link check complete; look for any errors in the above output " \ 148 | "or in $(BUILDDIR)/linkcheck/output.txt." 
149 | 150 | doctest: 151 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 152 | @echo "Testing of doctests in the sources finished, look at the " \ 153 | "results in $(BUILDDIR)/doctest/output.txt." 154 | -------------------------------------------------------------------------------- /doc_src/overview.rst: -------------------------------------------------------------------------------- 1 | .. _overview: 2 | 3 | .. include:: global.txt 4 | 5 | A High-Level Overview 6 | ===================== 7 | 8 | Project Motivation 9 | ------------------ 10 | 11 | **For the application developer**, there are quite a few 12 | barriers to entry to writing market-data-driven applications. 13 | A developer would probably need to: 14 | 15 | * Either write their own uploader client, or re-purpose an existing uploader. 16 | * Write a service/API to accept the data in whatever format(s) the uploader(s) 17 | they'd like to support use. 18 | * Actually get players to point their uploader at said service/API (this is the 19 | hardest part!). 20 | * Probably pull data from other market sites to flesh out their data set. 21 | * Then, and only then, start writing the *fun* part of their application as 22 | the amount of data coming in slowly grows. By this point, they are probably 23 | a ways down the road to burnout. 24 | 25 | None of these tasks is fun; they all involve re-inventing the wheel, and 26 | every one of them stands between the developer and the interesting part of 27 | their application. 28 | 29 | EVE Market Data Relay (EMDR) allows you to forgo all of this drudgery, and 30 | instead, connect to a firehose of data in the standardized 31 | `Unified Uploader Data Interchange Format`_. EMDR's ZeroMQ_ underpinnings also 32 | make consuming the feed easier, and far more efficient, than accepting HTTP 33 | uploads directly. 34 | 35 | Core Principles 36 | --------------- 37 | 38 | During the early design and development of EMDR, these were the main pillars 39 | we built on: 40 | 41 | * There should be no single point of failure. Every component of the 42 | architecture should be simple to make redundant using trusted volunteered 43 | machines. 44 | * The application must be able to accept an extremely large number of incoming 45 | market orders without performance issues. 46 | * The cost for people hosting parts of EMDR's network should be kept to an 47 | absolute minimum. This means being stingy with CPU, RAM, and bandwidth. 48 | Likewise, consuming the feed shouldn't break the bank, either. 49 | * It must be very easy to scale the system without restarts/reconfigs on the 50 | primary setup. 51 | * The broadcasting of the market data needs to happen in a "fan out" manner. 52 | In this way, we can keep adding additional subscribers without running into 53 | scalability issues. 54 | 55 | How it all fits together 56 | ------------------------ 57 | 58 | For any given submitted market order, here is the flow said order goes through:: 59 | 60 | (Gateway) -> (Announcer) -> (Relays) -> (Applications) 61 | 62 | First, the order hits the **Gateway**, which is a simple HTTP application 63 | that parses the message. Incoming messages are in 64 | `Unified Uploader Data Interchange Format`_. 65 | 66 | The Gateway interprets the message, validates it, normalizes anything weird, 67 | then pipes it to all of the root-level **Announcers** in the network. 68 | 69 | The **Announcer** is the first tier of our market data distribution.
70 | Announcers relay any data they receive to **Relays** that are 71 | connected to the Announcer. There are only a few Announcers, and these only 72 | accept connections from approved Relays. Most relays connect to multiple 73 | announcers for added redundancy. 74 | 75 | The **Relay**, like the Announcer, is a dumb repeater of everything it 76 | receives. Relays receive data from their Announcers, then pipe it out to any 77 | subscribers that are connected to them. Subscribers can be other **Relays**, 78 | or actual user sites/applications. 79 | 80 | By using our system of Relays, we keep bandwidth usage and costs 81 | lower on the top-level Announcers. We are also able to keep "fanning out" to 82 | improve redundancy and serve greater numbers of consumers without large 83 | increases in bandwidth utilization. 84 | 85 | We are left with a very efficient, very sturdy data relay network. The next 86 | section goes into detail about fault-tolerance. 87 | 88 | High Availability through shared burden 89 | --------------------------------------- 90 | 91 | EMDR is architected in a way that allows every single component to be 92 | replicated. We can easily add additional daemons at each level of the stack in 93 | order to improve availability, or to spread costs. 94 | 95 | HTTP uploads are dispersed to Gateways via Round-Robin DNS, which is a 96 | simple way to distribute the traffic across multiple machines. For each additional 97 | Gateway added to DNS rotation, incoming bandwidth consumption drops for the 98 | whole pool as the load is divided. If at any time one of the gateways becomes 99 | unreachable, it is automatically removed from the DNS rotation. 100 | 101 | In the diagram below, we see a rough representation of our current deployment. 102 | Site 1 consists of EMDR running on Greg Taylor's (the project maintainer's) 103 | machines, and Site 2 is a separate copy running in another data center. The 104 | relays are all run by different volunteers. 105 | 106 | .. note:: We are not limited to just two instances of EMDR; there is no hard 107 | limit. Additionally, we'll mostly scale by adding more Gateways, since 108 | additional Announcers are only for redundancy. 109 | 110 | At every step of the entire flow, we can afford to lose one of the two 111 | daemons without a service interruption. The infrastructure can be scaled well 112 | out past the initial two sites, if need be. 113 | 114 | .. image:: images/emdr-daemon-diagram.png 115 | 116 | Security 117 | -------- 118 | 119 | Security is something we take seriously, but let's consider the current 120 | reality of market data with EVE sites: *Players upload market data directly 121 | to market sites.* We are no less secure than that. Uploads can be faked, 122 | and malicious payloads can be sent, though EMDR will do its best to catch 123 | anything harmful. 124 | 125 | .. note:: As a consumer, you may wish to cross-reference incoming data. In 126 | many cases, you will get the same data point multiple times, as several 127 | players upload the same thing. This can be used to your advantage. 128 | 129 | Technology Used 130 | --------------- 131 | 132 | This is the least interesting part of the overview, so it goes towards the 133 | end. 134 | 135 | * EMDR is written in Python_. 136 | * All network-related stuff is handled by ZeroMQ_, which is an incredibly 137 | simple and performant networking library. 138 | * gevent_ is used for its excellent greenlet-based Queue, Workers, and 139 | async network I/O.
140 | * The gateway HTTP servers run bottle_. 141 | 142 | The entire stack has super low overhead, and is very fast. 143 | 144 | Volunteering 145 | ------------ 146 | 147 | If you would like to volunteer computing resources to the EMDR network, 148 | see :doc:`volunteering` for more details. -------------------------------------------------------------------------------- /emdr/daemons/gateway/wsgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | This WSGI application accepts market data uploads from various uploader clients. 3 | The various URLs below are structured to pass off the parsing based on what 4 | format the data is in. 5 | 6 | Once parsed, the data is shoved out to the Announcers via the 7 | gateway.order_pusher module. 8 | """ 9 | # Logging has to be configured before we do anything else. 10 | import logging 11 | import urlparse 12 | import zlib 13 | import hashlib 14 | 15 | logger = logging.getLogger(__name__) 16 | 17 | import gevent 18 | #noinspection PyUnresolvedReferences 19 | from bottle import run, request, response, get, post, default_app 20 | 21 | from emds.formats import unified 22 | from emds.exceptions import EMDSError 23 | from emdr import __version__ as EMDR_VERSION 24 | from emdr.daemons.gateway import order_pusher 25 | from emdr.daemons.gateway.exceptions import MalformedUploadError 26 | from emdr.conf import default_settings as settings 27 | 28 | def get_remote_address(): 29 | """ 30 | Determines the address of the uploading client. First checks for 31 | proxy-forwarded headers, then falls back to request.remote_addr. 32 | 33 | :rtype: str 34 | """ 35 | return request.headers.get('X-Forwarded-For', request.remote_addr) 36 | 37 | def get_decompressed_message(): 38 | """ 39 | For upload formats that support it, detect gzip Content-Encoding headers 40 | and de-compress on the fly. 41 | 42 | :rtype: str 43 | :returns: The de-compressed request body. 44 | """ 45 | content_encoding = request.headers.get('Content-Encoding', '') 46 | 47 | if content_encoding in ['gzip', 'deflate']: 48 | # Compressed request. We have to decompress the body, then figure out 49 | # if it's form-encoded. 50 | try: 51 | # Auto header checking. 52 | message_body = zlib.decompress(request.body.read(), 15 + 32) 53 | except zlib.error: 54 | # Negative wbits suppresses adler32 checksumming. 55 | message_body = zlib.decompress(request.body.read(), -15) 56 | 57 | # At this point, we're not sure whether we're dealing with a straight 58 | # un-encoded POST body, or a form-encoded POST. Attempt to parse the 59 | # body. If it's not form-encoded, this will return an empty dict. 60 | form_enc_parsed = urlparse.parse_qs(message_body) 61 | if form_enc_parsed: 62 | # This is a form-encoded POST. The value of the data attrib will 63 | # be the body we're looking for. 64 | try: 65 | message_body = form_enc_parsed['data'][0] 66 | except (KeyError, IndexError): 67 | raise MalformedUploadError( 68 | "No 'data' POST key/value found. Check your POST key " 69 | "name for spelling, and make sure you're passing a value." 70 | ) 71 | else: 72 | # Uncompressed request. Bottle handles all of the parsing of the 73 | # POST key/vals, or un-encoded body. 74 | data_key = request.forms.get('data') 75 | if data_key: 76 | # This is a form-encoded POST. Support the silly people. 77 | message_body = data_key 78 | else: 79 | # This is a non form-encoded POST body.
80 | message_body = request.body.read() 81 | 82 | return message_body 83 | 84 | def parse_and_error_handle(parser, data, upload_format): 85 | """ 86 | Standardized parsing and error handling. Returns the final 87 | HTTP body to send back to the uploader after parsing, or error messages. 88 | 89 | :param callable parser: The parser function to use to parse ``data``. 90 | :param object data: A dict or str of parser-specific data to parse 91 | using the callable specified in ``parser``. 92 | :param str upload_format: Upload format identifier for the logs. 93 | :rtype: str 94 | :returns: The HTTP body to return. 95 | """ 96 | try: 97 | parsed_message = parser(data) 98 | except ( 99 | EMDSError, MalformedUploadError, TypeError, ValueError 100 | ) as exc: 101 | # Something bad happened. We know this will return at least a 102 | # semi-useful error message, so do so. 103 | response.status = 400 104 | logger.error("Error to %s: %s" % (get_remote_address(), exc.message)) 105 | return exc.message 106 | 107 | ip_hash_salt = settings.GATEWAY_IP_KEY_SALT 108 | if ip_hash_salt: 109 | # If an IP hash salt is set, salt+hash the uploader's IP address and set 110 | # it as the EMDR upload key value. 111 | ip_hash = hashlib.sha1(ip_hash_salt + get_remote_address()).hexdigest() 112 | parsed_message.upload_keys.append({'name': 'EMDR', 'key': ip_hash}) 113 | 114 | # Sends the parsed MarketOrderList or MarketHistoryList to the Announcers 115 | # as compressed JSON. 116 | gevent.spawn(order_pusher.push_message, parsed_message) 117 | 118 | logger.info("Accepted %s %s upload from %s" % ( 119 | upload_format, parsed_message.list_type, get_remote_address() 120 | )) 121 | # Goofy, but apparently expected by EVE Market Data Uploader. 122 | return '1' 123 | 124 | @post('/upload/unified/') 125 | def upload_unified(): 126 | """ 127 | This view accepts uploads in Unified Uploader format. These 128 | typically arrive via the EVE Unified Uploader client. 129 | """ 130 | try: 131 | # Body may or may not be compressed. 132 | message_body = get_decompressed_message() 133 | except zlib.error as exc: 134 | # Some languages and libs do a crap job of zlib-compressing stuff. 135 | # Provide at least some kind of feedback for them to try to get pointed 136 | # in the correct direction. 137 | response.status = 400 138 | # I'm curious how common this is, keep an eye out. 139 | logger.error("gzip error with %s: %s" % (get_remote_address(), exc.message)) 140 | return exc.message 141 | except MalformedUploadError as exc: 142 | # They probably sent an encoded POST, but got the key/val wrong. 143 | response.status = 400 144 | logger.error("Error to %s: %s" % (get_remote_address(), exc.message)) 145 | return exc.message 146 | 147 | 148 | return parse_and_error_handle( 149 | unified.parse_from_json, message_body, 'Unified' 150 | ) 151 | 152 | @post('/upload/') 153 | def upload(): 154 | """ 155 | Convenience URL that determines what format the upload is coming in, 156 | then routes to the correct logic for said format. 157 | """ 158 | # We only support UUDIF for now. 159 | return upload_unified() 160 | 161 | @get('/health_check/') 162 | def health_check(): 163 | """ 164 | This should only be used by the gateway monitoring script. It is used 165 | to detect whether the gateway is still alive, and whether it should remain 166 | in the DNS rotation.
167 | """ 168 | return EMDR_VERSION -------------------------------------------------------------------------------- /doc_src/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # EVE Market Data Relay documentation build configuration file, created by 4 | # sphinx-quickstart on Tue Mar 27 00:15:24 2012. 5 | # 6 | # This file is execfile()d with the current directory set to its containing dir. 7 | # 8 | # Note that not all possible configuration values are present in this 9 | # autogenerated file. 10 | # 11 | # All configuration values have a default; values that are commented out 12 | # serve to show the default. 13 | 14 | import sys, os 15 | 16 | # If extensions (or modules to document with autodoc) are in another directory, 17 | # add these directories to sys.path here. If the directory is relative to the 18 | # documentation root, use os.path.abspath to make it absolute, like shown here. 19 | #sys.path.insert(0, os.path.abspath('.')) 20 | 21 | # -- General configuration ----------------------------------------------------- 22 | 23 | # If your documentation needs a minimal Sphinx version, state it here. 24 | #needs_sphinx = '1.0' 25 | 26 | # Add any Sphinx extension module names here, as strings. They can be extensions 27 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 28 | extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx'] 29 | 30 | # Add any paths that contain templates here, relative to this directory. 31 | templates_path = ['_templates'] 32 | 33 | # The suffix of source filenames. 34 | source_suffix = '.rst' 35 | 36 | # The encoding of source files. 37 | #source_encoding = 'utf-8-sig' 38 | 39 | # The master toctree document. 40 | master_doc = 'index' 41 | 42 | # General information about the project. 43 | project = u'EVE Market Data Relay' 44 | copyright = u'2017, Greg Taylor' 45 | 46 | # The version info for the project you're documenting, acts as replacement for 47 | # |version| and |release|, also used in various other places throughout the 48 | # built documents. 49 | # 50 | # The short X.Y version. 51 | version = '0.1' 52 | # The full version, including alpha/beta/rc tags. 53 | release = '0.1' 54 | 55 | # The language for content autogenerated by Sphinx. Refer to documentation 56 | # for a list of supported languages. 57 | #language = None 58 | 59 | # There are two options for replacing |today|: either, you set today to some 60 | # non-false value, then it is used: 61 | #today = '' 62 | # Else, today_fmt is used as the format for a strftime call. 63 | #today_fmt = '%B %d, %Y' 64 | 65 | # List of patterns, relative to source directory, that match files and 66 | # directories to ignore when looking for source files. 67 | exclude_patterns = ['_build'] 68 | 69 | # The reST default role (used for this markup: `text`) to use for all documents. 70 | #default_role = None 71 | 72 | # If true, '()' will be appended to :func: etc. cross-reference text. 73 | #add_function_parentheses = True 74 | 75 | # If true, the current module name will be prepended to all description 76 | # unit titles (such as .. function::). 77 | #add_module_names = True 78 | 79 | # If true, sectionauthor and moduleauthor directives will be shown in the 80 | # output. They are ignored by default. 81 | #show_authors = False 82 | 83 | # The name of the Pygments (syntax highlighting) style to use. 84 | pygments_style = 'sphinx' 85 | 86 | # A list of ignored prefixes for module index sorting. 
87 | #modindex_common_prefix = [] 88 | 89 | 90 | # -- Options for HTML output --------------------------------------------------- 91 | 92 | # The theme to use for HTML and HTML Help pages. See the documentation for 93 | # a list of builtin themes. 94 | html_theme = 'sphinx_rtd_theme' 95 | 96 | # Theme options are theme-specific and customize the look and feel of a theme 97 | # further. For a list of options available for each theme, see the 98 | # documentation. 99 | #html_theme_options = {} 100 | 101 | # Add any paths that contain custom themes here, relative to this directory. 102 | #html_theme_path = [] 103 | 104 | # The name for this set of Sphinx documents. If None, it defaults to 105 | # " v documentation". 106 | #html_title = None 107 | 108 | # A shorter title for the navigation bar. Default is the same as html_title. 109 | #html_short_title = None 110 | 111 | # The name of an image file (relative to this directory) to place at the top 112 | # of the sidebar. 113 | #html_logo = None 114 | 115 | # The name of an image file (within the static path) to use as favicon of the 116 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 117 | # pixels large. 118 | #html_favicon = None 119 | 120 | # Add any paths that contain custom static files (such as style sheets) here, 121 | # relative to this directory. They are copied after the builtin static files, 122 | # so a file named "default.css" will overwrite the builtin "default.css". 123 | html_static_path = ['_static'] 124 | 125 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 126 | # using the given strftime format. 127 | #html_last_updated_fmt = '%b %d, %Y' 128 | 129 | # If true, SmartyPants will be used to convert quotes and dashes to 130 | # typographically correct entities. 131 | #html_use_smartypants = True 132 | 133 | # Custom sidebar templates, maps document names to template names. 134 | #html_sidebars = {} 135 | 136 | # Additional templates that should be rendered to pages, maps page names to 137 | # template names. 138 | #html_additional_pages = {} 139 | 140 | # If false, no module index is generated. 141 | #html_domain_indices = True 142 | 143 | # If false, no index is generated. 144 | #html_use_index = True 145 | 146 | # If true, the index is split into individual pages for each letter. 147 | #html_split_index = False 148 | 149 | # If true, links to the reST sources are added to the pages. 150 | #html_show_sourcelink = True 151 | 152 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 153 | #html_show_sphinx = True 154 | 155 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 156 | #html_show_copyright = True 157 | 158 | # If true, an OpenSearch description file will be output, and all pages will 159 | # contain a tag referring to it. The value of this option must be the 160 | # base URL from which the finished HTML is served. 161 | #html_use_opensearch = '' 162 | 163 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 164 | #html_file_suffix = None 165 | 166 | # Output file base name for HTML help builder. 167 | htmlhelp_basename = 'EVEMarketDataRelaydoc' 168 | 169 | 170 | # -- Options for LaTeX output -------------------------------------------------- 171 | 172 | latex_elements = { 173 | # The paper size ('letterpaper' or 'a4paper'). 174 | #'papersize': 'letterpaper', 175 | 176 | # The font size ('10pt', '11pt' or '12pt'). 
177 | #'pointsize': '10pt', 178 | 179 | # Additional stuff for the LaTeX preamble. 180 | #'preamble': '', 181 | } 182 | 183 | # Grouping the document tree into LaTeX files. List of tuples 184 | # (source start file, target name, title, author, documentclass [howto/manual]). 185 | latex_documents = [ 186 | ('index', 'EVEMarketDataRelay.tex', u'EVE Market Data Relay Documentation', 187 | u'Greg Taylor', 'manual'), 188 | ] 189 | 190 | # The name of an image file (relative to this directory) to place at the top of 191 | # the title page. 192 | #latex_logo = None 193 | 194 | # For "manual" documents, if this is true, then toplevel headings are parts, 195 | # not chapters. 196 | #latex_use_parts = False 197 | 198 | # If true, show page references after internal links. 199 | #latex_show_pagerefs = False 200 | 201 | # If true, show URL addresses after external links. 202 | #latex_show_urls = False 203 | 204 | # Documents to append as an appendix to all manuals. 205 | #latex_appendices = [] 206 | 207 | # If false, no module index is generated. 208 | #latex_domain_indices = True 209 | 210 | 211 | # -- Options for manual page output -------------------------------------------- 212 | 213 | # One entry per manual page. List of tuples 214 | # (source start file, name, description, authors, manual section). 215 | man_pages = [ 216 | ('index', 'evemarketdatarelay', u'EVE Market Data Relay Documentation', 217 | [u'Greg Taylor'], 1) 218 | ] 219 | 220 | # If true, show URL addresses after external links. 221 | #man_show_urls = False 222 | 223 | 224 | # -- Options for Texinfo output ------------------------------------------------ 225 | 226 | # Grouping the document tree into Texinfo files. List of tuples 227 | # (source start file, target name, title, author, 228 | # dir menu entry, description, category) 229 | texinfo_documents = [ 230 | ('index', 'EVEMarketDataRelay', u'EVE Market Data Relay Documentation', 231 | u'Greg Taylor', 'EVEMarketDataRelay', 'One line description of project.', 232 | 'Miscellaneous'), 233 | ] 234 | 235 | # Documents to append as an appendix to all manuals. 236 | #texinfo_appendices = [] 237 | 238 | # If false, no module index is generated. 239 | #texinfo_domain_indices = True 240 | 241 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 242 | #texinfo_show_urls = 'footnote' 243 | 244 | 245 | # Example configuration for intersphinx: refer to the Python standard library. 246 | intersphinx_mapping = {'http://docs.python.org/': None} 247 | -------------------------------------------------------------------------------- /doc_src/using.rst: -------------------------------------------------------------------------------- 1 | .. _using: 2 | 3 | .. include:: global.txt 4 | 5 | Connecting to the EMDR network 6 | ============================== 7 | 8 | In order to connect to the EVE Market Data Relay feed, you'll need to use 9 | the ZeroMQ_ bindings for the language of your choice. This involves installing 10 | ZeroMQ_, then installing the bindings. See your language's section below for 11 | a link to its bindings. 12 | 13 | Once you have ZeroMQ_ plus bindings installed, you'll need to choose a 14 | Relay to connect to. See :doc:`access` for a list, and any potential special 15 | case instructions for each relay. After that, you'll be set to connect and 16 | begin receiving data. 17 | 18 | Data Format 19 | ----------- 20 | 21 | All data coming out of EMDR is in `Unified Uploader Data Interchange Format`_, 22 | which is a JSON-based standard for market orders and history. 
See the 23 | `spec `_ for more details. 24 | 25 | An important note on keeping up 26 | ------------------------------- 27 | 28 | Our relays use PUB/SUB to broadcast market data to the consumers. Within each 29 | relay is a buffer for each consumer. If the consumer is 30 | having a hard time keeping up with the flow of data from the relay, the relay's 31 | send buffer will gradually fill, since data is being produced faster than 32 | the consumer can receive it. Once the buffer reaches a certain size, we 33 | start discarding messages, which can be a very bad thing for your application. 34 | Here is the relevant quote from the ZeroMQ documentation:: 35 | 36 | When a ZMQ_PUB socket enters an exceptional state due to having reached 37 | the high water mark for a subscriber, then any messages that would be 38 | sent to the subscriber in question shall instead be dropped until the 39 | exceptional state ends. The zmq_send() function shall never block for 40 | this socket type. 41 | 42 | The important take-away is that when designing your consumers, you'll want 43 | to either do as little processing in your consumer as possible, or make 44 | sure that your network IO remains async or non-blocking so that you 45 | don't lose any messages. 46 | 47 | .. tip:: While the examples below are a good way to get you started, you 48 | will probably need to adapt them with this IO concern in mind as your 49 | application grows. 50 | 51 | Examples for various languages 52 | ------------------------------ 53 | 54 | Below are a few examples of how to connect to the data feed. If you see 55 | anything wrong with the examples below, please let us know on the 56 | `issue tracker`_. The original author of this documentation is only familiar 57 | with Python. 58 | 59 | Python 60 | ^^^^^^ 61 | 62 | The following example uses the pyzmq_ module (available off of PyPI) 63 | and simplejson_. For a more complete list of examples, see the 64 | `Python examples`_ dir on GitHub:: 65 | 66 | """ 67 | Example Python EMDR client. 68 | """ 69 | import zlib 70 | import zmq 71 | # You can substitute the stdlib's json module, if that suits your fancy 72 | import simplejson 73 | 74 | def main(): 75 | context = zmq.Context() 76 | subscriber = context.socket(zmq.SUB) 77 | 78 | # Connect to the first publicly available relay. 79 | subscriber.connect('tcp://relay-us-central-1.eve-emdr.com:8050') 80 | # Disable filtering. 81 | subscriber.setsockopt(zmq.SUBSCRIBE, "") 82 | 83 | while True: 84 | # Receive raw market JSON strings. 85 | market_json = zlib.decompress(subscriber.recv()) 86 | # Un-serialize the JSON data to a Python dict. 87 | market_data = simplejson.loads(market_json) 88 | # Dump the market data to stdout. Or, you know, do more fun 89 | # things here. 90 | print market_data 91 | 92 | if __name__ == '__main__': 93 | main() 94 | 95 | .. _pyzmq: http://pypi.python.org/pypi/pyzmq/ 96 | .. _simplejson: http://pypi.python.org/pypi/simplejson/ 97 | .. _json: http://docs.python.org/library/json.html 98 | .. _Python examples: https://github.com/gtaylor/EVE-Market-Data-Relay/tree/master/examples/python 99 | 100 | PHP 101 | ^^^ 102 | 103 | PHP accesses EMDR via ZeroMQ's `php-zmq`_ PHP bindings: 104 | 105 | .. code-block:: php 106 | 107 | <?php 108 | /* 109 | * Example PHP EMDR client. 110 | */ 111 | 112 | $context = new ZMQContext(); 113 | $subscriber = $context->getSocket(ZMQ::SOCKET_SUB); 114 | 115 | // Connect to the first publicly available relay. 116 | $subscriber->connect("tcp://relay-us-central-1.eve-emdr.com:8050"); 117 | // Disable filtering.
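// An empty subscription string tells ZeroMQ to deliver every message.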
$subscriber->setSockOpt(ZMQ::SOCKOPT_SUBSCRIBE, ""); 119 | 120 | while (true) { 121 | // Receive raw market JSON strings. 122 | $market_json = gzuncompress($subscriber->recv()); 123 | // Un-serialize the JSON data to an associative array. 124 | $market_data = json_decode($market_json, true); 125 | // Dump the market data to stdout. Or, you know, do more fun things here. 126 | var_dump($market_data); 127 | } 128 | 129 | .. _php-zmq: http://www.zeromq.org/bindings:php 130 | 131 | Ruby 132 | ^^^^ 133 | 134 | Ruby accesses EMDR via ZeroMQ's zmq_ Ruby bindings: 135 | 136 | .. code-block:: ruby 137 | 138 | # 139 | # Synchronized subscriber 140 | # 141 | 142 | require 'rubygems' 143 | require 'ffi-rzmq' 144 | require 'json' 145 | require 'zlib' 146 | 147 | context = ZMQ::Context.new 148 | subscriber = context.socket(ZMQ::SUB) 149 | 150 | # Connect to the first publicly available relay. 151 | subscriber.connect("tcp://relay-us-central-1.eve-emdr.com:8050") 152 | subscriber.setsockopt(ZMQ::SUBSCRIBE,"") 153 | 154 | loop do 155 | # Receive raw market JSON strings. 156 | subscriber.recv_string(string = '') 157 | # Un-compress the stream. 158 | market_json = Zlib::Inflate.new(Zlib::MAX_WBITS).inflate(string) 159 | # Un-serialize the JSON data. 160 | market_data = JSON.parse(market_json) 161 | # Dump the market data to stdout. Or, you know, do more fun things here. 162 | puts market_data 163 | end 164 | 165 | .. _zmq: http://www.zeromq.org/bindings:ruby 166 | 167 | Go 168 | ^^ 169 | 170 | .. code-block:: go 171 | 172 | package main 173 | 174 | import ( 175 | "log" 176 | "bytes" 177 | "io/ioutil" 178 | "compress/zlib" 179 | zmq "github.com/pebbe/zmq2" // or zmq3/zmq4 180 | ) 181 | 182 | // go run emdr_client_example.go 183 | func main() { 184 | client, err := zmq.NewSocket(zmq.SUB) 185 | if err != nil { 186 | log.Fatal(err) 187 | } 188 | 189 | // Connect and subscribe to everything. 190 | err = client.Connect("tcp://relay-us-central-1.eve-emdr.com:8050") 191 | if err == nil { err = client.SetSubscribe("") } 192 | if err != nil { 193 | log.Fatal(err) 194 | } 195 | 196 | // Endless loop. 197 | for { 198 | // Receive message from ZeroMQ. 199 | msg, err := client.Recv(0) 200 | if err != nil { 201 | log.Fatal(err) 202 | } 203 | 204 | // Prepare to decode. 205 | decoded, err := ZlibDecode(msg) 206 | if err != nil { 207 | log.Fatal(err) 208 | } 209 | 210 | // Output as json string, should do something useful at this point ... 211 | log.Printf("%s", decoded) 212 | } 213 | } 214 | 215 | func ZlibDecode(encoded string) (decoded []byte, err error) { 216 | b := bytes.NewBufferString(encoded) 217 | pipeline, err := zlib.NewReader(b) 218 | 219 | if err == nil { 220 | defer pipeline.Close() 221 | decoded, err = ioutil.ReadAll(pipeline) 222 | } 223 | 224 | return 225 | } 226 | 227 | C# 228 | ^^ 229 | 230 | C# accesses EMDR via ZeroMQ's clrzmq_ binding: 231 | 232 | ..
code-block:: c# 233 | 234 | using System; 235 | using System.Collections.Generic; 236 | using System.IO; 237 | using System.IO.Compression; 238 | using System.Linq; 239 | using System.Text; 240 | using System.Web.Script.Serialization; // Needs reference to 'System.Web.Extensions.dll' 241 | using ZMQ; // Needs reference to 'clrzmq.dll' and adding 'libzmq.dll' to project 242 | // 'clrzmq' can be found at: https://github.com/zeromq/clrzmq/downloads 243 | 244 | namespace EMDR_Client 245 | { 246 | public class Program 247 | { 248 | private static void Main() 249 | { 250 | using (var context = new Context()) 251 | { 252 | using (var subscriber = context.Socket(SocketType.SUB)) 253 | { 254 | // Connect to the first publicly available relay. 255 | subscriber.Connect("tcp://relay-us-central-1.eve-emdr.com:8050"); 256 | 257 | // Disable filtering. 258 | subscriber.SetSockOpt(SocketOpt.SUBSCRIBE, Encoding.UTF8.GetBytes("")); 259 | 260 | // Alternatively 'Subscribe' can be used 261 | //subscriber.Subscribe("", Encoding.UTF8); 262 | 263 | while (true) 264 | { 265 | try 266 | { 267 | // Receive compressed raw market data. 268 | var receivedData = subscriber.Recv(); 269 | 270 | // The following lines remove the need for 'zlib'; 271 | // 'zlib' actually uses the same algorithm as 'DeflateStream'. 272 | // To make the data compatible with 'DeflateStream', we only have to remove 273 | // the last four bytes, which are the adler32 checksum, and 274 | // the first two bytes, which are the 'zlib' header. 275 | byte[] decompressed; 276 | byte[] choppedRawData = new byte[(receivedData.Length - 4)]; 277 | Array.Copy(receivedData, choppedRawData, choppedRawData.Length); 278 | choppedRawData = choppedRawData.Skip(2).ToArray(); 279 | 280 | // Decompress the raw market data. 281 | using (MemoryStream inStream = new MemoryStream(choppedRawData)) 282 | using (MemoryStream outStream = new MemoryStream()) 283 | { 284 | DeflateStream outZStream = new DeflateStream(inStream, CompressionMode.Decompress); 285 | outZStream.CopyTo(outStream); 286 | decompressed = outStream.ToArray(); 287 | } 288 | 289 | // Transform data into JSON strings. 290 | string marketJson = Encoding.UTF8.GetString(decompressed); 291 | 292 | // Un-serialize the JSON data to a dictionary. 293 | var serializer = new JavaScriptSerializer(); 294 | var dictionary = serializer.Deserialize<Dictionary<string, object>>(marketJson); 295 | 296 | // Dump the market data to console or, you know, do more fun things here. 297 | foreach (KeyValuePair<string, object> pair in dictionary) 298 | { 299 | Console.WriteLine("{0}: {1}", pair.Key, pair.Value); 300 | } 301 | Console.WriteLine(); 302 | } 303 | catch (ZMQ.Exception ex) 304 | { 305 | Console.WriteLine("ZMQ Exception occurred : {0}", ex.Message); 306 | } 307 | } 308 | } 309 | } 310 | } 311 | } 312 | } 313 | 314 | .. _clrzmq: https://github.com/zeromq/clrzmq/downloads 315 | 316 | Visual Basic 317 | ^^^^^^^^^^^^ 318 | 319 | Visual Basic, like C#, accesses EMDR via ZeroMQ's clrzmq_ binding: 320 | 321 | ..
code-block:: vb.net 322 | 323 | Imports System.Text 324 | Imports System.IO 325 | Imports System.IO.Compression 326 | Imports System.Web.Script.Serialization ' Needs reference to 'System.Web.Extensions.dll' 327 | Imports ZMQ ' Needs reference to 'clrzmq.dll' and adding 'libzmq.dll' to project 328 | ' 'clrzmq' can be found at: https://github.com/zeromq/clrzmq/downloads 329 | 330 | Module MainModule 331 | 332 | Sub Main() 333 | Using context = New Context() 334 | Using subscriber = context.Socket(SocketType.SUB) 335 | 336 | ' Connect to the first publicly available relay. 337 | subscriber.Connect("tcp://relay-us-central-1.eve-emdr.com:8050") 338 | 339 | ' Disable filtering. 340 | subscriber.SetSockOpt(SocketOpt.SUBSCRIBE, Encoding.UTF8.GetBytes("")) 341 | 342 | ' Alternatively 'Subscribe' can be used. 343 | 'subscriber.Subscribe("", Encoding.UTF8) 344 | 345 | While True 346 | Try 347 | ' Receive compressed raw market data. 348 | Dim receivedData() = subscriber.Recv() 349 | 350 | ' The following lines remove the need for 'zlib'; 351 | ' 'zlib' actually uses the same algorithm as 'DeflateStream'. 352 | ' To make the data compatible with 'DeflateStream', we only have to remove 353 | ' the last four bytes, which are the adler32 checksum, and 354 | ' the first two bytes, which are the 'zlib' header. 355 | Dim decompressed() As Byte 356 | Dim choppedRawData(receivedData.Length - 5) As Byte ' Inclusive upper bound: length is receivedData.Length - 4. 357 | Array.Copy(receivedData, choppedRawData, choppedRawData.Length) 358 | choppedRawData = choppedRawData.Skip(2).ToArray() 359 | 360 | ' Decompress the raw market data. 361 | Using inStream = New MemoryStream(choppedRawData) 362 | Using outStream = New MemoryStream() 363 | Dim outZStream = New DeflateStream(inStream, CompressionMode.Decompress) 364 | outZStream.CopyTo(outStream) 365 | decompressed = outStream.ToArray 366 | End Using 367 | End Using 368 | 369 | ' Transform data into JSON strings. 370 | Dim marketJson = Encoding.UTF8.GetString(decompressed) 371 | 372 | ' Un-serialize the JSON data to a dictionary. 373 | Dim serializer = New JavaScriptSerializer() 374 | Dim dictionary = serializer.Deserialize(Of Dictionary(Of String, Object))(marketJson) 375 | 376 | ' Dump the market data to console or, you know, do more fun things here. 377 | For Each pair In dictionary 378 | Console.WriteLine("{0}: {1}", pair.Key, pair.Value) 379 | Next 380 | Console.WriteLine() 381 | Catch ex As Exception 382 | Console.WriteLine("ZMQ Exception occurred : {0}", ex.Message) 383 | End Try 384 | End While 385 | End Using 386 | End Using 387 | End Sub 388 | End Module 389 | 390 | Perl 391 | ^^^^ 392 | 393 | Perl uses the `ZeroMQ-Perl`_ binding: 394 | 395 | .. code-block:: perl 396 | 397 | #!/usr/bin/perl 398 | use warnings; 399 | use strict; 400 | $|=1; 401 | 402 | use ZeroMQ qw/:all/; 403 | 404 | my $cxt = ZeroMQ::Context->new; 405 | my $sock = $cxt->socket(ZMQ_SUB); 406 | $sock->connect('tcp://relay-us-central-1.eve-emdr.com:8050'); 407 | $sock->setsockopt(ZMQ_SUBSCRIBE, ""); 408 | 409 | while (1) { 410 | my $msg = $sock->recv(); 411 | last unless defined $msg; 412 | 413 | use Compress::Zlib; 414 | my $json = uncompress($msg->data); 415 | 416 | use JSON; 417 | my $data = decode_json($json); 418 | 419 | use Data::Dumper; 420 | print Dumper($data),"\n\n"; 421 | } 422 | 423 | .. _ZeroMQ-Perl: http://www.zeromq.org/bindings:perl 424 | 425 | Java 426 | ^^^^ 427 | 428 | Java uses the jzmq_ binding: 429 | 430 | .. code-block:: java 431 | 432 | /* 433 | * Example Java EMDR client.
434 | */ 435 | 436 | import org.zeromq.*; // https://github.com/zeromq/jzmq 437 | import org.json.simple.*; // http://code.google.com/p/json-simple/downloads/list 438 | import org.json.simple.parser.*; 439 | import java.util.zip.*; 440 | 441 | public class EMDR_Client { 442 | 443 | public static void main(String[] args) throws Exception { 444 | 445 | ZMQ.Context context = ZMQ.context(1); 446 | ZMQ.Socket subscriber = context.socket(ZMQ.SUB); 447 | 448 | // Connect to the first publicly available relay. 449 | subscriber.connect("tcp://relay-us-central-1.eve-emdr.com:8050"); 450 | 451 | // Disable filtering. 452 | subscriber.subscribe(new byte[0]); 453 | 454 | while (true) { 455 | try { 456 | // Receive compressed raw market data. 457 | byte[] receivedData = subscriber.recv(0); 458 | 459 | // We build a large enough buffer to contain the decompressed data. 460 | byte[] decompressed = new byte[receivedData.length * 16]; 461 | 462 | // Decompress the raw market data. 463 | Inflater inflater = new Inflater(); 464 | inflater.setInput(receivedData); 465 | int decompressedLength = inflater.inflate(decompressed); 466 | inflater.end(); 467 | 468 | byte[] output = new byte[decompressedLength]; 469 | System.arraycopy(decompressed, 0, output, 0, decompressedLength); 470 | 471 | // Transform data into JSON strings. 472 | String market_json = new String(output, "UTF-8"); 473 | 474 | // Un-serialize the JSON data. 475 | JSONParser parser = new JSONParser(); 476 | JSONObject market_data = (JSONObject)parser.parse(market_json); 477 | 478 | // Dump the market data to console or, you know, do more fun things here. 479 | System.out.println(market_data); 480 | } catch (ZMQException ex) { 481 | System.out.println("ZMQ Exception occurred : " + ex.getMessage()); 482 | } 483 | } 484 | } 485 | } 486 | 487 | .. _jzmq: http://www.zeromq.org/bindings:java 488 | 489 | Erlang 490 | ^^^^^^ 491 | 492 | Erlang uses erlzmq2_ binding: 493 | 494 | .. code-block:: erlang 495 | 496 | #!/usr/bin/env escript 497 | 498 | % you will need the ZeroMQ Erlang library: https://github.com/zeromq/erlzmq2 499 | % I also use jiffy for Json: https://github.com/davisp/jiffy 500 | 501 | main(_Args) -> 502 | {ok, Context} = erlzmq:context(), 503 | {ok, Subscriber} = erlzmq:socket(Context, sub), 504 | ok = erlzmq:connect(Subscriber,"tcp://relay-us-central-1.eve-emdr.com:8050"), 505 | ok = erlzmq:setsockopt(Subscriber, subscribe, <<>>), 506 | msgcheck(Subscriber). 507 | 508 | msgcheck(Subscriber) -> 509 | {ok,Msg} = erlzmq:recv(Subscriber), 510 | io:format("~p\n",[jiffy:decode(zlib:uncompress(Msg))]), 511 | msgcheck(Subscriber). 512 | 513 | .. _erlzmq2: https://github.com/zeromq/erlzmq2 514 | 515 | Node.js 516 | ^^^^^^^ 517 | 518 | Node.js uses the `zeromq.node`_ binding: 519 | 520 | .. code-block:: javascript 521 | 522 | /* 523 | * Example node.js EMDR client 524 | */ 525 | 526 | var zmq = require('zmq'); 527 | var zlib = require('zlib'); 528 | 529 | var sock = zmq.socket('sub'); 530 | 531 | // Connect to the first publicly available relay. 532 | sock.connect('tcp://relay-us-central-1.eve-emdr.com:8050'); 533 | // Disable filtering 534 | sock.subscribe(''); 535 | 536 | sock.on('message', function(msg){ 537 | // Receive raw market JSON strings. 538 | zlib.inflate(msg, function(err, market_json) { 539 | // Un-serialize the JSON data. 540 | var market_data = JSON.parse(market_json); 541 | 542 | // Do something useful 543 | console.log(market_data); 544 | }); 545 | }); 546 | 547 | .. 
_zeromq.node: http://www.zeromq.org/bindings:node-js 548 | --------------------------------------------------------------------------------