15 | {% for hostname, tags in targets -%}
16 | {{ hostname }}:
17 | {% for key, value in tags -%}
18 | {{key}}={{value}}
19 | {% endfor %}
20 | {% endfor %}
21 |
22 |
23 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2016 Dropbox, Inc.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 |
--------------------------------------------------------------------------------
/bin/llama_reflector:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """Llama UDP Reflector
3 |
4 | Usage:
5 | llama_reflector [options]
6 |
7 | Options:
8 | --loglevel=(debug|info|warn|error|critical)
9 | # Logging level to print to stderr [default: info]
10 | --logfile=PATH # Log to a file
11 | --port=NUM # UDP port to bind reflector [default: 60000]
12 | """
13 |
14 | from llama import app
15 | from llama import udp
16 | import docopt
17 | import logging
18 |
19 |
def main(args):
    """Entry point: parse docopt args, configure logging, run the reflector.

    Args:
        args: dict of parsed command-line options from docopt
    """
    loglevel = args['--loglevel']
    logfile = args['--logfile']
    port = int(args['--port'])

    # Logging always goes to stderr; a file is added only when requested.
    app.log_to_stderr(loglevel)
    if logfile:
        app.log_to_file(logfile, loglevel)
    logging.info('Arguments:\n%s', args)

    # Bind the UDP reflector and serve until interrupted.
    udp.Reflector(port).run()
35 |
36 |
37 | if __name__ == '__main__':
38 | app.run(main, docopt.docopt(__doc__))
39 |
--------------------------------------------------------------------------------
/llama/tests/util_test.py:
--------------------------------------------------------------------------------
1 | """Unittests for util lib."""
2 |
3 | from llama import util
4 | import pytest # noqa
5 |
6 |
class TestUtil(object):
    """Unit tests for the helpers in ``llama.util``."""

    def test_mean(self):
        """``util.mean`` averages a simple integer list."""
        assert util.mean([2, 4, 6, 8, 10]) == 6

    def test_array_split(self):
        """``util.array_split`` yields chunks of at most ``n`` items."""
        expected_lengths = (50, 40)
        for idx, batch in enumerate(util.array_split(range(90), 50)):
            assert len(batch) == expected_lengths[idx]

    def test_runcmd(self):
        """``util.runcmd`` captures exit status, stdout and stderr."""
        ok = util.runcmd('echo something')
        assert ok.returncode == 0
        assert ok.stdout == 'something\n'
        assert not ok.stderr
        bad = util.runcmd('ls /somethingthatdoesntexist__16481916571')
        assert bad.returncode == 2
        assert bad.stderr
        assert not bad.stdout
35 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
import os
from setuptools import find_packages, setup

# Defines __version__ without importing the llama package (which would pull
# in runtime dependencies at build time).  NOTE: execfile is Python 2 only,
# matching the 'Programming Language :: Python :: 2.7' classifier below.
execfile('llama/version.py')

# Runtime dependencies are maintained in requirements.txt, one per line.
with open('requirements.txt') as fh:
    required = fh.read().splitlines()

setup(
    name='llama',
    version=str(__version__),
    description='LLAMA - Loss & LAtency MAtrix',
    url='https://github.com/dropbox/llama',
    author='Bryan Reed',
    maintainer='Daniel Martin',
    author_email='breed@dropbox.com',
    maintainer_email='dmar@dropbox.com',
    license='Apache',
    classifiers=[
        'Development Status :: 1 - Planning',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.7',
        'Topic :: System :: Networking :: Monitoring',
    ],
    keywords='llama udp loss latency matrix probe packet',
    # NOTE(review): only llama_collector is installed, yet bin/ also contains
    # llama_reflector, llama_sender and llama_scraper -- confirm whether
    # their omission here is intentional.
    scripts=['bin/llama_collector'],
    packages=find_packages(exclude=['docs', 'tests*']),
    include_package_data=True,
    zip_safe=False,
    install_requires=required,
)
36 |
--------------------------------------------------------------------------------
/llama/tests/ping_test.py:
--------------------------------------------------------------------------------
1 | """Unittests for metrics lib."""
2 |
3 | from llama import ping
4 | from llama import util
5 | import pytest
6 |
7 |
def fake_runcmd(cmd):
    """Stand-in for ``util.runcmd`` returning canned hping3 output.

    Returns:
        a (returncode, stdout, stderr) tuple mimicking a clean hping3 run
    """
    canned_stderr = '''
--- shelby hping statistic ---
5 packets transmitted, 5 packets received, 0% packet loss
round-trip min/avg/max = 0.1/0.1/0.2 ms
'''
    canned_stdout = '''
HPING shelby (eth0 108.160.167.85): S set, 40 headers + 0 data bytes
len=46 ip=1.1.7.5 ttl=61 DF id=4696 sport=0 flags=RA seq=0 win=0 rtt=0.1 ms
len=46 ip=1.1.7.5 ttl=61 DF id=4699 sport=0 flags=RA seq=1 win=0 rtt=0.1 ms
len=46 ip=1.1.7.5 ttl=61 DF id=4701 sport=0 flags=RA seq=2 win=0 rtt=0.1 ms
len=46 ip=1.1.7.5 ttl=61 DF id=4702 sport=0 flags=RA seq=3 win=0 rtt=0.1 ms
len=46 ip=1.1.7.5 ttl=61 DF id=4704 sport=0 flags=RA seq=4 win=0 rtt=0.1 ms
'''
    return 0, canned_stdout, canned_stderr
23 |
24 |
class TestHping3(object):
    """Tests for ``ping.hping3`` using a patched ``util.runcmd``."""

    def silence_pyflakes(self):
        """PyFlakes complains because we don't explicitly use the module."""
        dir(pytest)

    def test_good(self, monkeypatch):
        """A clean hping3 run parses into (loss, avg, target)."""
        monkeypatch.setattr(util, 'runcmd', fake_runcmd)
        result = ping.hping3('somehost', count=5)
        assert result == ('0', '0.1', 'somehost')
34 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | ##################################
2 | LLAMA - Loss & LAtency MAtrix
3 | ##################################
4 |
5 | |travis-ci-status| |rtd-llama| |pypi-llama|
6 |
7 | .. figure:: https://raw.githubusercontent.com/dropbox/llama/master/docs/_static/llama-logo.png
8 | :alt: llama-logo
9 |
10 | **L.L.A.M.A.** is a deployable service which artificially produces traffic
11 | for measuring network performance between endpoints.
12 |
13 | LLAMA uses UDP socket level operations to support multiple QoS classes.
14 | UDP datagrams are fast, efficient, and will hash across ECMP paths in
15 | large networks to uncover faults and erring interfaces. LLAMA is written
16 | in pure Python for maintainability.
17 |
18 |
19 | **Contents**:
20 |
21 | .. toctree::
22 | :maxdepth: 2
23 |
24 |
25 | Indices and tables
26 | ==================
27 |
28 | * :ref:`genindex`
29 | * :ref:`modindex`
30 | * :ref:`search`
31 |
32 |
33 | .. |travis-ci-status| image:: https://travis-ci.org/dropbox/llama.svg?branch=master
34 | :target: https://travis-ci.org/dropbox/llama
35 | .. |pypi-llama| image:: https://img.shields.io/pypi/v/llama.svg?style=flat
36 | :target: https://pypi.python.org/pypi/llama
37 | .. |rtd-llama| image:: https://readthedocs.org/projects/llama/badge/?version=latest
38 | :target: http://llama.readthedocs.io/en/latest/?badge=latest
39 | :alt: Documentation Status
40 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 |
27 | # PyInstaller
28 | # Usually these files are written by a python script from a template
29 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
30 | *.manifest
31 | *.spec
32 |
33 | # Installer logs
34 | pip-log.txt
35 | pip-delete-this-directory.txt
36 |
37 | # Unit test / coverage reports
38 | htmlcov/
39 | .tox/
40 | .coverage
41 | .coverage.*
42 | .cache
43 | nosetests.xml
44 | coverage.xml
45 | *,cover
46 | .hypothesis/
47 |
48 | # Translations
49 | *.mo
50 | *.pot
51 |
52 | # Django stuff:
53 | *.log
54 | local_settings.py
55 |
56 | # Flask stuff:
57 | instance/
58 | .webassets-cache
59 |
60 | # Scrapy stuff:
61 | .scrapy
62 |
63 | # Sphinx documentation
64 | docs/_build/
65 |
66 | # PyBuilder
67 | target/
68 |
69 | # IPython Notebook
70 | .ipynb_checkpoints
71 |
72 | # pyenv
73 | .python-version
74 |
75 | # celery beat schedule file
76 | celerybeat-schedule
77 |
78 | # dotenv
79 | .env
80 |
81 | # virtualenv
82 | venv/
83 | ENV/
84 |
85 | # Spyder project settings
86 | .spyderproject
87 |
88 | # Rope project settings
89 | .ropeproject
90 |
91 | # vim
92 | .*sw?
93 |
--------------------------------------------------------------------------------
/bin/llama_sender:
--------------------------------------------------------------------------------
#!/usr/bin/env python
"""Llama UDP Sender

This script is intended to be used as a command-line test from a sending host
to a reflecting host running `llama_reflector`.

Usage:
    llama_sender [options] <target>

Options:
    --loglevel=(debug|info|warn|error|critical)
                      # Logging level to print to stderr [default: info]
    --logfile=PATH    # Log to a file
    --port=NUM        # UDP port of destination [default: 60000]
    --count=NUM       # Number of datagrams to send [default: 500]
    --timeout=0.2     # Timeout for each datagram in seconds [default: 0.2]
    --tos=0xNN        # TOS (hex) bits to set on datagrams [default: 0x00]
"""

from llama import app
from llama import udp
import docopt
import logging


def main(args):
    """Parses CLI args, configures logging, and fires UDP probes.

    Args:
        args: dict of parsed command-line options from docopt
    """
    # process args
    loglevel = args['--loglevel']
    logfile = args['--logfile']
    port = int(args['--port'])
    # BUG FIX: the positional key had been mangled to '' -- docopt stores
    # positionals under their angle-bracketed name, and the usage string
    # above must declare <target> for the key to exist at all.
    destination = args['<target>']
    count = int(args['--count'])
    tos = int(args['--tos'], base=16)     # e.g. '0x10' -> 16
    timeout = float(args['--timeout'])

    # setup logging
    app.log_to_stderr(loglevel)
    if logfile:
        app.log_to_file(logfile, loglevel)
    logging.info('Arguments:\n%s', args)

    # send!
    sender = udp.Sender(destination, port, count, tos, timeout)
    sender.run()
    # print-as-function also works on Python 2 for a single argument.
    print(sender.stats)


if __name__ == '__main__':
    app.run(main, docopt.docopt(__doc__))
50 |
--------------------------------------------------------------------------------
/llama/util.py:
--------------------------------------------------------------------------------
1 | """Utility library for LLAMA.
2 |
3 | This provides utility functions used across the project.
4 | """
5 |
6 | import collections
7 | import logging
8 | import shlex
9 | import subprocess
10 |
11 |
# Default port for targets.
# Primarily used for UDP, as the destination port for the collector, and
# the listening port on the reflector.
DEFAULT_DST_PORT = 60000
# Default timeout for probes, in seconds.
# Determines how long to wait for a reply until counting it as a loss.
DEFAULT_TIMEOUT = 0.2

# Result of a completed sub-process run; stdout/stderr are whole strings.
CommandResults = collections.namedtuple(
    'CommandResults', ['returncode', 'stdout', 'stderr'])
22 |
23 |
def array_split(iterable, n):
    """Yield successive slices of ``iterable``, each at most ``n`` long.

    Args:
        iterable: any sliceable sequence (list, string, ...)
        n: maximum chunk length

    Yields:
        consecutive slices of the input; the final one may be shorter.
    """
    start = 0
    while start < len(iterable):
        yield iterable[start:start + n]
        start += n
28 |
29 |
def mean(iterable):
    """Returns the arithmetic mean of the items.

    The division is forced to float so that an all-integer input is not
    silently truncated under Python 2 integer division (e.g. mean([1, 2])
    previously returned 1 instead of 1.5).

    Args:
        iterable: non-empty sequence of numbers

    Raises:
        ZeroDivisionError: if the sequence is empty
    """
    return sum(iterable) / float(len(iterable))
33 |
34 |
def runcmd(command):
    """Runs a command in a sub-process, without a shell.

    The command's stdout is echoed line-by-line at DEBUG level after the
    process completes.

    Args:
        command: string containing the command

    Returns:
        a CommandResults namedtuple containing (returncode, stdout, stderr)
    """
    cmd = shlex.split(command)
    runner = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        universal_newlines=True)
    # BUG FIX: the previous readline() loop drained only stdout; a child
    # writing enough to stderr to fill the pipe buffer would deadlock.
    # communicate() drains both pipes concurrently (see subprocess docs).
    stdout, stderr = runner.communicate()
    for line in stdout.splitlines():
        logging.debug(line)
    return CommandResults(runner.returncode, stdout, stderr)
56 |
--------------------------------------------------------------------------------
/llama/tests/udp_test.py:
--------------------------------------------------------------------------------
1 | """Unittests for udp lib"""
2 |
3 | from llama.udp import UdpData, UdpStats, Sender, Ipv4UdpSocket
4 | import pytest
5 |
class TestSender(object):
    """Tests for ``udp.Sender`` statistics aggregation."""

    def test_stats(self):
        """Sender.stats summarizes a mix of delivered and lost probes."""
        # One fake probe per row: (sent ts, rcvd ts, rtt ms, lost).
        rows = [
            (1496847778307.926, 1496847778320.653, 12.727022171020508, False),
            (0, 0, 0, True),
            (1496847925937.957, 1496847925952.936, 14.978885650634766, False),
        ]
        mock_results = [
            UdpData(Ipv4UdpSocket.SIGNATURE, 0x00, sent, rcvd, rtt, lost)
            for sent, rcvd, rtt, lost in rows]
        # TODO: This should be updated to reflect the commented/correct
        # values once #27 has been resolved.
        mock_stats = UdpStats(
            3,                    # sent
            1,                    # lost
            33.33333333333333,    # loss
            14.978885650634766,   # rtt_max
            # 12.727022171020508 would be the correct rtt_min
            0,                    # rtt_min - This is due to #27
            # 13.852953910827637 would be the correct rtt_avg
            9.235302607218424,    # rtt_avg - This is due to #27
        )
        sender = Sender('127.0.0.1', 60000, 3, tos=0x00, timeout=0.2)
        sender.results = mock_results
        assert sender.stats == mock_stats
--------------------------------------------------------------------------------
/bin/llama_collector:
--------------------------------------------------------------------------------
#!/usr/bin/env python
"""Llama Collector

Usage:
    collector [options] <config>

Options:
    --loglevel=(debug|info|warn|error|critical)
                      # Logging level to print to stderr [default: info]
    --logfile=PATH    # Log to a file
    --count=NUM       # Count of datagrams sent to hosts [default: 128]
    --interval=NUM    # Polling interval in seconds [default: 30]
    --ip=ADDR         # IP address to bind HTTP server [default: 0.0.0.0]
    --port=NUM        # TCP port to bind HTTP server [default: 5000]
    --dst-port=NUM    # UDP port of destination [default: 60000]
    --udp             # Use UDP probes against reflectors
                      (default is SYN tcp/0 with hping3)
    --timeout=NUM     # Seconds to wait for probes before counting as
                      # loss. Applies to UDP only. [default: 0.2]
"""

from llama import app
from llama import collector
import docopt
import logging


def main(args):
    """Parses CLI args, configures logging, and runs the collector server.

    Args:
        args: dict of parsed command-line options from docopt
    """
    # process args
    loglevel = args['--loglevel']
    logfile = args['--logfile']
    interval = int(args['--interval'])
    count = int(args['--count'])
    ip = args['--ip']
    port = int(args['--port'])
    dst_port = int(args['--dst-port'])
    # BUG FIX: the positional key had been mangled to '' -- docopt stores
    # positionals under their angle-bracketed name, and the usage string
    # above must declare <config> for the key to exist at all.
    config_filepath = args['<config>']
    udp = args['--udp']
    timeout = float(args['--timeout'])

    # setup logging
    app.log_to_stderr(loglevel)
    if logfile:
        app.log_to_file(logfile, loglevel)
    logging.info('Arguments:\n%s', args)

    # get to work
    server = collector.HttpServer(__name__, ip=ip, port=port)
    server.configure(config_filepath)
    server.run(interval, count, udp, dst_port, timeout)


if __name__ == '__main__':
    app.run(main, docopt.docopt(__doc__))
55 |
--------------------------------------------------------------------------------
/bin/llama_scraper:
--------------------------------------------------------------------------------
#!/usr/bin/env python
"""LLAMA TSDB Scraper

This binary scrapes the LLAMA collectors for latency statistics and shovels
them into a timeseries database.

Usage:
    llama_scraper [options] <collector> ...

Options:
    --loglevel=(debug|info|warn|error|critical)
                          # Logging level to print to stderr [default: info]
    --logfile=PATH        # Log to a file
    --interval=NUM        # Collector poll interval, seconds [default: 30]
    --influx_server=HOST  # InfluxDB server [default: localhost]
    --influx_port=PORT    # InfluxDB port [default: 8086]
    --influx_db=NAME      # InfluxDB database name [default: llama]
    --port=PORT           # Connection port on collectors [default: 5000]
"""

from apscheduler.schedulers.blocking import BlockingScheduler
import docopt
import logging

from llama import app
from llama import scraper


def main(args):
    """Schedules a recurring scrape job for every collector given.

    Args:
        args: dict of parsed command-line options from docopt
    """
    # process args
    loglevel = args['--loglevel']
    logfile = args['--logfile']
    interval = int(args['--interval'])
    influx_server = args['--influx_server']
    influx_port = int(args['--influx_port'])
    influx_db = args['--influx_db']
    # Cast to int for consistency with the other llama binaries.
    collector_port = int(args['--port'])
    # BUG FIX: the positional key had been mangled to '' -- docopt stores
    # the repeated positional list under '<collector>', which the usage
    # string above must declare.
    collectors = args['<collector>']

    # setup logging
    app.log_to_stderr(loglevel)
    if logfile:
        app.log_to_file(logfile, loglevel)
    logging.info('Arguments:\n%s', args)

    # get to work
    logging.info('Using Collector list: %s', collectors)
    scheduler = BlockingScheduler()
    for collector in collectors:
        client = scraper.CollectorClient(collector, collector_port)
        scheduler.add_job(
            client.run, 'interval', seconds=interval, args=[
                influx_server, influx_port, influx_db])
    scheduler.start()


if __name__ == '__main__':
    app.run(main, docopt.docopt(__doc__))
59 |
--------------------------------------------------------------------------------
/CHANGELOG.rst:
--------------------------------------------------------------------------------
1 | #########
2 | Changelog
3 | #########
4 |
5 | Version History
6 | ===============
7 |
8 | .. _v0.1.1:
9 | 0.1.1 (2017-07-14)
10 | ------------------
11 | * Updating maintainer
12 |
13 | .. _v0.1.0:
14 | 0.1.0 (2017-07-14)
15 | ------------------
16 | * Adds timeout parameter through the complete flow for the collector.
17 | * Sender results are actually handled as they complete now.
18 | * Sender exceptions are collected and provided via logging. However, better handling of non-lost probes requires a greater refactor to truly solve.
19 |
20 | .. _v0.0.1a10:
21 | 0.0.1a10 (2017-06-07)
22 | ------------------
23 | * Removed rounding calculation that was excessively removing precision
24 |
25 | .. _v0.0.1a9:
26 | 0.0.1a9 (2017-05-31)
27 | ------------------
28 | * Reflector will now discard malformed datagrams instead of still reflecting them
29 | * Removed a debug statement in a high-frequency loop that was hampering perf
30 | * Collector can now be told which port to use as the destination with UDP
31 |
32 | .. _v0.0.1a8:
33 | 0.0.1a8 (2017-02-02)
34 | ------------------
35 | * Converted ``llama_collector`` to require IP addresses for reflector targets
36 | * Removed all DNS lookups from ``llama_collector`` process
37 |
38 | .. _v0.0.1a7:
39 | 0.0.1a7 (2016-12-06)
40 | ------------------
41 | * Created ``CHANGELOG.rst``, retroactively
42 | * Converted ``README.md`` to ``README.rst``
* Created documentation, `llama.readthedocs.io <http://llama.readthedocs.io>`_
44 |
45 | .. _v0.0.1a6:
46 | 0.0.1a6 (2016-12-01)
47 | ------------------
48 | * Minor housekeeping
49 | * Moved ``runcmd()`` function into ``util.py`` library, added unittest
50 |
51 | .. _v0.0.1a5:
52 | 0.0.1a5 (2016-11-23)
53 | ------------------
54 | * Hooked UDP socket library into ``llama_collector``
55 | * Added ``llama_sender`` command-line test utility
56 |
57 | .. _v0.0.1a4:
58 | 0.0.1a4 (2016-11-14) and previous versions
59 | ------------------
60 | * Initial Alpha versions 0.0.1a1 through 0.0.1a4 with basic functionality
61 | using TCP SYN probes generated from ``hping3`` command-line utility
62 |
--------------------------------------------------------------------------------
/llama/config.py:
--------------------------------------------------------------------------------
1 | """Configuation library for LLAMA.
2 |
3 | This is mostly used by the Collector process to determine target hosts and
4 | tag mappings.
5 | """
6 |
7 | import collections
8 | import ipaddress
9 | import logging
10 | import yaml
11 |
12 |
class Error(Exception):
    """Top level error."""


# A single key/value metadata pair attached to a Target.
Tag = collections.namedtuple('Tag', ['key', 'value'])
18 |
19 |
def validate_ip(addr):
    """Pass-through validation for an IPv4 address.

    Args:
        addr: (str) IP address

    Returns:
        unicode string with the same address in compressed form

    Raises:
        Error: if the IPv4 address is not valid
    """
    try:
        parsed = ipaddress.IPv4Address(unicode(addr))
    except ipaddress.AddressValueError as exc:
        raise Error('Invalid IPv4 address "%s"; %s' % (addr, exc))
    return parsed.compressed
36 |
37 |
class Target(object):
    """Configuration for a single LLAMA target.

    On a decently sized deployment, we'd expect ~1000 or ~10,000 targets, so
    we'll use __slots__ to save memory at scale.
    """

    __slots__ = ['dst_ip', 'tags']

    def __init__(self, dst, **tags):
        """Constructor.

        Args:
            dst: (str) IPv4 address of the target; validated on entry
            tags: key=value pairs stored as Tag namedtuples
        """
        self.dst_ip = validate_ip(dst)
        self.tags = []
        for key, value in tags.iteritems():
            self.tags.append(Tag(key=key, value=value))

    def __repr__(self):
        # BUG FIX: the format string here was empty ('' % (...) raises
        # TypeError) and referenced a nonexistent ``self.ip`` attribute;
        # __slots__ only declares ``dst_ip`` and ``tags``.
        return '<Target %s tags:%s %s>' % (
            self.dst_ip, len(self.tags), hex(id(self)))
56 |
57 |
class CollectorConfig(object):
    """Holds the collector's target list, loaded from a YAML file."""

    __slots__ = ['_targets']

    def __init__(self):
        self._targets = []

    def load(self, filepath):
        """Populates targets from a YAML mapping of {dst: {tag: value}}.

        Args:
            filepath: path to the YAML configuration file
        """
        with open(filepath, 'r') as fh:
            config = yaml.safe_load(fh)
        for dst in config.keys():
            self._targets.append(Target(dst, **config[dst]))
        logging.info('Loaded configuration with %s targets',
                     len(self._targets))

    def __repr__(self):
        # BUG FIX: the format string here was empty; '' % (args) raises
        # TypeError ("not all arguments converted"), so repr() crashed.
        return '<CollectorConfig targets:%s %s>' % (
            len(self._targets), hex(id(self)))

    @property
    def targets(self):
        """Yields (dst_ip, tags) pairs for each configured target."""
        for target in self._targets:
            yield target.dst_ip, target.tags
81 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | =====
2 | LLAMA
3 | =====
4 |
5 | Archived
6 | --------
7 | Development and maintenance of this repo/package has ended. For the latest generation of LLAMA, see https://github.com/dropbox/llama
8 |
9 | Overview
10 | --------
11 | |travis-ci-status| |rtd-llama| |pypi-llama|
12 |
13 | .. figure:: https://raw.githubusercontent.com/dropbox/llama/master/docs/_static/llama-logo.png
14 | :alt: llama-logo
15 |
16 | **L.L.A.M.A.** is a deployable service which artificially produces traffic
17 | for measuring network performance between endpoints.
18 |
19 | LLAMA uses UDP socket level operations to support multiple QoS classes.
20 | UDP datagrams are fast, efficient, and will hash across ECMP paths in
21 | large networks to uncover faults and erring interfaces. LLAMA is written
22 | in pure Python for maintainability.
23 |
24 | Okay, but not yet - Alpha Status
25 | --------------------------------
LLAMA will eventually have all those capabilities, but not yet. For
instance, it does not currently provide QoS functionality, but will
send test traffic using ``hping3`` or a built-in UDP library.
It’s currently being tested in *Alpha* at Dropbox through experimental
correlation.
31 |
32 | Documentation
33 | -------------
* `LLAMA on ReadTheDocs <http://llama.readthedocs.io/>`_
* `Changelog <https://github.com/dropbox/llama/blob/master/CHANGELOG.rst>`_
* See `Issues <https://github.com/dropbox/llama/issues>`_ for TODOs and Bugs
37 |
38 | Visualization
39 | -------------
40 | Using InfluxDB and interested in visualizing LLAMA data in a matrix-like UI?
41 |
42 | Check out https://github.com/dropbox/grallama-panel
43 |
44 | .. figure:: https://raw.githubusercontent.com/dropbox/grallama-panel/master/src/img/grallama-example.png
45 | :alt: grallama-example
46 |
47 | Acknowledgements / References
48 | -----------------------------
49 | * Inspired by: https://www.youtube.com/watch?v=N0lZrJVdI9A
50 | * with slides: https://www.nanog.org/sites/default/files/Lapukhov_Move_Fast_Unbreak.pdf
51 | * Concepts borrowed from: https://github.com/facebook/UdpPinger/
52 |
53 | .. |travis-ci-status| image:: https://travis-ci.org/dropbox/llama.svg?branch=master
54 | :target: https://travis-ci.org/dropbox/llama
55 | .. |pypi-llama| image:: https://img.shields.io/pypi/v/llama.svg?style=flat
56 | :target: https://pypi.python.org/pypi/llama
57 | .. |rtd-llama| image:: https://readthedocs.org/projects/llama/badge/?version=latest
58 | :target: http://llama.readthedocs.io/en/latest/?badge=latest
59 | :alt: Documentation Status
60 |
--------------------------------------------------------------------------------
/llama/ping.py:
--------------------------------------------------------------------------------
1 | """Ping.py
2 |
3 | Ping implements different methods of measuring latency between endpoints. Major
4 | methods available are:
5 | * hping3 (sub-shell/process)
6 | """
7 |
8 | import collections
9 | import logging
10 | import re
11 | from llama import udp
12 | from llama import util
13 |
14 |
# BUG FIX: the named groups had lost their names (e.g. ``(?P[0-9]+)``),
# which is invalid regex syntax and raises re.error at import time.  The
# names are recovered from hping3() below, which reads groups 'loss' and
# 'avg', and from hping3's "round-trip min/avg/max = ..." summary line.
RE_LOSS = re.compile(
    r'(?P<loss>[0-9]+)\% packet loss')
RE_STATS = re.compile(
    r'= (?P<min>[0-9.]+)/(?P<avg>[0-9.]+)/(?P<max>[0-9.]+) ms')


# Parsed probe outcome: loss percentage, average RTT, and the probed target.
ProbeResults = collections.namedtuple(
    'ProbeResults', ['loss', 'avg', 'target'])
23 |
24 |
def hping3(target, count=128, *args, **kwargs):
    """Sends TCP SYN traffic to a target host via the hping3 binary.

    Note: Using hping3 requires not only hping3 be installed on the host
    system, but access as `root` (or sudo equivalent).

    Args:
        target: hostname or IP address of target
        count: number of datagrams to send
        args: catch for args not yet supported by this method
        kwargs: catch for kwargs not yet supported by this method

    Returns:
        a ProbeResults tuple containing (loss %, RTT average, target host)
    """
    cmd = 'sudo hping3 --interval u10000 --count %s --syn %s' % (
        count, target)
    # hping3 writes its summary statistics to stderr, not stdout.
    _code, _out, err = util.runcmd(cmd)
    for line in err.split('\n'):
        logging.debug(line)
    loss_match = RE_LOSS.search(err)
    stats_match = RE_STATS.search(err)
    loss = avg = None
    if loss_match and stats_match:
        loss = loss_match.group('loss')
        avg = stats_match.group('avg')
    return ProbeResults(loss, avg, target)
54 |
55 |
def send_udp(target, count=500, port=util.DEFAULT_DST_PORT, tos=0x00,
             timeout=util.DEFAULT_TIMEOUT):
    """Sends UDP datagrams crafted for LLAMA reflectors to target host.

    Note: Using this method does NOT require `root` privileges.

    Args:
        target: hostname or IP address of target
        count: number of datagrams to send
        port: destination port to use for probes
        tos: hex type-of-service to use for probes
        timeout: seconds to wait for probe to return

    Returns:
        a ProbeResults tuple containing (loss %, RTT average, target host)
    """
    probe = udp.Sender(target, port, count, tos, timeout)
    probe.run()
    stats = probe.stats
    return ProbeResults(stats.loss, stats.rtt_avg, target)
75 |
--------------------------------------------------------------------------------
/llama/scraper.py:
--------------------------------------------------------------------------------
1 | """LLAMA TSDB Scraper
2 |
3 | This binary scrapes the LLAMA collectors for latency statistics and shovels
4 | them into a timeseries database.
5 | """
6 |
7 | import httplib
8 | import influxdb
9 | import json
10 | import logging
11 | import socket
12 |
13 |
class Error(Exception):
    """Top-level error for the scraper module."""
16 |
17 |
def http_get(server, port, uri, **headers):
    """Generic HTTP GET request.

    Args:
        server: hostname or IP to connect to
        port: TCP port to connect to
        uri: string containing the URI to query
        headers: HTTP headers to inject into request

    Returns:
        a tuple, (status_code, data_as_string)

    Raises:
        Error: if the TCP connection to the server fails
    """
    # TODO(): Move this to requests library.
    conn = httplib.HTTPConnection(server, port)
    try:
        conn.request('GET', uri, "", headers)
    except socket.error as exc:
        raise Error('Could not connect to %s:%s (%s)' % (server, port, exc))
    resp = conn.getresponse()
    return resp.status, resp.read()
36 |
37 |
class CollectorClient(object):
    """A client for moving data from Collector to TSDB."""

    def __init__(self, server, port):
        """Constructor.

        Args:
            server: (str) collector server hostname or IP
            port: (int) collector TCP port
        """
        logging.info('Created a %s for %s:%s', self, server, port)
        self.server = server
        self.port = port

    def get_latency(self):
        """Gets /influxdata stats from collector.

        Returns:
            list of dictionary data (latency JSON)

        Raises:
            Error: propagated from http_get if the collector is unreachable
        """
        status, data = http_get(self.server, self.port, '/influxdata')
        # TODO(): this would be obviated by the requests library.
        # NOTE(review): a non-2xx status is only logged; we still attempt
        # json.loads() on the (likely non-JSON) body below -- confirm
        # whether this should raise Error instead.
        if status < 200 or status > 299:
            logging.error('Error received getting latency from collector: '
                          '%s:%s, code=%s' % (self.server, self.port, status))
        return json.loads(data)

    def push_tsdb(self, server, port, database, points):
        """Push latest datapoints to influxDB server.

        Args:
            server: (str) influxDB server hostname or IP
            port: (int) influxDB server TCP port
            database: (str) name of LLAMA database
            points: (list) dicts containing InfluxDB formatted datapoints
        """
        client = influxdb.InfluxDBClient(
            server, port, database=database)
        client.write_points(points)

    def run(self, server, port, database):
        """Get and push stats to TSDB.

        Connection failures (Error) are logged and swallowed so a
        scheduled run never kills the scheduler.
        """
        try:
            points = self.get_latency()
        except Error as exc:
            logging.error(exc)
            return
        logging.info('Pulled %s datapoints from collector: %s',
                     len(points), self.server)
        self.push_tsdb(server, port, database, points)
        logging.info('Pushed %s datapoints to TSDB: %s', len(points), server)
92 |
--------------------------------------------------------------------------------
/llama/metrics.py:
--------------------------------------------------------------------------------
1 | """LAMA - Variables module
2 |
3 | This library supplies a consistent variable naming representation for LAMA
4 | timeseries data.
5 | """
6 |
7 | import collections
8 | import json
9 | import time
10 | import weakref
11 |
12 |
class Error(Exception):
    """Top-level error."""


class DatapointError(Error):
    """Problems with Datapoint descriptors (e.g. attempted deletion)."""
19 |
20 |
# Snapshot of a single datapoint: (name, value, timestamp).
DatapointResults = collections.namedtuple(
    'DatapointResults', ['name', 'value', 'timestamp'])


class Datapoint(object):
    """Descriptor for a single datapoint.

    Values and set-times are held per owning instance in weak-key maps,
    so entries vanish automatically when the owner is garbage collected.
    """

    def __init__(self, name):
        self.name = name
        self._value = weakref.WeakKeyDictionary()
        self._time = weakref.WeakKeyDictionary()

    def __set__(self, instance, value):
        self._value[instance] = value
        # Record the assignment time, rounded to the nearest second.
        self._time[instance] = int(round(time.time()))

    def __get__(self, instance, cls):
        try:
            value = self._value[instance]
            stamp = self._time[instance]
        except KeyError:
            # Never set on this instance.
            value = stamp = None
        return DatapointResults(self.name, value, stamp)

    def __delete__(self, instance):
        raise DatapointError('Cannot delete datapoint: %s' % instance)
47 |
48 |
class Metrics(object):
    """A collection of metrics and common operations."""

    rtt = Datapoint('rtt')
    loss = Datapoint('loss')

    def __init__(self, **tags):
        """Constructor.

        Args:
            tags: (dict) key=value pairs of tags to assign the metric.
        """
        self._tags = tags

    @property
    def tags(self):
        return self._tags

    @property
    def data(self):
        """List of (name, value, timestamp) tuples, one per Datapoint."""
        data = []
        # items() instead of iteritems() so this works on Python 2 and 3;
        # on Python 2 the extra list for a handful of attrs is negligible.
        for attr, thing in Metrics.__dict__.items():
            if isinstance(thing, Datapoint):
                data.append(tuple(self.__getattribute__(attr)))
        return data

    @property
    def as_dict(self):
        return {'tags': self.tags, 'data': self.data}

    @property
    def as_json(self):
        return json.dumps(self.as_dict, indent=4)

    @property
    def as_influx(self):
        """Returns datapoints formatted for ingestion into InfluxDB.

        The returned data is a list of dicts (each dict is one datapoint).
        Datapoints that were never set produce None value/time fields.
        """
        points = []
        for name, value, timestamp in self.data:
            try:
                fields = {'value': float(value)}
            except TypeError:
                # Datapoint never set; value is None.
                fields = {'value': None}
            try:
                # InfluxDB expects nanoseconds since the epoch.
                influx_time = timestamp * 1000000000
            except TypeError:
                influx_time = None
            points.append({
                'measurement': name,
                'tags': self.tags,
                'fields': fields,
                'time': influx_time,
            })
        return points
104 |
--------------------------------------------------------------------------------
/llama/tests/metrics_test.py:
--------------------------------------------------------------------------------
1 | """Unittests for metrics lib."""
2 |
3 | from llama import metrics
4 | import pytest
5 | import time
6 |
7 |
@pytest.fixture
def two_things():
    """Two independent instances of a class sharing Datapoint descriptors."""
    class Thing(object):
        dp1 = metrics.Datapoint('dp1')
        dp2 = metrics.Datapoint('dp2')
    first = Thing()
    second = Thing()
    return first, second
14 |
15 |
class TestDatapoint(object):
    """Exercise the Datapoint descriptor protocol."""

    def test_setget_single(self, two_things, monkeypatch):
        thing, _ = two_things
        monkeypatch.setattr(time, 'time', lambda: 100)
        thing.dp1 = 1241
        assert thing.dp1 == ('dp1', 1241, 100)

    def test_setget_multidatapoint(self, two_things, monkeypatch):
        thing, _ = two_things
        monkeypatch.setattr(time, 'time', lambda: 999)
        thing.dp1 = 1241
        thing.dp2 = 0.1111
        assert thing.dp1 == ('dp1', 1241, 999)
        assert thing.dp2 == ('dp2', 0.1111, 999)

    def test_setget_multiclass(self, two_things, monkeypatch):
        first, second = two_things
        monkeypatch.setattr(time, 'time', lambda: 12345)
        first.dp1 = 1241
        first.dp2 = 0.1111
        second.dp1 = 3000
        second.dp2 = 1234567890123456
        assert first.dp2 == ('dp2', 0.1111, 12345)
        assert second.dp1 == ('dp1', 3000, 12345)
        assert second.dp2 == ('dp2', 1234567890123456, 12345)
        # Overwriting existing datapoints must refresh value and timestamp.
        first.dp1 = 0
        first.dp2 = 10.1111
        assert first.dp1 == ('dp1', 0, 12345)
        assert first.dp2 == ('dp2', 10.1111, 12345)
48 |
49 |
@pytest.fixture
def m1():
    """A Metrics instance pre-populated with location tags."""
    return metrics.Metrics(
        src='host2', metro='iad', facility='iad2', cluster='iad2a')
55 |
56 |
class TestMetrics(object):
    """Exercise the Metrics aggregation class."""

    EXPECTED_TAGS = {
        'src': 'host2',
        'metro': 'iad',
        'facility': 'iad2',
        'cluster': 'iad2a',
    }

    def test_constructor(self, m1):
        assert m1

    def test_tags(self, m1):
        assert m1.tags == self.EXPECTED_TAGS

    def test_data(self, m1):
        assert len(m1.data) == 2

    def test_as_dict(self, m1, monkeypatch):
        monkeypatch.setattr(time, 'time', lambda: 100)
        m1.rtt = 1
        m1.loss = 2
        snapshot = m1.as_dict
        assert snapshot['tags'] == self.EXPECTED_TAGS
        assert ('rtt', 1, 100) in snapshot['data']
        assert ('loss', 2, 100) in snapshot['data']

    def test_as_influx(self, monkeypatch):
        monkeypatch.setattr(time, 'time', lambda: 100)
        metric = metrics.Metrics(src='a', dst='b')
        metric.rtt = 70
        metric.loss = 1.2
        tags = {'src': 'a', 'dst': 'b'}
        expected_rtt = {
            'measurement': 'rtt',
            'tags': tags,
            'time': 100000000000,
            'fields': {'value': 70},
        }
        expected_loss = {
            'measurement': 'loss',
            'tags': tags,
            'time': 100000000000,
            'fields': {'value': 1.2},
        }
        influx = metric.as_influx
        assert type(influx) is list
        assert expected_rtt in influx
        assert expected_loss in influx
116 |
--------------------------------------------------------------------------------
/llama/collector.py:
--------------------------------------------------------------------------------
1 | """LLAMA Collector module
2 |
3 | The Collector is intended to be run on a single host and collect packet loss
4 | and latency to a collection of far-end hosts.
5 | """
6 |
7 | from apscheduler.schedulers.background import BackgroundScheduler
8 | from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
9 | from concurrent import futures
10 | import flask
11 | import humanfriendly
12 | import json
13 | import logging
14 | import os
15 | import time
16 |
17 | from llama import config
18 | from llama import metrics
19 | from llama import ping
20 | from llama import util
21 | from version import __version__
22 |
23 |
class Error(Exception):
    """Base class for all errors raised by the collector module."""
26 |
27 |
class Collection(object):
    """An abstraction for measuring latency to a group of targets."""

    def __init__(self, config, use_udp=False):
        """Constructor.

        Args:
            config: (config.CollectorConfig) of targets
            use_udp: (bool) Use UDP datagrams for probes (requires Reflectors)
        """
        self.method = ping.send_udp if use_udp else ping.hping3
        self.metrics = {}
        self.config = config
        for dst_ip, tags in self.config.targets:
            if dst_ip in self.metrics:
                # Duplicate target: keep the first tags seen (same outcome as
                # the old setdefault()) without building a throwaway Metrics.
                continue
            logging.info('Creating metrics for %s: %s', dst_ip, tags)
            self.metrics[dst_ip] = metrics.Metrics(**dict(tags))

    def collect(self, count, dst_port=util.DEFAULT_DST_PORT,
                timeout=util.DEFAULT_TIMEOUT):
        """Collects latency against a set of hosts.

        Args:
            count: (int) number of datagrams to send each host
            dst_port: (int) destination port for probes (UDP probes only)
            timeout: (float) seconds to wait for probes to return
        """
        jobs = []
        with futures.ThreadPoolExecutor(max_workers=50) as executor:
            for host in self.metrics:
                logging.info('Assigning target host: %s', host)
                jobs.append(executor.submit(self.method, host,
                                            count=count,
                                            port=dst_port,
                                            timeout=timeout,
                                            ))
            for job in futures.as_completed(jobs):
                # Each probe method returns (loss, rtt, host).
                loss, rtt, host = job.result()
                self.metrics[host].loss = loss
                self.metrics[host].rtt = rtt
                logging.info('Summary {:16}:{:>3}% loss, {:>4} ms rtt'.format(
                    host, loss, rtt))

    @property
    def stats(self):
        """Latest loss/latency for every target, as a list of dicts."""
        return [x.as_dict for x in self.metrics.values()]

    @property
    def stats_influx(self):
        """All targets' datapoints flattened into InfluxDB point dicts."""
        points = []
        for metric in self.metrics.values():
            points.extend(metric.as_influx)
        return points
82 |
83 |
class HttpServer(flask.Flask):
    """Our HTTP/API server."""

    # Shared apscheduler executors for the background probe jobs.
    EXECUTORS = {
        'default': ThreadPoolExecutor(20),
        'processpool': ProcessPoolExecutor(5)
    }

    def __init__(self, name, ip, port, *args, **kwargs):
        """Constructor.

        Args:
            name: (str) name of Flask service
            ip: (str) IP address to bind HTTP server
            port: (int) TCP port for HTTP server to listen
        """
        super(HttpServer, self).__init__(name, *args, **kwargs)
        # Fixup the root path for Flask so it can find templates/*
        root_path = os.path.abspath(os.path.dirname(__file__))
        logging.debug('Setting root_path for Flask: %s', root_path)
        self.root_path = root_path
        self.targets = config.CollectorConfig()
        self.ip = ip
        self.port = port
        self.start_time = time.time()
        self.setup_time = 0
        self.scheduler = BackgroundScheduler(
            daemon=True, executors=self.EXECUTORS)
        self.collection = None
        self.add_url_rule('/', 'index', self.index_handler)
        self.add_url_rule('/status', 'status', self.status_handler)
        self.add_url_rule('/latency', 'latency', self.latency_handler)
        self.add_url_rule('/influxdata', 'influxdata', self.influxdata_handler)
        self.add_url_rule('/quitquit', 'quitquit', self.shutdown_handler)
        logging.info('Starting Llama Collector, version %s', __version__)

    def configure(self, filepath):
        """Configure the Collector from file.

        Args:
            filepath: (str) where the configuration is located
        """
        self.targets.load(filepath)

    def status_handler(self):
        """Health-check endpoint; always returns plain-text 'ok'."""
        return flask.Response('ok', mimetype='text/plain')

    def index_handler(self):
        """Renders the human-facing landing page.

        NOTE(review): self.interval is only assigned inside run(); a request
        arriving before run() would raise AttributeError -- confirm ordering.
        """
        return flask.render_template(
            'index.html',
            targets=self.targets.targets,
            interval=self.interval,
            start_time=self.start_time,
            setup_time=self.setup_time,
            uptime=humanfriendly.format_timespan(
                time.time() - self.start_time))

    def latency_handler(self):
        """Serves current loss/latency stats as indented JSON."""
        data = json.dumps(self.collection.stats, indent=4)
        return flask.Response(data, mimetype='application/json')

    def influxdata_handler(self):
        """Serves current stats as InfluxDB-shaped JSON points."""
        data = json.dumps(self.collection.stats_influx, indent=4)
        return flask.Response(data, mimetype='application/json')

    def shutdown_handler(self):
        """Shuts down the running web server and other things."""
        logging.warn('/quitquit request, attempting to shutdown server...')
        self.scheduler.shutdown(wait=False)
        fn = flask.request.environ.get('werkzeug.server.shutdown')
        if not fn:
            raise Error('Werkzeug (Flask) server NOT running.')
        fn()
        # Fixed: this literal was previously broken across multiple lines
        # (an unterminated string); return a simple plain-text body.
        return 'Quitting...'

    def run(self, interval, count, use_udp=False,
            dst_port=util.DEFAULT_DST_PORT, timeout=util.DEFAULT_TIMEOUT,
            *args, **kwargs):
        """Start all the polling and run the HttpServer.

        Args:
            interval: seconds between each poll
            count: count of datagram to send each responder per interval
            use_udp: utilize UDP probes for testing
            dst_port: port to use for testing (only UDP)
            timeout: how long to wait for probes to return
        """
        self.interval = interval
        self.scheduler.start()
        self.collection = Collection(self.targets, use_udp)
        self.scheduler.add_job(self.collection.collect, 'interval',
                               seconds=interval,
                               args=[count, dst_port, timeout])
        # Record setup duration *before* entering the blocking serve loop;
        # previously this was assigned after run() returned (i.e. only at
        # shutdown), so handlers always reported setup_time == 0.
        self.setup_time = round(time.time() - self.start_time, 0)
        super(HttpServer, self).run(
            host=self.ip, port=self.port, threaded=True, *args, **kwargs)
--------------------------------------------------------------------------------
/llama/app.py:
--------------------------------------------------------------------------------
1 | """Application Setup & Run
2 |
3 | This module intends to provide standard logging, command-line flags/options.
4 | The intended execution looks like:
5 |
6 | import app
7 |
8 | def main(args):
9 | pass
10 |
11 | if __name__ == '__main__':
12 | app.run(main)
13 |
14 | Default behavior is:
15 | o Setup logging to INFO facility with standard format
16 | o Find the main() function which must take a single argument
17 | o Pass command-line arguments from sys.argv to main()
18 |
19 | Behavior can be augmented by passing keyword arguments to app.run(). Constants
20 | and helper functions should exist within this module to assist.
21 |
22 | EXAMPLE USAGE:
23 |
24 | if __name__ == '__main__':
25 | # Always call run() first, then modify or add logging after.
26 | app.run(main)
27 |
28 | # Change logging to stderr. All three statements below do the same
29 | # thing.
30 | app.log_to_stderr(app.DEBUG)
31 | app.log_to_stderr('debug')
32 | app.log_to_stderr('DEBUG')
33 |
34 | # Log to a file in append mode.
35 | app.log_to_file('/tmp/myapp.log')
36 |
37 | # Log to the file and clobber exising log file.
38 | app.log_to_file('/tmp/myapp.log', filemode=app.CLOBBER)
39 |
40 | # Use an argument parser, like 'docopt' or 'gflags', to pass
41 | # arguments through app.run. This ensures flags/options are loaded
42 | # prior to any other code running.
43 | if __name__ == '__main__':
44 | app.run(main, docopt.docopt(__doc__))
45 |
46 | # or with gflags
47 | app.run(main, FLAGS(sys.argv))
48 |
49 | NOTE: Protection exists against running app.run() more than once per runtime
50 | environment.
51 | """
52 |
53 | import logging
54 | import os
55 | import sys
56 |
57 |
# Logging facilities, re-exported so callers need not import logging.
DEBUG = logging.DEBUG
INFO = logging.INFO
WARN = logging.WARN
WARNING = logging.WARNING
ERROR = logging.ERROR
CRITICAL = logging.CRITICAL
FATAL = logging.FATAL

# File modes used by log_to_file().
CLOBBER = 'w'  # truncate the existing log file
APPEND = 'a'   # append to the existing log file

# Shared log line format for stderr and file handlers.
_LOG_FORMATTER = logging.Formatter(
    '%(levelname)s %(asctime)s [%(module)s]: %(message)s')

# Module-level handler singletons, created lazily by log_to_stderr() and
# log_to_file().
_STDERR_HANDLER = None
_FILE_HANDLER = None

# ANSI color escape per level name; 'END' resets the terminal color.
LOG_COLORS = {
    'DEBUG': '\033[94m',
    'INFO': '\033[32m',
    'WARN': '\033[33m',
    'WARNING': '\033[33m',
    'ERROR': '\033[91m',
    'CRITICAL': '\033[91m',
    'END': '\033[0m',
}
88 |
89 |
class Error(Exception):
    """Base class for all errors raised by this module."""
93 |
94 |
class AppError(Error):
    """Errors associated with running the application."""
98 |
99 |
def run_once(function):
    """Decorator which prevents functions from being run more than once.

    Args:
        function: the callable to guard.

    Returns:
        A wrapper exposing a `has_run` attribute. The first call executes
        `function`; any further call raises AppError.

    Raises:
        AppError: (from the wrapper) when invoked more than once.
    """
    import functools

    # functools.wraps preserves the wrapped function's __name__/__doc__ so
    # introspection and log messages keep working.
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        if wrapper.has_run:
            raise AppError(
                'Cannot call %s.%s() more than once per runtime!' % (
                    __name__, function.__name__))
        wrapper.has_run = True
        return function(*args, **kwargs)
    wrapper.has_run = False
    return wrapper
112 |
113 |
@run_once
def run(main, args=None, root_level=DEBUG, formatter=_LOG_FORMATTER):
    """Run the application.

    Args:
        main: the main() function from __main__ module
        args: list or object containing args to pass to main()
            default behavior is to not send any args to main()
            popular values here are `sys.argv` or `docopt.docopt(__doc__)`
        root_level: logging level for the root logger; limiting this will
            limit ALL subsequent logging.
        formatter: logging.Formatter object for stderr and file logging

    Raises:
        AppError: if function is called more than once
    """
    # Set the root logger to handle everything.
    logger = logging.getLogger('')
    logger.setLevel(root_level)
    # Setup stderr logging.
    log_to_stderr(level=CRITICAL, formatter=formatter)
    logging.debug('App started. Calling main()')
    # Call main(). Only the *presence* of args matters: an empty-but-present
    # container (e.g. an empty docopt dict) should still be forwarded, so
    # test against None rather than truthiness.
    if args is not None:
        main(args)
    else:
        main()
141 |
142 |
def userlog(level, message, *args):
    """Log a message to STDERR for user-facing applications.

    This also results in logging to whatever level is specified.

    Args:
        level: a logging level function, i.e. logging.info or logging.error
        message: string of message to log
        args: arguments to pass into message
    """
    # __name__ works on both Python 2 and 3; the original func_name
    # attribute is Python2-only and raises AttributeError on Python 3.
    lvlname = level.__name__.upper()
    try:
        color = LOG_COLORS[lvlname]
        end = LOG_COLORS['END']
    except KeyError:
        # Unknown level name: emit without color codes.
        color = ''
        end = ''
    level(message, *args)
    output = '%s%s:%s %s\n' % (color, lvlname, end, message)
    sys.stderr.write(output % args)
163 |
164 |
def get_loglevel(level):
    """Return a logging level (integer).

    Args:
        level: can be a level or a string name of a level, such as:
            logging.INFO, app.INFO or 'info'

    Returns:
        (int) a logging level.

    Raises:
        AppError: if `level` cannot be resolved to a known logging level.
    """
    # isinstance() instead of `type(x) is` so str subclasses resolve too.
    if isinstance(level, str):
        # getLevelName() maps a known name to its int; unknown names come
        # back as a string, which fails the int check below.
        level = logging.getLevelName(level.upper())
    if isinstance(level, int):
        return level
    raise AppError('Level "%s" is not a valid logging level' % level)
180 |
181 |
def log_to_stderr(level, formatter=_LOG_FORMATTER,
                  handler=logging.StreamHandler):
    """Setup logging or set logging level to STDERR.

    Args:
        level: a logging level, like logging.INFO
        formatter: a logging.Formatter object
        handler: logging.StreamHandler (this argument is for testing)
    """
    global _STDERR_HANDLER
    resolved = get_loglevel(level)
    if type(_STDERR_HANDLER) is not handler:
        # First call (or a different handler class): build, format and
        # attach a fresh handler to the root logger.
        _STDERR_HANDLER = handler(stream=sys.stderr)
        _STDERR_HANDLER.setFormatter(formatter)
        logging.getLogger('').addHandler(_STDERR_HANDLER)
    # Whether new or reused, (re)apply the requested level.
    _STDERR_HANDLER.setLevel(resolved)
    logging.debug('Setting logging at level=%s',
                  logging.getLevelName(resolved))
202 |
203 |
def log_to_file(filename, level=INFO, formatter=_LOG_FORMATTER,
                filemode=APPEND, handler=logging.FileHandler):
    """Setup logging or set logging level to file.

    Args:
        filename: string of path/file to write logs
        level: a logging level, like logging.INFO
        formatter: a logging.Formatter object
        filemode: a mode of writing, like app.APPEND or app.CLOBBER
        handler: logging.FileHandler (this argument is for testing)
    """
    global _FILE_HANDLER
    resolved = get_loglevel(level)
    if type(_FILE_HANDLER) is not handler:
        # First call (or a different handler class): open the log file and
        # attach the handler to the root logger.
        _FILE_HANDLER = handler(filename=filename, mode=filemode)
        _FILE_HANDLER.setFormatter(formatter)
        logging.getLogger('').addHandler(_FILE_HANDLER)
    # Whether new or reused, (re)apply the requested level.
    _FILE_HANDLER.setLevel(resolved)
    logging.info('Logging to file %s [mode=\'%s\', level=%s]',
                 os.path.abspath(filename), filemode,
                 logging.getLevelName(resolved))
227 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
# All builder output lands under this directory.
BUILDDIR = _build

# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

# List every available build target.
.PHONY: help
help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo " html to make standalone HTML files"
	@echo " dirhtml to make HTML files named index.html in directories"
	@echo " singlehtml to make a single large HTML file"
	@echo " pickle to make pickle files"
	@echo " json to make JSON files"
	@echo " htmlhelp to make HTML files and a HTML help project"
	@echo " qthelp to make HTML files and a qthelp project"
	@echo " applehelp to make an Apple Help Book"
	@echo " devhelp to make HTML files and a Devhelp project"
	@echo " epub to make an epub"
	@echo " epub3 to make an epub3"
	@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo " latexpdf to make LaTeX files and run them through pdflatex"
	@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
	@echo " text to make text files"
	@echo " man to make manual pages"
	@echo " texinfo to make Texinfo files"
	@echo " info to make Texinfo files and run them through makeinfo"
	@echo " gettext to make PO message catalogs"
	@echo " changes to make an overview of all changed/added/deprecated items"
	@echo " xml to make Docutils-native XML files"
	@echo " pseudoxml to make pseudoxml-XML files for display purposes"
	@echo " linkcheck to check all external links for integrity"
	@echo " doctest to run all doctests embedded in the documentation (if enabled)"
	@echo " coverage to run coverage check of the documentation (if enabled)"
	@echo " dummy to check syntax errors of document sources"

# Remove all generated build output.
.PHONY: clean
clean:
	rm -rf $(BUILDDIR)/*

.PHONY: html
html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

.PHONY: dirhtml
dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

.PHONY: singlehtml
singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

.PHONY: pickle
pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

.PHONY: json
json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

.PHONY: htmlhelp
htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	".hhp project file in $(BUILDDIR)/htmlhelp."

.PHONY: qthelp
qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/LLAMA.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/LLAMA.qhc"

.PHONY: applehelp
applehelp:
	$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
	@echo
	@echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
	@echo "N.B. You won't be able to view it unless you put it in" \
	"~/Library/Documentation/Help or install it in your application" \
	"bundle."

.PHONY: devhelp
devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/LLAMA"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/LLAMA"
	@echo "# devhelp"

.PHONY: epub
epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

.PHONY: epub3
epub3:
	$(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3
	@echo
	@echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3."

.PHONY: latex
latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	"(use \`make latexpdf' here to do that automatically)."

.PHONY: latexpdf
latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

.PHONY: latexpdfja
latexpdfja:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through platex and dvipdfmx..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

.PHONY: text
text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

.PHONY: man
man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

.PHONY: texinfo
texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	"(use \`make info' here to do that automatically)."

.PHONY: info
info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

.PHONY: gettext
gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

.PHONY: changes
changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

.PHONY: linkcheck
linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	"or in $(BUILDDIR)/linkcheck/output.txt."

.PHONY: doctest
doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	"results in $(BUILDDIR)/doctest/output.txt."

.PHONY: coverage
coverage:
	$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
	@echo "Testing of coverage in the sources finished, look at the " \
	"results in $(BUILDDIR)/coverage/python.txt."

.PHONY: xml
xml:
	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
	@echo
	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."

.PHONY: pseudoxml
pseudoxml:
	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
	@echo
	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

.PHONY: dummy
dummy:
	$(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy
	@echo
	@echo "Build finished. Dummy builder generates no files."
226 |
--------------------------------------------------------------------------------
/llama/udp.py:
--------------------------------------------------------------------------------
1 | """UDP Packet Library for LLAMA
2 |
3 | This library provides several functions:
4 | * Custom IPv4 UDP sockets
5 | * Sender class
6 | * Reflector class
7 |
8 | The custom socket class provides the capability to carry timestamps, TOS
9 | markings, and other data encoding. Specialty methods send and receive these
10 | pieces of data. Usually, setting IP_TOS in the IP header is simple, but in
11 | Python reading TOS bits becomes difficult unless using raw sockets. We'd like
12 | to avoid raw sockets so our application doesn't need to run as `root`. To
13 | solve this we encode TOS bits into the payload of the datagrams.
14 |
15 | The Sender class sends large quantities of UDP probes in batches.
16 |
17 | The Reflector class runs a simple loop: receive, decode TOS, set timestamp,
18 | encode TOS, send back.
19 |
20 | TOS is encoded as 8-bits (1-byte, 2-hex digits). See
21 | https://www.tucny.com/Home/dscp-tos for a reference.
22 | """
23 |
24 | import collections
25 | import concurrent.futures
26 | import logging
27 | import socket
28 | import struct
29 | import time
30 |
31 | from llama import util
32 |
33 |
# Data payload structure for probes
# Encoding timestamps in packet data reduces the amount of tracking we have to
# do in code. TOS bits can be set on outbound UDP packets, but are difficult
# to read back with getsockopt() -- placing in payload helps this as well.
# We include a 'signature' to help reject non-LLAMA related datagrams.
UdpData = collections.namedtuple('UdpData', [
    'signature',  # Unique LLAMA signature
    'tos',        # TOS bits, expressed as 1 byte in hex
    'sent',       # Time datagram was placed on wire in ms
    'rcvd',       # Time datagram was returned to sender in ms
    'rtt',        # Total round-trip time in ms
    'lost',       # Boolean, was our packet returned to sender?
])
46 |
47 |
# UDP statistics returned at the end of each probe cycle.
UdpStats = collections.namedtuple('UdpStats', [
    'sent',     # How many datagrams were sent
    'lost',     # How many datagrams were not returned
    'loss',     # Loss, expressed as a percentage
    'rtt_max',  # Maximum round trip time
    'rtt_min',  # Minimum round trip time
    'rtt_avg',  # Average (mean) round trip time
])
56 |
57 |
class Ipv4UdpSocket(socket.socket):
    """Custom IPv4 UDP socket which tracks TOS and timestamps.

    Note: We inherit from the socket.socket() class for ease of use, but due
          to restrictions in C bindings, we cannot override builtin methods
          like sendto() and recvfrom(). For those methods, we make special
          methods below: tos_sendto() and tos_recvfrom().
    """

    # SIGNATURE is a bytes literal so struct.pack() accepts it on Python 3
    # as well as Python 2 (where a bytes literal is the same str it always
    # was). It is 9 characters; the '10s' field NUL-pads it on the wire.
    SIGNATURE = b'__llama__'  # Identify LLAMA packets from other UDP
    FORMAT = '<10sBddd?'      # Used to pack/unpack struct data

    def __init__(self, tos=0x00, timeout=util.DEFAULT_TIMEOUT):
        """Constructor.

        Args:
            tos: (hex) TOS bits expressed as 2-bytes
            timeout: (float) Number of seconds to block/wait socket operation
        """
        super(Ipv4UdpSocket, self).__init__(socket.AF_INET, socket.SOCK_DGRAM,
                                            socket.IPPROTO_UDP)
        self._tos = tos & 0xff  # [6-bits TOS] [2-bits ECN]
        self.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, self._tos)
        self.settimeout(timeout)
        # Running count of datagrams handled by tos_reflect().
        self.processed = 0

    def tos_sendto(self, ip, port):
        """Mimic the behavior of socket.sendto() with special behavior.

        Note: Data is excluded from arguments since we encode our own.

        Args:
            ip: (str) destination IP address
            port: (int) destination UDP port

        Returns:
            (int) the number of bytes sent on the socket
        """
        # Only the send timestamp is meaningful here; rcvd/rtt/lost are
        # placeholders that the receive path fills in.
        return self.sendto(struct.pack(self.FORMAT, self.SIGNATURE, self._tos,
                                       time.time() * 1000, 0, 0, False),
                           (ip, port))

    def tos_recvfrom(self, bufsize=512):
        """Mimic the behavior of socket.recvfrom() with special behavior.

        Args:
            bufsize: (int) number of bytes to read from socket
                It's not advisable to change this.

        Returns:
            (UdpData) namedtuple containing timestamps
        """
        try:
            data, addr = self.recvfrom(bufsize)
            rcvd = time.time() * 1000
            results = UdpData._make(struct.unpack(self.FORMAT, data))
            rtt = rcvd - results.sent
            return results._replace(rcvd=rcvd, rtt=rtt, lost=False)
        except socket.timeout:
            logging.debug('Timed out after {}s waiting to receive'.format(
                self.gettimeout()))
            # No reply within the timeout: report the datagram as lost.
            return UdpData(self.SIGNATURE, self._tos, 0, 0, 0, True)

    def tos_reflect(self, bufsize=512):
        """Intended to be the sole operation on a LLAMA reflector.

        Args:
            bufsize: (int) number of bytes to read from socket
                It's not advisable to change this.
        """
        data, addr = self.recvfrom(bufsize)
        try:
            udpdata = UdpData._make(struct.unpack(self.FORMAT, data))
        except struct.error:
            logging.warn('Received malformed datagram of %s bytes. '
                         'Discarding.', len(data))
            # Don't reflect invalid data
            return
        # Mirror the sender's TOS bits so the return path is marked the same.
        self.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, udpdata.tos)
        self.sendto(data, addr)
        self.processed += 1
        if self.processed % 512 == 0:
            logging.info('Processed packets: %s', self.processed)
141 |
142 |
class Sender(object):
    """UDP Sender class capable of sending/receiving UDP probes."""

    def __init__(self, target, port, count, tos=0x00,
                 timeout=util.DEFAULT_TIMEOUT):
        """Constructor.

        Args:
            target: (str) IP address or hostname of destination
            port: (int) UDP port of destination
            count: (int) number of UDP datagram probes to send
            tos: (hex) TOS bits
            timeout: (float) in seconds
        """
        self.target = target
        self.port = port
        # Initialize here so `stats` is safe to read even before run();
        # previously it was only created inside run(), so accessing stats
        # first raised AttributeError.
        self.results = []
        sockets = []
        # One socket per probe, each bound to an ephemeral source port.
        for _ in range(count):
            sock = Ipv4UdpSocket(tos=tos, timeout=timeout)
            sock.bind(('', 0))
            sockets.append(sock)
        # Probes are processed in batches of up to 50 sockets per worker.
        self.batches = util.array_split(sockets, 50)

    def send_and_recv(self, batch):
        """Send and receive one datagram per socket in a batch; store results.

        Args:
            batch: (list of socket objects) for sending/receiving
        """
        for sock in batch:
            sock.tos_sendto(self.target, self.port)
            self.results.append(sock.tos_recvfrom())

    def run(self):
        """Run the sender: fan probe batches out to a thread pool."""
        self.results = []
        exception_jobs = []
        jobs = []
        with concurrent.futures.ThreadPoolExecutor(max_workers=50) as executor:
            for batch in self.batches:
                jobs.append(executor.submit(self.send_and_recv, batch))
            for job in concurrent.futures.as_completed(jobs):
                # Results should be getting collected as part of the job
                # So just handle logging any exceptions.
                if job.exception():
                    exception_jobs.append(job)
        for result in self.results:
            logging.debug(result)
        if exception_jobs:
            # Lazy %-style args instead of eager .format().
            logging.critical("Encountered %s exceptions while running Sender. "
                             "Logging one such exception as an example.",
                             len(exception_jobs))
            try:
                exception_jobs[0].result()
            except Exception as e:
                logging.exception(e)

    @property
    def stats(self):
        """Returns a namedtuple containing UDP loss/latency results."""
        sent = len(self.results)
        # `sent is 0` relied on CPython small-int interning and is a
        # SyntaxWarning on 3.8+; equality is the correct comparison.
        if sent == 0:
            logging.critical('Sender has zero results, likely as a '
                             'result of exceptions during probing')
            # TODO: Better handling for this requires a greater refactor
            return UdpStats(0, 0, 0.0, 0.0, 0.0, 0.0)
        lost = sum(x.lost for x in self.results)
        loss = (float(lost) / float(sent)) * 100
        # TODO: This includes 0 values for instances of loss
        #       Handling this requires more work around null
        #       values along the various components and DB
        rtt_values = [x.rtt for x in self.results]
        rtt_min = min(rtt_values)
        rtt_max = max(rtt_values)
        rtt_avg = util.mean(rtt_values)
        return UdpStats(sent, lost, loss, rtt_max, rtt_min, rtt_avg)
219 |
220 |
class Reflector(object):
    """Listens on a UDP port and reflects LLAMA probes back to senders."""

    def __init__(self, port):
        """Bind a reflector socket on the given UDP port.

        Args:
            port: (int) UDP port to listen on
        """
        self.sock = Ipv4UdpSocket()
        self.sock.bind(('', port))
        addr, bound_port = self.sock.getsockname()[:2]
        logging.info('LLAMA reflector listening on %s udp/%s',
                     addr, bound_port)
        self.sock.setblocking(1)

    def run(self):
        """Serve forever, reflecting each datagram as it arrives."""
        reflect = self.sock.tos_reflect
        while True:
            reflect()
235 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # LLAMA documentation build configuration file, created by
4 | # sphinx-quickstart on Sat Dec 3 00:25:17 2016.
5 | #
6 | # This file is execfile()d with the current directory set to its
7 | # containing dir.
8 | #
9 | # Note that not all possible configuration values are present in this
10 | # autogenerated file.
11 | #
12 | # All configuration values have a default; values that are commented out
13 | # serve to show the default.
14 |
15 | # If extensions (or modules to document with autodoc) are in another directory,
16 | # add these directories to sys.path here. If the directory is relative to the
17 | # documentation root, use os.path.abspath to make it absolute, like shown here.
18 | #
import os
import sys
import sphinx_rtd_theme
# Make the repository root importable so `llama` can be imported below.
sys.path.insert(0, os.path.abspath('..'))
23 |
24 | # -- General configuration ------------------------------------------------
25 |
26 | # If your documentation needs a minimal Sphinx version, state it here.
27 | #
28 | # needs_sphinx = '1.0'
29 |
30 | # Add any Sphinx extension module names here, as strings. They can be
31 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
32 | # ones.
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'
50 |
# General information about the project.
project = u'LLAMA'
copyright = u'2016, Dropbox, Inc.'
author = u'Bryan Reed'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#

# Helper function to retrieve version from repo
def get_version():
    """Return the version string from the local `llama` package."""
    from llama import __version__
    return __version__

# The short X.Y version.
version = get_version()
# The full version, including alpha/beta/rc tags.
release = version
70 |
71 | # The language for content autogenerated by Sphinx. Refer to documentation
72 | # for a list of supported languages.
73 | #
74 | # This is also used if you do content translation via gettext catalogs.
75 | # Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
122 |
123 |
124 | # -- Options for HTML output ----------------------------------------------
125 |
126 | # The theme to use for HTML and HTML Help pages. See the documentation for
127 | # a list of builtin themes.
128 | #
129 | # html_theme = 'alabaster'
# Use the Read the Docs theme (installed separately via pip).
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'LLAMA v0.0.1a7'

# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None

# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
166 |
167 | # Add any extra paths that contain custom files (such as robots.txt or
168 | # .htaccess) here, relative to this directory. These files are copied
169 | # directly to the root of the documentation.
170 | #
171 | # html_extra_path = []
172 |
173 | # If not None, a 'Last updated on:' timestamp is inserted at every page
174 | # bottom, using the given strftime format.
175 | # The empty string is equivalent to '%b %d, %Y'.
176 | #
177 | # html_last_updated_fmt = None
178 |
179 | # If true, SmartyPants will be used to convert quotes and dashes to
180 | # typographically correct entities.
181 | #
182 | # html_use_smartypants = True
183 |
184 | # Custom sidebar templates, maps document names to template names.
185 | #
186 | # html_sidebars = {}
187 |
188 | # Additional templates that should be rendered to pages, maps page names to
189 | # template names.
190 | #
191 | # html_additional_pages = {}
192 |
193 | # If false, no module index is generated.
194 | #
195 | # html_domain_indices = True
196 |
197 | # If false, no index is generated.
198 | #
199 | # html_use_index = True
200 |
201 | # If true, the index is split into individual pages for each letter.
202 | #
203 | # html_split_index = False
204 |
205 | # If true, links to the reST sources are added to the pages.
206 | #
207 | # html_show_sourcelink = True
208 |
209 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
210 | #
211 | # html_show_sphinx = True
212 |
213 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
214 | #
215 | # html_show_copyright = True
216 |
217 | # If true, an OpenSearch description file will be output, and all pages will
218 | # contain a tag referring to it. The value of this option must be the
219 | # base URL from which the finished HTML is served.
220 | #
221 | # html_use_opensearch = ''
222 |
223 | # This is the file name suffix for HTML files (e.g. ".xhtml").
224 | # html_file_suffix = None
225 |
226 | # Language to be used for generating the HTML full-text search index.
227 | # Sphinx supports the following languages:
228 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
229 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
230 | #
231 | # html_search_language = 'en'
232 |
233 | # A dictionary with options for the search language support, empty by default.
234 | # 'ja' uses this config value.
235 | # 'zh' user can custom change `jieba` dictionary path.
236 | #
237 | # html_search_options = {'type': 'default'}
238 |
239 | # The name of a javascript file (relative to the configuration directory) that
240 | # implements a search results scorer. If empty, the default will be used.
241 | #
242 | # html_search_scorer = 'scorer.js'
243 |
# Output file base name for HTML help builder.
htmlhelp_basename = 'LLAMAdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # LaTeX figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'LLAMA.tex', u'LLAMA Documentation',
     u'Bryan Reed', 'manual'),
]
274 |
275 | # The name of an image file (relative to this directory) to place at the top of
276 | # the title page.
277 | #
278 | # latex_logo = None
279 |
280 | # For "manual" documents, if this is true, then toplevel headings are parts,
281 | # not chapters.
282 | #
283 | # latex_use_parts = False
284 |
285 | # If true, show page references after internal links.
286 | #
287 | # latex_show_pagerefs = False
288 |
289 | # If true, show URL addresses after external links.
290 | #
291 | # latex_show_urls = False
292 |
293 | # Documents to append as an appendix to all manuals.
294 | #
295 | # latex_appendices = []
296 |
297 | # If false, will not define \strong, \code, \titleref, \crossref ... but only
298 | # \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
299 | # packages.
300 | #
301 | # latex_keep_old_macro_names = True
302 |
303 | # If false, no module index is generated.
304 | #
305 | # latex_domain_indices = True
306 |
307 |
308 | # -- Options for manual page output ---------------------------------------
309 |
310 | # One entry per manual page. List of tuples
311 | # (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'llama', u'LLAMA Documentation',
     [author], 1)  # man section 1: user commands
]
316 |
317 | # If true, show URL addresses after external links.
318 | #
319 | # man_show_urls = False
320 |
321 |
322 | # -- Options for Texinfo output -------------------------------------------
323 |
324 | # Grouping the document tree into Texinfo files. List of tuples
325 | # (source start file, target name, title, author,
326 | # dir menu entry, description, category)
# Fields: start doc, target name, title, author, dir entry, description,
# category.
texinfo_documents = [
    (master_doc, 'LLAMA', u'LLAMA Documentation',
     author, 'LLAMA', 'Loss & LAtency MAtrix for Networks',
     'Network Monitoring'),
]
332 |
333 | # Documents to append as an appendix to all manuals.
334 | #
335 | # texinfo_appendices = []
336 |
337 | # If false, no module index is generated.
338 | #
339 | # texinfo_domain_indices = True
340 |
341 | # How to display URL addresses: 'footnote', 'no', or 'inline'.
342 | #
343 | # texinfo_show_urls = 'footnote'
344 |
345 | # If true, do not generate a @detailmenu in the "Top" node's menu.
346 | #
347 | # texinfo_no_detailmenu = False
348 |
--------------------------------------------------------------------------------