├── .gitignore ├── .python-version ├── .travis.yml ├── .vscode └── settings.json ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.md ├── agile_analytics ├── __init__.py ├── analyzers.py ├── fetchers.py ├── models.py ├── reporters.py ├── version.py └── writers.py ├── docs ├── Makefile ├── conf.py └── index.rst ├── requirements.txt ├── setup.cfg ├── setup.py ├── tests ├── conftest.py ├── data │ └── weeks_of_tickets.csv ├── test_agileticket.py ├── test_analyzed_agile_ticket.py ├── test_common_fixtures.py ├── test_created_reporter.py ├── test_csvwriter.py ├── test_cycle_pctile_reporter.py ├── test_date_analyzer.py ├── test_fetchers.py ├── test_gsheetwriter.py ├── test_lead_pctile_reporter.py ├── test_lead_reporter.py ├── test_partial_date_analyzer.py ├── test_reporter.py ├── test_sla_report.py ├── test_ticket_reporter.py └── test_tp_reporter.py └── version.txt /.gitignore: -------------------------------------------------------------------------------- 1 | # Debugging scripts 2 | tryout.py 3 | tryout.csv 4 | tests/json/*.json 5 | *_secret.json 6 | 7 | # Build artifacts 8 | reqs 9 | 10 | # Byte-compiled / optimized / DLL files 11 | __pycache__/ 12 | *.py[cod] 13 | *$py.class 14 | 15 | # C extensions 16 | *.so 17 | 18 | # Distribution / packaging 19 | .DS_Store 20 | .Python 21 | env/ 22 | build/ 23 | develop-eggs/ 24 | dist/ 25 | downloads/ 26 | eggs/ 27 | .eggs/ 28 | lib/ 29 | lib64/ 30 | parts/ 31 | sdist/ 32 | var/ 33 | *.egg-info/ 34 | .installed.cfg 35 | *.egg 36 | venv 37 | sdist-venv 38 | 39 | # PyInstaller 40 | # Usually these files are written by a python script from a template 41 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
42 | *.manifest 43 | *.spec 44 | 45 | # Installer logs 46 | pip-log.txt 47 | pip-delete-this-directory.txt 48 | 49 | # Unit test / coverage reports 50 | htmlcov/ 51 | .tox/ 52 | .coverage 53 | .coverage.* 54 | .cache 55 | nosetests.xml 56 | coverage.xml 57 | *,cover 58 | .hypothesis/ 59 | 60 | # Translations 61 | *.mo 62 | *.pot 63 | 64 | # Django stuff: 65 | *.log 66 | 67 | # Sphinx documentation 68 | docs/_build/ 69 | 70 | # PyBuilder 71 | target/ 72 | 73 | #Ipython Notebook 74 | .ipynb_checkpoints 75 | -------------------------------------------------------------------------------- /.python-version: -------------------------------------------------------------------------------- 1 | 3.5.2 2 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - "3.5" 4 | install: 5 | - pip install -r requirements.txt 6 | - pip install python-coveralls 7 | - python setup.py develop 8 | script: "py.test -svv --flake8 --cov=agile_analytics tests/" 9 | after_success: 10 | coveralls 11 | deploy: 12 | # test pypi 13 | - provider: pypi 14 | server: https://test.pypi.org/legacy/ 15 | user: "cmheisel" 16 | password: 17 | secure: "K+JpMmKkoIpPvYGnpo7Ujp3jrF55m6L5ewYqAuEwpzIDDLjqLmI4jq3UIHOXLgrIJsr1PmMsRlZetAi7ytc2Rbj9GNCo2vXHTilAp5wIBvtqWc+G+aCiLtxmX5xQdF6V+hkOuD8OM+VqZJxVOzpJrxq6go85qMVabmL0mJCq1xC85VewqNxzcE7fRCfvqToY9L+Q+K/OdBnlJ9WFJUO6KOCswm9djhcvNsGVBetiPcv1IYC5E9tVfqaBguthAXHOWkotv4PRYpS9W8xs/aNl//Sg85Ra+bOt0Rdr3d8R+Kt5ouigDM2N4o5J88dz11cWoq+o8J6zLP9JgYLP75ZUoylo+VSIEg+YNBEeYKdeB/B9b8X+A2Vr0H1Wc/LUTX/VK+3IjPsv2fT2ya3lphtOJtl97LeSlrQPwJFb6zJm2XEsdcS0rhEQjC3LXaSahyp0azsaCIcgVnBs5HeP/dBO3PstJLYIh6IGVJylwqdVsh4pajD2hOWqIzfD0LYZauXZBR68CF13GQ8DIsB2fcpOXZ+ra+hYnF7ny/4qZEMBjkUMilZ0WDcT7m8W9hfkThtcY28/Xs5rz/qu3dzVxlXgZkxWzOUDeRsQvHaZS1gfcs+el66G6zF17h1pLJU4PwAHQu9SaQ1yEHHYxr/i1HNhiQa5aG5xRNC3H2wXOh6t6xE=" 18 | on: 19 | branch: 
master 20 | tags: false 21 | condition: $TRAVIS_PYTHON_VERSION = "3.5" 22 | # production pypi 23 | - provider: pypi 24 | user: "cmheisel" 25 | password: 26 | secure: "vOugYIR93Z4n3wL1W3VV2uF1i/Vv7/shu/xtCgf5MymInK1RnyZRQLE1FgdBFOrLQkqXI3d7ALRH3zauwtnisy9CmjTLRkxW1ypt3HER 27 | bB0N0JJKqx/NWRBHzJCTkdip0pwNUI9GK2fdsZonne340iUKkH/I8oinV2o3DhrSxtDbA0wfJaIGnUQAOOektG8KlmVzs83mKm+t4Yk1u5lmTfsGbVS 28 | WkicEVAnXWdNaMRy5fHNgawWHRIXrQ2z7Z9ik5pIpQftCm8ljyhQwxTWftrs4akLJawvyofhxT3Y8+rbT6Tpxzx9KEtwoSbe7lOTIeFBeMlhDdXOPv2 29 | mYJneEXpfTO/Mrrl90Igx2kEx5mF6ql4J2rsvHeC9BU72HdMCEDra61mmn8x/xB11cxn5m4B2GIaSaOqVPsE8oXEtgt1JE0ujpmWaUS4D9gpMH2zbXQ 30 | Sub9OLP4LYLbcWa3+6IeR0tD7s7gjkQSV1tn5qdidqrmW1bOEhQ2Uu9wkSiirJ3+ccfT/HKS/s3xRe1tIV34yuhbcLQC1HOqA34FSV0t+qzRW7+sw8v 31 | LFdSWPW02aGrof80qSp/lxVaOJc3qoM2hEBtyQvOWMywHveAR8Vw+O+GuZOf02dkD/LTK2sgowzL0fZAjfpwqZQMlB0F2zyBRofT941CoViIF+q3Fm6 32 | n3rc=" 33 | on: 34 | branch: master 35 | tags: true 36 | condition: $TRAVIS_PYTHON_VERSION = "3.5" 37 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "python.pythonPath": "venv/bin/python3.5" 3 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Chris Heisel 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall 
be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include requirements.txt 3 | include version.txt 4 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | venv: 2 | virtualenv ./venv 3 | 4 | reqs: venv 5 | ./venv/bin/pip install -r requirements.txt && touch reqs 6 | 7 | agile_analytics.egg-info/PKG-INFO: reqs 8 | ./venv/bin/python setup.py develop 9 | 10 | test: agile_analytics.egg-info/PKG-INFO 11 | ./venv/bin/py.test -svv --flake8 12 | ./venv/bin/py.test -svv --cov-report term-missing --cov=agile_analytics tests/ 13 | 14 | systest: test tryout.py 15 | # Poor man's system test, not committed because it requies a real jira 16 | ./venv/bin/python tryout.py 17 | 18 | clean_pycs: 19 | find . 
| grep -E "(__pycache__|\.pyc)" | xargs rm -rf 20 | 21 | clean: clean_pycs 22 | rm -rf sdist-venv 23 | rm -rf .coverage 24 | rm -rf .cache 25 | rm -rf reqs 26 | rm -rf venv 27 | rm -rf agile_analytics.egg-info 28 | rm -rf dist 29 | 30 | docs: reqs 31 | cd docs && make html 32 | 33 | version: agile_analytics.egg-info/PKG-INFO 34 | ./venv/bin/python -c "import agile_analytics; open('version.txt', 'w').write(agile_analytics.version)" 35 | git add agile_analytics/version.py 36 | git add version.txt 37 | git commit -m "Version bump." 38 | 39 | test_sdist: test version clean 40 | python setup.py sdist 41 | virtualenv ./sdist-venv 42 | ./sdist-venv/bin/pip install --upgrade pip 43 | ./sdist-venv/bin/pip install ./dist/*.tar.gz 44 | ./sdist-venv/bin/python -c "import agile_analytics; assert agile_analytics" 45 | 46 | .PHONY: test clean clean_pycs docs version systest 47 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # jira-agile-extractor 2 | 3 | [![Build Status](https://travis-ci.org/cmheisel/agile-analytics.svg?branch=master)](https://travis-ci.org/cmheisel/agile-analytics) 4 | [![Coverage Status](https://coveralls.io/repos/github/cmheisel/agile-analytics/badge.svg?branch=master)](https://coveralls.io/github/cmheisel/agile-analytics?branch=master) 5 | [![Stories in Ready](https://badge.waffle.io/cmheisel/agile-analytics.svg?label=ready&title=Ready)](http://waffle.io/cmheisel/agile-analytics) 6 | 7 | Extract data about items from JIRA, output raw data and interesting reports 8 | 9 | ## Architecture 10 | 11 | The agile extractor is composed of several different components. Fetchers gather data, which is fed to one or more analyzer/reporters/writer combos. 12 | 13 | ### Components 14 | 15 | #### Fetchers 16 | 17 | Fetchers are responsible for getting raw data about tickets from agile data sources (JIRA, Trello, etc.) 
18 | 19 | They depend on fetcher specific configuration including things like API end points, credentials and search criteria. 20 | 21 | They produce a set of AgileTicket objects with a known interface. 22 | 23 | #### Analyzers 24 | 25 | Analyzers take in a set of AgileTicket objects and an analysis configuration and return a set of AnalyzedTicket objects that contain the original AgileTicket as well as additional data calculated in light of the analysis context. 26 | 27 | For example, a CycleTime analyzer would look for a start_state and an end_state in the configuration, and calculate the days between those and store it as cycle_time on the AnalyzedTicket object. 28 | 29 | #### Reporters 30 | 31 | Reporters take in a set of AnalyzedTicket objects and a report configuration and return a Report object. It has two standard attributes: 32 | * Table - A representation of the report as a 2 dimensional table, provides column headers, row labels, and values for each row/column combo 33 | * Summary - A key/value store of report specific data 34 | 35 | #### Writers 36 | 37 | Writers take in a Report and a WriterConfig can write it out a particular source. 
Examples: 38 | * CSV to standout 39 | * CSV to a file 40 | * Google spreadsheet 41 | * Plotly 42 | 43 | ### Diagram 44 | 45 | ``` 46 | +-----------> Reporter: Distribution 47 | | title=Cycle Time Distribution 48 | | start_date=1/1/2015 49 | | end_date=3/31/2015 50 | | field=cycle_time 51 | | 52 | +-----------> Reporter: Throughput 53 | | title=Weekly Throughput 54 | | start_date=1/1/2015 55 | | end_date=3/31/2015 56 | | period=weekly 57 | | 58 | | 59 | | 60 | +-----------------> Analyzer: Cycle Time + 61 | | start_state=Backlog 62 | | end_state=Deployed 63 | | issue_types=Story 64 | | 65 | Fetcher | +-----------> Reporter: Throughput 66 | source=JIRA +----------------> Analyzer: Defect + title=Escaped Defects 67 | filter=1111 | defect_types=Bug,Incident start_date=1/1/2015 68 | auth=user,pass | end_date=3/31/2015 69 | | 70 | | 71 | +----------------> Analyzer: Cycle Time +-----------> Reporter: Throughput 72 | start_state=Analysis title=Weekly Analysis Throughput 73 | end_state=Dev start_date=1/1/2015 74 | end_date=3/31/2015 75 | period=weekly 76 | ``` 77 | -------------------------------------------------------------------------------- /agile_analytics/__init__.py: -------------------------------------------------------------------------------- 1 | """Pulls data from agile systems and analyzes it.""" 2 | 3 | from .version import __version__, __author__ 4 | 5 | from .fetchers import ( 6 | JIRAFetcher, 7 | convert_jira_issue 8 | ) 9 | 10 | from .analyzers import ( 11 | DateAnalyzer, 12 | PartialDateAnalyzer, 13 | ) 14 | 15 | from .reporters import ( 16 | ThroughputReporter, 17 | LeadTimeDistributionReporter, 18 | TicketReporter, 19 | LeadTimePercentileReporter, 20 | CycleTimePercentileReporter, 21 | SLAReporter, 22 | CreatedReporter, 23 | ) 24 | 25 | from .writers import ( 26 | CSVWriter, 27 | GSheetWriter 28 | ) 29 | 30 | version = ".".join(map(str, __version__)) 31 | 32 | 33 | __all__ = [ 34 | "version", 35 | "__version__", 36 | "__author__", 37 | "JIRAFetcher", 
38 | "convert_jira_issue", 39 | "DateAnalyzer", 40 | "ThroughputReporter", 41 | "LeadTimeDistributionReporter", 42 | "TicketReporter", 43 | "CSVWriter", 44 | "GSheetWriter", 45 | "LeadTimePercentileReporter", 46 | "SLAReporter", 47 | "PartialDateAnalyzer", 48 | "CreatedReporter", 49 | "CycleTimePercentileReporter" 50 | ] 51 | -------------------------------------------------------------------------------- /agile_analytics/analyzers.py: -------------------------------------------------------------------------------- 1 | """Analyzers decorate AgileTickets with contextual information. 2 | 3 | Analyzers look at tickets through the lens of "this is my start state", or 4 | "this is what defects look like" and modify AgileTickets to contain information 5 | based on that context like "ended_at", "commited_at", "started_at", etc. 6 | """ 7 | 8 | from .models import AnalyzedAgileTicket 9 | 10 | 11 | class MissingPhaseInformation(Exception): 12 | """Raise when a ticket is missing information for a phase. 13 | 14 | Arguments: 15 | message (unicode): Human readable string describing the exception. 16 | phase (unicode): The phase that no state could be found for. 17 | state_list (list[unicode]): List of states that were included in the phase. 18 | 19 | Attritbutes: 20 | message (unicode): Human readable string describing the exception. 21 | phase (unicode): The phase that no state could be found for. 22 | state_list (list[unicode]): List of states that were included in the phase. 23 | """ 24 | 25 | def __init__(self, message, phase, state_list): 26 | """Create the exception.""" 27 | self.message = message 28 | self.phase = phase 29 | self.state_list = state_list 30 | super(Exception, self).__init__(message) 31 | 32 | 33 | class PartialDateAnalyzer(object): 34 | """Analyze Tickets that might not have been started or completed. 35 | 36 | Attributes: 37 | commit_state (list[str]): The list of names of the state when work was committed to. 
38 | start_state (list[str]): The list of names of the state when work was started. 39 | end_state (list[str]): The list of names of the state when work was completed. 40 | """ 41 | NEWEST_DATE = 'newest' 42 | OLDEST_DATE = 'oldest' 43 | 44 | def __init__(self, commit_states, start_states, end_states): 45 | """Create instances.""" 46 | self.end_states = end_states 47 | self.commit_states = commit_states 48 | self.start_states = start_states 49 | super().__init__() 50 | 51 | @property 52 | def states_context(self): 53 | """Enumerate the states that match the phases of an analyzed ticket.""" 54 | return { 55 | u'committed': self.commit_states, 56 | u'started': self.start_states, 57 | u'ended': self.end_states, 58 | } 59 | 60 | def _find_entered_at(self, state_list, ticket, strategy): 61 | entry = dict(state=None, entered_at=None) 62 | entries = [] 63 | for state_name in state_list: 64 | for log in ticket.flow_log: 65 | if log['state'] == state_name: 66 | entries.append(log) 67 | if len(entries) > 0: 68 | break 69 | 70 | if len(entries) > 0: 71 | if strategy == self.NEWEST_DATE: 72 | entry = entries[-1] 73 | else: 74 | entry = entries[0] 75 | return entry['state'], entry['entered_at'] 76 | 77 | def analyze(self, tickets, strategy=None): 78 | """Return a list of AnalyzedAgileTicket. 79 | 80 | Arguments: 81 | tickets (list[AgileTicket]): The list of tickets to be analyzed 82 | strategy (analyzer.OLDEST_DATE | analyzer.NEWEST_DATE): Which date to pick when a ticket entered a state multiple times 83 | 84 | Returns: 85 | list[AnalyzedAgileTicket]: The list of tickets 86 | """ 87 | if strategy is None: 88 | strategy = self.OLDEST_DATE 89 | 90 | analyzed_tickets = [] 91 | ignored_tickets = [] 92 | for ticket in tickets: 93 | analyzed_tickets.append(self.analyze_ticket(ticket, strategy)) 94 | return analyzed_tickets, ignored_tickets 95 | 96 | def analyze_ticket(self, ticket, strategy): 97 | """Convert a single AgileTicket into an AnalyzedAgileTicket. 
98 | 99 | Arguments: 100 | ticket (AgileTicket): The AgileTicket under consideration 101 | strategy (analyzer.OLDEST_DATE | analyzer.NEWEST_DATE): Which date to pick when a ticket entered a state multiple times 102 | 103 | Returns: 104 | AnalyzedAgileTicket 105 | """ 106 | 107 | kwargs = { 108 | "key": ticket.key, 109 | "ttype": ticket.type, 110 | "title": ticket.title, 111 | } 112 | 113 | for phase, state_list in self.states_context.items(): 114 | state, datetime = self._find_entered_at(state_list, ticket, strategy) 115 | kwargs[phase] = dict(state=state, entered_at=datetime) 116 | return AnalyzedAgileTicket(**kwargs) 117 | 118 | 119 | class DateAnalyzer(PartialDateAnalyzer): 120 | """Analyze Tickets for cycle data. 121 | 122 | Attributes: 123 | commit_state (list[str]): The list of names of the state when work was committed to. 124 | start_state (list[str]): The list of names of the state when work was started. 125 | end_state (list[str]): The list of names of the state when work was completed. 126 | """ 127 | 128 | def analyze(self, tickets, strategy=None): 129 | """Return a list of AnalyzedAgileTicket. 130 | 131 | Arguments: 132 | tickets (list[AgileTicket]): The list of tickets to be analyzed 133 | strategy (analyzer.OLDEST_DATE | analyzer.NEWEST_DATE): Which date to pick when a ticket entered a state multiple times 134 | 135 | Returns: 136 | list[AnalyzedAgileTicket]: The list of tickets 137 | """ 138 | if strategy is None: 139 | strategy = self.OLDEST_DATE 140 | 141 | analyzed_tickets = [] 142 | ignored_tickets = [] 143 | for ticket in tickets: 144 | try: 145 | analyzed_tickets.append(self.analyze_ticket(ticket, strategy)) 146 | except MissingPhaseInformation as e: 147 | ignored_tickets.append(dict(ticket=ticket, phase=e.phase, state_list=e.state_list)) 148 | return analyzed_tickets, ignored_tickets 149 | 150 | def analyze_ticket(self, ticket, strategy): 151 | """Convert a single AgileTicket into an AnalyzedAgileTicket. 
152 | 153 | Arguments: 154 | ticket (AgileTicket): The AgileTicket under consideration 155 | strategy (analyzer.OLDEST_DATE | analyzer.NEWEST_DATE): Which date to pick when a ticket entered a state multiple times 156 | 157 | Returns: 158 | AnalyzedAgileTicket 159 | """ 160 | 161 | kwargs = { 162 | "key": ticket.key, 163 | "ttype": ticket.type, 164 | "title": ticket.title, 165 | } 166 | 167 | for phase, state_list in self.states_context.items(): 168 | state, datetime = self._find_entered_at(state_list, ticket, strategy) 169 | if None in (state, datetime): 170 | msg = "{key} is missing flow_log information for {state_list}".format(key=ticket.key, state_list=state_list) 171 | raise MissingPhaseInformation( 172 | msg, 173 | phase, 174 | state_list, 175 | ) 176 | kwargs[phase] = dict(state=state, entered_at=datetime) 177 | return AnalyzedAgileTicket(**kwargs) 178 | -------------------------------------------------------------------------------- /agile_analytics/fetchers.py: -------------------------------------------------------------------------------- 1 | """Fetch data from agile sources and return standard AgileTickets.""" 2 | 3 | from dateutil.parser import parse 4 | from jira import JIRA 5 | 6 | from .models import AgileTicket 7 | 8 | 9 | class BaseFetcher(object): 10 | """Base class for Fetchers.""" 11 | 12 | def fetch(): 13 | """Method invoked to fetch. 14 | 15 | Args: 16 | None 17 | Returns: 18 | None 19 | Raises: 20 | NotImplementedError 21 | """ 22 | raise NotImplementedError # pragma: no cover 23 | 24 | 25 | def convert_jira_issue(issue): 26 | """ 27 | Convert a JIRA issue into a AgileTicket. 
28 | 29 | Args: 30 | issue (Issue): A jira.Issue instance 31 | 32 | Returns: 33 | An AgileTicket instance 34 | 35 | Raises: 36 | None 37 | """ 38 | try: 39 | ttype = issue.fields.issuetype.name 40 | except AttributeError: 41 | ttype = "Ticket" 42 | 43 | t = AgileTicket(issue.key, ttype=ttype) 44 | 45 | t.title = issue.fields.summary 46 | t.created_at = parse(issue.fields.created) 47 | t.updated_at = parse(issue.fields.updated) 48 | t.flow_log.append( 49 | dict( 50 | entered_at=t.created_at, 51 | state=str("Created"), 52 | ) 53 | ) 54 | 55 | for history in issue.changelog.histories: 56 | for item in history.items: 57 | if item.field == 'status': 58 | t.flow_log.append( 59 | dict( 60 | entered_at=parse(history.created), 61 | state=str(item.toString) 62 | ) 63 | ) 64 | 65 | return t 66 | 67 | 68 | class JIRAFetcher(BaseFetcher): 69 | """Fetch data from JIRA and transform it into AgileTickets. 70 | 71 | Attributes: 72 | jira_kwargs (dict): The arguments passed to the JIRA instance. 73 | auth_kwargs (dict): The authentication kwargs passed to the JIRA instance 74 | """ 75 | 76 | BASIC_AUTH_KEYS = ['username', 'password'] 77 | OAUTH_KEYS = ['access_token', 'access_token_secret', 'consumer_key', 'key_cert'] 78 | 79 | def __init__(self, url, auth, filter_id, max_results=999, jira_kwargs=None): 80 | """Create JIRAFetcher. 81 | 82 | Args: 83 | url (str): Fully qualified URL include http/https scheme to your JIRA instance 84 | auth (dict): Dictionary of authentication credentials. 
Either username/password keys 85 | for basic auth OR access_token/access_token_secret/consumer_key/key_cert for OAuth 86 | filter_id (int): The JIRA filter ID of results you wish to fetch 87 | max_results (Optional[int]): Number of results to fetch, defaults to 999 88 | jira_kwargs (Optional[dict]): Additional kwargs passed to the jira.JIRA class 89 | at instance creation 90 | 91 | Returns: 92 | JIRAFetcher: instance 93 | 94 | Raises: 95 | TypeError: When neither Basic nor Oauth keys were provided in the auth dict 96 | """ 97 | self._url = url 98 | self._auth = auth 99 | self.auth_kwargs = {} 100 | self._validate_auth() 101 | self._filter_id = int(filter_id) 102 | self.jira_kwargs = jira_kwargs or {} 103 | self.jira_kwargs.update(self.auth_kwargs) 104 | self._max_results = max_results 105 | 106 | def _validate_auth(self): 107 | if set(self._auth.keys()) <= set(self.BASIC_AUTH_KEYS): 108 | self.auth_kwargs = dict(basic_auth=(self._auth['username'], self._auth['password'])) 109 | elif set(self._auth.keys()) <= set(self.OAUTH_KEYS): 110 | self.auth_kwargs = dict(oauth=self._auth) 111 | else: 112 | raise TypeError("Neither %s nor %s found in auth parameter" % (self.BASIC_AUTH_KEYS, self.OAUTH_KEYS)) 113 | 114 | # TODO: Needs tests 115 | def fetch(self, jira_klass=JIRA): 116 | """Fetch data and return AgileTickets. 
117 | 118 | Args: 119 | jira_klass (Optional[JIRA]): jira.JIRA compatible class to be used for a JIRA connection 120 | 121 | Returns: 122 | list: List of AgileTicket instances or empty list 123 | 124 | Raises: 125 | None 126 | """ 127 | j = jira_klass( 128 | server=self._url, 129 | **self.jira_kwargs 130 | ) 131 | search_string = "filter={filter_id}".format(filter_id=self._filter_id) 132 | issues = j.search_issues(search_string, maxResults=self._max_results, expand="changelog") 133 | tickets = [] 134 | for i in issues: 135 | tickets.append(convert_jira_issue(i)) 136 | return tickets 137 | -------------------------------------------------------------------------------- /agile_analytics/models.py: -------------------------------------------------------------------------------- 1 | """Data models.""" 2 | 3 | from datetime import datetime 4 | 5 | 6 | class AgileTicket(object): 7 | """Abstract representation of tickets in Agile systems. 8 | 9 | Attributes: 10 | key (unicode): Unique identifier for the ticket in its system of record 11 | created_at (datetime): When was the ticket created 12 | updated_at (datetime): When was the ticket last updated 13 | type (str): The kind of ticket this is: Bug, Epic, Story, etc. 14 | 15 | Optional Attributes: 16 | title (unicode): The title of the ticket 17 | type (unicode): A label of the type of ticket: Story, Epic, Defect 18 | """ 19 | 20 | def __init__(self, key, title="", ttype="Ticket"): 21 | """Init an AgileTicket. 22 | 23 | Args: 24 | key (str): A unique identifier for this ticket in the system of record 25 | """ 26 | self.key = str(key) 27 | self.title = title 28 | self.created_at = None 29 | self.updated_at = None 30 | self.type = ttype 31 | self._flow_log = FlowLog() 32 | 33 | @property 34 | def flow_log(self): 35 | """FlowLog[dict]. 
36 | 37 | A list of dicts guaranteed to have the following: 38 | entered_at (datetime): When the ticket entered the state 39 | state (unicode): The name of the state the ticket entered 40 | """ 41 | return self._flow_log 42 | 43 | 44 | class FlowLog(list): 45 | """List subclass enforcing dictionaries with specific keys are added to it.""" 46 | 47 | def append(self, value): 48 | """Add items to the list. 49 | 50 | Args: 51 | value (dict): Must contain an entered_at and state key. 52 | 53 | Returns: 54 | None 55 | 56 | Raises: 57 | TypeError: Flow log items must have a 'entered_at' datetime and a 'state' string. 58 | """ 59 | try: 60 | ('entered_at', 'state') in value.keys() 61 | except AttributeError: 62 | raise TypeError("Flow log items must have a 'entered_at' datetime and a 'state' string. Got: {value}".format(value=value)) 63 | 64 | entered_at = value['entered_at'] 65 | try: 66 | datetime.now(entered_at.tzinfo) - entered_at 67 | except (AttributeError, TypeError) as e: 68 | msgvars = dict( 69 | val_type=type(entered_at), 70 | val=entered_at, 71 | exc=str(e) 72 | ) 73 | raise TypeError("Flow log items must have a entered_at datetime. Got: {val_type} / {val}, \n Exception: {exc}".format(**msgvars)) 74 | 75 | value[u'state'] = str(value['state']) 76 | super(FlowLog, self).append(value) 77 | self.sort(key=lambda l: l['entered_at']) 78 | 79 | 80 | class AnalyzedAgileTicket(object): 81 | """An AgileTicket analyzed within a certain context. 
82 | 83 | Attributes: 84 | key (unicode): Unique identifier for the ticket in its system of record 85 | committed (dict): The state and datetime when the story was committed 86 | started (dict): The state and datetime when the story was started 87 | ended (dict): The state and datetime when the story was ended 88 | 89 | Optional Attributes: 90 | title (unicode): The title of the ticket 91 | type (unicode): A label of the type of ticket: Story, Epic, Defect 92 | """ 93 | 94 | def __init__( 95 | self, key, committed, started, ended, 96 | title="", ttype="Ticket", 97 | ): 98 | """Create AnalyzedAgileTickets.""" 99 | self.key = key 100 | self.title = title 101 | self.committed = committed 102 | self.started = started 103 | self.ended = ended 104 | self.type = ttype 105 | 106 | def __repr__(self): 107 | """Represention of the object.""" 108 | return "{} -- Ended: {}".format(self.key, self.ended['entered_at']) 109 | 110 | @property 111 | def lead_time(self): 112 | """Number of days between committed and ended.""" 113 | diff = self.ended['entered_at'] - self.committed['entered_at'] 114 | return diff.days 115 | 116 | @property 117 | def cycle_time(self): 118 | """Number of days between started and ended.""" 119 | diff = self.ended['entered_at'] - self.started['entered_at'] 120 | return diff.days 121 | -------------------------------------------------------------------------------- /agile_analytics/reporters.py: -------------------------------------------------------------------------------- 1 | """Make reports from data.""" 2 | 3 | from collections import namedtuple 4 | from datetime import datetime 5 | 6 | from dateutil.relativedelta import relativedelta 7 | from dateutil.tz import tzutc 8 | 9 | from numpy import histogram, array, arange, percentile, round 10 | 11 | Report = namedtuple("Report", ["table", "summary"]) 12 | 13 | 14 | class Reporter(object): 15 | """Base class for Reporters. 
16 | 17 | Attributes: 18 | title (unicode): The name of the report 19 | start_date (datetime): The starting range of the report. 20 | end_date (datetime): The ending range of the report. 21 | """ 22 | 23 | MONDAY = 0 24 | TUESDAY = 1 25 | WEDNESDAY = 2 26 | THURSDAY = 3 27 | FRIDAY = 4 28 | SATURDAY = 5 29 | SUNDAY = 6 30 | 31 | def __init__(self, title, start_date=None, end_date=None): 32 | self.title = title 33 | self.start_date = start_date 34 | self.end_date = end_date 35 | super().__init__() 36 | 37 | @property 38 | def start_date(self): 39 | return self.valid_start_date(self._start_date) 40 | 41 | @start_date.setter 42 | def start_date(self, value): 43 | if value and value.tzinfo is None: 44 | value = value.replace(tzinfo=tzutc()) 45 | self._start_date = value 46 | 47 | @property 48 | def end_date(self): 49 | return self.valid_end_date(self._end_date) 50 | 51 | @end_date.setter 52 | def end_date(self, value): 53 | if value and value.tzinfo is None: 54 | value = value.replace(tzinfo=tzutc()) 55 | self._end_date = value 56 | 57 | def valid_start_date(self, target_date): 58 | """Returns a date that is valid for the start of the report. 59 | Arguments: 60 | target_date (datetime): The date you'd like examined 61 | Returns: 62 | datetime: A datetime made valid for the report based on the target_date argument. 63 | """ 64 | return target_date 65 | 66 | def valid_end_date(self, target_date): 67 | """Returns a date that is valid for the end of the report. 68 | Arguments: 69 | target_date (datetime): The date you'd like examined 70 | Returns: 71 | datetime: A datetime made valid for the report based on the target_date argument. 72 | """ 73 | return target_date 74 | 75 | def walk_back_to_weekday(self, target_date, day): 76 | """Returns the nearest date that predates the target_date. 77 | Arguments: 78 | target_date (datetime): The date to start with. 
79 | day (int): An integer between 0 (Monday) and 6 (Sunday) 80 | Returns: 81 | datetime: The nearest date that predates the target_date for the given day. 82 | """ 83 | while target_date.weekday() != day: 84 | target_date = target_date - relativedelta(days=1) 85 | return target_date 86 | 87 | def walk_forward_to_weekday(self, target_date, day): 88 | """Returns the nearest date that postdates the target_date. 89 | Arguments: 90 | target_date (datetime): The date to start with. 91 | day (int): An integer between 0 (Monday) and 6 (Sunday) 92 | Returns: 93 | datetime: The nearest date that postdates the target_date for the given day. 94 | """ 95 | while target_date.weekday() != day: 96 | target_date = target_date + relativedelta(days=1) 97 | return target_date 98 | 99 | def filter_on_ended(self, issues): 100 | """Returns issues that were ended between the instances start/end dates. 101 | Arguments: 102 | issues (list[AnalyzedAgileTicket]): List of issues to be filtered_issues 103 | Return: 104 | list[AnalyzedAgileTicket]: List of issues that match. 105 | """ 106 | filtered_issues = [i for i in issues if i.ended and (i.ended['entered_at'] >= self.start_date and i.ended['entered_at'] <= self.end_date)] 107 | return filtered_issues 108 | 109 | def filter_on_committed(self, issues): 110 | """Returns issues that were committed to between the instances start/end dates. 111 | Arguments: 112 | issues (list[AnalyzedAgileTicket]): List of issues to be filtered_issues 113 | Return: 114 | list[AnalyzedAgileTicket]: List of issues that match. 
class CreatedReporter(Reporter):
    """Report weekly counts of committed tickets, broken out by ticket type.

    One row per week (Sunday through Saturday) between ``start_date`` and
    ``end_date``, with one count column per ticket type seen in the data.
    """

    def valid_start_date(self, target_date):
        """Ensure we start on a Sunday."""
        target_date = super().valid_start_date(target_date)
        return self.walk_back_to_weekday(target_date, self.SUNDAY)

    def valid_end_date(self, target_date):
        """Ensure we end on a Saturday."""
        # NOTE: the old docstring said "Sunday", but the code walks forward
        # to SATURDAY — the docstring was wrong, not the code.
        target_date = super().valid_end_date(target_date)
        return self.walk_forward_to_weekday(target_date, self.SATURDAY)

    def filter_issues(self, issues):
        """Ignore issues *committed* outside the start/end range."""
        # This reporter keys off the commit timestamp (filter_on_committed),
        # not the completion timestamp.
        return self.filter_on_committed(issues)

    def _issues_for_week(self, week_start, issues):
        """Return the issues committed during the week starting at week_start.

        Arguments:
            week_start (date): The Sunday the week starts on.
            issues (list): Candidate issues (already filtered).

        Returns:
            list: Issues committed between week_start 00:00:00 and the
            following Saturday 23:59:59, UTC.
        """
        week_end = self.walk_forward_to_weekday(week_start, self.SATURDAY)
        week_start = datetime(week_start.year, week_start.month, week_start.day, 0, 0, 0, tzinfo=tzutc())
        # BUG FIX: the window previously closed at 11:59:59 (just before
        # noon), silently dropping issues committed Saturday afternoon/evening.
        week_end = datetime(week_end.year, week_end.month, week_end.day, 23, 59, 59, tzinfo=tzutc())
        return [i for i in issues if week_start <= i.committed['entered_at'] <= week_end]

    def report_on(self, issues, header=True):
        """Generate a report, one row per week, with counts for each ticket type.

        Arguments:
            issues (list): Issues to report on.
            header (bool): Unused; a header row is always emitted.

        Returns:
            Report: Table of weekly counts plus a summary dict.
        """
        issues = self.filter_issues(issues)
        r = Report(
            table=[],
            summary=dict(
                title=self.title,
                start_date=self.start_date,
                end_date=self.end_date
            )
        )

        ticket_types = sorted({issue.type for issue in issues})
        r.table.append(["Week"] + ticket_types)

        for sunday in self.starts_of_weeks():
            this_weeks_issues = self._issues_for_week(sunday, issues)
            row = [sunday]
            for ttype in ticket_types:
                row.append(len([i for i in this_weeks_issues if i.type == ttype]))
            r.table.append(row)

        return r


class TicketReporter(Reporter):
    """List every ticket that matches the filter criteria, one row per ticket."""

    def valid_start_date(self, target_date):
        """Ensure we start on a Sunday."""
        target_date = super().valid_start_date(target_date)
        return self.walk_back_to_weekday(target_date, self.SUNDAY)

    def valid_end_date(self, target_date):
        """Ensure we end on a Saturday."""
        target_date = super().valid_end_date(target_date)
        return self.walk_forward_to_weekday(target_date, self.SATURDAY)

    def filter_issues(self, issues):
        """Ignore issues completed outside the start/end range."""
        return self.filter_on_ended(issues)

    def report_on(self, issues, header=True):
        """Generate a report, one row per issue, with details.

        Rows are ordered by completion timestamp.

        Arguments:
            issues (list): Issues to report on.
            header (bool): Unused; a header row is always emitted.

        Returns:
            Report: One detail row per issue.
        """
        issues = self.filter_issues(issues)
        issues.sort(key=lambda x: x.ended['entered_at'])
        r = Report(
            table=[],
            summary=dict(
                title=self.title,
                start_date=self.start_date,
                end_date=self.end_date
            )
        )

        r.table.append(
            ["Key", "Title", "Lead Time", "Cycle Time", "Commit State", "Commit At", "Start State", "Start At", "End State", "End At"],
        )
        for i in issues:
            r.table.append([
                i.key,
                i.title,
                i.lead_time,
                i.cycle_time,
                i.committed['state'],
                i.committed['entered_at'],
                i.started['state'],
                i.started['entered_at'],
                i.ended['state'],
                i.ended['entered_at'],
            ])

        return r


class SLAReporter(Reporter):
    """Report the number of tickets per week that exceeded their SLA."""

    def valid_start_date(self, target_date):
        """Ensure we start on a Sunday."""
        target_date = super().valid_start_date(target_date)
        return self.walk_back_to_weekday(target_date, self.SUNDAY)

    def valid_end_date(self, target_date):
        """Ensure we end on a Saturday."""
        target_date = super().valid_end_date(target_date)
        return self.walk_forward_to_weekday(target_date, self.SATURDAY)

    def filter_issues(self, issues):
        """Ignore issues completed outside the start/end range."""
        return self.filter_on_ended(issues)

    def _issues_for_week(self, week_start, issues):
        """Return the issues completed during the week starting at week_start."""
        week_end = self.walk_forward_to_weekday(week_start, self.SATURDAY)
        week_start = datetime(week_start.year, week_start.month, week_start.day, 0, 0, 0, tzinfo=tzutc())
        # BUG FIX: window previously closed at 11:59:59 AM, dropping issues
        # completed Saturday afternoon/evening.
        week_end = datetime(week_end.year, week_end.month, week_end.day, 23, 59, 59, tzinfo=tzutc())
        return [i for i in issues if week_start <= i.ended['entered_at'] <= week_end]

    def report_on(self, issues, sla_config=None, header=True):
        """Count SLA breaches per week, per ticket type.

        Arguments:
            issues (list): Issues to report on.
            sla_config (dict): Maps ticket type -> maximum allowed lead time.
                A type present in the data but missing here raises KeyError,
                as before. Defaults to an empty dict.
            header (bool): Unused; a header row is always emitted.

        Returns:
            Report: One row per week, one breach-count column per ticket type.
        """
        # FIX: was a mutable default argument (sla_config={}).
        if sla_config is None:
            sla_config = {}

        r = Report(
            table=[],
            summary=dict(
                title=self.title,
                start_date=self.start_date,
                end_date=self.end_date
            )
        )

        issues = self.filter_issues(issues)
        ticket_types = sorted({issue.type for issue in issues})
        r.table.append(["Week"] + ticket_types)

        for sunday in self.starts_of_weeks():
            this_weeks_issues = self._issues_for_week(sunday, issues)
            row = [sunday]
            for ttype in ticket_types:
                row.append(len([i for i in this_weeks_issues if i.type == ttype and i.lead_time > sla_config[ttype]]))
            r.table.append(row)

        return r


class LeadTimeDistributionReporter(Reporter):
    """Generate a lead time histogram for tickets completed in a date range."""

    def valid_start_date(self, target_date):
        """Ensure we start on a Sunday."""
        target_date = super().valid_start_date(target_date)
        return self.walk_back_to_weekday(target_date, self.SUNDAY)

    def valid_end_date(self, target_date):
        """Ensure we end on a Saturday."""
        target_date = super().valid_end_date(target_date)
        return self.walk_forward_to_weekday(target_date, self.SATURDAY)

    def filter_issues(self, issues):
        """Ignore issues completed outside the start/end range."""
        return self.filter_on_ended(issues)

    def report_on(self, issues, header=True):
        """Generate a report object with a lead time histogram.

        Arguments:
            issues (list): Issues to report on.
            header (bool): Unused; a header row is always emitted.

        Returns:
            Report: Rows of (lead time in days, ticket count), one per
            unit-width bin, with the zero bin omitted (as before).
        """
        r = Report(
            table=[],
            summary=dict(
                title=self.title,
                start_date=self.start_date,
                end_date=self.end_date
            )
        )
        r.table.append(["Lead Time", "Tickets"])
        filtered_issues = self.filter_issues(issues)

        if filtered_issues:
            lead_times = array([i.lead_time for i in filtered_issues])
            hist_values, hist_bins = histogram(
                lead_times,
                bins=arange(0, max(lead_times) + 2, 1)
            )
            # Start at 1 to skip the lead-time-zero bin (original behavior).
            for i in range(1, len(hist_values)):
                r.table.append([hist_bins[i], hist_values[i]])
        return r


class TimePercentileReporter(Reporter):
    """Report weekly 50th/75th/95th percentiles of a configurable time attribute.

    Percentiles are computed over a rolling sample of the trailing 4 weeks.

    Attributes:
        title (unicode): The name of the report
        time_attr (str): The issue attribute measured, e.g. "lead_time".
        start_date (datetime): The starting range of issues for the report.
        end_date (datetime): The ending range of issues for the report.
        num_weeks (int): The number of weeks you'd like reported on. Default: 4
    """

    def __init__(self, title, time_attr, start_date=None, end_date=None, num_weeks=4):
        super().__init__(title, start_date, end_date)
        self.num_weeks = num_weeks
        self.time_attr = time_attr

    def valid_start_date(self, target_date):
        """Ensure we start on a Sunday."""
        target_date = super().valid_start_date(target_date)
        return self.walk_back_to_weekday(target_date, self.SUNDAY)

    def valid_end_date(self, target_date):
        """Ensure we end on a Saturday."""
        target_date = super().valid_end_date(target_date)
        return self.walk_forward_to_weekday(target_date, self.SATURDAY)

    def filter_issues(self, issues):
        """Ignore issues completed outside the start/end range."""
        return self.filter_on_ended(issues)

    def _times_for_week(self, week_start, issues):
        """Return the measured time attribute of issues ended in the given week."""
        week_end = self.walk_forward_to_weekday(week_start, self.SATURDAY)
        week_start = datetime(week_start.year, week_start.month, week_start.day, 0, 0, 0, tzinfo=tzutc())
        # BUG FIX: window previously closed at 11:59:59 AM, dropping issues
        # completed Saturday afternoon/evening.
        week_end = datetime(week_end.year, week_end.month, week_end.day, 23, 59, 59, tzinfo=tzutc())
        return [getattr(i, self.time_attr) for i in issues if week_start <= i.ended['entered_at'] <= week_end]

    def report_on(self, issues, header=True):
        """Generate a report, one row per week, with 50th/75th/95th percentiles.

        Arguments:
            issues (list): Issues to report on.
            header (bool): When True, prepend a header row.

        Returns:
            Report: The last ``num_weeks`` rows of weekly percentiles.
        """
        r = Report(
            table=[],
            summary=dict(
                title=self.title,
                start_date=self.start_date,
                end_date=self.end_date,
                num_weeks=self.num_weeks)
        )
        if header:
            r.table.append(["Week", "50th", "75th", "95th"])

        issues = self.filter_issues(issues)

        percentiles_by_week = []
        moving_sample = []
        for sunday in self.starts_of_weeks():
            moving_sample.append(self._times_for_week(sunday, issues))
            # Limit the percentile calc to the trailing 4 weeks.
            # NOTE(review): this window is hard-coded and independent of
            # num_weeks, which only trims the rows emitted — confirm intent.
            if len(moving_sample) > 4:
                moving_sample.pop(0)

            samples = []
            for sample_set in moving_sample:
                samples.extend(sample_set)

            try:
                the_50th = int(round(percentile(samples, 50)))
                the_75th = int(round(percentile(samples, 75)))
                the_95th = int(round(percentile(samples, 95)))
            except IndexError:  # numpy raises IndexError on an empty sample
                the_50th, the_75th, the_95th = 0, 0, 0

            percentiles_by_week.append([sunday, the_50th, the_75th, the_95th])

        for row in percentiles_by_week[-self.num_weeks:]:
            r.table.append(row)

        return r


class LeadTimePercentileReporter(TimePercentileReporter):
    """TimePercentileReporter pre-configured to measure lead_time."""

    def __init__(self, title, start_date=None, end_date=None, num_weeks=4):
        super().__init__(title, "lead_time", start_date=start_date, end_date=end_date, num_weeks=num_weeks)


class CycleTimePercentileReporter(TimePercentileReporter):
    """TimePercentileReporter pre-configured to measure cycle_time."""

    def __init__(self, title, start_date=None, end_date=None, num_weeks=4):
        super().__init__(title, "cycle_time", start_date=start_date, end_date=end_date, num_weeks=num_weeks)


class ThroughputReporter(Reporter):
    """Generate throughput (completed-per-period) reports.

    Attributes:
        title (unicode): The name of the report
        period (unicode): The interval you'd like; only "weekly" affects
            date snapping. Counting is always done by week (see _count_by_week).
        start_date (datetime): The starting range of the report.
        end_date (datetime): The ending range of the report.
    """

    def __init__(self, title, period=None, start_date=None, end_date=None):
        # period must be set before super().__init__ runs, because the base
        # constructor validates dates via valid_start_date/valid_end_date,
        # which read self.period. The old code also pre-set title/start/end,
        # but super().__init__(title, start_date, end_date) assigns those.
        self.period = period
        super().__init__(title, start_date, end_date)

    def valid_start_date(self, target_date):
        """Snap the start date back to Sunday when reporting weekly."""
        target_date = super().valid_start_date(target_date)
        if self.period == "weekly":
            target_date = self.walk_back_to_weekday(target_date, self.SUNDAY)
        return target_date

    def valid_end_date(self, target_date):
        """Snap the end date forward to Saturday when reporting weekly."""
        target_date = super().valid_end_date(target_date)
        if self.period == "weekly":
            target_date = self.walk_forward_to_weekday(target_date, self.SATURDAY)
        return target_date

    def _count_by_week(self, issues):
        """Map each week's starting Sunday (date) to the count of issues ended that week."""
        counted_by_week = {}
        for week_starting in self.starts_of_weeks():
            week_end = week_starting + relativedelta(days=6)
            counted_by_week[week_starting] = len(
                [i for i in issues if week_starting <= i.ended['entered_at'].date() <= week_end]
            )
        return counted_by_week

    def filter_issues(self, issues):
        """Ignore issues completed outside the start/end range."""
        return self.filter_on_ended(issues)

    def report_on(self, issues, header=True):
        """Generate a report, one row per week, counting completed issues.

        Arguments:
            issues (list): Issues to report on.
            header (bool): Unused; a header row is always emitted.

        Returns:
            Report: Rows of (week starting date, completed count), ascending.
        """
        r = Report(
            table=[],
            summary=dict(
                title=self.title,
                period=self.period,
                start_date=self.start_date,
                end_date=self.end_date
            )
        )
        r.table.append(["Week", "Completed"])
        counted_by_week = self._count_by_week(self.filter_issues(issues))

        for week in sorted(counted_by_week):
            r.table.append([week, counted_by_week[week]])

        return r
"""Writers that persist reports to CSV files or Google Sheets."""
import csv
import re

import gspread

from oauth2client.service_account import ServiceAccountCredentials


class CSVWriter(object):
    """Writes reports as CSV to a file-like destination."""

    def write(self, report, destination):
        """Write a CSV version of the report.

        Arguments:
            report (Report): The Report instance that should be written out.
            destination (file-like object): Where the CSV rows are written.

        Returns:
            None
        """
        writer = csv.writer(destination)
        writer.writerows(report.table)


class GSheetWriter(object):
    """Writes reports to Google Spreadsheets.

    Arguments:
        keyfile_name (str): Path to a JSON file with the service account
            credentials you want to use.
    """

    CREDENTIAL_CLASS = ServiceAccountCredentials
    DRIVER_MODULE = gspread
    # Single-letter column labels; _column_label composes these for columns
    # past "Z" (AA, AB, ...).
    COLUMN_OPTIONS = 'A B C D E F G H I J K L M N O P Q R S T U V W X Y Z'.split(' ')

    def __init__(self, keyfile_name="client_secret.json"):
        self.keyfile_name = keyfile_name
        self.scope = ['https://spreadsheets.google.com/feeds']

    @property
    def credentials(self):
        """Load the JSON keyfile and return ServiceAccountCredentials.

        Returns:
            Credentials: oauth2client-compatible credentials built from the JSON.
        """
        return self.CREDENTIAL_CLASS.from_json_keyfile_name(self.keyfile_name, self.scope)

    @property
    def driver(self):
        """Return a gspread client authorized with self.credentials."""
        return self.DRIVER_MODULE.authorize(self.credentials)

    def get_datasheet(self, doc, name):
        """Find (or create) the named worksheet in the supplied spreadsheet.

        Arguments:
            doc (Spreadsheet): The spreadsheet where the worksheet should exist.
            name (str): The name of the worksheet you'd like to exist.
                (There is no default; the old docstring's claim of "Data"
                was incorrect.)

        Returns:
            Worksheet: The worksheet requested.
        """
        try:
            data_sheet = doc.worksheet(name)
        except gspread.exceptions.WorksheetNotFound:
            # Create a minimal 1x1 sheet; batch_update_sheet resizes it later.
            data_sheet = doc.add_worksheet(name, 1, 1)
        return data_sheet

    def clear_sheet(self, sheet, rows, cols):
        """Delete all data on the supplied sheet by resizing and blanking it.

        Arguments:
            sheet (Worksheet): The worksheet you want cleared.
            rows (int): The number of rows the cleared sheet should end up with.
            cols (int): The number of columns the cleared sheet should end up
                with. (The old docstring said "rows" here — copy/paste error.)

        Returns:
            None
        """
        sheet.resize(rows, cols)
        cell_list = sheet.findall(re.compile(".*", re.DOTALL))
        for cell in cell_list:
            cell.value = ""
        sheet.update_cells(cell_list)

    @classmethod
    def _column_label(cls, index):
        """Return the spreadsheet column label for a 0-based column index.

        Labels follow spreadsheet convention: A..Z, then AA, AB, ... so
        sheets wider than 26 columns are supported.
        """
        label = ""
        index += 1  # bijective base-26: A=1 ... Z=26, AA=27 ...
        while index > 0:
            index, rem = divmod(index - 1, 26)
            label = cls.COLUMN_OPTIONS[rem] + label
        return label

    def select_range(self, sheet, data):
        """Return the cells that will be modified to match the new data.

        Arguments:
            sheet (Worksheet): The worksheet you want to put data into.
            data (array): The array of data you want placed into the sheet.

        Returns:
            list[Cell]: The gspread.Cells that should be replaced to match data.
        """
        start_cell = "A1"  # Always and forever
        # FIX: previously indexed COLUMN_OPTIONS directly, raising IndexError
        # for any report wider than 26 columns.
        end_cell = "{}{}".format(
            self._column_label(len(data[0]) - 1),
            len(data)
        )
        return sheet.range("{}:{}".format(start_cell, end_cell))

    def update_cells(self, cells, data):
        """Update the cells, in batch, with the data provided.

        Arguments:
            cells (list[Cell]): The gspread.Cells that should be filled.
            data (array): The array of data you want placed into the sheet.

        Returns:
            list[Cell]: The same cells with values assigned. (The old
            docstring claimed None, but the code returns the cells.)
        """
        for cell in cells:
            row_index = cell.row - 1
            col_index = cell.col - 1
            try:
                cell.value = data[row_index][col_index]
            except IndexError:  # Cell lies outside the data; leave as-is.
                pass

        return cells

    def batch_update_sheet(self, sheet, data):
        """Clear a sheet and write the data into it in one batch.

        Arguments:
            sheet (Worksheet): The worksheet you want to modify.
            data (array): The data you want the worksheet to contain.

        Returns:
            None
        """
        self.clear_sheet(sheet, len(data), len(data[0]))
        cells = self.select_range(sheet, data)
        cells = self.update_cells(cells, data)
        sheet.update_cells(cells)

    def write(self, report, doc_name, sheet_name):
        """Write report.table into worksheet sheet_name of spreadsheet doc_name."""
        doc = self.driver.open(doc_name)
        sheet = self.get_datasheet(doc, sheet_name)
        self.batch_update_sheet(sheet, report.table)
16 | 17 | .PHONY: help 18 | help: 19 | @echo "Please use \`make <target>' where <target> is one of" 20 | @echo " html to make standalone HTML files" 21 | @echo " dirhtml to make HTML files named index.html in directories" 22 | @echo " singlehtml to make a single large HTML file" 23 | @echo " pickle to make pickle files" 24 | @echo " json to make JSON files" 25 | @echo " htmlhelp to make HTML files and a HTML help project" 26 | @echo " qthelp to make HTML files and a qthelp project" 27 | @echo " applehelp to make an Apple Help Book" 28 | @echo " devhelp to make HTML files and a Devhelp project" 29 | @echo " epub to make an epub" 30 | @echo " epub3 to make an epub3" 31 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 32 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 33 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 34 | @echo " text to make text files" 35 | @echo " man to make manual pages" 36 | @echo " texinfo to make Texinfo files" 37 | @echo " info to make Texinfo files and run them through makeinfo" 38 | @echo " gettext to make PO message catalogs" 39 | @echo " changes to make an overview of all changed/added/deprecated items" 40 | @echo " xml to make Docutils-native XML files" 41 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 42 | @echo " linkcheck to check all external links for integrity" 43 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 44 | @echo " coverage to run coverage check of the documentation (if enabled)" 45 | @echo " dummy to check syntax errors of document sources" 46 | 47 | .PHONY: clean 48 | clean: 49 | rm -rf $(BUILDDIR)/* 50 | 51 | .PHONY: html 52 | html: 53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 54 | @echo 55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
56 | 57 | .PHONY: dirhtml 58 | dirhtml: 59 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 60 | @echo 61 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 62 | 63 | .PHONY: singlehtml 64 | singlehtml: 65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 66 | @echo 67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 68 | 69 | .PHONY: pickle 70 | pickle: 71 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 72 | @echo 73 | @echo "Build finished; now you can process the pickle files." 74 | 75 | .PHONY: json 76 | json: 77 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 78 | @echo 79 | @echo "Build finished; now you can process the JSON files." 80 | 81 | .PHONY: htmlhelp 82 | htmlhelp: 83 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 84 | @echo 85 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 86 | ".hhp project file in $(BUILDDIR)/htmlhelp." 87 | 88 | .PHONY: qthelp 89 | qthelp: 90 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 91 | @echo 92 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 93 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 94 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/agile-analytics.qhcp" 95 | @echo "To view the help file:" 96 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/agile-analytics.qhc" 97 | 98 | .PHONY: applehelp 99 | applehelp: 100 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 101 | @echo 102 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 103 | @echo "N.B. You won't be able to view it unless you put it in" \ 104 | "~/Library/Documentation/Help or install it in your application" \ 105 | "bundle." 106 | 107 | .PHONY: devhelp 108 | devhelp: 109 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 110 | @echo 111 | @echo "Build finished." 
112 | @echo "To view the help file:" 113 | @echo "# mkdir -p $$HOME/.local/share/devhelp/agile-analytics" 114 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/agile-analytics" 115 | @echo "# devhelp" 116 | 117 | .PHONY: epub 118 | epub: 119 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 120 | @echo 121 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 122 | 123 | .PHONY: epub3 124 | epub3: 125 | $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 126 | @echo 127 | @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." 128 | 129 | .PHONY: latex 130 | latex: 131 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 132 | @echo 133 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 134 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 135 | "(use \`make latexpdf' here to do that automatically)." 136 | 137 | .PHONY: latexpdf 138 | latexpdf: 139 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 140 | @echo "Running LaTeX files through pdflatex..." 141 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 142 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 143 | 144 | .PHONY: latexpdfja 145 | latexpdfja: 146 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 147 | @echo "Running LaTeX files through platex and dvipdfmx..." 148 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 149 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 150 | 151 | .PHONY: text 152 | text: 153 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 154 | @echo 155 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 156 | 157 | .PHONY: man 158 | man: 159 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 160 | @echo 161 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 
162 | 163 | .PHONY: texinfo 164 | texinfo: 165 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 166 | @echo 167 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 168 | @echo "Run \`make' in that directory to run these through makeinfo" \ 169 | "(use \`make info' here to do that automatically)." 170 | 171 | .PHONY: info 172 | info: 173 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 174 | @echo "Running Texinfo files through makeinfo..." 175 | make -C $(BUILDDIR)/texinfo info 176 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 177 | 178 | .PHONY: gettext 179 | gettext: 180 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 181 | @echo 182 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 183 | 184 | .PHONY: changes 185 | changes: 186 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 187 | @echo 188 | @echo "The overview file is in $(BUILDDIR)/changes." 189 | 190 | .PHONY: linkcheck 191 | linkcheck: 192 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 193 | @echo 194 | @echo "Link check complete; look for any errors in the above output " \ 195 | "or in $(BUILDDIR)/linkcheck/output.txt." 196 | 197 | .PHONY: doctest 198 | doctest: 199 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 200 | @echo "Testing of doctests in the sources finished, look at the " \ 201 | "results in $(BUILDDIR)/doctest/output.txt." 202 | 203 | .PHONY: coverage 204 | coverage: 205 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 206 | @echo "Testing of coverage in the sources finished, look at the " \ 207 | "results in $(BUILDDIR)/coverage/python.txt." 208 | 209 | .PHONY: xml 210 | xml: 211 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 212 | @echo 213 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 
214 | 215 | .PHONY: pseudoxml 216 | pseudoxml: 217 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 218 | @echo 219 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 220 | 221 | .PHONY: dummy 222 | dummy: 223 | $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy 224 | @echo 225 | @echo "Build finished. Dummy builder generates no files." 226 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # agile-analytics documentation build configuration file, created by 4 | # sphinx-quickstart on Fri Jun 17 13:58:53 2016. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | # If extensions (or modules to document with autodoc) are in another directory, 16 | # add these directories to sys.path here. If the directory is relative to the 17 | # documentation root, use os.path.abspath to make it absolute, like shown here. 18 | # 19 | # import os 20 | # import sys 21 | # sys.path.insert(0, os.path.abspath('.')) 22 | 23 | # -- General configuration ------------------------------------------------ 24 | 25 | # If your documentation needs a minimal Sphinx version, state it here. 26 | # 27 | # needs_sphinx = '1.0' 28 | 29 | # Add any Sphinx extension module names here, as strings. They can be 30 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 31 | # ones. 
32 | extensions = [ 33 | 'sphinx.ext.autodoc', 34 | 'sphinx.ext.todo', 35 | 'sphinx.ext.coverage', 36 | 'sphinx.ext.viewcode', 37 | 'sphinx.ext.githubpages', 38 | ] 39 | 40 | # Add any paths that contain templates here, relative to this directory. 41 | templates_path = ['_templates'] 42 | 43 | # The suffix(es) of source filenames. 44 | # You can specify multiple suffix as a list of string: 45 | # 46 | # source_suffix = ['.rst', '.md'] 47 | source_suffix = '.rst' 48 | 49 | # The encoding of source files. 50 | # 51 | # source_encoding = 'utf-8-sig' 52 | 53 | # The master toctree document. 54 | master_doc = 'index' 55 | 56 | # General information about the project. 57 | project = u'agile-analytics' 58 | copyright = u'2016, Chris Heisel' 59 | author = u'Chris Heisel' 60 | 61 | # The version info for the project you're documenting, acts as replacement for 62 | # |version| and |release|, also used in various other places throughout the 63 | # built documents. 64 | # 65 | # The short X.Y version. 66 | version = u'0.1' 67 | # The full version, including alpha/beta/rc tags. 68 | release = u'0.1' 69 | 70 | # The language for content autogenerated by Sphinx. Refer to documentation 71 | # for a list of supported languages. 72 | # 73 | # This is also used if you do content translation via gettext catalogs. 74 | # Usually you set "language" from the command line for these cases. 75 | language = None 76 | 77 | # There are two options for replacing |today|: either, you set today to some 78 | # non-false value, then it is used: 79 | # 80 | # today = '' 81 | # 82 | # Else, today_fmt is used as the format for a strftime call. 83 | # 84 | # today_fmt = '%B %d, %Y' 85 | 86 | # List of patterns, relative to source directory, that match files and 87 | # directories to ignore when looking for source files. 
88 | # This patterns also effect to html_static_path and html_extra_path 89 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 90 | 91 | # The reST default role (used for this markup: `text`) to use for all 92 | # documents. 93 | # 94 | # default_role = None 95 | 96 | # If true, '()' will be appended to :func: etc. cross-reference text. 97 | # 98 | # add_function_parentheses = True 99 | 100 | # If true, the current module name will be prepended to all description 101 | # unit titles (such as .. function::). 102 | # 103 | # add_module_names = True 104 | 105 | # If true, sectionauthor and moduleauthor directives will be shown in the 106 | # output. They are ignored by default. 107 | # 108 | # show_authors = False 109 | 110 | # The name of the Pygments (syntax highlighting) style to use. 111 | pygments_style = 'sphinx' 112 | 113 | # A list of ignored prefixes for module index sorting. 114 | # modindex_common_prefix = [] 115 | 116 | # If true, keep warnings as "system message" paragraphs in the built documents. 117 | # keep_warnings = False 118 | 119 | # If true, `todo` and `todoList` produce output, else they produce nothing. 120 | todo_include_todos = True 121 | 122 | 123 | # -- Options for HTML output ---------------------------------------------- 124 | 125 | # The theme to use for HTML and HTML Help pages. See the documentation for 126 | # a list of builtin themes. 127 | # 128 | html_theme = 'alabaster' 129 | 130 | # Theme options are theme-specific and customize the look and feel of a theme 131 | # further. For a list of options available for each theme, see the 132 | # documentation. 133 | # 134 | # html_theme_options = {} 135 | 136 | # Add any paths that contain custom themes here, relative to this directory. 137 | # html_theme_path = [] 138 | 139 | # The name for this set of Sphinx documents. 140 | # " v documentation" by default. 141 | # 142 | # html_title = u'agile-analytics v0.1' 143 | 144 | # A shorter title for the navigation bar. 
Default is the same as html_title. 145 | # 146 | # html_short_title = None 147 | 148 | # The name of an image file (relative to this directory) to place at the top 149 | # of the sidebar. 150 | # 151 | # html_logo = None 152 | 153 | # The name of an image file (relative to this directory) to use as a favicon of 154 | # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 155 | # pixels large. 156 | # 157 | # html_favicon = None 158 | 159 | # Add any paths that contain custom static files (such as style sheets) here, 160 | # relative to this directory. They are copied after the builtin static files, 161 | # so a file named "default.css" will overwrite the builtin "default.css". 162 | html_static_path = ['_static'] 163 | 164 | # Add any extra paths that contain custom files (such as robots.txt or 165 | # .htaccess) here, relative to this directory. These files are copied 166 | # directly to the root of the documentation. 167 | # 168 | # html_extra_path = [] 169 | 170 | # If not None, a 'Last updated on:' timestamp is inserted at every page 171 | # bottom, using the given strftime format. 172 | # The empty string is equivalent to '%b %d, %Y'. 173 | # 174 | # html_last_updated_fmt = None 175 | 176 | # If true, SmartyPants will be used to convert quotes and dashes to 177 | # typographically correct entities. 178 | # 179 | # html_use_smartypants = True 180 | 181 | # Custom sidebar templates, maps document names to template names. 182 | # 183 | # html_sidebars = {} 184 | 185 | # Additional templates that should be rendered to pages, maps page names to 186 | # template names. 187 | # 188 | # html_additional_pages = {} 189 | 190 | # If false, no module index is generated. 191 | # 192 | # html_domain_indices = True 193 | 194 | # If false, no index is generated. 195 | # 196 | # html_use_index = True 197 | 198 | # If true, the index is split into individual pages for each letter. 
199 | # 200 | # html_split_index = False 201 | 202 | # If true, links to the reST sources are added to the pages. 203 | # 204 | # html_show_sourcelink = True 205 | 206 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 207 | # 208 | # html_show_sphinx = True 209 | 210 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 211 | # 212 | # html_show_copyright = True 213 | 214 | # If true, an OpenSearch description file will be output, and all pages will 215 | # contain a tag referring to it. The value of this option must be the 216 | # base URL from which the finished HTML is served. 217 | # 218 | # html_use_opensearch = '' 219 | 220 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 221 | # html_file_suffix = None 222 | 223 | # Language to be used for generating the HTML full-text search index. 224 | # Sphinx supports the following languages: 225 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 226 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' 227 | # 228 | # html_search_language = 'en' 229 | 230 | # A dictionary with options for the search language support, empty by default. 231 | # 'ja' uses this config value. 232 | # 'zh' user can custom change `jieba` dictionary path. 233 | # 234 | # html_search_options = {'type': 'default'} 235 | 236 | # The name of a javascript file (relative to the configuration directory) that 237 | # implements a search results scorer. If empty, the default will be used. 238 | # 239 | # html_search_scorer = 'scorer.js' 240 | 241 | # Output file base name for HTML help builder. 242 | htmlhelp_basename = 'agile-analyticsdoc' 243 | 244 | # -- Options for LaTeX output --------------------------------------------- 245 | 246 | latex_elements = { 247 | # The paper size ('letterpaper' or 'a4paper'). 248 | # 249 | # 'papersize': 'letterpaper', 250 | 251 | # The font size ('10pt', '11pt' or '12pt'). 
252 | # 253 | # 'pointsize': '10pt', 254 | 255 | # Additional stuff for the LaTeX preamble. 256 | # 257 | # 'preamble': '', 258 | 259 | # Latex figure (float) alignment 260 | # 261 | # 'figure_align': 'htbp', 262 | } 263 | 264 | # Grouping the document tree into LaTeX files. List of tuples 265 | # (source start file, target name, title, 266 | # author, documentclass [howto, manual, or own class]). 267 | latex_documents = [ 268 | (master_doc, 'agile-analytics.tex', u'agile-analytics Documentation', 269 | u'Chris Heisel', 'manual'), 270 | ] 271 | 272 | # The name of an image file (relative to this directory) to place at the top of 273 | # the title page. 274 | # 275 | # latex_logo = None 276 | 277 | # For "manual" documents, if this is true, then toplevel headings are parts, 278 | # not chapters. 279 | # 280 | # latex_use_parts = False 281 | 282 | # If true, show page references after internal links. 283 | # 284 | # latex_show_pagerefs = False 285 | 286 | # If true, show URL addresses after external links. 287 | # 288 | # latex_show_urls = False 289 | 290 | # Documents to append as an appendix to all manuals. 291 | # 292 | # latex_appendices = [] 293 | 294 | # If false, no module index is generated. 295 | # 296 | # latex_domain_indices = True 297 | 298 | 299 | # -- Options for manual page output --------------------------------------- 300 | 301 | # One entry per manual page. List of tuples 302 | # (source start file, name, description, authors, manual section). 303 | man_pages = [ 304 | (master_doc, 'agile-analytics', u'agile-analytics Documentation', 305 | [author], 1) 306 | ] 307 | 308 | # If true, show URL addresses after external links. 309 | # 310 | # man_show_urls = False 311 | 312 | 313 | # -- Options for Texinfo output ------------------------------------------- 314 | 315 | # Grouping the document tree into Texinfo files. 
List of tuples 316 | # (source start file, target name, title, author, 317 | # dir menu entry, description, category) 318 | texinfo_documents = [ 319 | (master_doc, 'agile-analytics', u'agile-analytics Documentation', 320 | author, 'agile-analytics', 'One line description of project.', 321 | 'Miscellaneous'), 322 | ] 323 | 324 | # Documents to append as an appendix to all manuals. 325 | # 326 | # texinfo_appendices = [] 327 | 328 | # If false, no module index is generated. 329 | # 330 | # texinfo_domain_indices = True 331 | 332 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 333 | # 334 | # texinfo_show_urls = 'footnote' 335 | 336 | # If true, do not generate a @detailmenu in the "Top" node's menu. 337 | # 338 | # texinfo_no_detailmenu = False 339 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. agile-analytics documentation master file, created by 2 | sphinx-quickstart on Fri Jun 17 13:58:53 2016. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to agile-analytics's documentation! 7 | =========================================== 8 | 9 | Contents: 10 | 11 | .. 
"""setup."""

from __future__ import unicode_literals
from setuptools import setup, find_packages


def _read_requirements(path):
    """Return the requirement strings listed in *path*.

    Reads the file directly instead of importing pip's private
    ``parse_requirements`` helper, which moved in pip 10 and changed its
    return type again in pip 20 (``ir.req`` became ``ir.requirement``),
    breaking installs under newer pip. Blank lines and ``#`` comment
    lines are skipped.
    """
    requirements = []
    with open(path, 'r') as handle:
        for line in handle:
            line = line.strip()
            if line and not line.startswith('#'):
                requirements.append(line)
    return requirements


# reqs is a list of requirement strings,
# e.g. ['django==1.5.1', 'mezzanine==1.4.6']
reqs = _read_requirements("requirements.txt")

description = """Pulls data from agile systems and analyzes it."""
author = "cmheisel"

# Context managers close the file handles promptly instead of relying on GC.
with open('README.md', 'r') as readme_file:
    README = readme_file.read()
try:
    with open('version.txt', 'r') as version_file:
        version = version_file.read().strip()
except FileNotFoundError:
    version = "unknown"

setup(
    name='agile-analytics',
    version=version,
    description=description,
    long_description=README,
    classifiers=[
        # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Information Technology',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities',
        'License :: OSI Approved :: MIT License',
    ],
    keywords='jira agile lean kanban metrics',
    author=author,
    author_email='chris@heisel.org',
    url='https://github.com/cmheisel/agile-analytics',
    license='MIT License',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=True,
    install_requires=reqs
)
@pytest.fixture
def days_agos(days_ago):
    """Return a dict of 46 dates keyed by days-ago offset (0 through 45).

    Key 0 is today, key 45 is 45 days ago. The original docstring said
    "45 dates", but ``range(0, 46)`` produces 46 entries.
    """
    return {offset: days_ago(offset) for offset in range(0, 46)}
@pytest.fixture
def weeks_of_tickets(datetime, tzutc, AnalyzedAgileTicket):
    """Build AnalyzedAgileTickets from tests/data/weeks_of_tickets.csv.

    Each CSV row becomes one ticket keyed FOO-1, FOO-2, ... with its
    committed/started/ended dates parsed from the row.
    """
    from dateutil.parser import parse
    parsed = []
    # Fallback date for any missing components in the parsed strings.
    default = datetime(1979, 8, 15, 0, 0, 0, tzinfo=tzutc)

    current_path = path.dirname(path.abspath(__file__))
    csv_file = path.join(current_path, 'data', 'weeks_of_tickets.csv')

    # Use a context manager so the CSV handle is closed deterministically
    # (the original left it open until GC), and enumerate() instead of a
    # hand-maintained counter.
    with open(csv_file, 'r') as handle:
        for count, row in enumerate(csv.DictReader(handle), start=1):
            ticket = AnalyzedAgileTicket(
                key="FOO-{}".format(count),
                committed=dict(state="committed", entered_at=parse(row['committed'], default=default)),
                started=dict(state="started", entered_at=parse(row['started'], default=default)),
                ended=dict(state="ended", entered_at=parse(row['ended'], default=default))
            )
            parsed.append(ticket)

    return parsed
6/5/16,5/30/16,5/18/16,6/5/16,18,6,0 10 | 6/12/16,6/12/16,6/13/16,6/13/16,0,1,1 11 | 6/12/16,5/25/16,6/4/16,6/14/16,10,20,2 12 | 6/12/16,5/24/16,6/6/16,6/15/16,9,22,3 13 | 6/12/16,6/3/16,6/3/16,6/16/16,13,13,4 14 | 6/12/16,6/16/16,6/9/16,6/17/16,8,1,5 15 | 6/19/16,6/6/16,6/6/16,6/25/16,19,19,6 16 | 6/19/16,6/2/16,6/18/16,6/19/16,1,17,0 17 | 6/19/16,6/17/16,6/10/16,6/20/16,10,3,1 18 | 6/19/16,6/16/16,6/7/16,6/21/16,14,5,2 19 | 6/19/16,5/31/16,6/4/16,6/22/16,18,22,3 20 | 6/19/16,6/20/16,6/19/16,6/23/16,4,3,4 21 | 6/19/16,6/16/16,6/7/16,6/24/16,17,8,5 22 | 6/19/16,6/16/16,6/18/16,6/25/16,7,9,6 23 | 6/19/16,6/14/16,6/13/16,6/19/16,6,5,0 24 | 6/19/16,6/13/16,6/16/16,6/20/16,4,7,1 -------------------------------------------------------------------------------- /tests/test_agileticket.py: -------------------------------------------------------------------------------- 1 | """Test the models.""" 2 | 3 | import pytest 4 | 5 | 6 | @pytest.fixture 7 | def klass(): 8 | """Return CUT.""" 9 | from agile_analytics.models import AgileTicket 10 | return AgileTicket 11 | 12 | 13 | @pytest.fixture 14 | def make_one(klass): 15 | """Function to make an AgileTicket.""" 16 | def _make_one(*args, **kwargs): 17 | kwargs['key'] = kwargs.get('key', "TEST-1") 18 | return klass(*args, **kwargs) 19 | return _make_one 20 | 21 | 22 | def test_construction(make_one): 23 | """Ensure make_one works.""" 24 | t = make_one() 25 | assert t 26 | 27 | 28 | def test_flow_log_append_happy(make_one, datetime): 29 | """Ensure FlowLogs require entered_at and state.""" 30 | t = make_one() 31 | t.flow_log.append( 32 | dict( 33 | entered_at=datetime.now(), 34 | state="Missouri" 35 | ) 36 | ) 37 | assert t.flow_log[0] 38 | 39 | 40 | def test_flow_log_append_unicode(make_one, datetime): 41 | """Ensure flow log strings are unicode.""" 42 | t = make_one() 43 | t.flow_log.append( 44 | dict( 45 | entered_at=datetime.now(), 46 | state="LA", 47 | ) 48 | ) 49 | assert t.flow_log[0][u'state'] == str("LA") 50 | 51 | 52 | 
def test_flow_log_append_datetime(make_one, datetime): 53 | """Ensure flow log datetimes are honored.""" 54 | test_dt = datetime.now() 55 | t = make_one() 56 | t.flow_log.append( 57 | dict( 58 | entered_at=test_dt, 59 | state="OK" 60 | ) 61 | ) 62 | assert t.flow_log[0][u'entered_at'] == test_dt 63 | 64 | 65 | def test_flow_log_append_unhappy(make_one, datetime): 66 | """Ensure we only accept datetime-ish objects.""" 67 | t = make_one() 68 | with pytest.raises(TypeError): 69 | t.flow_log.append( 70 | dict( 71 | state="SD", 72 | entered_at=str(datetime.now()), 73 | ) 74 | ) 75 | 76 | 77 | def test_flow_log_append_unhappy_no_dict(make_one): 78 | """Ensure we only accept dict-ish objects.""" 79 | t = make_one() 80 | with pytest.raises(TypeError): 81 | t.flow_log.append(['VT', '278461911']) 82 | 83 | 84 | def test_flow_log_ordered_ascending(make_one, days_ago): 85 | """Ensure flow log items are oldest > newest.""" 86 | t = make_one() 87 | items = [ 88 | dict(state="SC", entered_at=days_ago(2)), 89 | dict(state="KNAP", entered_at=days_ago(8)), 90 | dict(state="Junohaki", entered_at=days_ago(9)), 91 | dict(state="MA", entered_at=days_ago(10)) 92 | ] 93 | for i in items: 94 | t.flow_log.append(i) 95 | 96 | actual = [fl['state'] for fl in t.flow_log] 97 | expected = [ 98 | "MA", 99 | "Junohaki", 100 | "KNAP", 101 | "SC" 102 | ] 103 | assert actual == expected 104 | -------------------------------------------------------------------------------- /tests/test_analyzed_agile_ticket.py: -------------------------------------------------------------------------------- 1 | """Tests for AnalyzedAgileTickets.""" 2 | 3 | import pytest 4 | 5 | 6 | @pytest.fixture 7 | def klass(): 8 | from agile_analytics.models import AnalyzedAgileTicket 9 | return AnalyzedAgileTicket 10 | 11 | 12 | def test_klass(klass, days_ago): 13 | """Verify the CUT.""" 14 | assert klass 15 | 16 | five_ago = days_ago(5) 17 | four_ago = days_ago(4) 18 | today = days_ago(0) 19 | 20 | k = klass( 21 | key="TEST-1", 
22 | committed=dict(entered_at=five_ago, state="To Do"), 23 | started=dict(entered_at=four_ago, state="In Progress"), 24 | ended=dict(entered_at=today, state="Done"), 25 | ) 26 | 27 | assert k.committed['entered_at'] == five_ago 28 | assert k.started['entered_at'] == four_ago 29 | assert k.ended['entered_at'] == today 30 | 31 | assert "{} -- Ended: {}".format(k.key, k.ended['entered_at']) == str(k) 32 | 33 | 34 | def test_lead_time(klass, days_ago): 35 | """Verify lead_time calculations.""" 36 | five_ago = days_ago(5) 37 | four_ago = days_ago(4) 38 | today = days_ago(0) 39 | 40 | k = klass( 41 | key="TEST-1", 42 | committed=dict(entered_at=five_ago, state="To Do"), 43 | started=dict(entered_at=four_ago, state="In Progress"), 44 | ended=dict(entered_at=today, state="Done"), 45 | ) 46 | 47 | assert k.lead_time == 5 48 | 49 | 50 | def test_cycle_time(klass, days_ago): 51 | """Verify lead_time calculations.""" 52 | five_ago = days_ago(5) 53 | four_ago = days_ago(4) 54 | today = days_ago(0) 55 | 56 | k = klass( 57 | key="TEST-1", 58 | committed=dict(entered_at=five_ago, state="To Do"), 59 | started=dict(entered_at=four_ago, state="In Progress"), 60 | ended=dict(entered_at=today, state="Done"), 61 | ) 62 | 63 | assert k.cycle_time == 4 64 | -------------------------------------------------------------------------------- /tests/test_common_fixtures.py: -------------------------------------------------------------------------------- 1 | """Test common possibly complex fixtures.""" 2 | 3 | 4 | def test_weeks_of_tickets(weeks_of_tickets, datetime, tzutc): 5 | """Verify this rather complex fixture.""" 6 | assert len(weeks_of_tickets) == 23 7 | assert weeks_of_tickets[0].ended['entered_at'] == datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc) 8 | assert weeks_of_tickets[5].lead_time == 19 9 | -------------------------------------------------------------------------------- /tests/test_created_reporter.py: 
-------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.fixture 5 | def klass(): 6 | from agile_analytics import CreatedReporter 7 | return CreatedReporter 8 | 9 | 10 | def test_klass(klass): 11 | assert klass 12 | 13 | 14 | def test_filter(klass, days_agos, AnalyzedAgileTicket): 15 | """filter_issues ignores issues completed before the specified range.""" 16 | issue_list_kwargs = [] 17 | for i in range(1, 3): 18 | kwargs = dict( 19 | key="TEST-{}".format(i), 20 | committed=dict(state="Committed", entered_at=days_agos[2]), 21 | started=dict(state=None, entered_at=None), 22 | ended=dict(state=None, entered_at=None) 23 | ) 24 | issue_list_kwargs.append(kwargs) 25 | 26 | issue_list = [AnalyzedAgileTicket(**kwargs) for kwargs in issue_list_kwargs] 27 | issue_out_of_range = AnalyzedAgileTicket( 28 | key="TEST-OOR", 29 | committed=dict(state="Committed", entered_at=days_agos[42]), 30 | started=dict(state="Started", entered_at=days_agos[44]), 31 | ended=dict(state="Ended", entered_at=days_agos[45]), 32 | ) 33 | issue_list.append(issue_out_of_range) 34 | 35 | r = klass( 36 | title="Issues Created in last 30 day", 37 | start_date=days_agos[30], 38 | end_date=days_agos[0] 39 | ) 40 | filtered_issues = r.filter_issues(issue_list) 41 | 42 | assert r.start_date > issue_out_of_range.ended['entered_at'] 43 | assert len(filtered_issues) == 2 44 | 45 | 46 | def test_report(klass, weeks_of_tickets, date, datetime, tzutc): 47 | """Report should return counts per week per ticket type""" 48 | 49 | r = klass( 50 | title="Lead Time Percentile Report", 51 | start_date=datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc), # Sunday 52 | end_date=datetime(2016, 7, 2, 11, 59, 59, tzinfo=tzutc), # Saturday 53 | ) 54 | 55 | expected = [ 56 | ["Week", "Ticket"], 57 | [date(2016, 5, 15), 0], 58 | [date(2016, 5, 22), 3], 59 | [date(2016, 5, 29), 4], 60 | [date(2016, 6, 5), 1], 61 | [date(2016, 6, 12), 8], 62 | [date(2016, 6, 19), 1], 63 | 
[date(2016, 6, 26), 0], 64 | ] 65 | 66 | assert r.report_on(weeks_of_tickets).table == expected 67 | -------------------------------------------------------------------------------- /tests/test_csvwriter.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.fixture 5 | def klass(): 6 | from agile_analytics import CSVWriter 7 | return CSVWriter 8 | 9 | 10 | @pytest.fixture 11 | def report(date): 12 | from collections import namedtuple 13 | Report = namedtuple("Report", ["summary", "table"]) 14 | table = [ 15 | ["Week", "Completed"], 16 | [date(2016, 5, 15), 4], 17 | [date(2016, 5, 22), 0], 18 | [date(2016, 5, 29), 0], 19 | [date(2016, 6, 5), 4], 20 | [date(2016, 6, 12), 0], 21 | [date(2016, 6, 19), 0], 22 | ] 23 | r = Report({}, table) 24 | return r 25 | 26 | 27 | def test_init(klass): 28 | w = klass() 29 | assert w 30 | 31 | 32 | def test_write(klass, report, StringIO): 33 | w = klass() 34 | csvstring = StringIO.StringIO() 35 | 36 | expected = """Week,Completed 37 | 2016-05-15,4 38 | 2016-05-22,0 39 | 2016-05-29,0 40 | 2016-06-05,4 41 | 2016-06-12,0 42 | 2016-06-19,0""" 43 | 44 | w.write(report, csvstring) 45 | expected = expected.split() 46 | actual = csvstring.getvalue().split() 47 | assert expected == actual 48 | -------------------------------------------------------------------------------- /tests/test_cycle_pctile_reporter.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.fixture 5 | def klass(): 6 | """Return the CUT.""" 7 | from agile_analytics import CycleTimePercentileReporter 8 | return CycleTimePercentileReporter 9 | 10 | 11 | def test_klass(klass): 12 | """Verify our fixture.""" 13 | assert klass 14 | 15 | 16 | def test_init(klass, datetime): 17 | """Make sure it inits the way we want.""" 18 | k = klass( 19 | title="Cycle Time Percentile Report", 20 | start_date=datetime(2016, 5, 21, 0, 0, 0), 21 | 
end_date=datetime(2016, 6, 21, 11, 59, 59), 22 | num_weeks=6, 23 | ) 24 | assert k 25 | assert k.title == "Cycle Time Percentile Report" 26 | assert k.num_weeks == 6 27 | 28 | k = klass( 29 | title="Cycle Time Percentile Report 2", 30 | ) 31 | assert k 32 | assert k.title == "Cycle Time Percentile Report 2" 33 | assert k.num_weeks == 4 34 | 35 | 36 | def test_date_range_reconcile(klass, datetime, tzutc): 37 | """Ensure the right dates are set when passed two dates.""" 38 | r = klass(title="Foo") 39 | r.start_date = datetime(2016, 5, 21, 0, 0, 0) 40 | r.end_date = datetime(2016, 6, 21, 11, 59, 59) 41 | 42 | assert r.start_date == datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc) # Sunday 43 | assert r.end_date == datetime(2016, 6, 25, 11, 59, 59, tzinfo=tzutc) # Saturday 44 | 45 | 46 | def test_filter(klass, days_agos, AnalyzedAgileTicket): 47 | """filter_issues ignores issues completed before the specified range.""" 48 | issue_list_kwargs = [] 49 | for i in range(1, 3): # 2 issues with 2 day lead 50 | kwargs = dict( 51 | key="TEST-{}".format(i), 52 | committed=dict(state="Committed", entered_at=days_agos[2]), 53 | started=dict(state="Started", entered_at=days_agos[2]), 54 | ended=dict(state="Ended", entered_at=days_agos[0]), 55 | ) 56 | issue_list_kwargs.append(kwargs) 57 | 58 | issue_list = [AnalyzedAgileTicket(**kwargs) for kwargs in issue_list_kwargs] 59 | issue_out_of_range = AnalyzedAgileTicket( 60 | key="TEST-OOR", 61 | committed=dict(state="Committed", entered_at=days_agos[42]), 62 | started=dict(state="Started", entered_at=days_agos[44]), 63 | ended=dict(state="Ended", entered_at=days_agos[45]), 64 | ) 65 | issue_list.append(issue_out_of_range) 66 | 67 | r = klass( 68 | title="Cycle Time Distribution Past 30 days", 69 | start_date=days_agos[30], 70 | end_date=days_agos[0] 71 | ) 72 | filtered_issues = r.filter_issues(issue_list) 73 | 74 | assert r.start_date > issue_out_of_range.ended['entered_at'] 75 | assert len(filtered_issues) == 2 76 | 77 | 78 | def 
test_report_table(klass, weeks_of_tickets, date, datetime, tzutc): 79 | """Ensure the report table returns a row for every week""" 80 | 81 | expected = [ 82 | ["Week", "50th", "75th", "95th"], 83 | [date(2016, 5, 15), 4, 7, 12], 84 | [date(2016, 5, 22), 9, 15, 16], 85 | [date(2016, 5, 29), 13, 15, 16], 86 | [date(2016, 6, 5), 14, 16, 17], 87 | [date(2016, 6, 12), 13, 16, 17], 88 | [date(2016, 6, 19), 10, 14, 18], 89 | [date(2016, 6, 26), 10, 15, 18], 90 | ] 91 | 92 | r = klass( 93 | title="Cycle Time Percentile Report", 94 | start_date=datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc), # Sunday 95 | end_date=datetime(2016, 7, 2, 11, 59, 59, tzinfo=tzutc), # Saturday 96 | num_weeks=7, 97 | ) 98 | report = r.report_on(weeks_of_tickets) 99 | 100 | assert len(report.table) == 8 101 | assert report.table == expected 102 | 103 | 104 | def test_report_no_header(klass, weeks_of_tickets, date, datetime, tzutc): 105 | """Ensure the report table """ 106 | 107 | expected = [ 108 | [date(2016, 5, 15), 4, 7, 12], 109 | [date(2016, 5, 22), 9, 15, 16], 110 | [date(2016, 5, 29), 13, 15, 16], 111 | [date(2016, 6, 5), 14, 16, 17], 112 | [date(2016, 6, 12), 13, 16, 17], 113 | [date(2016, 6, 19), 10, 14, 18], 114 | [date(2016, 6, 26), 10, 15, 18], 115 | ] 116 | 117 | r = klass( 118 | title="Cycle Time Percentile Report", 119 | start_date=datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc), # Sunday 120 | end_date=datetime(2016, 7, 2, 11, 59, 59, tzinfo=tzutc), # Saturday 121 | num_weeks=7, 122 | ) 123 | report = r.report_on(weeks_of_tickets, header=False) 124 | 125 | assert len(report.table) == 7 126 | assert report.table == expected 127 | 128 | 129 | def test_report_table_limit(klass, weeks_of_tickets, date, datetime, tzutc): 130 | """Ensure the report table returns no more than the num_weeks provided""" 131 | 132 | expected = [ 133 | ["Week", "50th", "75th", "95th"], 134 | [date(2016, 6, 5), 14, 16, 17], 135 | [date(2016, 6, 12), 13, 16, 17], 136 | [date(2016, 6, 19), 10, 14, 18], 137 | 
[date(2016, 6, 26), 10, 15, 18], 138 | ] 139 | 140 | r = klass( 141 | title="Cycle Time Percentile Report", 142 | start_date=datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc), # Sunday 143 | end_date=datetime(2016, 7, 2, 11, 59, 59, tzinfo=tzutc), # Saturday 144 | num_weeks=4, 145 | ) 146 | report = r.report_on(weeks_of_tickets) 147 | 148 | assert len(report.table) == 5 149 | assert report.table == expected 150 | 151 | 152 | def test_report_table_no_tickets(klass, date, datetime, tzutc): 153 | """Ensure the report table returns a row for every week""" 154 | 155 | expected = [ 156 | ["Week", "50th", "75th", "95th"], 157 | [date(2016, 5, 15), 0, 0, 0], 158 | [date(2016, 5, 22), 0, 0, 0], 159 | [date(2016, 5, 29), 0, 0, 0], 160 | [date(2016, 6, 5), 0, 0, 0], 161 | [date(2016, 6, 12), 0, 0, 0], 162 | [date(2016, 6, 19), 0, 0, 0], 163 | [date(2016, 6, 26), 0, 0, 0], 164 | ] 165 | 166 | r = klass( 167 | title="Cycle Time Percentile Report", 168 | start_date=datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc), # Sunday 169 | end_date=datetime(2016, 7, 2, 11, 59, 59, tzinfo=tzutc), # Saturday 170 | num_weeks=7, 171 | ) 172 | report = r.report_on([]) 173 | 174 | assert len(report.table) == 8 175 | assert report.table == expected 176 | -------------------------------------------------------------------------------- /tests/test_date_analyzer.py: -------------------------------------------------------------------------------- 1 | """Test the bundled Cycle Date analyzer.""" 2 | 3 | 4 | import pytest 5 | 6 | 7 | @pytest.fixture 8 | def klass(): 9 | """Return the Class Under Test.""" 10 | from agile_analytics.analyzers import DateAnalyzer 11 | return DateAnalyzer 12 | 13 | 14 | @pytest.fixture 15 | def analyzer(klass): 16 | """Return an instance of the CUT with some defaults.""" 17 | a = klass( 18 | start_states=[u"In Progress", ], 19 | commit_states=[u"Selected", ], 20 | end_states=[u"Done", ] 21 | ) 22 | return a 23 | 24 | 25 | def test_config(analyzer): 26 | """Ensure the analyzer inits 
properly.""" 27 | assert u"In Progress" in analyzer.start_states 28 | 29 | 30 | def test_missing_config(klass): 31 | """Get an error.""" 32 | with pytest.raises(TypeError): 33 | klass() 34 | 35 | 36 | def test_analyze_no_data(analyzer): 37 | """Get an error.""" 38 | with pytest.raises(TypeError): 39 | analyzer.analyze() 40 | 41 | 42 | def test_analyze_entered_at(analyzer, Ticket, days_ago): 43 | """Analyzed tickets should have entered_at dates for all 3 states.""" 44 | t = Ticket( 45 | key="TEST-1", 46 | created_at=days_ago(10), 47 | updated_at=days_ago(0), 48 | flow_logs=[ 49 | dict(entered_at=days_ago(7), state="Selected"), 50 | dict(entered_at=days_ago(5), state="In Progress"), 51 | dict(entered_at=days_ago(2), state="Done") 52 | ] 53 | ) 54 | results, ignored_issues = analyzer.analyze([t, ]) 55 | assert results[0].ended['entered_at'] == days_ago(2) 56 | assert results[0].started['entered_at'] == days_ago(5) 57 | assert results[0].committed['entered_at'] == days_ago(7) 58 | assert len(ignored_issues) == 0 59 | 60 | 61 | def test_analyze_missing_committed_state(analyzer, Ticket, days_ago): 62 | """A ticket which is missing committed, should be ignored.""" 63 | t = Ticket( 64 | key="TEST-1", 65 | created_at=days_ago(10), 66 | updated_at=days_ago(0), 67 | flow_logs=[ 68 | dict(entered_at=days_ago(7), state="Backlog"), # Doesn't match config 69 | dict(entered_at=days_ago(5), state="In Progress"), 70 | dict(entered_at=days_ago(2), state="Done") 71 | ] 72 | ) 73 | results, ignored_issues = analyzer.analyze([t, ]) 74 | assert len(ignored_issues) == 1 75 | assert ignored_issues[0]['ticket'].key == "TEST-1" 76 | assert ignored_issues[0]['phase'] == "committed" 77 | 78 | 79 | def test_pick_oldest_date(analyzer, Ticket, days_ago): 80 | """Pick the oldest entered_at from the ticket's history.""" 81 | test_flow_logs = [ 82 | dict(entered_at=days_ago(10), state="Selected"), 83 | dict(entered_at=days_ago(9), state="In Progress"), 84 | dict(entered_at=days_ago(8), 
state="Selected"), 85 | dict(entered_at=days_ago(5), state="In Progress"), 86 | dict(entered_at=days_ago(2), state="Done"), 87 | ] 88 | t = Ticket( 89 | key="TEST-1", 90 | created_at=days_ago(15), 91 | updated_at=days_ago(0), 92 | flow_logs=test_flow_logs 93 | ) 94 | results, ignored_issues = analyzer.analyze([t, ]) 95 | assert results[0].committed['entered_at'] == days_ago(10) 96 | assert results[0].started['entered_at'] == days_ago(9) 97 | 98 | 99 | def test_pick_oldest_done(analyzer, Ticket, days_ago): 100 | """Pick the oldest entered_at from the ticket's history for done.""" 101 | test_flow_logs = [ 102 | dict(entered_at=days_ago(10), state="Selected"), 103 | dict(entered_at=days_ago(9), state="In Progress"), 104 | dict(entered_at=days_ago(8), state="Selected"), 105 | dict(entered_at=days_ago(5), state="In Progress"), 106 | dict(entered_at=days_ago(3), state="Done"), 107 | dict(entered_at=days_ago(2), state="In Progress"), 108 | dict(entered_at=days_ago(1), state="Done"), 109 | ] 110 | t = Ticket( 111 | key="TEST-1", 112 | created_at=days_ago(15), 113 | updated_at=days_ago(0), 114 | flow_logs=test_flow_logs 115 | ) 116 | results, ignored_issues = analyzer.analyze([t, ]) 117 | assert results[0].ended['entered_at'] == days_ago(3) 118 | 119 | 120 | def test_pick_newest_date(analyzer, Ticket, days_ago): 121 | """Pick the oldest entered_at from the ticket's history.""" 122 | test_flow_logs = [ 123 | dict(entered_at=days_ago(10), state="Selected"), 124 | dict(entered_at=days_ago(9), state="In Progress"), 125 | dict(entered_at=days_ago(8), state="Selected"), 126 | dict(entered_at=days_ago(5), state="In Progress"), 127 | dict(entered_at=days_ago(2), state="Done"), 128 | ] 129 | t = Ticket( 130 | key="TEST-1", 131 | created_at=days_ago(15), 132 | updated_at=days_ago(0), 133 | flow_logs=test_flow_logs 134 | ) 135 | results, ignored_issues = analyzer.analyze([t, ], strategy=analyzer.NEWEST_DATE) 136 | assert results[0].committed['entered_at'] == days_ago(8) 137 | assert 
results[0].started['entered_at'] == days_ago(5) 138 | 139 | 140 | def test_pick_first_of_multiple_states(klass, Ticket, days_ago): 141 | """Pick the first state that matches, so folks can go specific > generic.""" 142 | analyzer = klass( 143 | commit_states=[u"Selected", u"To Do", u"Created"], 144 | start_states=[u"Dev In Progress", u"In Progress", ], 145 | end_states=[u"Done", u"Accepted"], 146 | ) 147 | 148 | test_flow_logs = [ 149 | dict(entered_at=days_ago(30), state="Created"), 150 | dict(entered_at=days_ago(20), state="Selected"), 151 | dict(entered_at=days_ago(11), state="In Progress"), 152 | dict(entered_at=days_ago(10), state="Dev In Progress"), 153 | dict(entered_at=days_ago(5), state="Accepted"), 154 | dict(entered_at=days_ago(3), state="Done"), 155 | ] 156 | t = Ticket( 157 | key="TEST-1", 158 | created_at=days_ago(15), 159 | updated_at=days_ago(0), 160 | flow_logs=test_flow_logs 161 | ) 162 | results, ignored_issues = analyzer.analyze([t, ]) 163 | at = results[0] 164 | 165 | assert at.committed['entered_at'] == days_ago(20) 166 | assert at.started['entered_at'] == days_ago(10) 167 | assert at.ended['entered_at'] == days_ago(3) 168 | 169 | 170 | def test_capturing_ticket_type(klass, Ticket, days_ago): 171 | """The Ticket.type should be passed onto the analyzed ticket.""" 172 | analyzer = klass( 173 | commit_states=[u"Selected", u"To Do", u"Created"], 174 | start_states=[u"Dev In Progress", u"In Progress", ], 175 | end_states=[u"Done", u"Accepted"], 176 | ) 177 | test_flow_logs = [ 178 | dict(entered_at=days_ago(30), state="Created"), 179 | dict(entered_at=days_ago(20), state="Selected"), 180 | dict(entered_at=days_ago(11), state="In Progress"), 181 | dict(entered_at=days_ago(10), state="Dev In Progress"), 182 | dict(entered_at=days_ago(5), state="Accepted"), 183 | dict(entered_at=days_ago(3), state="Done"), 184 | ] 185 | t = Ticket( 186 | key="TEST-1", 187 | created_at=days_ago(15), 188 | updated_at=days_ago(0), 189 | flow_logs=test_flow_logs, 190 | 
ttype="Story" 191 | ) 192 | results, ignored_issues = analyzer.analyze([t, ]) 193 | at = results[0] 194 | assert at.type == "Story" 195 | 196 | 197 | def test_capturing_title(klass, Ticket, days_ago): 198 | """The Ticket.type should be passed onto the analyzed ticket.""" 199 | analyzer = klass( 200 | commit_states=[u"Selected", u"To Do", u"Created"], 201 | start_states=[u"Dev In Progress", u"In Progress", ], 202 | end_states=[u"Done", u"Accepted"], 203 | ) 204 | test_flow_logs = [ 205 | dict(entered_at=days_ago(30), state="Created"), 206 | dict(entered_at=days_ago(20), state="Selected"), 207 | dict(entered_at=days_ago(11), state="In Progress"), 208 | dict(entered_at=days_ago(10), state="Dev In Progress"), 209 | dict(entered_at=days_ago(5), state="Accepted"), 210 | dict(entered_at=days_ago(3), state="Done"), 211 | ] 212 | t = Ticket( 213 | key="TEST-1", 214 | title="This is my test title", 215 | created_at=days_ago(15), 216 | updated_at=days_ago(0), 217 | flow_logs=test_flow_logs, 218 | ttype="Story" 219 | ) 220 | results, ignored_issues = analyzer.analyze([t, ]) 221 | at = results[0] 222 | assert at.title == "This is my test title" 223 | -------------------------------------------------------------------------------- /tests/test_fetchers.py: -------------------------------------------------------------------------------- 1 | """Tests the bundled fetchers and converters.""" 2 | 3 | from datetime import datetime 4 | 5 | import pytest 6 | 7 | from pretend import stub 8 | 9 | 10 | @pytest.fixture 11 | def tz(): 12 | """Return a timezone.""" 13 | from dateutil.tz import tzutc 14 | return tzutc() 15 | 16 | 17 | @pytest.fixture 18 | def klass(): 19 | """Return the Class Under Test.""" 20 | from agile_analytics.fetchers import JIRAFetcher 21 | return JIRAFetcher 22 | 23 | 24 | @pytest.fixture 25 | def converter(): 26 | """Return the Function Under Test.""" 27 | from agile_analytics.fetchers import convert_jira_issue 28 | return convert_jira_issue 29 | 30 | 31 | 
@pytest.fixture 32 | def jira_issue(): 33 | """Mock jira issue. 34 | 35 | Alternate way to get a JIRA issue mock 36 | ======================================== 37 | from jira import JIRA 38 | from jira.resources import Issue 39 | 40 | # Grab some fixtures 41 | j = JIRA(server=SERVER, basic_auth=(USERNAME, PASSWORD), options=dict(verify=False)) 42 | i = j.issue("JIRATICKET-926", expand="changelog") 43 | json.dump(i, file("tests/json/jira_ticket.json", "w"), indent=True) # Readable is better 44 | 45 | # Load said fixtures 46 | js = json.load(file("tests/json/jira_ticket.json", 'r')) 47 | i = Issue(options=None, session=None, raw=js) 48 | return i 49 | """ 50 | fields = stub( 51 | summary="This is my summary", 52 | issuetype=stub( 53 | name="Story" 54 | ), 55 | created="2016-03-30T17:27:09.000+0000", 56 | updated="2016-05-18T16:17:21.000+0000", 57 | ) 58 | 59 | histories = [ 60 | # history 61 | stub( 62 | created="2016-04-27T14:21:23.000+0000", 63 | items=[ 64 | stub(field="status", fromString="Open", toString="In Progress"), 65 | ] 66 | ), 67 | stub( 68 | created="2016-04-27T14:21:24.000+0000", 69 | items=[ 70 | stub(field="status", fromString="In Progress", toString="BLOCKED"), 71 | ] 72 | ), 73 | stub( 74 | created="2016-05-02T14:48:32.000+0000", 75 | items=[ 76 | stub(field="status", fromString="BLOCKED", toString="In Progress"), 77 | ] 78 | ), 79 | stub( 80 | created="2016-05-03T18:01:10.000+0000", 81 | items=[ 82 | stub(field="status", fromString="In Progress", toString="In QA"), 83 | ] 84 | ), 85 | stub( 86 | created="2016-05-05T18:42:25.000+0000", 87 | items=[ 88 | stub(field="status", fromString="In QA", toString="Done"), 89 | ] 90 | ), 91 | stub( 92 | created="2016-05-18T16:17:21.000+0000", 93 | items=[ 94 | stub(field="status", fromString="Done", toString="Accepted"), 95 | ] 96 | ), 97 | ] 98 | 99 | i = stub( 100 | key="FOO-1", 101 | fields=fields, 102 | changelog=stub(histories=histories) 103 | ) 104 | return i 105 | 106 | 107 | @pytest.fixture() 108 | def 
JIRA(jira_issue): 109 | """Fake JIRA instance.""" 110 | class MockJIRA(object): 111 | def __init__(self, *args, **kwargs): 112 | pass 113 | 114 | def search_issues(self, *args, **kwargs): 115 | return [jira_issue, ] 116 | return MockJIRA 117 | 118 | 119 | def test_required_config(klass): 120 | """Test the required class instantiation values.""" 121 | f = klass( 122 | url="https://jira.example.local", 123 | auth=dict(username="foo", password="bar"), 124 | filter_id=9999 125 | ) 126 | assert f 127 | 128 | 129 | @pytest.mark.parametrize("args,exc", [ 130 | ((), TypeError), 131 | ((dict()), TypeError), 132 | ((dict(), None), TypeError), 133 | ]) 134 | def test_missing_config(klass, args, exc): 135 | """Test what happens when the config is missing.""" 136 | with pytest.raises(exc): 137 | klass(*args) 138 | 139 | 140 | def test_weird_auth(klass): 141 | """Ensure we get a TypeError if the auth contains neither key.""" 142 | auth = dict(random="bar", token_up="buuuuddddy") 143 | with pytest.raises(TypeError): 144 | klass( 145 | url="https://jira.example.local", 146 | auth=auth, 147 | filter_id=9999 148 | ) 149 | 150 | 151 | def test_basic_auth_kwargs(klass): 152 | """Ensure basic_auth kwargs are handeled.""" 153 | basic_auth = dict(username="foo", password="bar") 154 | f = klass( 155 | url="https://jira.example.local", 156 | auth=basic_auth, 157 | filter_id=9999 158 | ) 159 | expected = dict(basic_auth=("foo", "bar")) 160 | assert f.auth_kwargs == expected 161 | 162 | 163 | def test_oauth_kwargs(klass): 164 | """Ensure oauth kwargs are handled.""" 165 | oauth = dict( 166 | access_token="foo", 167 | access_token_secret="bar", 168 | consumer_key="baz", 169 | key_cert="---- BAT CERT ---" 170 | ) 171 | f = klass( 172 | url="https://jira.example.local", 173 | auth=oauth, 174 | filter_id=9999 175 | ) 176 | expected = dict(oauth=oauth) 177 | assert f.auth_kwargs == expected 178 | 179 | 180 | def test_extra_kwargs(klass): 181 | """Ensure extra_kwargs work.""" 182 | basic_auth = 
dict(username="foo", password="bar") 183 | extra_kwargs = dict(options=dict(verify=False)) 184 | f = klass( 185 | url="https://jira.example.local", 186 | auth=basic_auth, 187 | filter_id=9999, 188 | jira_kwargs=extra_kwargs 189 | ) 190 | for key, value in extra_kwargs.items(): 191 | assert f.jira_kwargs[key] == value 192 | 193 | 194 | def test_fetch(klass, JIRA): 195 | """Ensure the JIRAFetcher fetch method returns issues.""" 196 | basic_auth = dict(username="foo", password="bar") 197 | f = klass( 198 | url="https://jira.example.local", 199 | auth=basic_auth, 200 | filter_id=9999, 201 | ) 202 | tickets = f.fetch(jira_klass=JIRA) 203 | assert hasattr(tickets[0], 'flow_log') 204 | 205 | 206 | def test_converter_key(jira_issue, converter): 207 | """Ensure a converted issue has a key.""" 208 | t = converter(jira_issue) 209 | assert t.key == u"FOO-1" 210 | 211 | 212 | def test_converter_summary(jira_issue, converter): 213 | """Ensure a convereted issue has a title.""" 214 | t = converter(jira_issue) 215 | assert t.title == u"This is my summary" 216 | 217 | 218 | def test_converter_summary_empty(jira_issue, converter): 219 | """Ensure a convereted issue has a title.""" 220 | jira_issue.fields.summary = '' 221 | t = converter(jira_issue) 222 | assert t.title == u"" 223 | 224 | 225 | def test_converter_created_at(jira_issue, converter, tz): 226 | """Ensure created_at is populated.""" 227 | t = converter(jira_issue) 228 | assert t.created_at == datetime(2016, 3, 30, 17, 27, 9, tzinfo=tz) 229 | 230 | 231 | def test_converter_updated_at(jira_issue, converter, tz): 232 | """Ensure updated_at is populated.""" 233 | t = converter(jira_issue) 234 | assert t.updated_at == datetime(2016, 5, 18, 16, 17, 21, tzinfo=tz) 235 | 236 | 237 | def test_changelog_conversion(jira_issue, converter, tz): 238 | """Ensure the changelog is converted as expected.""" 239 | expected = [ 240 | dict( 241 | entered_at=datetime(2016, 3, 30, 17, 27, 9, tzinfo=tz), 242 | state=u"Created" 243 | ), 244 | 
dict( 245 | entered_at=datetime(2016, 4, 27, 14, 21, 23, tzinfo=tz), 246 | state=u"In Progress" 247 | ), 248 | dict( 249 | entered_at=datetime(2016, 4, 27, 14, 21, 24, tzinfo=tz), 250 | state=u"BLOCKED", 251 | ), 252 | dict( 253 | entered_at=datetime(2016, 5, 2, 14, 48, 32, tzinfo=tz), 254 | state=u"In Progress", 255 | ), 256 | dict( 257 | entered_at=datetime(2016, 5, 3, 18, 1, 10, tzinfo=tz), 258 | state=u"In QA", 259 | ), 260 | dict( 261 | entered_at=datetime(2016, 5, 5, 18, 42, 25, tzinfo=tz), 262 | state=u"Done", 263 | ), 264 | dict( 265 | entered_at=datetime(2016, 5, 18, 16, 17, 21, tzinfo=tz), 266 | state=u"Accepted", 267 | ) 268 | ] 269 | 270 | i = jira_issue 271 | t = converter(i) 272 | 273 | assert t.flow_log == expected 274 | 275 | 276 | def test_ticket_type_capture(jira_issue, converter, tz): 277 | """The type of ticket should be captured.""" 278 | t = converter(jira_issue) 279 | assert t.type == "Story" 280 | 281 | 282 | def test_ticket_type_default(jira_issue, converter, tz): 283 | """The type of ticket should be Ticket if issuetype can't be found.""" 284 | del jira_issue.fields.issuetype 285 | t = converter(jira_issue) 286 | assert t.type == "Ticket" 287 | -------------------------------------------------------------------------------- /tests/test_gsheetwriter.py: -------------------------------------------------------------------------------- 1 | """Test the GSheetWriter.""" 2 | 3 | import pytest 4 | 5 | 6 | @pytest.fixture 7 | def gspread(): 8 | import gspread 9 | return gspread 10 | 11 | 12 | @pytest.fixture 13 | def orig_class(): 14 | """Return the CUT with network access enabled""" 15 | from agile_analytics import GSheetWriter 16 | return GSheetWriter 17 | 18 | 19 | def test_orig_class(orig_class): 20 | """Ensure the fixture works.""" 21 | assert orig_class.__name__ == "GSheetWriter" 22 | 23 | 24 | @pytest.fixture 25 | def klass(orig_class, mocker): 26 | """Return the CUT with some networky bits mocked out.""" 27 | credential_mock_attrs = { 28 | 
'from_json_keyfile_name.return_value': "FakeTestCredentials" 29 | } 30 | driver_mock_attrs = { 31 | } 32 | orig_class.CREDENTIAL_CLASS = mocker.Mock(**credential_mock_attrs) 33 | orig_class.DRIVER_MODULE = mocker.Mock(**driver_mock_attrs) 34 | return orig_class 35 | 36 | 37 | def test_klass_init(klass): 38 | """Ensure the CUT can be initialized.""" 39 | k = klass('test_secret.json') 40 | assert k.keyfile_name == 'test_secret.json' 41 | 42 | 43 | def test_driver(klass): 44 | """Ensure the driver is initialized properly.""" 45 | k = klass('test_secret.json') 46 | 47 | assert k.driver 48 | 49 | k.CREDENTIAL_CLASS.from_json_keyfile_name.assert_called_once_with( 50 | 'test_secret.json', 51 | k.scope 52 | ) 53 | 54 | k.DRIVER_MODULE.authorize.assert_called_once_with('FakeTestCredentials') 55 | 56 | 57 | def test_get_datasheet_happy(klass, mocker): 58 | """Ensure the get_datasheet method finds existing sheets.""" 59 | k = klass('foo') 60 | 61 | mock_doc = mocker.Mock() 62 | k.get_datasheet(mock_doc, "Foo") 63 | mock_doc.worksheet.called_once_with("Foo") 64 | mock_doc.add_worksheet.assert_not_called() 65 | 66 | 67 | def test_get_datasheet_exception(klass, mocker, gspread): 68 | """Ensure get datasheet method creates one if the requested name doesn't exist.""" 69 | k = klass('foo') 70 | 71 | mock_doc_attrs = { 72 | 'worksheet.side_effect': gspread.exceptions.WorksheetNotFound 73 | } 74 | mock_doc = mocker.Mock(**mock_doc_attrs) 75 | k.get_datasheet(mock_doc, "Foo") 76 | mock_doc.worksheet.called_once_with("Foo") 77 | mock_doc.add_worksheet.called_once_with("Foo", 1, 1) 78 | 79 | 80 | def test_clear_sheet_resizes(klass, mocker): 81 | """Verify clear_sheet resizes.""" 82 | mock_attrs = { 83 | 'findall.return_value': [] 84 | } 85 | mock_sheet = mocker.Mock(**mock_attrs) 86 | 87 | k = klass('foo') 88 | k.clear_sheet(mock_sheet, 1, 20) 89 | mock_sheet.resize.assert_called_once_with(1, 20) 90 | 91 | 92 | def test_clear_sheet_replaces_content(klass, mocker): 93 | """Verify 
clear_sheet empties out any remaining content.""" 94 | mock_cell = mocker.Mock(value="Hanging Chad") 95 | mock_sheet_attrs = { 96 | 'findall.return_value': [ 97 | mock_cell 98 | ] 99 | } 100 | mock_sheet = mocker.Mock(**mock_sheet_attrs) 101 | 102 | k = klass('foo') 103 | k.clear_sheet(mock_sheet, 1, 1) 104 | 105 | assert mock_cell.value == "" 106 | mock_sheet.update_cells.assert_called_once_with([mock_cell, ]) 107 | 108 | 109 | def test_write_find_by_name(klass, mocker): 110 | """Ensure write finds docs by name.""" 111 | report = mocker.Mock() 112 | report.table = [ 113 | ['Ionic', 'Doric', 'Corinthian'], 114 | ['how to get the weeaboo to stop using the holodeck', 'malarkey', ''], 115 | ['does universal translator work on the weeaboo', ], 116 | ['can the brexit breed with the weeaboo', 'which moon is sailor moon from', 'is there dilithium in crystal pepsi'], 117 | ] 118 | 119 | starting_mock_cells = [ 120 | MockCell("", 1, 1), MockCell("", 2, 1), MockCell("", 3, 1), 121 | MockCell("", 1, 2), MockCell("", 2, 2), MockCell("", 3, 2), 122 | MockCell("", 1, 3), MockCell("", 2, 3), MockCell("", 3, 3), 123 | MockCell("", 1, 4), MockCell("", 2, 4), MockCell("", 3, 4), 124 | MockCell("", 1, 5), MockCell("", 2, 5), MockCell("", 3, 5), 125 | ] 126 | 127 | mock_cell = MockCell("", 1, 1) 128 | mock_sheet_attrs = { 129 | 'findall.return_value': [ 130 | mock_cell, 131 | ], 132 | 'range.return_value': starting_mock_cells 133 | } 134 | mock_sheet = mocker.Mock(**mock_sheet_attrs) 135 | mock_doc = mocker.Mock() 136 | mock_doc.worksheet.return_value = mock_sheet 137 | 138 | mock_driver_result = mocker.Mock() 139 | mock_driver_result.open.return_value = mock_doc 140 | klass.DRIVER_MODULE.authorize.return_value = mock_driver_result 141 | 142 | k = klass('foo') 143 | 144 | k.write(report, "Test Name", "Sheet Name") 145 | 146 | k.driver.open.called_once_with("Test Name") 147 | mock_doc.worksheet.called_once_with("Sheet Name") 148 | assert mock_cell.value == "" 149 | 
mock_sheet.range.assert_called_once_with('A1:C4') 150 | mock_sheet.update_cells.call_count == 2 151 | 152 | 153 | def test_select_cells(klass, mocker): 154 | """Ensure the right cells are selcted.""" 155 | report = mocker.Mock() 156 | report.table = [ 157 | ['Ionic', 'Doric', 'Corinthian'], 158 | ['how to get the weeaboo to stop using the holodeck', 'malarkey', ''], 159 | ['does universal translator work on the weeaboo', ], 160 | ['can the brexit breed with the weeaboo', 'which moon is sailor moon from', 'is there dilithium in crystal pepsi'], 161 | ['', ] 162 | ] 163 | 164 | mock_sheet = mocker.Mock() 165 | 166 | k = klass('foo') 167 | k.select_range(mock_sheet, report.table) 168 | 169 | mock_sheet.range.assert_called_once_with('A1:C5') 170 | 171 | 172 | class MockCell(object): 173 | def __init__(self, value, col, row): 174 | self.value = value 175 | self.col = col 176 | self.row = row 177 | 178 | def __repr__(self): 179 | return "".format(self.col, self.row, self.value) 180 | 181 | 182 | def test_update_cells(klass, mocker): 183 | """Ensure the cells are updated properly.""" 184 | report = mocker.Mock() 185 | report.table = [ 186 | ['Ionic', 'Doric', 'Corinthian'], 187 | ['how to get the weeaboo to stop using the holodeck', 'malarkey', ''], 188 | ['does universal translator work on the weeaboo', ], 189 | ['can the brexit breed with the weeaboo', 'which moon is sailor moon from', 'is there dilithium in crystal pepsi'], 190 | ['', ] 191 | ] 192 | starting_mock_cells = [ 193 | MockCell("", 1, 1), MockCell("", 2, 1), MockCell("", 3, 1), 194 | MockCell("", 1, 2), MockCell("", 2, 2), MockCell("", 3, 2), 195 | MockCell("", 1, 3), MockCell("", 2, 3), MockCell("", 3, 3), 196 | MockCell("", 1, 4), MockCell("", 2, 4), MockCell("", 3, 4), 197 | MockCell("", 1, 5), MockCell("", 2, 5), MockCell("", 3, 5), 198 | ] 199 | expected_cells = [ 200 | MockCell("Ionic", 1, 1), MockCell("Doric", 2, 1), MockCell("Corinthian", 3, 1), 201 | MockCell("how to get the weeaboo to stop using 
the holodeck", 1, 2), MockCell("malarkey", 2, 2), MockCell("", 3, 2), 202 | MockCell("does universal translator work on the weeaboo", 1, 3), MockCell("", 2, 3), MockCell("", 3, 3), 203 | MockCell("can the brexit breed with the weeaboo", 1, 4), MockCell("which moon is sailor moon from", 2, 4), MockCell("is there dilithium in crystal pepsi", 3, 4), 204 | MockCell("", 1, 5), MockCell("", 2, 5), MockCell("", 3, 5), 205 | ] 206 | 207 | k = klass('foo') 208 | actual = k.update_cells(starting_mock_cells, report.table) 209 | 210 | for actual_item, expected_item in zip(actual, expected_cells): 211 | assert (actual_item.row, actual_item.col, actual_item.value) == (expected_item.row, expected_item.col, expected_item.value) 212 | -------------------------------------------------------------------------------- /tests/test_lead_pctile_reporter.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.fixture 5 | def klass(): 6 | """Return the CUT.""" 7 | from agile_analytics import LeadTimePercentileReporter 8 | return LeadTimePercentileReporter 9 | 10 | 11 | def test_klass(klass): 12 | """Verify our fixture.""" 13 | assert klass 14 | 15 | 16 | def test_init(klass, datetime): 17 | """Make sure it inits the way we want.""" 18 | k = klass( 19 | title="Lead Time Percentile Report", 20 | start_date=datetime(2016, 5, 21, 0, 0, 0), 21 | end_date=datetime(2016, 6, 21, 11, 59, 59), 22 | num_weeks=6, 23 | ) 24 | assert k 25 | assert k.title == "Lead Time Percentile Report" 26 | assert k.num_weeks == 6 27 | 28 | k = klass( 29 | title="Lead Time Percentile Report 2", 30 | ) 31 | assert k 32 | assert k.title == "Lead Time Percentile Report 2" 33 | assert k.num_weeks == 4 34 | 35 | 36 | def test_date_range_reconcile(klass, datetime, tzutc): 37 | """Ensure the right dates are set when passed two dates.""" 38 | r = klass(title="Foo") 39 | r.start_date = datetime(2016, 5, 21, 0, 0, 0) 40 | r.end_date = datetime(2016, 6, 21, 11, 
59, 59) 41 | 42 | assert r.start_date == datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc) # Sunday 43 | assert r.end_date == datetime(2016, 6, 25, 11, 59, 59, tzinfo=tzutc) # Saturday 44 | 45 | 46 | def test_filter(klass, days_agos, AnalyzedAgileTicket): 47 | """filter_issues ignores issues completed before the specified range.""" 48 | issue_list_kwargs = [] 49 | for i in range(1, 3): # 2 issues with 2 day lead 50 | kwargs = dict( 51 | key="TEST-{}".format(i), 52 | committed=dict(state="Committed", entered_at=days_agos[2]), 53 | started=dict(state="Started", entered_at=days_agos[2]), 54 | ended=dict(state="Ended", entered_at=days_agos[0]), 55 | ) 56 | issue_list_kwargs.append(kwargs) 57 | 58 | issue_list = [AnalyzedAgileTicket(**kwargs) for kwargs in issue_list_kwargs] 59 | issue_out_of_range = AnalyzedAgileTicket( 60 | key="TEST-OOR", 61 | committed=dict(state="Committed", entered_at=days_agos[42]), 62 | started=dict(state="Started", entered_at=days_agos[44]), 63 | ended=dict(state="Ended", entered_at=days_agos[45]), 64 | ) 65 | issue_list.append(issue_out_of_range) 66 | 67 | r = klass( 68 | title="Cycle Time Distribution Past 30 days", 69 | start_date=days_agos[30], 70 | end_date=days_agos[0] 71 | ) 72 | filtered_issues = r.filter_issues(issue_list) 73 | 74 | assert r.start_date > issue_out_of_range.ended['entered_at'] 75 | assert len(filtered_issues) == 2 76 | 77 | 78 | def test_report_table(klass, weeks_of_tickets, date, datetime, tzutc): 79 | """Ensure the report table returns a row for every week""" 80 | 81 | expected = [ 82 | ["Week", "50th", "75th", "95th"], 83 | [date(2016, 5, 15), 28, 29, 29], 84 | [date(2016, 5, 22), 26, 29, 29], 85 | [date(2016, 5, 29), 24, 28, 29], 86 | [date(2016, 6, 5), 22, 28, 29], 87 | [date(2016, 6, 12), 13, 20, 23], 88 | [date(2016, 6, 19), 8, 17, 22], 89 | [date(2016, 6, 26), 8, 18, 22], 90 | ] 91 | 92 | r = klass( 93 | title="Lead Time Percentile Report", 94 | start_date=datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc), # Sunday 95 | 
end_date=datetime(2016, 7, 2, 11, 59, 59, tzinfo=tzutc), # Saturday 96 | num_weeks=7, 97 | ) 98 | report = r.report_on(weeks_of_tickets) 99 | 100 | assert len(report.table) == 8 101 | assert report.table == expected 102 | 103 | 104 | def test_report_table_no_header(klass, weeks_of_tickets, date, datetime, tzutc): 105 | """Ensure the report table honors the header argument""" 106 | 107 | expected = [ 108 | [date(2016, 5, 15), 28, 29, 29], 109 | [date(2016, 5, 22), 26, 29, 29], 110 | [date(2016, 5, 29), 24, 28, 29], 111 | [date(2016, 6, 5), 22, 28, 29], 112 | [date(2016, 6, 12), 13, 20, 23], 113 | [date(2016, 6, 19), 8, 17, 22], 114 | [date(2016, 6, 26), 8, 18, 22], 115 | ] 116 | 117 | r = klass( 118 | title="Lead Time Percentile Report", 119 | start_date=datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc), # Sunday 120 | end_date=datetime(2016, 7, 2, 11, 59, 59, tzinfo=tzutc), # Saturday 121 | num_weeks=7, 122 | ) 123 | report = r.report_on(weeks_of_tickets, header=False) 124 | 125 | assert len(report.table) == 7 126 | assert report.table == expected 127 | 128 | 129 | def test_report_table_limit(klass, weeks_of_tickets, date, datetime, tzutc): 130 | """Ensure the report table returns no more than the num_weeks provided""" 131 | 132 | expected = [ 133 | ["Week", "50th", "75th", "95th"], 134 | [date(2016, 6, 5), 22, 28, 29], 135 | [date(2016, 6, 12), 13, 20, 23], 136 | [date(2016, 6, 19), 8, 17, 22], 137 | [date(2016, 6, 26), 8, 18, 22], 138 | ] 139 | 140 | r = klass( 141 | title="Lead Time Percentile Report", 142 | start_date=datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc), # Sunday 143 | end_date=datetime(2016, 7, 2, 11, 59, 59, tzinfo=tzutc), # Saturday 144 | num_weeks=4, 145 | ) 146 | report = r.report_on(weeks_of_tickets) 147 | 148 | assert len(report.table) == 5 149 | assert report.table == expected 150 | 151 | 152 | def test_report_table_no_tickets(klass, date, datetime, tzutc): 153 | """Ensure the report table returns a row for every week""" 154 | 155 | expected = [ 156 
| ["Week", "50th", "75th", "95th"], 157 | [date(2016, 5, 15), 0, 0, 0], 158 | [date(2016, 5, 22), 0, 0, 0], 159 | [date(2016, 5, 29), 0, 0, 0], 160 | [date(2016, 6, 5), 0, 0, 0], 161 | [date(2016, 6, 12), 0, 0, 0], 162 | [date(2016, 6, 19), 0, 0, 0], 163 | [date(2016, 6, 26), 0, 0, 0], 164 | ] 165 | 166 | r = klass( 167 | title="Lead Time Percentile Report", 168 | start_date=datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc), # Sunday 169 | end_date=datetime(2016, 7, 2, 11, 59, 59, tzinfo=tzutc), # Saturday 170 | num_weeks=7, 171 | ) 172 | report = r.report_on([]) 173 | 174 | assert len(report.table) == 8 175 | assert report.table == expected 176 | -------------------------------------------------------------------------------- /tests/test_lead_reporter.py: -------------------------------------------------------------------------------- 1 | """Test Lead Time reporter.""" 2 | 3 | import pytest 4 | 5 | 6 | @pytest.fixture 7 | def klass(): 8 | """Provide the CUT.""" 9 | from agile_analytics import LeadTimeDistributionReporter 10 | return LeadTimeDistributionReporter 11 | 12 | 13 | def test_klass(klass): 14 | """Ensure the fixture works.""" 15 | assert klass 16 | 17 | 18 | def test_date_selection(klass, datetime, tzutc): 19 | """Ensure the CUT picks Sunday-Saturday date range""" 20 | r = klass("Foo") 21 | r.start_date = datetime(2016, 5, 21, 0, 0, 0) 22 | r.end_date = datetime(2016, 6, 21, 11, 59, 59) 23 | 24 | assert r.start_date == datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc) # Sunday 25 | assert r.end_date == datetime(2016, 6, 25, 11, 59, 59, tzinfo=tzutc) # Saturday 26 | 27 | 28 | def test_filter(klass, days_agos, AnalyzedAgileTicket, tzutc): 29 | """filter_issues ignores issues completed before the specified range.""" 30 | issue_list_kwargs = [] 31 | for i in range(1, 3): # 2 issues with 2 day lead 32 | kwargs = dict( 33 | key="TEST-{}".format(i), 34 | committed=dict(state="Committed", entered_at=days_agos[2]), 35 | started=dict(state="Started", 
entered_at=days_agos[2]), 36 | ended=dict(state="Ended", entered_at=days_agos[0]), 37 | ) 38 | issue_list_kwargs.append(kwargs) 39 | 40 | issue_list = [AnalyzedAgileTicket(**kwargs) for kwargs in issue_list_kwargs] 41 | issue_out_of_range = AnalyzedAgileTicket( 42 | key="TEST-OOR", 43 | committed=dict(state="Committed", entered_at=days_agos[42]), 44 | started=dict(state="Started", entered_at=days_agos[44]), 45 | ended=dict(state="Ended", entered_at=days_agos[45]), 46 | ) 47 | issue_list.append(issue_out_of_range) 48 | 49 | r = klass( 50 | title="Cycle Time Distribution Past 30 days", 51 | start_date=days_agos[30], 52 | end_date=days_agos[0] 53 | ) 54 | filtered_issues = r.filter_issues(issue_list) 55 | 56 | assert r.start_date > issue_out_of_range.ended['entered_at'] 57 | assert len(filtered_issues) == 2 58 | 59 | 60 | def test_report_summary(klass, datetime, tzutc): 61 | """report_on returns an object with meta data.""" 62 | start_date = datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc) # Sunday 63 | end_date = datetime(2016, 6, 25, 11, 59, 59, tzinfo=tzutc) # Saturday 64 | 65 | r = klass( 66 | title="Cycle Time Distribution Past 30 days", 67 | start_date=start_date, 68 | end_date=end_date 69 | ) 70 | 71 | expected = dict( 72 | title="Cycle Time Distribution Past 30 days", 73 | start_date=start_date, 74 | end_date=end_date, 75 | ) 76 | 77 | assert r.report_on([]).summary == expected 78 | 79 | 80 | def test_report_table_empty(klass, days_agos): 81 | """Ensure an empty list of tickets is handled.""" 82 | expected = [ 83 | ["Lead Time", "Tickets"] 84 | ] 85 | r = klass( 86 | title="Cycle Time Distribution Past 30 days", 87 | start_date=days_agos[30], 88 | end_date=days_agos[0] 89 | ) 90 | 91 | report = r.report_on([]) 92 | 93 | assert report.table == expected 94 | 95 | 96 | def test_report_table(klass, days_agos, AnalyzedAgileTicket, tzutc): 97 | """report_on returns an object with a tabular represenation of the data""" 98 | issue_list_kwargs = [] 99 | for i in range(1, 
3): # 2 issues with 2 day lead 100 | kwargs = dict( 101 | key="TEST-{}".format(i), 102 | committed=dict(state="Committed", entered_at=days_agos[2]), 103 | started=dict(state="Started", entered_at=days_agos[2]), 104 | ended=dict(state="Ended", entered_at=days_agos[0]), 105 | ) 106 | issue_list_kwargs.append(kwargs) 107 | 108 | for i in range(4, 10): # 6 issues, with 5 day lead 109 | kwargs = dict( 110 | key="TEST-{}".format(i), 111 | committed=dict(state="Committed", entered_at=days_agos[5]), 112 | started=dict(state="Started", entered_at=days_agos[4]), 113 | ended=dict(state="Ended", entered_at=days_agos[0]), 114 | ) 115 | issue_list_kwargs.append(kwargs) 116 | 117 | for i in range(11, 13): # 2 issues, with 10 day lead 118 | kwargs = dict( 119 | key="TEST-{}".format(i), 120 | committed=dict(state="Committed", entered_at=days_agos[10]), 121 | started=dict(state="Started", entered_at=days_agos[9]), 122 | ended=dict(state="Ended", entered_at=days_agos[0]), 123 | ) 124 | issue_list_kwargs.append(kwargs) 125 | 126 | issue_list = [AnalyzedAgileTicket(**kwargs) for kwargs in issue_list_kwargs] 127 | 128 | expected = [ 129 | ["Lead Time", "Tickets"], 130 | [1, 0], 131 | [2, 2], 132 | [3, 0], 133 | [4, 0], 134 | [5, 6], 135 | [6, 0], 136 | [7, 0], 137 | [8, 0], 138 | [9, 0], 139 | [10, 2] 140 | ] 141 | r = klass( 142 | title="Cycle Time Distribution Past 30 days", 143 | start_date=days_agos[30], 144 | end_date=days_agos[0] 145 | ) 146 | 147 | report = r.report_on(issue_list) 148 | 149 | assert report.table == expected 150 | -------------------------------------------------------------------------------- /tests/test_partial_date_analyzer.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.fixture 5 | def klass(): 6 | """Return the CUT.""" 7 | from agile_analytics import PartialDateAnalyzer 8 | return PartialDateAnalyzer 9 | 10 | 11 | @pytest.fixture 12 | def analyzer(klass): 13 | """Return an instance of 
the CUT with some defaults.""" 14 | a = klass( 15 | start_states=["In Progress", ], 16 | commit_states=["Selected", "Created"], 17 | end_states=["Done", ] 18 | ) 19 | return a 20 | 21 | 22 | def test_created_ticket(Ticket, klass, days_ago, analyzer): 23 | """Tickets that been created, but not started/finished, should be handled.""" 24 | t = Ticket( 25 | key="TEST-1", 26 | created_at=days_ago(10), 27 | updated_at=days_ago(0), 28 | flow_logs=[ 29 | dict(entered_at=days_ago(10), state="Created"), 30 | ] 31 | ) 32 | results, ignored_issues = analyzer.analyze([t, ]) 33 | assert results[0].committed['entered_at'] == days_ago(10) 34 | assert len(ignored_issues) == 0 35 | 36 | 37 | def test_created_ticket_title(Ticket, klass, days_ago, analyzer): 38 | """Tickets title should be propogated""" 39 | t = Ticket( 40 | key="TEST-1", 41 | title="Foo", 42 | created_at=days_ago(10), 43 | updated_at=days_ago(0), 44 | flow_logs=[ 45 | dict(entered_at=days_ago(10), state="Created"), 46 | ] 47 | ) 48 | results, ignored_issues = analyzer.analyze([t, ]) 49 | assert results[0].title == "Foo" 50 | -------------------------------------------------------------------------------- /tests/test_reporter.py: -------------------------------------------------------------------------------- 1 | """Test the Reporter base class.""" 2 | 3 | import pytest 4 | 5 | 6 | @pytest.fixture 7 | def klass(): 8 | """Return the CUT.""" 9 | from agile_analytics.reporters import Reporter 10 | return Reporter 11 | 12 | 13 | def test_klass(klass): 14 | """Ensure the CUT exists.""" 15 | assert klass 16 | 17 | 18 | @pytest.fixture 19 | def instance(klass, days_ago): 20 | """Return a pre-init'd CUT.""" 21 | now = days_ago(0) 22 | a_month_ago = days_ago(30) 23 | 24 | k = klass(title="Foo", start_date=a_month_ago, end_date=now) 25 | return k 26 | 27 | 28 | def test_init(klass, days_ago): 29 | """Verify we can init it correctly.""" 30 | 31 | now = days_ago(0) 32 | a_month_ago = days_ago(30) 33 | 34 | k = klass(title="Foo", 
start_date=a_month_ago, end_date=now) 35 | 36 | assert k 37 | assert k.start_date == a_month_ago 38 | assert k.end_date == now 39 | 40 | 41 | def test_valid_start_date(klass, days_ago): 42 | """Verify valid_start_date returns whatever is passed.""" 43 | 44 | now = days_ago(0) 45 | a_month_ago = days_ago(30) 46 | 47 | k = klass(title="Foo", start_date=a_month_ago, end_date=now) 48 | 49 | assert now == k.valid_start_date(now) 50 | 51 | 52 | def test_valid_end_date(klass, days_ago): 53 | """Verify valid_end_date returns whatever is passed.""" 54 | now = days_ago(0) 55 | a_month_ago = days_ago(30) 56 | 57 | k = klass(title="Foo", start_date=a_month_ago, end_date=now) 58 | 59 | assert a_month_ago == k.valid_end_date(a_month_ago) 60 | 61 | 62 | def test_filter_issues(instance): 63 | """Verify that filter_issues raises NotImplementedError.""" 64 | with pytest.raises(NotImplementedError): 65 | instance.filter_issues([]) 66 | 67 | 68 | def test_report_on(instance): 69 | """Verify that report_on raises NotImplementedError.""" 70 | with pytest.raises(NotImplementedError): 71 | instance.report_on([]) 72 | 73 | 74 | def test_starts_of_weeks(klass, relativedelta, datetime, tzutc): 75 | """Should return a list of datetimes == the Sundays starting each week between start_ and end_date.""" 76 | instance = klass( 77 | title="Foo", 78 | start_date=datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc), # Sunday, 79 | end_date=datetime(2016, 6, 25, 11, 59, 59, tzinfo=tzutc) # Saturday 80 | ) 81 | week_starts = list(instance.starts_of_weeks()) 82 | expected_start_of_last_week = instance.end_date.date() - relativedelta.relativedelta(days=6) 83 | assert week_starts[0] == instance.start_date.date() 84 | assert week_starts[-1] == expected_start_of_last_week 85 | for start in week_starts: 86 | assert start.weekday() == instance.SUNDAY 87 | 88 | 89 | def test_filter_on_ended(klass, days_agos, AnalyzedAgileTicket): 90 | """Verify that filter_on_ended only includes tickets that ended in the instance 
date range.""" 91 | issue_list_kwargs = [] 92 | for i in range(1, 3): # 2 issues with 2 day lead 93 | kwargs = dict( 94 | key="TEST-{}".format(i), 95 | committed=dict(state="Committed", entered_at=days_agos[2]), 96 | started=dict(state="Started", entered_at=days_agos[2]), 97 | ended=dict(state="Ended", entered_at=days_agos[0]), 98 | ) 99 | issue_list_kwargs.append(kwargs) 100 | 101 | issue_list = [AnalyzedAgileTicket(**kwargs) for kwargs in issue_list_kwargs] 102 | issue_out_of_range = AnalyzedAgileTicket( 103 | key="TEST-OOR", 104 | committed=dict(state="Committed", entered_at=days_agos[42]), 105 | started=dict(state="Started", entered_at=days_agos[44]), 106 | ended=dict(state="Ended", entered_at=days_agos[45]), 107 | ) 108 | issue_list.append(issue_out_of_range) 109 | 110 | r = klass( 111 | title="Cycle Time Distribution Past 30 days", 112 | start_date=days_agos[30], 113 | end_date=days_agos[0] 114 | ) 115 | filtered_issues = r.filter_on_ended(issue_list) 116 | 117 | assert r.start_date > issue_out_of_range.ended['entered_at'] 118 | assert len(filtered_issues) == 2 119 | -------------------------------------------------------------------------------- /tests/test_sla_report.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.fixture 5 | def klass(): 6 | """Return the CUT.""" 7 | from agile_analytics import SLAReporter 8 | return SLAReporter 9 | 10 | 11 | def test_klass(klass): 12 | """Verify our fixture.""" 13 | assert klass 14 | 15 | 16 | def test_klass_init(klass): 17 | """Verify init.""" 18 | r = klass( 19 | title="Foo" 20 | ) 21 | assert r 22 | 23 | 24 | def test_date_range_reconcile(klass, datetime, tzutc): 25 | """Ensure the right dates are set when passed two dates.""" 26 | r = klass(title="Foo") 27 | r.start_date = datetime(2016, 5, 21, 0, 0, 0) 28 | r.end_date = datetime(2016, 6, 21, 11, 59, 59) 29 | 30 | assert r.start_date == datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc) # Sunday 31 | 
assert r.end_date == datetime(2016, 6, 25, 11, 59, 59, tzinfo=tzutc) # Saturday 32 | 33 | 34 | def test_filter(klass, days_agos, AnalyzedAgileTicket): 35 | """filter_issues ignores issues completed before the specified range.""" 36 | issue_list_kwargs = [] 37 | for i in range(1, 3): # two issues, each with a 2-day lead time 38 | kwargs = dict( 39 | key="TEST-{}".format(i), 40 | committed=dict(state="Committed", entered_at=days_agos[2]), 41 | started=dict(state="Started", entered_at=days_agos[2]), 42 | ended=dict(state="Ended", entered_at=days_agos[0]), 43 | ) 44 | issue_list_kwargs.append(kwargs) 45 | 46 | issue_list = [AnalyzedAgileTicket(**kwargs) for kwargs in issue_list_kwargs] 47 | issue_out_of_range = AnalyzedAgileTicket( 48 | key="TEST-OOR", 49 | committed=dict(state="Committed", entered_at=days_agos[42]), 50 | started=dict(state="Started", entered_at=days_agos[44]), 51 | ended=dict(state="Ended", entered_at=days_agos[45]), 52 | ) 53 | issue_list.append(issue_out_of_range) 54 | 55 | r = klass( 56 | title="Cycle Time Distribution Past 30 days", 57 | start_date=days_agos[30], 58 | end_date=days_agos[0] 59 | ) 60 | filtered_issues = r.filter_issues(issue_list) 61 | 62 | assert r.start_date > issue_out_of_range.ended['entered_at'] 63 | assert len(filtered_issues) == 2 64 | 65 | 66 | def test_report(klass, weeks_of_tickets, date, datetime, tzutc): 67 | """We should get back a report with the count of issues that exceed a specified lead time.""" 68 | 69 | expected = [ 70 | ["Week", "Ticket"], 71 | [date(2016, 5, 15), 3], 72 | [date(2016, 5, 22), 2], 73 | [date(2016, 5, 29), 0], 74 | [date(2016, 6, 5), 0], 75 | [date(2016, 6, 12), 2], 76 | [date(2016, 6, 19), 3], 77 | [date(2016, 6, 26), 0], 78 | ] 79 | 80 | r = klass( 81 | title="SLA Breach Report", 82 | start_date=datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc), # Sunday 83 | end_date=datetime(2016, 7, 2, 11, 59, 59, tzinfo=tzutc), # Saturday 84 | ) 85 | report = r.report_on(weeks_of_tickets, {"Ticket": 14})  # SLA threshold: 'Ticket' issues with lead time over 14 days count as breaches 86 | 87 | assert 
report.table == expected 88 | -------------------------------------------------------------------------------- /tests/test_ticket_reporter.py: -------------------------------------------------------------------------------- 1 | """Test the TicketReporter class.""" 2 | 3 | import pytest 4 | 5 | 6 | @pytest.fixture 7 | def klass(): 8 | """Return the CUT.""" 9 | from agile_analytics import TicketReporter 10 | return TicketReporter 11 | 12 | 13 | def test_klass(klass): 14 | """Verify the CUT fixture.""" 15 | assert klass 16 | 17 | 18 | def test_klass_init(klass): 19 | """Verify init.""" 20 | r = klass( 21 | title="Foo" 22 | ) 23 | assert r 24 | 25 | 26 | def test_date_range_reconcile(klass, datetime, tzutc): 27 | """Ensure the right dates are set when passed two dates.""" 28 | r = klass(title="Foo") 29 | r.start_date = datetime(2016, 5, 21, 0, 0, 0) 30 | r.end_date = datetime(2016, 6, 21, 11, 59, 59) 31 | 32 | assert r.start_date == datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc) # Sunday 33 | assert r.end_date == datetime(2016, 6, 25, 11, 59, 59, tzinfo=tzutc) # Saturday 34 | 35 | 36 | def test_filter(klass, days_agos, AnalyzedAgileTicket): 37 | """filter_issues ignores issues completed before the specified range.""" 38 | issue_list_kwargs = [] 39 | for i in range(1, 3): # two issues, each with a 2-day lead time 40 | kwargs = dict( 41 | key="TEST-{}".format(i), 42 | committed=dict(state="Committed", entered_at=days_agos[2]), 43 | started=dict(state="Started", entered_at=days_agos[2]), 44 | ended=dict(state="Ended", entered_at=days_agos[0]), 45 | ) 46 | issue_list_kwargs.append(kwargs) 47 | 48 | issue_list = [AnalyzedAgileTicket(**kwargs) for kwargs in issue_list_kwargs] 49 | issue_out_of_range = AnalyzedAgileTicket( 50 | key="TEST-OOR", 51 | committed=dict(state="Committed", entered_at=days_agos[42]), 52 | started=dict(state="Started", entered_at=days_agos[44]), 53 | ended=dict(state="Ended", entered_at=days_agos[45]), 54 | ) 55 | issue_list.append(issue_out_of_range) 56 | 57 | r = 
klass( 58 | title="Cycle Time Distribution Past 30 days", 59 | start_date=days_agos[30], 60 | end_date=days_agos[0] 61 | ) 62 | filtered_issues = r.filter_issues(issue_list) 63 | 64 | assert r.start_date > issue_out_of_range.ended['entered_at'] 65 | assert len(filtered_issues) == 2 66 | 67 | 68 | def test_report_table(klass, AnalyzedAgileTicket, days_agos): 69 | """Ensure the report table returns a row with details on every ticket.""" 70 | 71 | issue_list_kwargs = [] 72 | for i in range(1, 3): # two issues, each with a 2-day lead time 73 | kwargs = dict( 74 | key="TEST-{}".format(i), 75 | committed=dict(state="Committed", entered_at=days_agos[i + 3]), 76 | started=dict(state="Started", entered_at=days_agos[i + 2]), 77 | ended=dict(state="Ended", entered_at=days_agos[i]), 78 | ) 79 | issue_list_kwargs.append(kwargs) 80 | issue_list = [AnalyzedAgileTicket(**kwargs) for kwargs in issue_list_kwargs] 81 | issue_list.sort(key=lambda i: i.ended['entered_at']) 82 | 83 | expected = [ 84 | ["Key", "Title", "Lead Time", "Cycle Time", "Commit State", "Commit At", "Start State", "Start At", "End State", "End At"], 85 | ] 86 | for i in issue_list: 87 | row = [ 88 | i.key, 89 | i.title, 90 | i.lead_time, 91 | i.cycle_time, 92 | i.committed['state'], 93 | i.committed['entered_at'], 94 | i.started['state'], 95 | i.started['entered_at'], 96 | i.ended['state'], 97 | i.ended['entered_at'], 98 | ] 99 | expected.append(row) 100 | 101 | r = klass( 102 | title="Cycle Time Distribution Past 30 days", 103 | start_date=days_agos[30], 104 | end_date=days_agos[0] 105 | ) 106 | 107 | actual = r.report_on(issue_list) 108 | assert expected == actual.table 109 | 110 | 111 | def test_report_summary(klass, datetime, tzutc): 112 | """report_on returns an object with metadata.""" 113 | start_date = datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc) # Sunday 114 | end_date = datetime(2016, 6, 25, 11, 59, 59, tzinfo=tzutc) # Saturday 115 | 116 | r = klass( 117 | title="Foo", 118 | start_date=start_date, 119 | 
end_date=end_date 120 | ) 121 | 122 | expected = dict( 123 | title="Foo", 124 | start_date=start_date, 125 | end_date=end_date, 126 | ) 127 | 128 | assert r.report_on([]).summary == expected 129 | -------------------------------------------------------------------------------- /tests/test_tp_reporter.py: -------------------------------------------------------------------------------- 1 | """Test ThroughputReporter.""" 2 | 3 | import pytest 4 | 5 | 6 | @pytest.fixture 7 | def klass(): 8 | """Return the CUT.""" 9 | from agile_analytics import ThroughputReporter 10 | return ThroughputReporter 11 | 12 | 13 | def test_title(klass): 14 | """Ensure the title gets set.""" 15 | r = klass( 16 | title="Weekly Throughput" 17 | ) 18 | assert r.title == "Weekly Throughput" 19 | 20 | 21 | def test_period(klass): 22 | """Ensure the period can be set.""" 23 | r = klass(title="Weekly Throughput") 24 | r.period = "weekly" 25 | assert r.period == "weekly" 26 | 27 | 28 | def test_date_assignment(klass, days_ago): 29 | """Ensure the range can be set.""" 30 | r = klass(title="Weekly Throughput") 31 | r.start_date = days_ago(30) 32 | r.end_date = days_ago(0) 33 | 34 | assert r.start_date == days_ago(30) 35 | assert r.end_date == days_ago(0) 36 | 37 | 38 | def test_date_range_reconcile(klass, datetime, tzutc): 39 | """Ensure the right dates are set when passed two dates and a weekly period arg.""" 40 | r = klass(title="Weekly Throughput") 41 | r.period = "weekly" 42 | r.start_date = datetime(2016, 5, 21, 0, 0, 0) 43 | r.end_date = datetime(2016, 6, 21, 11, 59, 59) 44 | 45 | assert r.start_date == datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc) # Sunday 46 | assert r.end_date == datetime(2016, 6, 25, 11, 59, 59, tzinfo=tzutc) # Saturday 47 | 48 | 49 | def test_date_reconcile_post_hoc(klass, datetime, tzutc): 50 | """When you set the period after the dates, the dates should be adjusted.""" 51 | r = klass(title="Weekly Throughput") 52 | r.start_date = datetime(2016, 5, 21, 0, 0, 0) 53 | r.end_date = 
datetime(2016, 6, 21, 11, 59, 59) 54 | r.period = "weekly" 55 | 56 | assert r.start_date == datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc) # Sunday 57 | assert r.end_date == datetime(2016, 6, 25, 11, 59, 59, tzinfo=tzutc) # Saturday 58 | 59 | 60 | def test_report_summary(klass, datetime, tzutc): 61 | """report_on returns an object with metadata about the report.""" 62 | r = klass( 63 | title="Weekly Throughput", 64 | start_date=datetime(2016, 5, 15, 0, 0, 0), 65 | end_date=datetime(2016, 6, 25, 11, 59, 59), 66 | period="weekly", 67 | ) 68 | 69 | expected = dict( 70 | title="Weekly Throughput", 71 | start_date=datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc), 72 | end_date=datetime(2016, 6, 25, 11, 59, 59, tzinfo=tzutc), 73 | period="weekly", 74 | ) 75 | 76 | report = r.report_on([]) 77 | assert report.summary == expected 78 | 79 | 80 | def test_report_summary_table(klass, datetime, date, AnalyzedAgileTicket, tzutc): 81 | """report_on returns a table bucketing completed tickets by week.""" 82 | r = klass( 83 | title="Weekly Throughput", 84 | start_date=datetime(2016, 5, 15, 0, 0, 0), 85 | end_date=datetime(2016, 6, 25, 11, 59, 59), 86 | period="weekly", 87 | ) 88 | 89 | analyzed_issues = [ 90 | AnalyzedAgileTicket("KEY-1", {}, {}, dict(state="FOO", entered_at=datetime(2016, 5, 16, 0, 0, 0, tzinfo=tzutc))), 91 | AnalyzedAgileTicket("KEY-2", {}, {}, dict(state="FOO", entered_at=datetime(2016, 5, 17, 0, 0, 0, tzinfo=tzutc))), 92 | AnalyzedAgileTicket("KEY-3", {}, {}, dict(state="FOO", entered_at=datetime(2016, 5, 17, 0, 0, 0, tzinfo=tzutc))), 93 | AnalyzedAgileTicket("KEY-4", {}, {}, dict(state="FOO", entered_at=datetime(2016, 5, 20, 0, 0, 0, tzinfo=tzutc))), 94 | AnalyzedAgileTicket("KEY-5", {}, {}, dict(state="FOO", entered_at=datetime(2016, 6, 8, 0, 0, 0, tzinfo=tzutc))), 95 | AnalyzedAgileTicket("KEY-6", {}, {}, dict(state="FOO", entered_at=datetime(2016, 6, 8, 0, 0, 0, tzinfo=tzutc))), 96 | AnalyzedAgileTicket("KEY-7", {}, {}, dict(state="FOO", entered_at=datetime(2016, 6, 
8, 0, 0, 0, tzinfo=tzutc))), 97 | AnalyzedAgileTicket("KEY-8", {}, {}, dict(state="FOO", entered_at=datetime(2016, 6, 8, 0, 0, 0, tzinfo=tzutc))), 98 | AnalyzedAgileTicket("KEY-7", {}, dict(state="FOO", entered_at=datetime(2016, 6, 8, 0, 0, 0, tzinfo=tzutc)), {}), # Started, but not finished this week; NOTE(review): key duplicates KEY-7 above, likely meant KEY-9 -- harmless to the counts asserted below 99 | ] 100 | 101 | expected = [ 102 | ["Week", "Completed"], 103 | [date(2016, 5, 15), 4], 104 | [date(2016, 5, 22), 0], 105 | [date(2016, 5, 29), 0], 106 | [date(2016, 6, 5), 4], 107 | [date(2016, 6, 12), 0], 108 | [date(2016, 6, 19), 0], 109 | ] 110 | 111 | report = r.report_on(analyzed_issues) 112 | assert report.table[0] == expected[0] 113 | assert len(report.table) == len(expected) 114 | 115 | for i in range(0, len(expected)): 116 | expected_row = expected[i] 117 | actual_row = report.table[i] 118 | assert expected_row[0] == actual_row[0] 119 | assert expected_row[1] == actual_row[1] 120 | -------------------------------------------------------------------------------- /version.txt: -------------------------------------------------------------------------------- 1 | 0.15.2 --------------------------------------------------------------------------------