├── Framework-Initiated ├── framework_initiated_test.py └── jama_proxy.py ├── Jama-Initiated ├── jama_initiated_test.py ├── results_trigger.py └── test_trigger.py ├── LICENSE └── README.md /Framework-Initiated/framework_initiated_test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import random 3 | import sys 4 | 5 | import jama_proxy 6 | 7 | doc_key = "TEST-DOC-15" # TODO: Change this to match your configuration 8 | test_result = random.choice(["Pass", "Fail"]) 9 | test_result_body = "This test {}ed.".format(test_result) 10 | 11 | # The test invokes the code that updates Jama with the results 12 | # You can add markup to your test results for formatting 13 | results_body = sys.argv[0].split(os.sep)[-1] + ':
' + test_result_body 14 | jama_proxy.update_results(doc_key, results_body, test_result) 15 | -------------------------------------------------------------------------------- /Framework-Initiated/jama_proxy.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import urllib 3 | import json 4 | import sys 5 | 6 | 7 | # The base_url of your Jama instance 8 | # For hosted: "https://{Org Base}.jamacloud.com/rest/{version or 'latest'}/" 9 | base_url = '{base_url}/rest/latest/' # TODO: Change this to match your configuration 10 | 11 | # Username and password should be stored somewhere other than in the 12 | # source code according to your organization's security policies 13 | auth = ('username', 'password') # TODO: Change this to match your configuration 14 | 15 | # The following configuration information is all available in the admin section 16 | # of your Jama instance, or by REST calls. This is easiest from your swagger 17 | # documentation at {Jama Base URL}/api-docs 18 | 19 | # Much of this information could be retrieved in this script, but it would all 20 | # need to be retrieved each time the script runs. It's much quicker (and simpler) 21 | # to gather it once and enter it manually 22 | 23 | # The item type ID of sets 24 | set_type_id = 89029 # TODO: Change this to match your configuration 25 | 26 | # The item type ID of your new Test Results item type 27 | test_result_type_id = 89057 # TODO: Change this to match your configuration 28 | 29 | # The project to store test results in 30 | test_result_project_id = 20462 # TODO: Change this to match your configuration 31 | 32 | # The name of the unique name of the field with the results picklist 33 | results_pick_list = 'result' # TODO: Change this to match your configuration 34 | 35 | # The picklist option API ID of 'Pass' 36 | pass_api_id = 156757 # TODO: Change this to match your configuration 37 | 38 | # The picklist option API ID of 'Fail' 39 | fail_api_id = 156758 # TODO: Change this to match your configuration 40 | 41 | 42 | def update_results(document_key, results_text, test_result): 43 | 44 | # You'll need to figure out how to parse your test results here 45 | test_passed = test_result == 'Pass' 46 | 47 | satisfied_item = get_item_by_document_key(document_key) 48 | if satisfied_item is None: 49 | print "Item with document key {} wasn't found.".format(document_key) 50 | return 51 | 52 | downstream_item = get_downstream_result_item(satisfied_item) 53 | project_name = get_project_name(satisfied_item) 54 | set_id = get_set_id_for_name(project_name) 55 | updated_item = create_payload(set_id, satisfied_item, results_text, test_passed) 56 | 57 | if downstream_item is None: 58 | print "Posting new results item." 59 | post_and_relate(updated_item, satisfied_item) 60 | else: 61 | print "Updating existing results item." 62 | requests.put(base_url + 'items/{}'.format(downstream_item['id']), auth=auth, json=updated_item) 63 | 64 | 65 | def post_and_relate(results_item, upstream_item): 66 | print "Posting new results item." 67 | json_response = json.loads(requests.post(base_url + 'items', auth=auth, json=results_item).text) 68 | 69 | results_item_id = json_response['meta']['location'].split('/')[-1] 70 | print "New results item's ID: {}".format(results_item_id) 71 | 72 | print "Relating items {} and {}.".format(results_item_id, upstream_item['id']) 73 | relationship = {'fromItem': upstream_item['id'], 'toItem': results_item_id} 74 | requests.post(base_url + 'relationships', auth=auth, json=relationship) 75 | 76 | 77 | def create_payload(set_id, item, results_text, test_passed): 78 | if test_passed: 79 | result_api_id = pass_api_id 80 | else: 81 | result_api_id = fail_api_id 82 | return { 83 | 'project': test_result_project_id, 84 | 'itemType': test_result_type_id, 85 | 'fields': { 86 | 'name': "Results: {}".format(item['fields']['name']), 87 | 'description': results_text, 88 | results_pick_list: result_api_id 89 | }, 90 | 'location': { 91 | 'parent': { 92 | 'item': set_id 93 | } 94 | } 95 | } 96 | 97 | 98 | def get_project_name(item): 99 | url = base_url + 'projects/{}'.format(item['project']) 100 | return json.loads(requests.get(url, auth=auth).text)['data']['fields']['name'] 101 | 102 | 103 | def get_set_id_for_name(project_name): 104 | escaped_project_name = urllib.quote_plus(project_name) 105 | url = base_url + 'abstractitems?project={}&contains={}'.format(test_result_project_id, escaped_project_name) 106 | json_response = json.loads(requests.get(url, auth=auth).text) 107 | result_count = json_response['meta']['pageInfo']['totalResults'] 108 | if result_count == 1: 109 | print "Adding to existing set." 110 | return json_response['data'][0]['id'] 111 | if result_count == 0: 112 | print "Creating new set." 113 | return create_set(project_name) 114 | 115 | 116 | def create_set(project_name): 117 | url = base_url + 'itemtypes/{}'.format(test_result_type_id) 118 | type_key = json.loads(requests.get(url, auth=auth).text)['data']['typeKey'] 119 | 120 | payload = { 121 | 'project': test_result_project_id, 122 | 'itemType': set_type_id, 123 | 'childItemType': test_result_type_id, 124 | 'fields': { 125 | 'name': project_name, 126 | 'setKey': type_key 127 | } 128 | } 129 | 130 | url = base_url + 'items/' 131 | json_response = json.loads(requests.post(url, auth=auth, json=payload).text) 132 | location = json_response['meta']['location'] 133 | return location.split('/')[-1] 134 | 135 | 136 | def get_item_by_document_key(doc_key): 137 | remaining_results = -1 138 | start_index = 0 139 | 140 | url = base_url + 'abstractitems?contains={}'.format(urllib.quote_plus(doc_key)) 141 | print "Retrieving resource: {}".format(url) 142 | 143 | while remaining_results != 0: 144 | start_at = '&startAt={}'.format(start_index) 145 | 146 | current_url = url + start_at 147 | response = requests.get(current_url, auth=auth) 148 | json_response = json.loads(response.text) 149 | 150 | for item in json_response['data']: 151 | if item['documentKey'].lower() == doc_key.lower(): 152 | return item 153 | 154 | page_info = json_response['meta']['pageInfo'] 155 | total_results = page_info['totalResults'] 156 | result_count = page_info['resultCount'] 157 | remaining_results = total_results - (start_index + result_count) 158 | start_index += 20 159 | 160 | return None 161 | 162 | 163 | def get_downstream_result_item(item): 164 | remaining_results = -1 165 | start_index = 0 166 | 167 | url = base_url + 'items/{}/downstreamrelated'.format(item['id']) 168 | print "Retrieving resource: {}".format(url) 169 | 170 | while remaining_results != 0: 171 | start_at = '?startAt={}'.format(start_index) 172 | 173 | current_url = url + start_at 174 | response = requests.get(current_url, auth=auth) 175 | json_response = json.loads(response.text) 176 | 177 | if json_response['meta']['pageInfo']['totalResults'] == 0: 178 | return None 179 | 180 | for downstream_item in json_response['data']: 181 | if downstream_item['itemType'] == test_result_type_id: 182 | return downstream_item 183 | 184 | page_info = json_response['meta']['pageInfo'] 185 | total_results = page_info['totalResults'] 186 | result_count = page_info['resultCount'] 187 | remaining_results = total_results - (start_index + result_count) 188 | start_index += 20 189 | 190 | 191 | if __name__ == '__main__': 192 | # sys.argv[1] is Document Key 193 | # 194 | # sys.argv[2] is the Results Body. 195 | # This will be used as the test result item's description 196 | # 197 | # sys.argv[3] is the Test Result. 198 | # This will be parsed and used to determine which picklist option to assign 199 | update_results(sys.argv[1], sys.argv[2], sys.argv[3]) 200 | -------------------------------------------------------------------------------- /Jama-Initiated/jama_initiated_test.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import random 3 | import time 4 | import sys 5 | 6 | 7 | def run_test(item_id, step_number): 8 | time.sleep(random.randrange(0, 10)) # May take some time to run 9 | test_result = random.choice(["Pass", "Fail"]) 10 | f = open("results---{}".format(datetime.datetime.now()), 'w') 11 | # Write formatted results to file 12 | f.write("Item ID: {}\nStep #: {}\nResult: {}".format(item_id, step_number, test_result)) 13 | 14 | 15 | if __name__ == "__main__": 16 | run_test(sys.argv[1], sys.argv[2]) 17 | -------------------------------------------------------------------------------- /Jama-Initiated/results_trigger.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import time 4 | import os 5 | 6 | # The base_url of your Jama instance 7 | # For hosted: "https://{Org Base}.jamacloud.com/rest/{version or 'latest'}/" 8 | base_url = '{base_url}/rest/latest/' # TODO: Change this to match your configuration 9 | 10 | # Username and password should be stored somewhere other than in the 11 | # source code according to your organization's security policies 12 | auth = ('api_user', 'password') # TODO: Change this to match your configuration 13 | 14 | # The frequency to check for results. 15 | polling_interval = 600 # In seconds 16 | 17 | 18 | # This function will need to be tailored to parse the output of your particular 19 | # automated test framework 20 | def parse_results(): 21 | filename = next((f for f in os.listdir('.') if "results---" in f), None) 22 | timestamp = filename.split('---')[1] 23 | parsed_name = "parsed---{}".format(timestamp) 24 | os.rename(filename, parsed_name) 25 | with open(parsed_name, 'r') as results_file: 26 | item_id = results_file.readline().split(':')[1].strip() 27 | step_number = results_file.readline().split(':')[1].strip() 28 | result = results_file.readline().split(':')[1].strip() 29 | print "Found results for run {}.".format(item_id) 30 | 31 | # After parsing results we end up with the Jama item ID, the test run step number 32 | # and the results of the test execution 33 | update_results(item_id, int(step_number), result) 34 | 35 | 36 | def update_results(item_id, step_number, result): 37 | run_url = base_url + "testruns/{}".format(item_id) 38 | run_to_update = json.loads(requests.get(run_url, auth=auth).text)['data'] 39 | print "Retrieved run {} from Jama.".format(run_to_update['id']) 40 | fields = run_to_update['fields'] 41 | remove_field(fields, 'testRunStatus') 42 | remove_field(fields, 'executionDate') 43 | step = fields['testRunSteps'][step_number - 1] 44 | 45 | # Setting the step's status depends on the previous results parsing 46 | if result == 'Pass': 47 | step['status'] = 'PASSED' 48 | else: 49 | step['status'] = 'FAILED' 50 | 51 | print "Updating: \n\tItem ID: {}\n\tStep Number: {}\n\tResult: {}.".format(item_id, step_number, result) 52 | requests.put(run_url, auth=auth, json={'fields': fields}) 53 | print "Attempting to unlock test run {}.".format(item_id) 54 | attempt_unlock(item_id, fields) 55 | 56 | 57 | def attempt_unlock(item_id, fields): 58 | for step in fields['testRunSteps']: 59 | if step['status'] == 'NOT_RUN': 60 | print "Some steps not yet run. Leaving test run {} locked.".format(item_id) 61 | return 62 | print "All steps have been run. Unlocking test run {}.".format(item_id) 63 | lock_url = base_url + "testruns/{}/lock".format(item_id) 64 | requests.put(lock_url, json={'locked': False}, auth=auth) 65 | 66 | 67 | # Convenience function to safely remove read-only fields from the test run 68 | # before updating it 69 | def remove_field(item, field): 70 | try: 71 | del item[field] 72 | except KeyError: 73 | pass 74 | 75 | 76 | if __name__ == "__main__": 77 | while True: 78 | print "==========Polling Results==========" 79 | try: 80 | parse_results() 81 | except AttributeError: 82 | time.sleep(polling_interval) 83 | -------------------------------------------------------------------------------- /Jama-Initiated/test_trigger.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import datetime 3 | import requests 4 | import urllib 5 | import json 6 | import time 7 | import os 8 | 9 | # The base_url of your Jama instance 10 | # For hosted: "https://{Org Base}.jamacloud.com/rest/{version or 'latest'}/" 11 | base_url = "{base_url}/rest/latest/" # TODO: Change this to match your configuration 12 | 13 | # Username and password should be stored somewhere other than in the 14 | # source code according to your organization's security policies 15 | auth = ('username', 'password') # TODO: Change this to match your configuration 16 | 17 | # The API ID of a Jama user. When tests are assigned to this user this script 18 | # will run the tests 19 | automation_user_id = 18364 # TODO: Change this to match your configuration 20 | 21 | # The item type ID of test runs in your Jama instance 22 | runs_type_id = 89035 # TODO: Change this to match your configuration 23 | 24 | # The frequency to poll Jama. Frequent polling can impact system performance 25 | polling_interval = 600 # In seconds 26 | 27 | 28 | def run_test(test_run): 29 | test_run_id = test_run['id'] 30 | print "Evaluating run {}.".format(test_run_id) 31 | lock_url = base_url + "testruns/{}/lock".format(test_run_id) 32 | run_is_locked = json.loads(requests.get(lock_url, auth=auth).text)['data']['locked'] 33 | if not run_is_locked: 34 | print "Test run {} not locked. Running.".format(test_run_id) 35 | step_successfully_run = False 36 | 37 | print "Locking test run {}.".format(test_run_id) 38 | requests.put(lock_url, json={'locked': True}, auth=auth) 39 | 40 | run_url = base_url + "testruns/{}".format(test_run_id) 41 | test_run = json.loads(requests.get(run_url, auth=auth).text)['data'] 42 | 43 | steps = test_run['fields']['testRunSteps'] 44 | for step_index in range(0, len(steps)): 45 | test_step = steps[step_index] 46 | if test_step['status'] == 'NOT_RUN': 47 | print "Step {} status == 'NOT_RUN'.".format(step_index) 48 | try: 49 | test_script_name = test_step['action'] 50 | 51 | print "Starting subprocess." 52 | print os.system('pwd') 53 | print os.system('ls') 54 | # This subprocess call can be used to initiate any kind of action. 55 | # Make sure that this kind of action conforms to your organization's security policies. 56 | subprocess.Popen(['python', test_script_name.strip(), str(test_run_id), str(step_index + 1)]) 57 | 58 | step_successfully_run = True 59 | except: 60 | print "Error: Problem running step {} with action: {}.".format(step_index, test_step['action']) 61 | else: 62 | print "Not running step with status {}.".format(test_step['status']) 63 | if not step_successfully_run: 64 | print "Unlocking test run {}.".format(test_run_id) 65 | requests.put(lock_url, json={'locked': False}, auth=auth) 66 | else: 67 | print "Run {} is locked. Skipping.".format(test_run_id) 68 | 69 | 70 | def get_automation_runs(): 71 | last_run_time = get_last_run_time() 72 | if last_run_time is not None: 73 | last_run = datetime.datetime.fromtimestamp(last_run_time) 74 | last_activity_date = last_run.strftime('%Y-%m-%dT%H:%M:%S.000Z') 75 | else: 76 | last_activity_date = None 77 | 78 | runs = get_all_tests(last_activity_date) 79 | print "Runs evaluated: {}.".format(len(runs)) 80 | 81 | to_remove = [] 82 | for run in runs: 83 | if not valid_run(run): 84 | to_remove.append(run) 85 | for run in to_remove: 86 | runs.remove(run) 87 | 88 | return runs 89 | 90 | 91 | def get_all_tests(last_activity_date): 92 | remaining_results = -1 93 | start_index = 0 94 | 95 | all_runs = [] 96 | 97 | contains = urllib.quote_plus('"assignedTo":{}'.format(automation_user_id)) 98 | item_type_segment = "abstractitems?itemType={}".format(runs_type_id) 99 | if last_activity_date is not None: 100 | last_activity_date_segment = "&lastActivityDate={}".format(last_activity_date) 101 | else: 102 | last_activity_date_segment = "" 103 | contains_segment = "&contains={}".format(contains) 104 | sort_by_segment = "&sortBy=modifiedDate.asc" 105 | url = ''.join([ 106 | base_url, 107 | item_type_segment, 108 | last_activity_date_segment, 109 | contains_segment, 110 | sort_by_segment 111 | ]) 112 | 113 | print "Requesting resource: {}.".format(url) 114 | 115 | while remaining_results != 0: 116 | start_at = "&startAt={}".format(start_index) 117 | 118 | current_url = url + start_at 119 | response = requests.get(current_url, auth=auth) 120 | json_response = json.loads(response.text) 121 | 122 | page_info = json_response['meta']['pageInfo'] 123 | total_results = page_info['totalResults'] 124 | result_count = page_info['resultCount'] 125 | remaining_results = total_results - (start_index + result_count) 126 | start_index += 20 127 | 128 | all_runs.extend(json_response['data']) 129 | 130 | return all_runs 131 | 132 | 133 | def valid_run(run): 134 | print "Validating run {}.".format(run['id']) 135 | if run['type'] != 'testruns': 136 | print "Item {} is not a test run. Skipping.".format(run['id']) 137 | return False 138 | if run['fields']['testRunStatus'] != 'NOT_RUN': 139 | print "Run {} status must be NOT_RUN to evaluate. Skipping.".format(run['id']) 140 | return False 141 | print "Run {} is valid.".format(run['id']) 142 | return True 143 | 144 | 145 | def get_last_run_time(): 146 | try: 147 | with open('last_run_time.dat', 'r+') as f: 148 | old_time = f.readline() 149 | except IOError: 150 | old_time = None 151 | 152 | with open('last_run_time.dat', 'w') as f: 153 | f.write(str(time.time() - 5)) 154 | 155 | return float(old_time) if old_time is not None else None 156 | 157 | 158 | if __name__ == "__main__": 159 | while True: 160 | print "==========Polling Jama==========" 161 | tests_to_evaluate = get_automation_runs() 162 | for test in tests_to_evaluate: 163 | run_test(test) 164 | time.sleep(polling_interval) 165 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Jama Software 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Jama and Automated Testing 2 | 3 | Jama Software is the definitive system of record and action for product development. The company’s modern requirements and test management solution helps enterprises accelerate development time, mitigate risk, slash complexity and verify regulatory compliance. More than 600 product-centric organizations, including NASA, Boeing and Caterpillar use Jama to modernize their process for bringing complex products to market. The venture-backed company is headquartered in Portland, Oregon. For more information, visit [jamasoftware.com](http://jamasoftware.com). 4 | 5 | Please visit [dev.jamasoftware.com](http://dev.jamasoftware.com) for additional resources and join the discussion in our community [community.jamasoftware.com](http://community.jamasoftware.com). 6 | 7 | ## Why Connect Jama to your Automated Testing Tool? 8 | Jama is used for managing requirements and the manual test cases that validate and verify those requirements. Jama provides traceability between the various layers of requirements down to manual test cases so teams can ensure test coverage and show the results of those manual tests. However, many teams also validate/verify their requirements through automated testing via a separate Automation Test Tool (ATT). If you want to demonstrate traceability from your requirements to both manual AND automated testing results in Jama, you can leverage Jama’s API to bring automated test results into Jama. 9 | 10 | For more information about which of the approaches below is better for your team see [this article](https://community.jamasoftware.com/blogs/iman-bilal/2017/01/25/connecting-jama-to-your-automated-testing-tool). 11 | 12 | ### Jama Initiated Tests 13 | Jama’s manual test center offers functionality for your team to manually create Test Cases and Test Plans for executing those tests. It also offers some out of the box reporting via Test Plan summary and Test Plan detail reports. Teams familiar with using Jama’s test center to execute manual test cases can use a similar approach to initiate automated tests directly from Jama. 14 | 15 | You’ll need some information from your Jama configuration to configure the scripts: 16 | 17 | 1. __The API ID of the automated test user.__ When test runs are assigned to this user the script will run whatever command(s) appear in the “action” column of the test run. 18 | 19 | 2. __The API ID of test runs in your Jama instance.__ 20 | 21 | 3. __Credentials for an account to access and update the test run results.__ This can be the automated test user or any user with appropriate permissions. 22 | 23 | ### Automated Testing Tool Initiated Tests 24 | For teams not using Jama’s Test Center for manual testing or who don’t want to interact with Jama in order to initiate the automated test scripts we recommend not leveraging the Jama test center at all. 25 | 26 | You’ll need some information from your Jama configuration to configure the script: 27 | 28 | 1. __The API ID of the set item type.__ This is a system-defined item type. 29 | 30 | 2. __The API ID of the test results item type.__ This is the new item type created for this approach. 31 | 32 | 3. __The API ID of the project that will hold the test results.__ 33 | 34 | 4. __The unique name of the field containing the Pass/Fail picklist in the test results item type.__ 35 | 36 | 5. __The API IDs of the ‘pass’ and ‘fail’ options in the above picklist.__ 37 | 38 | ### Interested in having someone guide you through this process? 39 | Jama's Professional Services team can assist you with practical guidance on both the desired cross-team processes and the supporting technical workflow. Contact your Customer Success manager for more information. 40 | --------------------------------------------------------------------------------