├── .coveragerc ├── .gitignore ├── .travis.yml ├── CHANGELOG ├── LICENSE ├── MANIFEST.in ├── README.md ├── TODO.txt ├── cinch ├── __init__.py ├── bin │ ├── __init__.py │ ├── entry_point.py │ └── wrappers.py ├── files │ └── jenkins-plugin-lists │ │ ├── README │ │ ├── default-test.txt │ │ ├── default.txt │ │ ├── optional-test.txt │ │ └── optional.txt ├── group_vars │ ├── all │ ├── cent6 │ ├── cent7 │ ├── fedora │ ├── jenkins_docker_slave │ ├── jenkins_master │ ├── jenkins_slave │ ├── rhel6 │ ├── rhel7 │ └── rhel8 ├── install-rhel7.yml ├── library │ ├── jenkins_cli_user.py │ ├── jenkins_script.py │ ├── jenkins_update_center.py │ ├── jenkins_user_api.py │ └── line_match.py ├── playbooks │ ├── install-rhel7.yml │ ├── jenkins_backup.yml │ ├── jenkins_restart_utility.yml │ └── library ├── roles │ ├── beaker-client │ │ ├── defaults │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── etc │ │ │ └── beaker │ │ │ └── client.conf │ ├── certificate_authority │ │ ├── defaults │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── check_ssh │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── dockerize │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── jenkins_common │ │ ├── defaults │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── vars │ │ │ └── main.yml │ ├── jenkins_docker_slave │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── jenkins_master │ │ ├── defaults │ │ │ └── main.yml │ │ ├── files │ │ │ ├── mass_disable.groovy │ │ │ └── set_env.groovy │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── configure.yml │ │ │ ├── ensure_up.yml │ │ │ ├── firewalld.yml │ │ │ ├── install.yml │ │ │ ├── main.yml │ │ │ ├── pin_plugin.yml │ │ │ ├── plugins.yml │ │ │ ├── post_configure.yml │ │ │ └── pre_install.yml │ │ ├── templates │ │ │ ├── 99-jenkins.conf │ │ │ ├── basic_security.groovy │ │ │ ├── 
enable-kerberos-sso.groovy │ │ │ ├── etc │ │ │ │ └── nginx │ │ │ │ │ └── conf.d │ │ │ │ │ ├── jenkins_http.conf │ │ │ │ │ └── jenkins_https.conf │ │ │ ├── init_backup.groovy │ │ │ ├── jenkins_pinned │ │ │ ├── jenkins_root_url.groovy │ │ │ ├── roles_and_ldap_auth.groovy │ │ │ ├── set_slaveport.groovy │ │ │ ├── set_usebrowser.groovy │ │ │ ├── setenvvars.groovy │ │ │ ├── sysconfig_jenkins │ │ │ └── user.groovy │ │ └── vars │ │ │ └── main.yml │ ├── jenkins_master_stop │ │ └── tasks │ │ │ └── main.yml │ ├── jenkins_slave │ │ ├── defaults │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── check_swarm_errors.yml │ │ │ ├── check_swarm_systemd.yml │ │ │ ├── check_swarm_upstart.yml │ │ │ └── main.yml │ │ └── templates │ │ │ ├── swarm.service │ │ │ ├── swarm.upstart.conf │ │ │ └── sysconfig_jenkins_swarm │ ├── jenkins_slave_container_wrapup │ │ ├── defaults │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── jmaster.sh │ │ │ └── jswarm.sh │ ├── jenkins_slave_teardown │ │ └── tasks │ │ │ └── main.yml │ ├── nginx │ │ ├── defaults │ │ │ └── main.yml │ │ ├── files │ │ │ └── etc │ │ │ │ └── logrotate.d │ │ │ │ └── nginx │ │ ├── handlers │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── kerberos-setup.yml │ │ │ ├── main.yml │ │ │ ├── nginx.yml │ │ │ ├── selinux.yml │ │ │ └── ssl-setup.yml │ │ └── templates │ │ │ ├── etc │ │ │ └── nginx │ │ │ │ ├── conf.d │ │ │ │ ├── errors.conf.snippet │ │ │ │ └── monitor.conf.snippet │ │ │ │ └── nginx.conf │ │ │ └── example_ssl.conf │ ├── ntp │ │ └── tasks │ │ │ └── main.yml │ ├── repositories │ │ ├── defaults │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ └── tasks │ │ │ ├── main.yml │ │ │ ├── repositories.yml │ │ │ └── repository_download.yml │ └── upload_files │ │ └── tasks │ │ └── main.yml ├── site.yml └── teardown.yml ├── docs ├── Makefile └── source │ ├── _static │ └── .gitkeep │ ├── conf.py │ ├── config.rst │ ├── development.rst │ ├── 
docker_image.rst │ ├── index.rst │ ├── maintainers.rst │ ├── user_files.rst │ └── users.rst ├── inventory ├── cent6_jswarm_docker │ ├── group_vars │ │ └── all │ └── hosts ├── cent7_jswarm_docker │ ├── group_vars │ │ └── all │ └── hosts ├── cent7_master_docker │ ├── group_vars │ │ └── all │ └── hosts ├── fedora_jswarm_docker │ ├── group_vars │ │ └── all │ └── hosts ├── fedora_master_docker │ ├── group_vars │ │ └── all │ └── hosts └── sample │ └── hosts ├── jjb ├── ci-jslave-project-sample-defaults.yaml ├── ci-jslave-project-sample.yaml ├── code-coverage.yaml └── install-rhel7.yaml ├── scripts ├── centos_jswarm.sh ├── centos_master.sh ├── deploy.sh ├── fedora_jswarm.sh ├── fedora_master.sh ├── jswarm.sh └── master.sh ├── setup.py ├── tests ├── __init__.py ├── ansible_lint_rules │ ├── NoFormattingInWhenRule.py │ └── __init__.py ├── cent6_slave.sh ├── cent7_master.sh ├── cent7_slave.sh ├── cinch │ ├── README.md │ ├── controls │ │ └── cinch.rb │ ├── inspec.yml │ └── libraries │ │ └── .gitkeep ├── coverage.sh ├── fedora_master.sh ├── fedora_slave.sh ├── inventory.ini ├── inventory │ ├── master │ └── slave ├── playbook.yml ├── profile.yml.erb ├── test_cli.py └── yamllint.yml ├── tox.ini └── vagrant ├── README.txt ├── ansible.cfg ├── docker_slave └── Vagrantfile ├── master └── Vagrantfile ├── master_rhel7 ├── README └── Vagrantfile ├── master_shell_user └── Vagrantfile ├── master_slave_fedora └── Vagrantfile ├── master_ssl └── Vagrantfile ├── shared.rb ├── shared.sh ├── slave ├── README ├── Vagrantfile ├── mode ├── owner └── simple ├── slave_cent6 └── Vagrantfile ├── slave_rhel6 ├── Vagrantfile ├── configure.sh ├── full_cycle.sh └── hosts ├── slave_rhel7 ├── README └── Vagrantfile ├── slave_rhel8 ├── README └── Vagrantfile └── slave_security_enabled └── Vagrantfile /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | branch = True 3 | 4 | [report] 5 | exclude_lines = 6 | pragma: no cover 7 | if __name__ == .__main__.: 
8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .tox 2 | .venv 3 | *.retry 4 | inventory/local/* 5 | .vagrant 6 | # generated docs 7 | docs/build/ 8 | 9 | *.swp 10 | *.swo 11 | *.pyc 12 | *.pyo 13 | *.egg-info 14 | .idea 15 | # The destination folder that python setup.py sdist drops results into 16 | dist 17 | build 18 | .coverage 19 | coverage.xml 20 | htmlcov/ 21 | .pytest_cache/ 22 | **/*.lock 23 | tests/profile.yml 24 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # General Travis config 2 | sudo: required 3 | services: 4 | - docker 5 | language: python 6 | python: # Only one version for now 7 | - "3.6" 8 | cache: pip 9 | 10 | # Installation of the system 11 | addons: 12 | apt: 13 | packages: 14 | - shellcheck 15 | before_install: 16 | - travis_retry pip install -U pip 17 | - pip --version 18 | install: 19 | - travis_retry gem install inspec inspec-bin 20 | - travis_retry pip install tox 21 | - tox --version 22 | script: 23 | - travis_retry tox -v 24 | 25 | # Test matrix will be constructed from here. 
Listing out each set of tests 26 | # to execute will permit them to be executed in parallel 27 | env: 28 | - TOXENV=lint 29 | - TOXENV=docs 30 | - TOXENV=py27 31 | - TOXENV=cent6_slave 32 | - TOXENV=cent7_slave 33 | - TOXENV=fedora_slave 34 | - TOXENV=cent7_master 35 | 36 | deploy: 37 | - provider: pypi 38 | repo: RedHatQE/cinch 39 | user: "${PYPI_USER}" 40 | password: "${PYPI_PASSWORD}" 41 | distributions: "sdist bdist_wheel" 42 | on: 43 | tags: true 44 | condition: $TOXENV = lint 45 | - provider: script 46 | repo: RedHatQE/cinch 47 | script: ./scripts/deploy.sh centos6 false 48 | on: 49 | tags: true 50 | condition: $TOXENV = cent6_slave 51 | - provider: script 52 | repo: RedHatQE/cinch 53 | script: ./scripts/deploy.sh centos7 true 54 | on: 55 | tags: true 56 | condition: $TOXENV = cent7_slave 57 | - provider: script 58 | repo: RedHatQE/cinch 59 | script: ./scripts/deploy.sh fedora25 false 60 | on: 61 | tags: true 62 | condition: $TOXENV = fedora_slave 63 | -------------------------------------------------------------------------------- /CHANGELOG: -------------------------------------------------------------------------------- 1 | v 1.4.0 (20 Feb 2019) 2 | - Support Linchpin 1.6.2 3 | - Support RHEL 8 slaves 4 | - Upgrade to Python 3 for testing everything we can 5 | - upgrade ansible-lint 6 | - Fix rpm key import timeout 7 | - Fix Jenkins session timeout configuration being ignored 8 | 9 | v 1.3.0 (20 Sep 2018) 10 | - Bump default Jenkins to 2.121 11 | - Update plugins for 2.121 compatibility 12 | 13 | v 1.2.0 (18 Sep 2018) 14 | - Bump default Jenkins to 2.89 15 | - Update plugins for 2.89 compatibility 16 | - Drop support for Ansible < 2.4 in order to fix deprecation warnings in newer 17 | versions of Ansible 18 | - Remove older yum mirrors that are no longer useful 19 | - Improve support for local connections 20 | - Modify check_ssh role to use newer wait_for_connection module in Ansible 21 | 22 | v 1.1.0 (20 Feb 2018) 23 | - Adds support for 
jenkins_java_extra_options (#204) 24 | - Adds support for jenkins_security_extra_roles (#204) 25 | - Allow update_center_certificate to be pulled from remote instead of 26 | only local files (PR#214) 27 | - Fixed jenkins_cli_shell_user variable to work properly, and generate an SSH 28 | key for it if one does not yet exist (#181) 29 | - Removed gather_facts from upload_files roles (#218) 30 | - Improved internal movement of some files and variables to avoid collisions 31 | - Improvements to CI testing runs (#216) 32 | - Move Python testing to tox (#221) 33 | - Provide option to allow anonymous access when Kerberos is enabled (#205) 34 | - Allow keytab and SSL certs to be pulled from remote sources (#224) 35 | 36 | v 1.0.0 (01 Feb 2018) 37 | - Update default Jenkins version to 2.60 38 | - Update plugins list to 2.60-friendly list 39 | - Add support for extra_repositories in repo list 40 | - Add support for the jenkins_extra_plugins variable, which appends to the 41 | existing provided plugin lists for further customization 42 | - Add check_mode support for kerberos sso Groovy script 43 | - Improved check_mode support for Groovy scripts to ensure modifications are 44 | not made 45 | - Improved documentation for CLI --help and Sphinx docs for Ansible variable 46 | customization 47 | - Fixed issue with spaces in SSH key file path 48 | - Added an example for using URLs to fetch Jenkins plugin lists 49 | - Added Sphinx documentation for maintainer release process 50 | - nginx configuration now allows Jenkins plugin uploads 51 | 52 | v 0.9.0 (16 Nov 2017) 53 | - IMPORTANT - The 'cinchpin' command has been REMOVED for linchpin 1.0.4 54 | compatibility. The new 'teardown' command replaces Jenkins slave 55 | disconnection functionality previously handled by the 'cinchpin' command. 
56 | - The RHEL7 installer now creates two virtualenvs, one for linchpin and one for 57 | cinch 58 | - Removed 'latest tip' and Beaker python package installation options from 59 | RHEL7 installer as they are no longer necessary 60 | - Fixed a bug where Jenkins slaves would not be removed from the master during 61 | a provisioning failure in our JJB example workflow 62 | (ci-jslave-project-sample.yaml) 63 | 64 | v 0.8.5 (10 Oct 2017) 65 | - Remove management of the executor setting on masters (GH #182) 66 | - Remove stale, unused repo key that began failing (GH #184) 67 | 68 | v 0.8.4 (25 Sep 2017) 69 | - Bump to linchpin 1.0.4 and Ansible >= 2.3.2 because of syntax errors (GH #176) 70 | - Capture errors from jenkins-cli.jar more robustly (GH #151) 71 | - Streamline installation of the Python pip module (GH #147) 72 | 73 | v 0.8.3 (13 Sep 2017) 74 | - Clean up TravisCI tests 75 | - Clean up Docker UID variable for OpenShift support 76 | - Stop Jenkins process before upgrading 77 | - Avoid modifying Beaker during upgrades 78 | - Make jenkins user passwords optional, to avoid unnecessarily changing them 79 | on existing systems 80 | 81 | v 0.8.2 (12 Sep 2017) 82 | - Added jenkins_cli_shell_user and jenkins_cli_shell_user_home, to allow better 83 | configuration of where to configure Jenkins CLI users' SSH keys 84 | - Improved internal construction of the _jenkins_url variable to avoid possible 85 | SSL hostname mismatch errors 86 | - Removed pre-packaged SSH key. 
Changed to generating when needed 87 | 88 | v 0.8.1 (11 Sep 2017) 89 | - Removed ansible RPM package from jenkins_master role 90 | 91 | v 0.8.0 (6 Sep 2017) 92 | - Added support for upgrading to Jenkins 2 from Jenkins 1 93 | - Added support for installing pinned plugins from multiple update centers 94 | - Significant improvements to Groovy script error reporting 95 | - Added ability to configure multiple admin users (jenkins_admin_sids), and to 96 | configure users with CLI access independently of them being admins 97 | (jenkins_cli_users) 98 | - Added significant updates to Jenkins configuration for 99 | Kerberos, LDAP/basic auth, backup configurations, and 100 | more 101 | - Added oneshot Jenkins backup playbook for cases where cinch will run on 102 | existing Jenkins masters created without cinch 103 | - Updated Jenkins repo URL 104 | - Added ability to exclude changes to Jenkins plugins+configuration 105 | - Added configurable NTP server support 106 | - Added ability to install packages on both master and slave with single var. 107 | This renames the `additional_packages` var to `jmaster_extra_rpms` to be 108 | consistent with the existing `jslave_extra_rpms`, and adds a new var called 109 | `extra_rpms` for use in the `jenkins_common` role. 
110 | 111 | v 0.7.0 (30 June 2017) 112 | - Baseline 113 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include cinch/files * 2 | recursive-include cinch/group_vars * 3 | recursive-include cinch/library * 4 | recursive-include cinch/roles * 5 | include cinch/teardown.yml 6 | include cinch/site.yml 7 | include README.md 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![alt text](https://travis-ci.org/RedHatQE/cinch.svg?branch=master "build status") 2 | [![codecov](https://codecov.io/gh/RedHatQE/cinch/branch/master/graph/badge.svg)](https://codecov.io/gh/RedHatQE/cinch) 3 | 4 | # cinch 5 | 6 | This folder contains an Ansible playbook for standing up and configuring 7 | Jenkins masters and slaves. There are roles specifically for the creation of 8 | those configurations, as well as several other roles which can be leveraged 9 | for configuring and standing up resources of other types helpful in the 10 | process of running continuous integration. 11 | 12 | For full documentation on the configuration options of each role, see the 13 | default vars YAML file in the particular role. Any of the values in that file 14 | are intended to be overridden by the user. 
15 | 16 | Getting Started 17 | --------------- 18 | 19 | Please see documentation at http://redhatqe-cinch.rtfd.io/ 20 | 21 | Settings 22 | -------- 23 | 24 | Some notable defaults for Jenkins masters currently enabled are 25 | - Java 8 26 | - Jenkins LTS 2.63.3 27 | - an extensive list of plugins found in files/jenkins-plugin-lists/default.txt 28 | - SSL disabled, but Jenkins served off of port 80 29 | 30 | Primary supported target operating systems are 31 | - RHEL 7 32 | - CentOS 7 33 | 34 | IRC Support 35 | --------------- 36 | 37 | `#redhatqe-cinch on chat.freenode.net` 38 | 39 | -------------------------------------------------------------------------------- /TODO.txt: -------------------------------------------------------------------------------- 1 | Additional roles and features needed before full feature parity exists with 1.0 Jenkins Master 2 | 3 | - tasks/create_jenkins_task.py 4 | * _config_jenkins_backups 5 | Backups should probably be their own role 6 | 7 | * _config_job_priorities 8 | Make this part of the Jenkins jobs roles 9 | 10 | * _set_env_vars 11 | Set arbitrary environment variables through Groovy script - could be integrated to master role? 
12 | 13 | * _create_jobs 14 | Creates the jobs based on external JJB 15 | Possibly needs more input on where job definitions will be loaded from 16 | 17 | * _configure_views 18 | Setup Jenkins UI views 19 | -------------------------------------------------------------------------------- /cinch/__init__.py: -------------------------------------------------------------------------------- 1 | # Seems unnecessary, but we'll put it here anyway, in case something useful 2 | # comes up in the future 3 | -------------------------------------------------------------------------------- /cinch/bin/__init__.py: -------------------------------------------------------------------------------- 1 | # Seems unnecessary, but we'll put it here anyway, in case something useful 2 | # comes up in the future 3 | -------------------------------------------------------------------------------- /cinch/bin/entry_point.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | from argparse import ArgumentParser, REMAINDER 3 | from os import getcwd, path 4 | from .wrappers import call_ansible 5 | 6 | import sys 7 | 8 | 9 | def cinch_generic(playbook, help_description): 10 | # Parse the command line arguments 11 | parser = ArgumentParser(description='A CLI wrapper for ansible-playbook ' 12 | 'to run cinch playbooks. 
' + help_description) 13 | # The inventory file that the user provides which will get passed along to 14 | # Ansible for its consumption 15 | parser.add_argument('inventory', help='Ansible inventory file (required)') 16 | # All remaining arguments are passed through, untouched, to Ansible 17 | parser.add_argument('args', nargs=REMAINDER, help='extra args to ' 18 | 'pass to the ansible-playbook command (optional)') 19 | args = parser.parse_args() 20 | if len(args.inventory) > 0: 21 | if args.inventory[0] == '/': 22 | inventory = args.inventory 23 | else: 24 | inventory = path.join(getcwd(), args.inventory) 25 | else: 26 | raise Exception('Inventory path needs to be non-empty') 27 | exit_code = call_ansible(inventory, playbook, args.args) 28 | sys.exit(exit_code) 29 | 30 | 31 | def cinch(): 32 | """ 33 | Entry point for the "cinch" CLI that merely wraps the ansible-playbook 34 | command and pre-fills its path to the site.yml file for Cinch. The cinch 35 | tool requires a single argument - the Ansible inventory file - and accepts 36 | an arbitrary number of extra arguments that are passed through to the 37 | ansible-playbook executable. 38 | 39 | :return: Exit code 0 if the execution is completed successfully, or 255 40 | if an unknown error occurs. If ansible-playbook exits with an error code, 41 | this executable will exit with the same code. 42 | """ 43 | help_description = '''This command runs the 'site.yml' playbook to 44 | configure a Jenkins master or slave.''' 45 | cinch_generic('site.yml', help_description) 46 | 47 | 48 | def teardown(): 49 | """ 50 | Entry point for the "teardown" CLI that wraps ansible-playbook commands and 51 | pre-fills its path to the teardown.yml file. 52 | 53 | :return: Exit code 0 if the execution is completed successfully, or 255 if 54 | an unknown error occurs. If ansible-playbook exits with an error code, this 55 | executable will exit with the same code. 
56 | """ 57 | help_description = '''This command runs the 'teardown.yml' playbook to 58 | disconnect a Jenkins slave.''' 59 | cinch_generic('teardown.yml', help_description) 60 | 61 | 62 | if __name__ == '__main__': 63 | print('You should not invoke this file directly.') 64 | sys.exit(1) 65 | -------------------------------------------------------------------------------- /cinch/bin/wrappers.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | from plumbum import local 4 | from plumbum.commands.processes import ProcessExecutionError 5 | from traceback import print_exc 6 | 7 | import os 8 | import sys 9 | 10 | 11 | BASE = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) 12 | # Link to our docs with configuration examples 13 | DOCS = 'https://redhatqe-cinch.readthedocs.io/en/latest/users.html' 14 | # Skeleton text to insert in YAML config files 15 | SKEL_TEXT = '''--- 16 | # Add your cinch {0} configuration here 17 | # Examples: {1} 18 | ''' 19 | 20 | 21 | def call_ansible(inventory, playbook, *args): 22 | """ 23 | Wraps a call out to the ansible-playbook executable, passing it the cinch 24 | site.yml file that kicks off this playbook. 
25 | 26 | :param inventory: The Ansible inventory file to pass through 27 | :param args: An array of other command-line arguments to ansible-playbook 28 | to pass 29 | :return: The exit code returned from ansible-playbook, or 255 if errors 30 | come from elsewhere 31 | """ 32 | # Construct the arguments to pass to Ansible by munging the arguments 33 | # provided to this method 34 | ansible_args = [ 35 | os.path.join(BASE, playbook), 36 | '-i', inventory, 37 | '-v', 38 | '--ssh-common-args=-o StrictHostKeyChecking=no ' + 39 | '-o UserKnownHostsFile=/dev/null' 40 | ] 41 | ansible_args.extend(args) 42 | ansible = local['ansible-playbook'] 43 | exit_code = command_handler(ansible, ansible_args) 44 | return exit_code 45 | 46 | 47 | def command_handler(command, args): 48 | """ 49 | Generic function to run external programs. 50 | :param command: Exectuable to run 51 | :param args: arguments to be given to the external executable 52 | :return: The exit code of the external command, or exit code 255 if we are 53 | unable to determine the exit code 54 | """ 55 | try: 56 | command.run(args, stdout=sys.stdout, stderr=sys.stderr) 57 | exit_code = 0 58 | except ProcessExecutionError as ex: 59 | print('Error encountered while executing command.', 60 | file=sys.stderr) 61 | exit_code = ex.retcode 62 | except Exception as ex: 63 | print('Unknown error occurred: {0}'.format(ex), file=sys.stderr) 64 | print_exc() 65 | exit_code = 255 66 | return exit_code 67 | -------------------------------------------------------------------------------- /cinch/files/jenkins-plugin-lists/README: -------------------------------------------------------------------------------- 1 | All files in this directory ending in .txt will be newline-separated lists of 2 | Jenkins plugins. These files will be read by our update center build job as 3 | well as the CI Provisioner for determining what list of plugins to build or 4 | install. Each file is named based on the update center ID. 
If your update 5 | center ID is 'redhat' then the file name should be 'redhat.txt'. 6 | -------------------------------------------------------------------------------- /cinch/files/jenkins-plugin-lists/default-test.txt: -------------------------------------------------------------------------------- 1 | default.txt -------------------------------------------------------------------------------- /cinch/files/jenkins-plugin-lists/default.txt: -------------------------------------------------------------------------------- 1 | ant==1.8 2 | antisamy-markup-formatter==1.5 3 | apache-httpcomponents-client-4-api==4.5.5-3.0 4 | bouncycastle-api==2.17 5 | command-launcher==1.2 6 | credentials==2.1.18 7 | cvs==2.14 8 | display-url-api==2.2.0 9 | external-monitor-job==1.7 10 | javadoc==1.4 11 | jms-messaging==1.1.1 12 | jsch==0.1.54.2 13 | junit==1.24 14 | kerberos-sso==1.4 15 | ldap==1.20 16 | mailer==1.21 17 | mapdb-api==1.0.9.0 18 | matrix-auth==2.3 19 | matrix-project==1.13 20 | maven-plugin==3.1.2 21 | pam-auth==1.4 22 | role-strategy==2.9.0 23 | scm-api==2.2.7 24 | script-security==1.44 25 | ssh-credentials==1.14 26 | structs==1.14 27 | subversion==2.11.1 28 | swarm==3.13 29 | thinBackup==1.9 30 | windows-slaves==1.3.1 31 | workflow-api==2.29 32 | workflow-scm-step==2.6 33 | workflow-step-api==2.16 34 | -------------------------------------------------------------------------------- /cinch/files/jenkins-plugin-lists/optional-test.txt: -------------------------------------------------------------------------------- 1 | optional.txt -------------------------------------------------------------------------------- /cinch/group_vars/all: -------------------------------------------------------------------------------- 1 | # Override this if you have a different mirror infrastructure you would rather pull from 2 | fedora_mirrors: https://mirrors.fedoraproject.org/metalink?arch=$basearch& 3 | centos_mirrors: http://mirror.centos.org/centos/ 4 | 5 | # Override this if you wish 
your default mirrors to be configured differently 6 | repository_defaults: 7 | failovermethod: priority 8 | enabled: true 9 | skip_if_unavailable: false 10 | gpgcheck: false 11 | 12 | # Default user and directory to place the Jenkins files in 13 | jenkins_user: jenkins 14 | jenkins_user_home: /var/lib/jenkins 15 | # Optionally set a password for the jenkins user. In most cases a null 16 | # password is preferred, which will be set if this variable is left undefined. 17 | #jenkins_user_password: changeme 18 | 19 | # The home directory for the Jenkins user, and the base directory for Jenkins 20 | # files and jobs 21 | jenkins_home: /var/lib/jenkins 22 | # Directives on jswarm file and destination 23 | jswarm_version: 3.9 24 | jswarm_local_directory: /opt/jswarm 25 | jswarm_filename: swarm-client-{{ jswarm_version }}.jar 26 | 27 | # Override default distro NTP server configuration 28 | # Define the var ntp_servers as below to optionally override default NTP 29 | # server lines, including options such as iburst. 30 | # Left undefined, the distro defaults will be used. 31 | #ntp_servers: 32 | # - "server 0.time.example.com iburst" 33 | # - "server 1.time.example.com iburst" 34 | # - "server 2.time.example.com iburst" 35 | # - "server 3.time.example.com iburst" 36 | 37 | # Override these variables with an array of files to be uploaded as-is to the 38 | # destination host. Files in pre_upload_files will be uploaded before any other 39 | # code is run. Files in post_upload_files will be uploaded after all other tasks 40 | # have been run. 
Each element in the array must contain an object with the 41 | # following fields: 42 | # src (required): the local path to the file to upload 43 | # dest (required): the remote path to upload the file into 44 | # owner (optional): the user who will own the file after upload 45 | # group (optional): the group to set on the file after upload 46 | # mode (optional): the mode to set on the uploaded file 47 | # For more information or clarification on these variables, refer to the Ansible 48 | # module documentation for the "copy" module. Variables in this array match 49 | # the arguments to that module of the same name. 50 | # http://docs.ansible.com/ansible/copy_module.html 51 | # NOTE: Variables are not defined here, as the playbook group_vars/all file overrides 52 | # variables in the inventory file's group vars section (although they do not override 53 | # variables in the inventory dir's group_vars folder). 54 | # Example 55 | # pre_upload_files: 56 | # - src: /home/deployuser/somehost/ssl.key 57 | # dest: /etc/apache2/ssl/ssl.key 58 | # mode: 0600 59 | # post_upload_files: 60 | # - src: /home/deployuser/somehost/id_rsa 61 | # dest: /var/lib/jenkins/.ssh/id_rsa 62 | # owner: jenkins 63 | # mode: 0600 64 | -------------------------------------------------------------------------------- /cinch/group_vars/cent6: -------------------------------------------------------------------------------- 1 | gcc_compat_package: compat-gcc-34 2 | python_pip_package: python-pip 3 | 4 | _repositories: 5 | - name: epel 6 | mirrorlist: "{{ fedora_mirrors }}repo=epel-6" 7 | 8 | _download_repositories: 9 | - http://pkg.jenkins-ci.org/redhat-stable/jenkins.repo 10 | - https://beaker-project.org/yum/beaker-client-CentOS.repo 11 | 12 | rpm_key_imports: 13 | - key: http://pkg.jenkins-ci.org/redhat-stable/jenkins.io.key 14 | validate_certs: true 15 | 16 | jenkins_slave_repositories: "{{ _repositories }}" 17 | jenkins_slave_download_repositories: "{{ _download_repositories }}" 18 | 19 | 
jenkins_master_repositories: "{{ _repositories }}" 20 | jenkins_master_download_repositories: "{{ _download_repositories }}" 21 | -------------------------------------------------------------------------------- /cinch/group_vars/cent7: -------------------------------------------------------------------------------- 1 | gcc_compat_package: compat-gcc-44 2 | 3 | _repositories: 4 | - name: epel 5 | mirrorlist: "{{ fedora_mirrors }}repo=epel-7" 6 | 7 | _download_repositories: 8 | - https://pkg.jenkins.io/redhat-stable/jenkins.repo 9 | 10 | rpm_key_imports: 11 | - key: http://pkg.jenkins-ci.org/redhat-stable/jenkins.io.key 12 | validate_certs: true 13 | 14 | jenkins_slave_repositories: "{{ _repositories }}" 15 | jenkins_slave_download_repositories: "{{ _download_repositories }}" 16 | 17 | jenkins_master_repositories: "{{ _repositories }}" 18 | jenkins_master_download_repositories: "{{ _download_repositories }}" 19 | -------------------------------------------------------------------------------- /cinch/group_vars/fedora: -------------------------------------------------------------------------------- 1 | repositories: 2 | - name: rawhide 3 | mirrorlist: "{{ fedora_mirrors }}repo=rawhide" 4 | 5 | _download_repositories: 6 | - http://pkg.jenkins-ci.org/redhat-stable/jenkins.repo 7 | 8 | rpm_key_imports: 9 | - key: http://pkg.jenkins-ci.org/redhat-stable/jenkins.io.key 10 | validate_certs: true 11 | 12 | jenkins_master_repositories: [] 13 | jenkins_slave_repositories: [] 14 | 15 | jenkins_master_download_repositories: "{{ _download_repositories }}" 16 | jenkins_slave_download_repositories: [] 17 | 18 | version_pin_file: /etc/dnf/dnf.conf 19 | python_pip_package: "{{ (ansible_distribution_major_version > 26) | ternary('python2-pip', 20 | 'python-pip') }}" 21 | -------------------------------------------------------------------------------- /cinch/group_vars/jenkins_docker_slave: -------------------------------------------------------------------------------- 1 | 
repositories: "{{ jenkins_slave_repositories }}" 2 | 3 | download_repositories: "{{ jenkins_slave_download_repositories }}" 4 | -------------------------------------------------------------------------------- /cinch/group_vars/jenkins_master: -------------------------------------------------------------------------------- 1 | repositories: "{{ jenkins_master_repositories }}" 2 | download_repositories: "{{ jenkins_master_download_repositories }}" 3 | container_bin_files: 4 | - jmaster.sh 5 | -------------------------------------------------------------------------------- /cinch/group_vars/jenkins_slave: -------------------------------------------------------------------------------- 1 | repositories: "{{ jenkins_slave_repositories }}" 2 | 3 | download_repositories: "{{ jenkins_slave_download_repositories }}" 4 | container_bin_files: 5 | - jswarm.sh 6 | -------------------------------------------------------------------------------- /cinch/group_vars/rhel6: -------------------------------------------------------------------------------- 1 | gcc_compat_package: compat-gcc-34 2 | python_pip_package: python-pip 3 | 4 | all_repositories: 5 | latest: 6 | name: rhel6-latest 7 | baseurl: "{{ rhel_base }}/$basearch/os/" 8 | optional: 9 | name: rhel6-optional 10 | baseurl: "{{ rhel_base }}/$basearch/optional/os/" 11 | extras: 12 | name: rhel6-extras 13 | baseurl: "{{ rhel_base }}/$basearch/extras/os/" 14 | epel: 15 | name: epel 16 | mirrorlist: "{{ fedora_mirrors }}repo=epel-6" 17 | rhscl_python27: 18 | name: rhscl-python27 19 | baseurl: https://www.softwarecollections.org/repos/rhscl/python27/epel-6-$basearch/ 20 | 21 | # These types of repositories will download the requested URL into the /etc/yum.repos.d 22 | # folder in order to enable them 23 | all_download_repositories: 24 | beaker: https://beaker-project.org/yum/beaker-client-RedHatEnterpriseLinux.repo 25 | jenkins: http://pkg.jenkins-ci.org/redhat-stable/jenkins.repo 26 | 27 | jenkins_slave_repositories: 28 | - "{{ 
all_repositories.latest }}" 29 | - "{{ all_repositories.optional }}" 30 | - "{{ all_repositories.extras }}" 31 | - "{{ all_repositories.epel }}" 32 | - "{{ all_repositories.rhscl_python27 }}" 33 | 34 | jenkins_slave_download_repositories: 35 | - "{{ all_download_repositories.beaker }}" 36 | -------------------------------------------------------------------------------- /cinch/group_vars/rhel7: -------------------------------------------------------------------------------- 1 | gcc_compat_package: compat-gcc-44 2 | 3 | all_repositories: 4 | latest: 5 | name: rhel7-latest 6 | baseurl: "{{ rhel_base }}/$basearch/os/" 7 | optional: 8 | name: rhel7-optional 9 | baseurl: "{{ rhel_base }}/$basearch/optional/os/" 10 | debug: 11 | name: rhel7-debug 12 | baseurl: "{{ rhel_base }}/$basearch/debug/" 13 | epel: 14 | name: epel 15 | mirrorlist: "{{ fedora_mirrors }}repo=epel-7" 16 | certificate: 17 | name: certificate-system 18 | baseurl: "{{ rhel_base }}/$basearch/certificate-system/9/os/" 19 | 20 | # These types of repositories will download the requested URL into the /etc/yum.repos.d 21 | # folder in order to enable them 22 | all_download_repositories: 23 | jenkins: http://pkg.jenkins-ci.org/redhat-stable/jenkins.repo 24 | 25 | rpm_key_imports: 26 | - key: http://pkg.jenkins-ci.org/redhat-stable/jenkins.io.key 27 | validate_certs: true 28 | 29 | jenkins_slave_repositories: 30 | - "{{ all_repositories.latest }}" 31 | - "{{ all_repositories.optional }}" 32 | - "{{ all_repositories.epel }}" 33 | 34 | jenkins_slave_download_repositories: [] 35 | 36 | jenkins_master_repositories: 37 | - "{{ all_repositories.latest }}" 38 | - "{{ all_repositories.optional }}" 39 | - "{{ all_repositories.debug }}" 40 | - "{{ all_repositories.epel }}" 41 | - "{{ all_repositories.certificate }}" 42 | 43 | jenkins_master_download_repositories: 44 | - "{{ all_download_repositories.jenkins }}" 45 | -------------------------------------------------------------------------------- /cinch/group_vars/rhel8: 
-------------------------------------------------------------------------------- 1 | ansible_python_interpreter: /usr/libexec/platform-python 2 | python_pip_package: python3-pip 3 | 4 | all_repositories: 5 | baseos: 6 | name: rhel8-baseos 7 | baseurl: "{{ rhel_base }}/BaseOS/$basearch/os/" 8 | appstream: 9 | name: rhel8-appstream 10 | baseurl: "{{ rhel_base }}/AppStream/$basearch/os/" 11 | baseos-debug: 12 | name: rhel8-baseos-debug 13 | baseurl: "{{ rhel_base }}/BaseOS/$basearch/debug/tree/" 14 | appstream-debug: 15 | name: rhel8-appstream-debug 16 | baseurl: "{{ rhel_base }}/AppStream/$basearch/debug/tree/" 17 | 18 | # These types of repositories will download the requested URL into the /etc/yum.repos.d 19 | # folder in order to enable them 20 | all_download_repositories: 21 | jenkins: http://pkg.jenkins-ci.org/redhat-stable/jenkins.repo 22 | 23 | rpm_key_imports: 24 | - key: http://pkg.jenkins-ci.org/redhat-stable/jenkins.io.key 25 | validate_certs: true 26 | 27 | jenkins_slave_repositories: 28 | - "{{ all_repositories.baseos }}" 29 | - "{{ all_repositories.appstream }}" 30 | 31 | jenkins_slave_download_repositories: [] 32 | 33 | jenkins_master_repositories: 34 | - "{{ all_repositories.baseos }}" 35 | - "{{ all_repositories.appstream }}" 36 | - "{{ all_repositories.baseos-debug }}" 37 | - "{{ all_repositories.appstream-debug }}" 38 | # which packages do we need here? 
#!/usr/bin/env python
#
# Author: Greg Hellings - or
#
# Module to configure users in Jenkins authorized to use CLI
import xml.etree.ElementTree as ET
import os
from ansible.module_utils.basic import AnsibleModule


DOCUMENTATION = '''
---
version_added: "2.1"
module: jenkins_cli_user
short_description: configure Jenkins CLI users with pub key
description:
  - This module configures admin users in Jenkins to utilize the specified
    SSH pubkey. Requires that role-based authentication be enabled and that
    a user be configured as an admin

options:
  jenkins_home:
    description:
      The root directory for the Jenkins install
    required: true
  jenkins_user:
    description:
      The name of the user to configure the SSH key for
  key_file:
    description:
      Path to the SSH keyfile to be listed as authorized
    required: true
  state:
    description:
      Currently limited to "present" - will create the user
    required: false

author: Gregory Hellings
'''


def main():
    """Ensure an SSH public key is listed in a Jenkins user's config.xml.

    Reads the local public key file and appends it to the user's
    <authorizedKeys> element (creating the element when absent), then
    reports changed status via exit_json.
    """
    module = AnsibleModule(
        argument_spec={
            'jenkins_home': {'required': True},
            'jenkins_user': {'required': True},
            'key_file': {'required': True},
            'state': {'choices': ['present'], 'default': 'present'}
        },
        supports_check_mode=False
    )
    # Permits accessing params as attributes instead of dict keys
    params = type('Params', (object,), module.params)
    user_config_path = os.path.join(params.jenkins_home, "users")
    changed = False
    # This is the local SSH key file to read and enter into the Jenkins
    # config for the designated user
    with open(params.key_file) as key:
        pub_key = key.read()
    user_cfg_file = os.path.join(user_config_path, params.jenkins_user,
                                 "config.xml")
    # Parse the XML structure of the per-user configuration file in order
    # to add the SSH keys
    usertree = ET.parse(user_cfg_file)
    userroot = usertree.getroot()
    keyroot = userroot.find("properties")
    if keyroot is None:
        # Without a <properties> element there is nowhere to attach keys.
        # Previously this case raised an unhandled AttributeError; fail
        # with the intended diagnostic instead.
        module.fail_json(msg="Roles not found - have you configured an admin "
                             "using the Role-based Authorization Strategy?")
    # findall() replaces getiterator(), which was deprecated and then
    # removed in Python 3.9; it also returns a real list, so the
    # truthiness check below is reliable.
    keys = keyroot.findall(".//authorizedKeys")
    if keys:
        for key in keys:
            # key.text holds the concatenated public SSH keys usable by
            # this user, one per line, much like an SSH authorized_keys
            # file. If this key is not already a substring of that set,
            # append it and mark the module as changed.
            if pub_key not in str(key.text):
                changed = True
                if key.text is None:
                    key.text = pub_key
                else:
                    key.text = str(key.text) + pub_key
    else:
        # No authorized keys have been added to the user hitherto, and
        # even the XML structure that holds them is not present, so we
        # must create it. The module is definitely making a change.
        changed = True
        ssh_auth = ET.SubElement(keyroot,
                                 "org.jenkinsci.main.modules"
                                 ".cli.auth.ssh."
                                 "UserPropertyImpl")
        auth_key = ET.SubElement(ssh_auth, "authorizedKeys")
        auth_key.text = pub_key
    if changed:
        usertree.write(user_cfg_file, encoding="UTF-8")
    module.exit_json(changed=changed)


main()
# replace default update center with our own
def set_default(tree, url):
    """Point the 'default' update-center <site> at *url*.

    Returns True when the tree was modified, False when the URL already
    matched. Raises Exception when no site with id 'default' exists.
    """
    default_update_center = tree.find(".//site[id='default']")
    if default_update_center is None:
        raise Exception('Default update center not found')
    new_url = default_update_center.find('url')
    if new_url is None:
        # Malformed entry without a <url> child: create it (mirrors the
        # behavior of append() below) instead of raising AttributeError.
        ET.SubElement(default_update_center, 'url').text = url
        return True
    if new_url.text != url:
        new_url.text = url
        return True
    return False


# append additional update centers
def append(tree, site_id, url):
    """Ensure a <site> with *site_id* exists and points at *url*.

    Creates the site under every <sites> container when missing,
    otherwise updates its URL. Returns True when the tree was modified.
    """
    xpath = './/site[id="{0}"]'.format(site_id)
    check_existing = tree.find(xpath)
    if check_existing is None:
        # Append if not found
        for sites in tree.iter('sites'):
            site = ET.Element('site')
            new_site_id = ET.SubElement(site, 'id')
            new_site_id.text = site_id
            new_url = ET.SubElement(site, 'url')
            new_url.text = url
            sites.append(site)
        return True
    else:
        # Update URL if necessary
        url_node = check_existing.find('url')
        if url_node is not None and url_node.text != url:
            url_node.text = url
            return True
        if url_node is None:
            ET.SubElement(check_existing, 'url').text = url
            return True
    return False
from ansible.module_utils.basic import AnsibleModule
from tempfile import NamedTemporaryFile
import subprocess
import os


# Due to the nature of Ansible modules, this string cannot reasonably be
# handled outside of this file. Ansible modules must be self-contained single
# file scripts that can be uploaded as a unit. In this case, we need to drop
# this Groovy script into a temporary file to execute with the Jenkins CLI
# command
MYFILE = """
import hudson.model.User;
User u = User.get("{0}");
def p = u.getProperty(jenkins.security.ApiTokenProperty.class);
println p.getApiToken();
"""


def main():
    """
    Write out a groovy file that can be executed by the jenkins-cli.jar command
    which will print out the value of a specified user's API key. Currently the
    name of the user is not sanitized in any manner, so a value that includes
    Groovy escape characters or the double quote character (e.g. if the name
    includes the backslash character (\\) or the quotation mark character ("),
    or something else strange like a tab, newline, null, etc) then it will
    cause a problem with the Groovy code. Such usernames are generally not
    permissible in systems like Jenkins. If they are used, then the provider
    should escape those characters when they are passed into this module."""
    module = AnsibleModule(
        argument_spec={
            'user': {'required': True, 'type': 'str'},
            'cli_jar': {'default':
                        '/var/cache/jenkins/war/WEB-INF/jenkins-cli.jar'},
            'jenkins_url': {'default': 'http://localhost:8080'},
            'remoting': {'required': True, 'type': 'bool'},
            'java_command': {'type': 'str', 'default': '/usr/bin/java'}
        }
    )
    # Permits accessing args as object instead of dict
    args = type('Args', (object,), module.params)
    # Text mode ('w') so the str Groovy source can be written directly;
    # the default binary mode raises TypeError under Python 3. False on
    # the automatic delete option, otherwise the call to .close() would
    # remove the file, whereas we want it to persist until after the
    # command is executed.
    groovy = NamedTemporaryFile(mode='w', delete=False)
    groovy.write(MYFILE.format(args.user))
    groovy.close()
    process_named_args = [args.java_command,
                         '-jar',
                         args.cli_jar,
                         '-s',
                         args.jenkins_url]
    # Append -remoting argument to named argument list depending on the version
    # of Jenkins
    if args.remoting:
        process_named_args.append('-remoting')
    process_positional_args = ['groovy', groovy.name]
    process = process_named_args + process_positional_args
    # The groovy code simply prints out the value of the API key, so we want
    # to be able to capture that output
    err, output = None, None
    # universal_newlines=True decodes stdout/stderr to str, so the
    # .strip() on the api_key and the "" comparisons below behave the
    # same way on Python 3 as they did on Python 2.
    p = subprocess.Popen(process,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         universal_newlines=True)
    try:
        output, err = p.communicate()
        os.unlink(groovy.name)
        # It's possible the Popen process has an error code for a whole host of
        # reasons
        if p.returncode == 0:
            module.exit_json(api_key=output.strip(),
                             err=err,
                             changed=False,
                             success=True)
        else:
            msg = "Error occurred while executing jenkins-cli.jar"
    except subprocess.CalledProcessError:
        msg = "Error received while attempting to execute Java"
    # If err and output are some type of empty, but not the empty string,
    # then we reached this point without any output. If they are the empty
    # string, then we reached this point but the subprocess output nothing
    # on the specified pipe. Providing this data, or a status message such
    # as these defaults, provides a better way for users to diagnose the
    # problems encountered
    if not err and err != "":
        err = "No stderr detected"
    if not output and output != "":
        output = "No stdout detected"
    # There are lots of reasons to fall through to here. But if we have, then
    # something has most definitely gone wrong. We should report on that
    module.fail_json(msg=msg,
                     stderr=err,
                     stdout=output,
                     api_key='')


main()
#!/usr/bin/env python

import re
from ansible.module_utils.basic import AnsibleModule
from os import path


def main():
    """Report whether a matching line exists in a file.

    Exits with three boolean facts: changed (always False), exists
    (the file is present and regular), and present (a line matched,
    either as an exact string or a regex search depending on 'method').
    """
    module = AnsibleModule(
        argument_spec={
            'file': {'required': True},
            'line': {'required': False},
            'method': {'choices': ['regex', 'simple'], 'default': 'simple'}
        }
    )
    target = module.params['file']
    wanted = module.params['line']
    # A missing 'line' would otherwise raise an unhandled TypeError
    # inside re.compile() (regex method) or silently never match
    # (simple method); fail with a clear message instead.
    if wanted is None:
        module.fail_json(msg="the 'line' parameter is required")
    # First sanity check that the file exists and is not a directory or such
    if not path.exists(target) or not path.isfile(target):
        module.exit_json(changed=False, exists=False, present=False)
    # Create the method that will do the matching
    if module.params['method'] == 'regex':
        expression = re.compile(wanted)

        def matcher(x):
            return expression.search(x) is not None
    else:
        def matcher(x):
            return x == wanted
    # Stream the file line by line (keeps trailing newlines, matching
    # the previous readlines() behavior) and stop at the first match
    with open(target, 'r') as reader:
        for line in reader:
            if matcher(line):
                module.exit_json(changed=False, exists=True, present=True)
    module.exit_json(changed=False, exists=True, present=False)


main()
43 | register: venv_stat_result 44 | 45 | - name: >- 46 | fail if pre-existing virtualenvs are found and cannot be deleted because 47 | delete_venv is set to 'false' 48 | fail: 49 | msg: "directory {{ item.item.value }} exists, but 'delete_venv' setting is False" 50 | with_items: "{{ venv_stat_result.results }}" 51 | when: item.stat.exists and not (delete_venv|bool) 52 | 53 | - name: >- 54 | delete existing virtualenv directory (disabled by default, override 55 | with DELETE_VENV Jenkins job parameter or delete_venv playbook variable) 56 | file: 57 | path: "{{ item.value }}" 58 | state: absent 59 | with_dict: "{{ venvs }}" 60 | when: (delete_venv|bool) 61 | 62 | - name: >- 63 | create virtualenvs with --system-site-packages to allow for selinux 64 | module compatibility, then upgrade setuptools and pip 65 | pip: 66 | name: setuptools pip 67 | virtualenv: "{{ item.value }}" 68 | extra_args: -U 69 | virtualenv_site_packages: true 70 | with_dict: "{{ venvs }}" 71 | retries: 2 72 | register: create_virtualenvs 73 | until: create_virtualenvs is success 74 | 75 | - name: install version 1.6.2 of linchpin using pip 76 | pip: 77 | name: linchpin 78 | virtualenv: "{{ venvs.linchpin }}" 79 | extra_args: -U 80 | version: 1.6.2 81 | retries: 2 82 | register: install_linchpin 83 | until: install_linchpin is success 84 | 85 | - name: install released version of cinch using pip 86 | pip: 87 | name: cinch 88 | virtualenv: "{{ venvs.cinch }}" 89 | extra_args: -U 90 | retries: 2 91 | register: install_cinch 92 | until: install_cinch is success 93 | -------------------------------------------------------------------------------- /cinch/playbooks/jenkins_backup.yml: -------------------------------------------------------------------------------- 1 | # This playbook is specifically provided for scenarios where cinch will be used 2 | # to manage configurations of Jenkins masters that were *not* created by cinch, 3 | # but by some other method, either manually or via another 
configuration 4 | # management tool. In this case, it's highly recommended to configure backups 5 | # before running cinch against a Jenkins master for the first time. 6 | 7 | # For this playbook to succeed, the thinBackup plugin must be installed, a 8 | # valid Jenkins user with admin rights must be provided, along with a target 9 | # directory for the backup data to live. 10 | 11 | # It's important to avoid submitting a Jenkins admin username that does not 12 | # exist on the target Jenkins master. If this happens, then the 13 | # jenkins_user_api module will happily create a useless Jenkins user with the 14 | # name given, which is best avoided. 15 | 16 | # Example: 17 | # ansible-playbook -u root --private-key /path/to/key 18 | # -i 'jenkins.example.com', jenkins_backup.yml 19 | # -e "jenkins_admin=example_username" 20 | # -e "backup_dir=/var/lib/jenkins/thinBackup_data" 21 | 22 | - hosts: all 23 | vars: 24 | jenkins_url: "http://localhost:8080" 25 | jenkins_user: jenkins 26 | jenkins_backup: 27 | directory: "{{ backup_dir }}" 28 | full_schedule: H 0 * * 0 29 | diff_schedule: H 0 * * * 30 | max_sets: 2 31 | exclude: '' 32 | wait_for_idle: true 33 | quiet_mode_timeout: 480 34 | build_results: false 35 | user_contents: true 36 | cleanup_diffs: true 37 | next_build_number: true 38 | move_to_zip: false 39 | plugin_archives: true 40 | tasks: 41 | - name: check jenkins_admin and backup_dir required variables 42 | set_fact: 43 | jenkins_admin: "{{ jenkins_admin | mandatory }}" 44 | backup_dir: "{{ backup_dir | mandatory }}" 45 | 46 | - name: get Jenkins admin user API details 47 | jenkins_user_api: 48 | user: "{{ jenkins_admin }}" 49 | register: jenkins_admin_api 50 | become: true 51 | become_user: "{{ jenkins_user }}" 52 | 53 | - name: save Jenkins admin user API key 54 | set_fact: 55 | admin_api_key: "{{ jenkins_admin_api.api_key }}" 56 | 57 | - name: stat {{ jenkins_backup.directory }} 58 | stat: 59 | path: "{{ jenkins_backup.directory }}" 60 | register: 
jenkins_backup_dir 61 | 62 | - name: fail if {{ jenkins_backup.directory }} path does not exist 63 | fail: 64 | msg: "path {{ jenkins_backup.directory }} does not exist, cannot continue" 65 | when: jenkins_backup_dir.stat.isdir is not defined 66 | 67 | - name: set ownership/permissions for {{ jenkins_backup.directory }} 68 | file: 69 | path: "{{ jenkins_backup.directory }}" 70 | owner: "{{ jenkins_user }}" 71 | group: "{{ jenkins_user }}" 72 | mode: 0755 73 | become: true 74 | 75 | - name: configure Jenkins thinBackup plugin 76 | block: 77 | - name: configure Jenkins thinBackup plugin 78 | jenkins_script: 79 | url: "{{ jenkins_url }}" 80 | script: 81 | "{{ lookup('template', '../roles/jenkins_master/templates/init_backup.groovy') }}" 82 | user: "{{ jenkins_admin }}" 83 | password: "{{ admin_api_key }}" 84 | register: script_output 85 | changed_when: "script_output.output.find('CHANGED') != -1" 86 | # Since the jenkins_script module doesn't support check_mode, we disable it 87 | # here so that we can handle check mode inside of the Groovy scripts. 88 | check_mode: false 89 | become: true 90 | become_user: "{{ jenkins_user }}" 91 | rescue: 92 | - name: jenkins_script error message help 93 | fail: 94 | msg: >- 95 | 'thinBackup configuration failed. Is the thinBackup plugin installed? 96 | Is the jenkins_admin variable set to an existing Jenkins user with 97 | admin rights?' 98 | -------------------------------------------------------------------------------- /cinch/playbooks/jenkins_restart_utility.yml: -------------------------------------------------------------------------------- 1 | # This playbook aims to gather diagnostics data in the event of 2 | # trouble with Jenkins, and then restarts Jenkins to keep operations running. 
3 | # By default, only the diagnostics data is collected, but the Jenkins service 4 | # restart can be enabled by setting the 'restart_jenkins' variable to true: 5 | 6 | # Example: 7 | # ansible-playbook -u root --private-key /path/to/key 8 | # jenkins_restart_utility.yml 9 | # -e "restart_jenkins=true hostname=jenkins 10 | # domain=example.com" 11 | 12 | # Optionally include an arbitary role here 13 | # to run before the playbook is executed if needed" 14 | 15 | - hosts: localhost 16 | connection: local 17 | tasks: 18 | - name: Add host to inventory 19 | add_host: 20 | name: "{{ hostname }}.{{ domain }}" 21 | groups: "jenkins_master" 22 | 23 | - hosts: jenkins_master 24 | roles: 25 | - role: "{{ dependency_role | default() }}" 26 | 27 | vars: 28 | jenkins_pid_path: '/run/jenkins.pid' 29 | diag_path_prefix: '/var/lib/jenkins/cinch_diagnostics' 30 | jenkins_log_dir: '/var/log/jenkins' 31 | log_history_days: '-7d' 32 | jstack_path: '/usr/bin/jstack' 33 | jmap_path: '/usr/bin/jmap' 34 | jstat_path: '/usr/bin/jstat' 35 | java_path: '/usr/bin/java' 36 | jenkins_cli_path: '/var/cache/jenkins/war/WEB-INF/jenkins-cli.jar' 37 | jenkins_cli_user: 'root' 38 | restart_jenkins: false 39 | tasks: 40 | # This block ignores all errors to ensure that Jenkins is restarted on 41 | # demand even if we can't gather diagnostics data. 42 | - name: gather diagnostics data 43 | block: 44 | - name: check if {{ jenkins_pid_path }} exists 45 | stat: 46 | path: "{{ jenkins_pid_path }}" 47 | register: jenkins_last_pid 48 | 49 | - name: set diagnostics path for this run (date format ex. 
20170612T115323) 50 | set_fact: 51 | diag_path: 52 | "{{ (diag_path_prefix 53 | + '/' + ansible_date_time.iso8601_basic_short) }}" 54 | 55 | - name: create diagnostics directory 56 | file: 57 | path: "{{ diag_path }}" 58 | state: directory 59 | owner: jenkins 60 | group: jenkins 61 | mode: 0755 62 | register: diag_path_creation 63 | 64 | - name: find last {{ log_history_days }} days of Jenkins logs 65 | find: 66 | paths: "{{ jenkins_log_dir }}" 67 | patterns: "jenkins.log*" 68 | age: "{{ log_history_days }}" 69 | register: jenkins_logs 70 | 71 | - name: copy Jenkins logs to diagnostics directory 72 | copy: 73 | src: "{{ item.path }}" 74 | dest: "{{ diag_path }}" 75 | remote_src: true 76 | with_items: "{{ jenkins_logs.files }}" 77 | when: diag_path_creation|success 78 | 79 | - name: grab extended diagnostics data from last Jenkins PID 80 | block: 81 | - name: find last recorded Jenkins PID 82 | slurp: 83 | src: "{{ jenkins_pid_path }}" 84 | register: slurped_jenkins_pid 85 | 86 | # By default, strings from slurp are base64 encoded with newlines, 87 | # and that must be removed 88 | - name: set Jenkins PID for later use 89 | set_fact: 90 | jenkins_pid: 91 | "{{ slurped_jenkins_pid['content'] | b64decode | regex_replace('\n', '') }}" 92 | 93 | - name: generate thread dump (jstack) 94 | command: > 95 | timeout 60 {{ jstack_path }} -l {{ jenkins_pid }} 96 | register: thread_dump 97 | 98 | - name: copy thread dump (jstack) to diagnostics directory 99 | copy: 100 | content: "{{ thread_dump.stdout }}" 101 | dest: "{{ diag_path }}/jstack.txt" 102 | when: diag_path_creation|success 103 | 104 | # TODO: add the following package so that this task can work properly: 105 | # java-1.8.0-openjdk-debuginfo 106 | - name: generate JVM memory report (jmap) 107 | command: > 108 | timeout 60 {{ jmap_path }} -heap {{ jenkins_pid }} 109 | register: jvm_memory_jmap 110 | 111 | - name: copy JVM memory report (jmap) to diagnostics directory 112 | copy: 113 | content: "{{ 
jvm_memory_jmap.stdout }}" 114 | dest: "{{ diag_path }}/jvm-jmap.txt" 115 | when: diag_path_creation|success 116 | 117 | - name: generate JVM memory report (jstat) 118 | command: > 119 | timeout 60 {{ jstat_path }} -gcutil {{ jenkins_pid }} 120 | register: jvm_memory_jstat 121 | 122 | - name: copy JVM memory report (jstat) to diagnostics directory 123 | copy: 124 | content: "{{ jvm_memory_jstat.stdout }}" 125 | dest: "{{ diag_path }}/jvm-jstat.txt" 126 | when: diag_path_creation|success 127 | 128 | # By default, the 'root' Unix user account is assumed to have the 129 | # necessary SSH key to authenticate to Jenkins as an admin user who has 130 | # the authorization to generate the support bundle. 131 | - name: generate support bundle 132 | command: > 133 | timeout 120 {{ java_path }} -jar {{ jenkins_cli_path }} 134 | -s http://localhost:8080 support 135 | register: support_bundle 136 | become: true 137 | become_user: "{{ jenkins_cli_user }}" 138 | 139 | # Strangely, the Jenkins CLI reports the necessary output for a 140 | # successful run of the support bundle on stderr 141 | - name: find support bundle file 142 | set_fact: 143 | support_bundle_file: 144 | "{{ support_bundle.stderr | regex_replace('^Creating: ', '') }}" 145 | when: support_bundle.rc == 0 146 | 147 | - name: copy support bundle file to diagnostics directory 148 | copy: 149 | src: "{{ support_bundle_file }}" 150 | dest: "{{ diag_path }}" 151 | remote_src: true 152 | when: diag_path_creation|success and support_bundle_file is defined 153 | when: jenkins_last_pid.stat.isreg is defined 154 | ignore_errors: true 155 | become: true 156 | become_user: jenkins 157 | 158 | # always restart Jenkins if restart_jenkins is true 159 | - name: restart Jenkins 160 | service: 161 | name: jenkins 162 | state: restarted 163 | when: restart_jenkins and ansible_connection != 'docker' 164 | become: true 165 | -------------------------------------------------------------------------------- /cinch/playbooks/library: 
-------------------------------------------------------------------------------- 1 | ../library/ -------------------------------------------------------------------------------- /cinch/roles/beaker-client/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # Base URL of the Beaker server (without trailing slash!) 2 | beaker_hub_url: "https://beaker.example.com" 3 | # Kerberos principal. If commented, default principal obtained by kinit is used. 4 | beaker_krb_principal: "" 5 | # Kerberos service prefix. Example: host, HTTP 6 | beaker_krb_service: "HTTP" 7 | # Kerberos realm. If commented, last two parts of domain name are used. 8 | beaker_krb_realm: "EXAMPLE.COM" 9 | # Kerberos keytab file location on Jenkins Master. 10 | beaker_keytab_filepath: "/etc/jenkins.keytab" 11 | # Relative local path to CA cert file used by Beaker to upload. 12 | #beaker_cacert_file: "" 13 | # SSL CA certificate to verify the Beaker server's SSL certificate. 14 | beaker_ca_cert_path: "/etc/pki/tls/certs/ca-bundle.crt" 15 | -------------------------------------------------------------------------------- /cinch/roles/beaker-client/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: install beaker 2 | package: 3 | name: 4 | - beaker-client 5 | state: present 6 | retries: 2 7 | register: install_beaker 8 | until: install_beaker is success 9 | 10 | - name: install beaker client config file 11 | become: true 12 | template: 13 | src: etc/beaker/client.conf 14 | dest: /etc/beaker/client.conf 15 | owner: root 16 | group: root 17 | mode: 0644 18 | 19 | - name: copy CA cert if necessary 20 | become: true 21 | copy: 22 | src: "{{ item }}" 23 | dest: "{{ beaker_ca_cert_path }}" 24 | owner: root 25 | group: root 26 | mode: 0644 27 | with_first_found: 28 | - files: 29 | - "{{ beaker_cacert_file }}" 30 | skip: true 31 | 
-------------------------------------------------------------------------------- /cinch/roles/beaker-client/templates/etc/beaker/client.conf: -------------------------------------------------------------------------------- 1 | # Base URL of the Beaker server (without trailing slash!) 2 | HUB_URL = "{{ beaker_hub_url }}" 3 | 4 | # Hub authentication method 5 | AUTH_METHOD = "krbv" 6 | #AUTH_METHOD = "password" 7 | 8 | # Username and password 9 | #USERNAME = "" 10 | #PASSWORD = "" 11 | 12 | # Kerberos principal. If commented, default principal obtained by kinit is used. 13 | KRB_PRINCIPAL = "{{ beaker_krb_principal }}" 14 | 15 | # Kerberos keytab file. 16 | KRB_KEYTAB = "{{ beaker_keytab_filepath }}" 17 | 18 | # Kerberos service prefix. Example: host, HTTP 19 | KRB_SERVICE = "{{ beaker_krb_service }}" 20 | 21 | # Kerberos realm. If commented, last two parts of domain name are used. Example: MYDOMAIN.COM. 22 | KRB_REALM = "{{ beaker_krb_realm }}" 23 | 24 | # Kerberos credential cache file. 25 | #KRB_CCACHE = "" 26 | # SSL CA certificate to verify the Beaker server's SSL certificate. 27 | # By default, uses the system-wide CA certificate bundle. 
28 | CA_CERT = "{{ beaker_ca_cert_path }}" 29 | -------------------------------------------------------------------------------- /cinch/roles/certificate_authority/defaults/main.yml: -------------------------------------------------------------------------------- 1 | certificate_authority_urls: [] 2 | certificate_authority_files: [] 3 | certificate_authority_validate_certs: false 4 | -------------------------------------------------------------------------------- /cinch/roles/certificate_authority/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: install certificate authority 2 | command: update-ca-trust force-enable 3 | -------------------------------------------------------------------------------- /cinch/roles/certificate_authority/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: install certificate authority from URL 2 | become: true 3 | get_url: 4 | url: "{{ item }}" 5 | dest: /etc/pki/ca-trust/source/anchors/ 6 | owner: root 7 | group: root 8 | mode: 0644 9 | validate_certs: "{{ certificate_authority_validate_certs }}" 10 | with_items: "{{ certificate_authority_urls }}" 11 | notify: install certificate authority 12 | 13 | - name: install CA certs from files 14 | become: true 15 | copy: 16 | src: "{{ item }}" 17 | dest: /etc/pki/ca-trust/source/anchors/ 18 | owner: root 19 | group: root 20 | mode: 0644 21 | with_items: "{{ certificate_authority_files }}" 22 | notify: install certificate authority 23 | -------------------------------------------------------------------------------- /cinch/roles/check_ssh/defaults/main.yml: -------------------------------------------------------------------------------- 1 | ssh_timeout: 600 2 | ssh_retry_delay: 6 3 | -------------------------------------------------------------------------------- /cinch/roles/check_ssh/tasks/main.yml: 
-------------------------------------------------------------------------------- 1 | - name: wait for ssh connection 2 | wait_for_connection: 3 | sleep: '{{ ssh_retry_delay }}' 4 | timeout: '{{ ssh_timeout }}' 5 | when: ansible_connection == 'ssh' 6 | -------------------------------------------------------------------------------- /cinch/roles/dockerize/defaults/main.yml: -------------------------------------------------------------------------------- 1 | dockerize_version: v0.3.0 2 | dockerize_architecture: linux-amd64 3 | -------------------------------------------------------------------------------- /cinch/roles/dockerize/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: download dockerize 2 | get_url: 3 | dest: /tmp/dockerize.tar.gz 4 | url: "https://github.com/jwilder/dockerize/releases/download/\ 5 | {{ dockerize_version }}/dockerize-{{ dockerize_architecture }}\ 6 | -{{ dockerize_version }}.tar.gz" 7 | register: dockerize_download 8 | 9 | - name: unarchive dockerize 10 | unarchive: 11 | src: /tmp/dockerize.tar.gz 12 | remote_src: true 13 | dest: /usr/local/bin 14 | when: dockerize_download|changed 15 | tags: 16 | - skip_ansible_lint 17 | 18 | - name: make sure dockerize has proper attributes 19 | file: 20 | dest: /usr/local/bin/dockerize 21 | mode: 0755 22 | owner: root 23 | group: root 24 | state: file 25 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_common/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # The Java version to install. Older versions are deprecated and will not be 2 | # supported beyond the EOL support for their packages in the distribution of 3 | # choice 4 | java_version: 8 5 | # Optionally set the 'jenkins' Unix account UID here. This should not normally 6 | # need to be changed, but for certain OpenShift instances a value of 7 | # '1000090000' is recommended. 
8 | #jenkins_user_uid: 1000090000 9 | 10 | # Set the default (empty) set of authorized SSH public keys for jenkins user. 11 | # To add authorized keys, override this value by adding a list of paths 12 | # (relative or full) to files containing separate public keys. 13 | # e.g.: ['mykey.pub', 'otherkey.pub'] 14 | jenkins_authorized_keys: [] 15 | # A list of additional packages that you wish to install on both masters and 16 | # slaves using the default package manager. 17 | extra_rpms: [] 18 | # The name of the RPM that includes pip 19 | python_pip_package: python2-pip 20 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: generate public SSH key 2 | shell: >- 3 | ssh-keygen -y -f "{{ jenkins_user_home }}/.ssh/id_rsa" > 4 | "{{ jenkins_user_home }}/.ssh/id_rsa.pub" 5 | become: true 6 | become_user: "{{ jenkins_user }}" 7 | notify: configure user as CLI user 8 | 9 | - name: configure user as CLI user 10 | jenkins_cli_user: 11 | jenkins_home: "{{ jenkins_home }}" 12 | jenkins_user: "{{ item }}" 13 | key_file: "{{ jenkins_user_home }}/.ssh/id_rsa.pub" 14 | state: present 15 | become: true 16 | ignore_errors: true 17 | notify: restart Jenkins 18 | with_items: "{{ [jenkins_admin.nickname] + jenkins_cli_users }}" 19 | # Errors ignored because the file might not exist on slaves, and it might not 20 | # yet be configured with roles, etc if this is executed on first run. 
However, 21 | # it becomes important to have this running during subsequent executions 22 | 23 | - name: restart Jenkins 24 | service: 25 | name: jenkins 26 | state: restarted 27 | become: true 28 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: install necessary python packages 2 | command: dnf install -y python2-dnf 3 | when: ansible_distribution == 'Fedora' and ansible_distribution_major_version < 30 4 | changed_when: false 5 | 6 | - name: install java and other necessary packages 7 | package: 8 | name: "{{ jenkins_common_packages }}" 9 | state: present 10 | retries: 2 11 | register: install_deps 12 | until: install_deps is success 13 | 14 | - name: install additional packages 15 | package: 16 | name: "{{ extra_rpms }}" 17 | state: present 18 | retries: 2 19 | register: install_docker 20 | until: install_docker is success 21 | 22 | - name: create jenkins user 23 | user: 24 | name: "{{ jenkins_user }}" 25 | password: "{{ jenkins_user_password | default(omit) }}" 26 | home: "{{ jenkins_user_home }}" 27 | shell: /bin/bash 28 | uid: "{{ jenkins_user_uid | default(omit) }}" 29 | when: jenkins_user != ansible_user_id and jenkins_user != 'root' 30 | 31 | - name: ensure SSH directory exists 32 | file: 33 | name: "{{ jenkins_user_home }}/.ssh" 34 | owner: "{{ jenkins_user }}" 35 | mode: 0700 36 | state: directory 37 | 38 | - name: generate new SSH key 39 | command: |- 40 | ssh-keygen -b 4096 -t rsa -f {{ jenkins_user_home }}/.ssh/id_rsa 41 | -N '' creates={{ jenkins_user_home }}/.ssh/id_rsa 42 | become: true 43 | become_user: "{{ jenkins_user }}" 44 | notify: generate public SSH key 45 | 46 | - name: add ssh keys to authorized_keys for the jenkins user 47 | authorized_key: 48 | user: "{{ jenkins_user }}" 49 | state: present 50 | key: "{{ item }}" 51 | with_file: "{{ jenkins_authorized_keys }}" 52 | 
-------------------------------------------------------------------------------- /cinch/roles/jenkins_common/vars/main.yml: -------------------------------------------------------------------------------- 1 | jenkins_common_packages: 2 | - java-1.{{ java_version }}.0-openjdk 3 | - java-1.{{ java_version }}.0-openjdk-devel 4 | - libselinux-python 5 | - openssh 6 | - gcc 7 | - redhat-rpm-config 8 | - git 9 | - "{{ python_pip_package }}" 10 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_docker_slave/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # This is the Docker image tag to pull and execute 2 | # the container from 3 | docker_jenkins_slave_tag: latest 4 | 5 | # The name to give the slave in the Swarm plugin 6 | jslave_name: my_docker_slave 7 | 8 | # Name to give the Docker container on the tanker host 9 | # By default this value will match the value of jslave_name 10 | # to keep consistency with naming things. 
Override this 11 | # value directly if you wish to break that parallel 12 | # naming default 13 | docker_jenkins_slave_container_name: "{{ jslave_name }}" 14 | 15 | # Set this value to provide the slave a particular 16 | # label in the Swarm plugin 17 | # jswarm_label: some_label_value 18 | 19 | # Set this value to the value of the Jenkins master 20 | # the slave should connect to 21 | # jenkins_master_url: http://some.hostname.local:8443 22 | 23 | # Set this value to pass in extra command line arguments 24 | # to the jswarm executable 25 | # jswarm_extra_args: -var value -othervar otherval 26 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_docker_slave/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: ensure docker is installed 2 | package: 3 | name: 4 | - docker 5 | - python-docker-py 6 | state: present 7 | become: true 8 | retries: 2 9 | register: install_docker 10 | until: install_docker is success 11 | 12 | - name: make sure docker is started 13 | service: 14 | name: docker 15 | state: started 16 | enabled: true 17 | become: true 18 | 19 | - name: pull docker image 20 | docker_image: 21 | state: present 22 | name: redhatqecinch/jenkins_slave 23 | tag: "{{ docker_jenkins_slave_tag | default('latest') }}" 24 | become: true 25 | 26 | - name: set environment variables 27 | set_fact: 28 | _env_vars: 29 | JSLAVE_NAME: "{{ jslave_name }}" 30 | JENKINS_MASTER_URL: "{{ jenkins_master_url | mandatory }}" 31 | 32 | - name: set conditional variables - jslave_label 33 | set_fact: 34 | _env_vars: "{{ _env_vars | combine({ 'JSLAVE_LABEL': jslave_label }) }}" 35 | when: jslave_label is defined 36 | 37 | - name: set conditional variables - jswarm_extra_args 38 | set_fact: 39 | _env_vars: "{{ _env_vars | combine({ 'JSWARM_EXTRA_ARGS': jswarm_extra_args }) }}" 40 | when: jswarm_extra_args is defined 41 | 42 | - name: run docker container 43 | docker_container: 44 | 
name: "{{ docker_jenkins_slave_container_name | default(jslave_name) }}" 45 | state: started 46 | image: redhatqecinch/jenkins_slave:{{ docker_jenkins_slave_tag | default('latest') }} 47 | env: "{{ _env_vars }}" 48 | restart_policy: always 49 | become: true 50 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/files/mass_disable.groovy: -------------------------------------------------------------------------------- 1 | for(item in hudson.model.Hudson.instance.items) { 2 | println("Disabling " + item.name) 3 | item.disabled = true 4 | item.save() 5 | } 6 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/files/set_env.groovy: -------------------------------------------------------------------------------- 1 | def hudson = hudson.model.Hudson.instance 2 | def globalProps = hudson.globalNodeProperties 3 | if(globalProps.size() != 1) { 4 | globalProps.replaceBy( 5 | [new hudson.slaves.EnvironmentVariablesNodeProperty()]) 6 | } 7 | def props = globalProps.getAll( 8 | hudson.slaves.EnvironmentVariablesNodeProperty.class) 9 | for (prop in props) { 10 | // add prop.envVars.put(key, value) 11 | %s 12 | } 13 | hudson.save() 14 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: restart nginx 2 | systemd: 3 | name: nginx 4 | state: restarted 5 | 6 | - name: update certificate chains 7 | command: /usr/bin/update-ca-trust extract 8 | 9 | - name: restart Jenkins 10 | service: 11 | name: jenkins 12 | state: restarted 13 | 14 | - name: restart Jenkins during upgrade 15 | service: 16 | name: jenkins 17 | state: restarted 18 | when: jenkins_upgrade 19 | 20 | # Reload firewalld by running command, since the firewalld module doesn't 21 | # support the functionality to reload rules. 
22 | - name: reload firewalld 23 | command: firewall-cmd --reload 24 | tags: 25 | - skip_ansible_lint 26 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/tasks/configure.yml: -------------------------------------------------------------------------------- 1 | - name: set Jenkins Java options 2 | template: 3 | src: sysconfig_jenkins 4 | dest: /etc/sysconfig/jenkins 5 | owner: root 6 | group: root 7 | mode: 0600 8 | notify: restart Jenkins 9 | 10 | - name: update ulimits 11 | template: 12 | src: 99-jenkins.conf 13 | dest: /etc/security/limits.d/99-jenkins.conf 14 | owner: root 15 | group: root 16 | mode: 0644 17 | notify: restart Jenkins 18 | 19 | - name: ensure Jenkins home is writable by jenkins 20 | file: 21 | dest: "{{ jenkins_home }}" 22 | owner: jenkins 23 | group: jenkins 24 | state: directory 25 | 26 | - name: install nginx configuration file for jenkins 27 | template: 28 | src: etc/nginx/conf.d/jenkins_http{{ https_enabled | ternary('s', '') }}.conf 29 | dest: /etc/nginx/conf.d/jenkins.conf 30 | owner: root 31 | group: root 32 | mode: 0644 33 | when: ansible_connection != 'docker' 34 | notify: restart nginx 35 | 36 | - name: flush handlers 37 | meta: flush_handlers 38 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/tasks/ensure_up.yml: -------------------------------------------------------------------------------- 1 | - name: check for running in a container 2 | set_fact: 3 | _listening_port: "{{ https_enabled | ternary(8443, 8080) }}" 4 | when: ansible_connection == 'docker' 5 | tags: 6 | - jenkins_check_mode 7 | 8 | - name: discover valid hostname for containers 9 | set_fact: 10 | _jenkins_host: 'localhost' 11 | when: ansible_connection == 'docker' or ansible_connection == 'local' 12 | tags: 13 | - jenkins_check_mode 14 | 15 | - name: discover valid hostname 16 | set_fact: 17 | _jenkins_host: "{{ ansible_host | 
default(inventory_hostname | default('localhost')) }}" 18 | when: ansible_connection != 'docker' and ansible_connection != 'local' 19 | tags: 20 | - jenkins_check_mode 21 | 22 | - name: construct Jenkins URL 23 | set_fact: 24 | _jenkins_url: "http{{ https_enabled | ternary('s', '') }}://{{ _jenkins_host }}" 25 | tags: 26 | - jenkins_check_mode 27 | 28 | - name: modify Jenkins URL for non-standard ports 29 | set_fact: 30 | _jenkins_url: "{{ _jenkins_url }}:{{ _listening_port }}" 31 | when: _listening_port is defined 32 | tags: 33 | - jenkins_check_mode 34 | 35 | - name: wait for Jenkins to start up 36 | become: false 37 | uri: 38 | url: "{{ _jenkins_url }}" 39 | status_code: 200,401 40 | timeout: 5 41 | validate_certs: false 42 | register: jenkins_status 43 | retries: 30 44 | delay: 15 45 | until: >- 46 | 'status' in jenkins_status and 47 | (jenkins_status['status'] == 200 or jenkins_status['status'] == 401) 48 | 49 | # Newer Jenkins versions require the -remoting flag to allow the Jenkins CLI to 50 | # be available in the way that we use it currently. The 'X-Jenkins' response 51 | # header gives us the current version. 
52 | # https://jenkins.io/doc/book/managing/cli/#remoting-connection-mode 53 | - name: check Jenkins version for -remoting flag 54 | set_fact: 55 | jenkins_cli_remoting: "{{ jenkins_status.x_jenkins is version_compare('2.46.2', '>=') }}" 56 | 57 | - name: copy jenkins-jar file to readable place 58 | become: true 59 | copy: 60 | src: "{{ jenkins_cli_jar_src }}" 61 | dest: "{{ jenkins_cli_jar }}" 62 | remote_src: true 63 | owner: "{{ jenkins_cli_shell_user }}" 64 | mode: 0600 65 | 66 | - name: get user api key 67 | jenkins_user_api: 68 | user: "{{ jenkins_admin.nickname }}" 69 | remoting: "{{ jenkins_cli_remoting }}" 70 | cli_jar: "{{ jenkins_cli_jar }}" 71 | register: jenkins_admin_api_key 72 | become: true 73 | become_user: "{{ jenkins_cli_shell_user }}" 74 | retries: "{{ jenkins_user_api_retries }}" 75 | delay: 6 76 | until: jenkins_admin_api_key.api_key 77 | 78 | - name: delete jenkins-cli file 79 | become: true 80 | file: 81 | dest: "{{ jenkins_cli_jar }}" 82 | state: absent 83 | 84 | - name: save API key 85 | set_fact: 86 | admin_api_key: "{{ jenkins_admin_api_key.api_key }}" 87 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/tasks/firewalld.yml: -------------------------------------------------------------------------------- 1 | - name: ensure firewalld is installed 2 | package: 3 | name: firewalld,python-firewall 4 | state: present 5 | become: true 6 | retries: 2 7 | register: install_firewalld 8 | until: install_firewalld is success 9 | 10 | - name: ensure firewalld is enabled and started 11 | service: 12 | name: firewalld 13 | state: started 14 | enabled: true 15 | 16 | - name: accept all communication from lo interface 17 | firewalld: 18 | zone: trusted 19 | interface: lo 20 | state: enabled 21 | permanent: true 22 | notify: reload firewalld 23 | 24 | - name: reject other communication to localhost (IPv4) 25 | firewalld: 26 | rich_rule: > 27 | rule family="ipv4" source address="127.0.0.0/8" 
invert="True" 28 | destination address="127.0.0.0/8" reject type="icmp-port-unreachable" 29 | state: enabled 30 | permanent: true 31 | notify: reload firewalld 32 | 33 | - name: reject other communication to localhost (IPv6) 34 | firewalld: 35 | rich_rule: > 36 | rule family="ipv6" source address="::1/64" invert="True" 37 | destination address="::1/64" reject type="icmp6-port-unreachable" 38 | state: enabled 39 | permanent: true 40 | notify: reload firewalld 41 | 42 | - name: set firewalld to allow Jenkins-related TCP ports 43 | firewalld: 44 | port: "{{ item }}" 45 | state: enabled 46 | permanent: true 47 | with_items: "{{ firewall_tcp_ports }}" 48 | when: item is defined 49 | notify: reload firewalld 50 | 51 | - name: flush handlers 52 | meta: flush_handlers 53 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/tasks/install.yml: -------------------------------------------------------------------------------- 1 | - name: unpin jenkins package for version upgrades 2 | lineinfile: 3 | dest: "{{ version_pin_file }}" 4 | state: present 5 | regexp: "^exclude=.*" 6 | line: "exclude={{ upgrade_blacklist | difference(['jenkins']) | join(' ') }}" 7 | when: jenkins_upgrade 8 | 9 | - name: install necessary RPM files 10 | package: 11 | name: "{{ jmaster_rpms }}" 12 | state: present 13 | retries: 2 14 | register: install_master_rpms 15 | until: install_master_rpms is success 16 | notify: restart Jenkins during upgrade 17 | 18 | - name: install additional packages 19 | package: 20 | name: "{{ jmaster_extra_rpms }}" 21 | state: present 22 | retries: 2 23 | register: install_master_extra_rpms 24 | until: install_master_extra_rpms is success 25 | 26 | - name: install gcc_compat where necessary 27 | package: 28 | name: "{{ gcc_compat_package }}" 29 | state: present 30 | when: gcc_compat_package is defined 31 | retries: 2 32 | register: install_gcc_compat_rpms 33 | until: install_gcc_compat_rpms is success 34 | 35 | # 
This blacklist must be done *after* the package installation defined above, 36 | # otherwise packages can be blacklisted before they are installed initially. 37 | - name: pin certain packages 38 | lineinfile: 39 | dest: "{{ version_pin_file }}" 40 | line: "exclude={{ upgrade_blacklist | join(' ') }}" 41 | state: present 42 | regexp: "^exclude=.*$" 43 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: setup things before install 2 | include: pre_install.yml 3 | 4 | - name: install necessary packages 5 | include: install.yml 6 | 7 | - name: configure firewalld 8 | include: firewalld.yml 9 | when: ansible_connection != 'docker' 10 | 11 | - name: configure Jenkins 12 | include: configure.yml 13 | 14 | - name: startup Jenkins 15 | systemd: 16 | name: jenkins 17 | state: started 18 | enabled: true 19 | 20 | - name: pin plugins to avoid upgrades 21 | include: pin_plugin.yml 22 | when: jenkins_plugin_install_configure 23 | 24 | - name: plugins 25 | include: plugins.yml 26 | when: jenkins_plugin_install_configure 27 | 28 | - name: make sure handlers are flushed 29 | meta: flush_handlers 30 | 31 | - name: wait for the master to be ready to go 32 | include: ensure_up.yml 33 | 34 | - name: application-level post configuration tasks 35 | become: false 36 | include: post_configure.yml 37 | when: jenkins_plugin_install_configure 38 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/tasks/pin_plugin.yml: -------------------------------------------------------------------------------- 1 | - name: ensure plugins directory exists 2 | file: 3 | dest: "{{ jenkins_home }}/plugins/" 4 | state: directory 5 | owner: "{{ jenkins_user }}" 6 | mode: 0755 7 | when: (jenkins_pinned_plugins|length) > 0 8 | 9 | - name: pin plugin 10 | template: 11 | src: jenkins_pinned 12 
| dest: "{{ jenkins_home }}/plugins/{{ item }}.jpi.pinned" 13 | owner: "{{ jenkins_user }}" 14 | mode: 0644 15 | with_items: "{{ jenkins_pinned_plugins }}" 16 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/tasks/plugins.yml: -------------------------------------------------------------------------------- 1 | - name: wait for Jenkins 2 | include: ensure_up.yml 3 | 4 | - name: run update center configure script 5 | jenkins_update_center: 6 | update_center_id: "{{ item.id }}" 7 | update_center_url: "{{ item.url }}" 8 | with_items: "{{ update_centers }}" 9 | notify: restart Jenkins 10 | become_user: jenkins 11 | become: true 12 | 13 | - name: create cert path 14 | file: 15 | dest: "{{ jenkins_home }}/update-center-rootCAs" 16 | state: directory 17 | owner: jenkins 18 | group: jenkins 19 | 20 | - name: copy cert file 21 | copy: 22 | src: "{{ update_center_certificate }}" 23 | dest: "{{ jenkins_home }}/update-center-rootCAs/update-center.crt" 24 | remote_src: "{{ update_center_certificate_remote_src }}" 25 | owner: jenkins 26 | group: jenkins 27 | notify: restart Jenkins 28 | when: update_center_certificate is defined 29 | 30 | - name: force handlers 31 | meta: flush_handlers 32 | 33 | - name: wait for jenkins master 34 | include: ensure_up.yml 35 | 36 | - name: perform plugin install 37 | jenkins_plugin: 38 | url: "{{ _jenkins_url }}" 39 | name: "{{ item.1.split('=')[0] }}" 40 | state: present 41 | version: "{{ item.1.split('=')[2] | default(omit) }}" 42 | validate_certs: false 43 | url_username: "{{ jenkins_admin.nickname }}" 44 | url_password: "{{ admin_api_key | default('') }}" 45 | updates_url: "{{ item.0.url | default(omit) }}" 46 | register: plugin_install 47 | retries: 3 48 | until: not plugin_install is failed 49 | with_subelements: 50 | - "{{ jenkins_plugins + jenkins_extra_plugins }}" 51 | - plugins 52 | notify: restart Jenkins 53 | 54 | - name: flush handlers 55 | meta: flush_handlers 56 | 
-------------------------------------------------------------------------------- /cinch/roles/jenkins_master/tasks/post_configure.yml: -------------------------------------------------------------------------------- 1 | - name: run Jenkins global config 2 | jenkins_script: 3 | url: "{{ _jenkins_url }}" 4 | script: "{{ item.script }}" 5 | validate_certs: false 6 | user: "{{ jenkins_admin.nickname }}" 7 | password: "{{ admin_api_key | default('') }}" 8 | register: script_output 9 | changed_when: "(script_output.output|default('')).find('CHANGED') != -1" 10 | # Since the jenkins_script module doesn't support check_mode, we disable it 11 | # here so that we can handle check mode inside of the Groovy scripts. 12 | check_mode: false 13 | # We cannot use 'with_file' here because the templates won't be rendered 14 | # correctly. 15 | # These scripts are run in the order listed below, so please edit this list 16 | # using the desired execution order. 17 | with_items: 18 | - script: "{{ lookup('template', role_path + '/templates/set_usebrowser.groovy') }}" 19 | - script: "{{ lookup('template', role_path + '/templates/set_slaveport.groovy') }}" 20 | - script: "{{ lookup('template', role_path + '/templates/user.groovy') }}" 21 | - script: "{{ lookup('template', role_path + '/templates/setenvvars.groovy') }}" 22 | - script: "{{ lookup('template', role_path + '/templates/jenkins_root_url.groovy') }}" 23 | when: "{{ jenkins_security_enabled }}" 24 | - script: "{{ lookup('template', role_path + '/templates/roles_and_ldap_auth.groovy') }}" 25 | when: "{{ jenkins_security_enabled }}" 26 | - script: "{{ lookup('template', role_path + '/templates/enable-kerberos-sso.groovy') }}" 27 | when: "{{ jenkins_security_enabled }}" 28 | # This option will disable security, rather than avoid touching it when 29 | # jenkins_security_enabled is false 30 | - script: "{{ lookup('template', role_path + '/templates/basic_security.groovy') }}" 31 | when: "{{ not jenkins_security_enabled }}" 32 | 
become: true 33 | become_user: "{{ jenkins_user }}" 34 | # The "when" clause on a task gets evaluated separately for each element in 35 | # an item list. Some of the elements in our list should not always be run 36 | # while others should. So this logic will run any elements that are in the 37 | # with_items without any attached conditionasl, but will evaluate the separate 38 | # "when" clause attached to each item if one is given. This allows individual 39 | # scripts to have their own script logic for when they should be executed 40 | when: (item.when is not defined or item.when) | bool 41 | tags: 42 | - skip_ansible_lint 43 | - jenkins_check_mode 44 | 45 | - name: configure user as CLI user 46 | jenkins_cli_user: 47 | jenkins_home: "{{ jenkins_home }}" 48 | jenkins_user: "{{ item }}" 49 | key_file: "{{ jenkins_cli_shell_user_home }}/.ssh/id_rsa.pub" 50 | state: present 51 | become: true 52 | notify: restart Jenkins 53 | with_items: "{{ [jenkins_admin.nickname] + jenkins_cli_users }}" 54 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/tasks/pre_install.yml: -------------------------------------------------------------------------------- 1 | - name: create jenkins_cli_shell_user 2 | become: true 3 | user: 4 | name: "{{ jenkins_cli_shell_user }}" 5 | home: "{{ jenkins_cli_shell_user_home }}" 6 | generate_ssh_key: true 7 | when: jenkins_cli_shell_user != jenkins_user 8 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/templates/99-jenkins.conf: -------------------------------------------------------------------------------- 1 | {{ jenkins_user }} soft nofile {{ jenkins_soft_nofile_ulimit }} 2 | {{ jenkins_user }} hard nofile {{ jenkins_hard_nofile_ulimit }} 3 | {{ jenkins_user }} soft nproc {{ jenkins_soft_nproc_ulimit }} 4 | {{ jenkins_user }} hard nproc {{ jenkins_hard_nproc_ulimit }} 5 | {{ jenkins_user }} - fsize {{ jenkins_fsize_ulimit }} 
6 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/templates/basic_security.groovy: -------------------------------------------------------------------------------- 1 | import hudson.security.*; 2 | 3 | def check_mode = {{ ansible_check_mode|to_json }}; 4 | 5 | boolean changed = false; 6 | def jenkins = jenkins.model.Jenkins.getActiveInstance(); 7 | AuthorizationStrategy strategy = jenkins.getAuthorizationStrategy(); 8 | if ( ! ( strategy instanceof AuthorizationStrategy.Unsecured ) ) { 9 | strategy = new AuthorizationStrategy.Unsecured(); 10 | if ( !check_mode ) jenkins.setAuthorizationStrategy(strategy); 11 | println "CHANGED: Updated strategy to be unsecured." 12 | changed = true; 13 | } 14 | if ( ! (jenkins.getSecurityRealm() instanceof HudsonPrivateSecurityRealm) ) { 15 | HudsonPrivateSecurityRealm realm = new HudsonPrivateSecurityRealm(allowsSignup=false); 16 | if ( !check_mode ) jenkins.setSecurityRealm(realm); 17 | println "CHANGED: Updated security realm to be private realm" 18 | changed = true; 19 | } 20 | 21 | if ( !check_mode && changed ) 22 | jenkins.save(); 23 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/templates/enable-kerberos-sso.groovy: -------------------------------------------------------------------------------- 1 | boolean check_mode = {{ ansible_check_mode|to_json }} 2 | boolean anonymous_access = {{ jenkins_kerberos_anonymous_access|to_json }} 3 | 4 | def plugin = com.sonymobile.jenkins.plugins.kerberossso.PluginImpl.getInstance() 5 | 6 | // TODO: Since kerberos-sso 1.4 there is no need to use the private field 7 | // accessor ('@') for any of the fields. 
8 | 9 | if (!plugin.@enabled) { 10 | if (!check_mode) plugin.@enabled = true 11 | println "Enabling Kerberos SSO" 12 | } 13 | 14 | // Need this to prevent a NPE on startup 15 | if (!check_mode) plugin.@password = hudson.util.Secret.fromString("changeme") 16 | println "Setting dummy password" 17 | 18 | if (plugin.@allowLocalhost) { 19 | if (!check_mode) plugin.@allowLocalhost = false 20 | println "Disabling auto-login from localhost" 21 | } 22 | 23 | if (plugin.@allowUnsecureBasic) { 24 | if (!check_mode) plugin.@allowUnsecureBasic = false 25 | println "Disabling unsecured Basic Authentication" 26 | } 27 | 28 | if (plugin.@anonymousAccess != anonymous_access) { 29 | if (!check_mode) plugin.@anonymousAccess = anonymous_access; 30 | println "${anonymous_access ? "Enabling" : "Disabling"} anonymous access" 31 | } 32 | 33 | if (!check_mode) plugin.save() 34 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/templates/etc/nginx/conf.d/jenkins_http.conf: -------------------------------------------------------------------------------- 1 | {% if jenkins_nginx_rate_limiting == "true" %} 2 | limit_req_zone $binary_remote_addr zone=jenkins:{{ nginx_zone_cache_size }} rate={{ nginx_request_limit }}; 3 | limit_conn_zone $binary_remote_addr zone=addr:{{ nginx_zone_cache_size }}; 4 | {% endif %} 5 | 6 | server_names_hash_bucket_size 6400; 7 | upstream jenkins { 8 | server 127.0.0.1:8080 fail_timeout=0; 9 | } 10 | 11 | server { 12 | listen 80; 13 | server_name {{ service_name }}; 14 | location @jenkins { 15 | proxy_http_version 1.1; 16 | proxy_connect_timeout 30s; 17 | proxy_send_timeout 120; 18 | proxy_read_timeout 120; 19 | client_max_body_size 35m; 20 | proxy_buffer_size 4k; 21 | proxy_buffers 8 32m; 22 | proxy_set_header Host $host; 23 | proxy_set_header X-Real-IP $remote_addr; 24 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 25 | proxy_set_header X-Forwarded-Proto $scheme; 26 | proxy_pass 
http://jenkins; 27 | {% if jenkins_nginx_rate_limiting == "true" %} 28 | limit_req zone=jenkins burst={{ nginx_burst_queue }} nodelay; 29 | limit_conn addr {{ nginx_max_connections_per_ip }}; 30 | limit_rate_after {{ nginx_bandwidth_limit_after_size }}; 31 | limit_rate {{ nginx_max_bandwidth_outbound }}; 32 | {% endif %} 33 | } 34 | 35 | location ~ "^/static/[0-9a-fA-F]{8}/(.*)" { 36 | 37 | #rewrite all static files into requests to the root 38 | #E.g /static/12345678/css/something.css will become /css/something.css 39 | rewrite "^/static/[0-9a-fA-F]{8}/(.*)" /$1 last; 40 | } 41 | 42 | location /userContent { 43 | #have nginx handle all the static requests to the userContent folder files 44 | #note : This is the $JENKINS_HOME dir 45 | root /var/lib/jenkins/; 46 | if (!-f $request_filename){ 47 | #this file does not exist, might be a directory or a /**view** url 48 | rewrite (.*) /$1 last; 49 | break; 50 | } 51 | sendfile on; 52 | } 53 | 54 | location / { 55 | try_files $uri @jenkins; 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/templates/etc/nginx/conf.d/jenkins_https.conf: -------------------------------------------------------------------------------- 1 | {% if jenkins_nginx_rate_limiting == "true" %} 2 | limit_req_zone $binary_remote_addr zone=jenkins:{{ nginx_zone_cache_size }} rate={{ nginx_request_limit }}; 3 | limit_conn_zone $binary_remote_addr zone=addr:{{ nginx_zone_cache_size }}; 4 | {% endif %} 5 | 6 | server_names_hash_bucket_size 6400; 7 | upstream jenkins { 8 | server 127.0.0.1:8080 fail_timeout=0; 9 | } 10 | 11 | server { 12 | listen 80; 13 | server_name {{ service_name }}; 14 | return 301 https://$host$request_uri; 15 | } 16 | 17 | server { 18 | listen 443 ssl; 19 | server_name {{ service_name }}; 20 | gzip on; 21 | ssl_certificate /etc/nginx/conf.d/ssl.pem; 22 | ssl_certificate_key /etc/nginx/conf.d/ssl.key; 23 | ssl_session_timeout 60m; 24 | ssl_protocols TLSv1 
TLSv1.1 TLSv1.2; 25 | ssl_ciphers "AES128+EECDH:AES128+EDH"; 26 | ssl_prefer_server_ciphers on; 27 | ssl_session_cache shared:SSL:10m; 28 | add_header Strict-Transport-Security "max-age=63072000; includeSubDomains"; 29 | add_header X-Content-Type-Options nosniff; 30 | 31 | 32 | location @jenkins { 33 | proxy_http_version 1.1; 34 | proxy_connect_timeout 30s; 35 | proxy_send_timeout 120; 36 | proxy_read_timeout 120; 37 | client_max_body_size 35m; 38 | proxy_buffer_size 4k; 39 | proxy_buffers 8 32m; 40 | proxy_set_header Host $host; 41 | proxy_set_header X-Real-IP $remote_addr; 42 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 43 | proxy_set_header X-Forwarded-Proto $scheme; 44 | proxy_redirect http:// https://; 45 | proxy_pass http://jenkins; 46 | {% if jenkins_nginx_rate_limiting == "true" %} 47 | limit_req zone=jenkins burst={{ nginx_burst_queue }} nodelay; 48 | limit_conn addr {{ nginx_max_connections_per_ip }}; 49 | limit_rate_after {{ nginx_bandwidth_limit_after_size }}; 50 | limit_rate {{ nginx_max_bandwidth_outbound }}; 51 | {% endif %} 52 | } 53 | 54 | location ~ "^/static/[0-9a-fA-F]{8}/(.*)" { 55 | 56 | #rewrite all static files into requests to the root 57 | #E.g /static/12345678/css/something.css will become /css/something.css 58 | rewrite "^/static/[0-9a-fA-F]{8}/(.*)" /$1 last; 59 | } 60 | 61 | location /userContent { 62 | #have nginx handle all the static requests to the userContent folder files 63 | #note : This is the $JENKINS_HOME dir 64 | root /var/lib/jenkins/; 65 | if (!-f $request_filename){ 66 | #this file does not exist, might be a directory or a /**view** url 67 | rewrite (.*) /$1 last; 68 | break; 69 | } 70 | sendfile on; 71 | } 72 | 73 | location / { 74 | try_files $uri @jenkins; 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/templates/init_backup.groovy: -------------------------------------------------------------------------------- 1 | import 
jenkins.model.*; 2 | import java.util.logging.Logger; 3 | import org.jvnet.hudson.plugins.thinbackup.*; 4 | 5 | Logger logger = Logger.getLogger("jenkins.groovy.Maintenance"); 6 | def jenkins = Jenkins.getActiveInstance(); 7 | def thinbackup = ThinBackupPluginImpl.getInstance(); 8 | boolean changes = false; 9 | 10 | def check_mode = {{ ansible_check_mode|to_json }}; 11 | String backupPath = "{{ jenkins_backup.directory|default('/jenkins_backup/.backups') }}"; 12 | String fullBackupSchedule = "{{ jenkins_backup.full_schedule|default('H 0 * * 0') }}"; 13 | String diffBackupSchedule = "{{ jenkins_backup.diff_schedule|default('H 0 * * *') }}"; 14 | String excludedFiles = "{{ jenkins_backup.exclude|default('') }}"; 15 | def maxSets = {{ jenkins_backup.max_sets|default(4) }}; 16 | def waitForIdle = {{ jenkins_backup.wait_for_idle|default(false)|to_json }}; 17 | def forceQuietModeTimeout = {{ jenkins_backup.quiet_mode_timeout|default(120)|to_json }}; 18 | def backupBuildResults = {{ jenkins_backup.build_results|default(false)|to_json }}; 19 | def backupUserContents = {{ jenkins_backup.user_contents|default(false)|to_json }}; 20 | def cleanupDiff = {{ jenkins_backup.cleanup_diffs|default(false)|to_json }}; 21 | def backupNextBuildNumber = {{ jenkins_backup.next_build_number|default(false)|to_json }}; 22 | def moveOldBackupsToZipFile = {{ jenkins_backup.move_to_zip|default(false)|to_json }}; 23 | def backupPluginArchives = {{ jenkins_backup.plugin_archives|default(false)|to_json }}; 24 | 25 | def void changed(String field, def value) { 26 | changes = true; 27 | println "CHANGED: Updated " + field + " to " + value; 28 | } 29 | 30 | if( !thinbackup.getBackupPath().equals(backupPath) ){ 31 | if( !check_mode ) thinbackup.setBackupPath(backupPath); 32 | changed("backup path", backupPath); 33 | } 34 | if( !thinbackup.getFullBackupSchedule().equals(fullBackupSchedule) ) { 35 | if( !check_mode ) thinbackup.setFullBackupSchedule(fullBackupSchedule); 36 | changed("full backup 
schedule", fullBackupSchedule); 37 | } 38 | if( !thinbackup.getDiffBackupSchedule().equals(diffBackupSchedule) ){ 39 | if( !check_mode ) thinbackup.setDiffBackupSchedule(diffBackupSchedule); 40 | changed("diff backup schedule", diffBackupSchedule); 41 | } 42 | if( thinbackup.getNrMaxStoredFull() != maxSets ) { 43 | if( !check_mode ) thinbackup.setNrMaxStoredFull(maxSets); 44 | changed("max number of backups stored", maxSets); 45 | } 46 | if( !thinbackup.getExcludedFilesRegex().equals(excludedFiles) ) { 47 | if( !check_mode ) thinbackup.setExcludedFilesRegex(excludedFiles); 48 | changed("excluded files", excludedFiles); 49 | } 50 | if( thinbackup.isWaitForIdle() != waitForIdle ) { 51 | if( !check_mode ) thinbackup.setWaitForIdle(waitForIdle); 52 | changed("wait for idle", waitForIdle); 53 | } 54 | if( thinbackup.getForceQuietModeTimeout() != forceQuietModeTimeout ) { 55 | if( !check_mode ) thinbackup.setForceQuietModeTimeout(forceQuietModeTimeout); 56 | changed("force quiet mode timeout", forceQuietModeTimeout); 57 | } 58 | if( thinbackup.isBackupBuildResults() != backupBuildResults ) { 59 | if( !check_mode ) thinbackup.setBackupBuildResults(backupBuildResults); 60 | changed("backup build results", backupBuildResults); 61 | } 62 | if( thinbackup.isBackupUserContents() != backupUserContents ) { 63 | if( !check_mode ) thinbackup.setBackupUserContents(backupUserContents); 64 | changed("backup user contents", backupUserContents); 65 | } 66 | if( thinbackup.isCleanupDiff() != cleanupDiff ) { 67 | if( !check_mode ) thinbackup.setCleanupDiff(cleanupDiff); 68 | changed("cleanup diffs", cleanupDiff); 69 | } 70 | if( thinbackup.isBackupNextBuildNumber() != backupNextBuildNumber ) { 71 | if( !check_mode ) thinbackup.setBackupNextBuildNumber(backupNextBuildNumber); 72 | changed("backup next build number", backupNextBuildNumber); 73 | } 74 | if( thinbackup.isMoveOldBackupsToZipFile() != moveOldBackupsToZipFile ) { 75 | if( !check_mode ) 
thinbackup.setMoveOldBackupsToZipFile(moveOldBackupsToZipFile); 76 | changed("move old backups to zip", moveOldBackupsToZipFile); 77 | } 78 | 79 | if( thinbackup.isBackupPluginArchives() != backupPluginArchives ) { 80 | if( !check_mode ) thinbackup.setBackupPluginArchives(backupPluginArchives); 81 | changed("backup plugin archives", backupPluginArchives); 82 | } 83 | 84 | if( changes && !check_mode ) 85 | thinbackup.save(); 86 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/templates/jenkins_pinned: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatQE/cinch/467e44fb255796a68a72b17f6e30e33170a688fa/cinch/roles/jenkins_master/templates/jenkins_pinned -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/templates/jenkins_root_url.groovy: -------------------------------------------------------------------------------- 1 | import jenkins.model.JenkinsLocationConfiguration; 2 | import org.apache.commons.lang3.StringUtils; 3 | 4 | String newUrl = "{{ _jenkins_url }}"; 5 | boolean changed = false; 6 | def jlc = JenkinsLocationConfiguration.get() 7 | def check_mode = {{ ansible_check_mode|to_json }}; 8 | String oldUrl = StringUtils.stripEnd(jlc.getUrl(), "/"); 9 | 10 | if( !oldUrl.equals(newUrl) ) { 11 | if ( !check_mode ) jlc.setUrl(newUrl); 12 | print "CHANGED: Updated base URL to " + newUrl; 13 | changed = true; 14 | } 15 | 16 | if( !check_mode && changed ) 17 | jlc.save(); 18 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/templates/set_slaveport.groovy: -------------------------------------------------------------------------------- 1 | import jenkins.model.Jenkins 2 | def jenkins = Jenkins.instance 3 | // Ansible+Jinja makes the boolean value from the template into leading 4 | // uppercase, which is not valid in Groovy. 
We use |to_json to ensure that the 5 | // boolean value is lowercase. 6 | def check_mode = {{ ansible_check_mode|to_json }} 7 | 8 | def slave_agent_port = jenkins.getSlaveAgentPort() 9 | int jenkins_slave_agent_port = {{ jenkins_slave_agent_port|int }} 10 | 11 | def change_msg = "CHANGED: slave agent port from " + slave_agent_port + 12 | " to " + jenkins_slave_agent_port 13 | 14 | if (slave_agent_port != jenkins_slave_agent_port) { 15 | if (check_mode) { 16 | println change_msg 17 | } else { 18 | jenkins.setSlaveAgentPort(jenkins_slave_agent_port) 19 | jenkins.save() 20 | println change_msg 21 | } 22 | } else { 23 | println "No changes to slave agent port necessary" 24 | } 25 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/templates/set_usebrowser.groovy: -------------------------------------------------------------------------------- 1 | // http://javadoc.jenkins-ci.org/jenkins/model/DownloadSettings.html 2 | 3 | import jenkins.model.Jenkins 4 | def jenkins = Jenkins.instance 5 | // Ansible+Jinja makes the boolean value from the template into leading 6 | // uppercase, which is not valid in Groovy. We use |to_json to ensure that the 7 | // boolean value is lowercase. 
8 | def check_mode = {{ ansible_check_mode|to_json }} 9 | 10 | def ds = jenkins.getExtensionList(jenkins.model.DownloadSettings.class)[0] 11 | def usebrowser = ds.isUseBrowser() 12 | def jenkins_usebrowser = {{ jenkins_usebrowser|to_json }} 13 | 14 | def change_msg = "CHANGED: setUseBrowser from " + usebrowser + " to " + 15 | jenkins_usebrowser 16 | 17 | if (usebrowser != jenkins_usebrowser) { 18 | if (check_mode) { 19 | println change_msg 20 | } else { 21 | ds.setUseBrowser(jenkins_usebrowser) 22 | jenkins.save() 23 | println change_msg 24 | } 25 | } else { 26 | println "No changes to setUseBrowser necessary" 27 | } 28 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/templates/setenvvars.groovy: -------------------------------------------------------------------------------- 1 | import jenkins.*; 2 | import jenkins.model.*; 3 | import hudson.*; 4 | import hudson.model.*; 5 | import hudson.slaves.*; 6 | 7 | def j = Jenkins.getActiveInstance(); 8 | def globalNodes = j.getGlobalNodeProperties().getAll(hudson.slaves.EnvironmentVariablesNodeProperty.class); 9 | boolean isEmptyNode = (globalNodes.size() == 0); 10 | def check_mode = {{ ansible_check_mode|to_json }}; 11 | 12 | {% for var in jenkins_envvars %} 13 | if (isEmptyNode) { 14 | if ( !check_mode ) 15 | j.globalNodeProperties.replaceBy([new EnvironmentVariablesNodeProperty()]); 16 | isEmptyNode = false; 17 | } 18 | 19 | if ( !check_mode ) 20 | j.globalNodeProperties.get(0).getEnvVars().put("{{ var.key }}", "{{ var.value }}"); 21 | println "Adding environment variable {{ var.key }}={{ var.value }}"; 22 | 23 | {% endfor %} 24 | 25 | if ( !check_mode ) 26 | j.save(); 27 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/templates/sysconfig_jenkins: -------------------------------------------------------------------------------- 1 | ## Path: Development/Jenkins 2 | ## Description: Jenkins 
Continuous Integration Server 3 | ## Type: string 4 | ## Default: "/var/lib/jenkins" 5 | ## ServiceRestart: jenkins 6 | # 7 | # Directory where Jenkins store its configuration and working 8 | # files (checkouts, build reports, artifacts, ...). 9 | # 10 | JENKINS_HOME="{{ jenkins_home }}" 11 | 12 | ## Type: string 13 | ## Default: "" 14 | ## ServiceRestart: jenkins 15 | # 16 | # Java executable to run Jenkins 17 | # When left empty, we'll try to find the suitable Java. 18 | # 19 | JENKINS_JAVA_CMD="{{ jenkins_java_cmd }}" 20 | 21 | ## Type: string 22 | ## Default: "jenkins" 23 | ## ServiceRestart: jenkins 24 | # 25 | # Unix user account that runs the Jenkins daemon 26 | # Be careful when you change this, as you need to update 27 | # permissions of $JENKINS_HOME and /var/log/jenkins. 28 | # 29 | JENKINS_USER="{{ jenkins_user }}" 30 | 31 | ## Type: string 32 | ## Default: "false" 33 | ## ServiceRestart: jenkins 34 | # 35 | # Whether to skip potentially long-running chown at the 36 | # $JENKINS_HOME location. Do not enable this, "true", unless 37 | # you know what you're doing. See JENKINS-23273. 38 | # 39 | #JENKINS_INSTALL_SKIP_CHOWN="false" 40 | 41 | ## Type: string 42 | ## Default: "-Djava.awt.headless=true" 43 | ## ServiceRestart: jenkins 44 | # 45 | # Options to pass to java when running Jenkins. 46 | # 47 | JENKINS_JAVA_OPTIONS="{{ (jenkins_java_options + jenkins_java_extra_options) | join(' ') }}" 48 | ## Type: integer(0:65535) 49 | ## Default: 8080 50 | ## ServiceRestart: jenkins 51 | # 52 | # Port Jenkins is listening on. 53 | # Set to -1 to disable 54 | # 55 | JENKINS_PORT="8080" 56 | 57 | ## Type: string 58 | ## Default: "" 59 | ## ServiceRestart: jenkins 60 | # 61 | # IP address Jenkins listens on for HTTP requests. 62 | # Default is all interfaces (0.0.0.0). 
63 | # 64 | JENKINS_LISTEN_ADDRESS="{{ jenkins_http_listen_address }}" 65 | 66 | ## Type: integer(0:65535) 67 | ## Default: "" 68 | ## ServiceRestart: jenkins 69 | # 70 | # HTTPS port Jenkins is listening on. 71 | # Default is disabled. 72 | # 73 | JENKINS_HTTPS_PORT="-1" 74 | 75 | ## Type: string 76 | ## Default: "" 77 | ## ServiceRestart: jenkins 78 | # 79 | # Path to the keystore in JKS format (as created by the JDK 'keytool'). 80 | # Default is disabled. 81 | # 82 | JENKINS_HTTPS_KEYSTORE="" 83 | 84 | ## Type: string 85 | ## Default: "" 86 | ## ServiceRestart: jenkins 87 | # 88 | # Password to access the keystore defined in JENKINS_HTTPS_KEYSTORE. 89 | # Default is disabled. 90 | # 91 | JENKINS_HTTPS_KEYSTORE_PASSWORD="" 92 | 93 | ## Type: string 94 | ## Default: "" 95 | ## ServiceRestart: jenkins 96 | # 97 | # IP address Jenkins listens on for HTTPS requests. 98 | # Default is disabled. 99 | # 100 | JENKINS_HTTPS_LISTEN_ADDRESS="{{ jenkins_https_listen_address }}" 101 | 102 | ## Type: integer(0:65535) 103 | ## Default: 8009 104 | ## ServiceRestart: jenkins 105 | # 106 | # Ajp13 Port Jenkins is listening on. 107 | # Set to -1 to disable 108 | # 109 | JENKINS_AJP_PORT="{{ jenkins_ajp_port }}" 110 | 111 | ## Type: string 112 | ## Default: "" 113 | ## ServiceRestart: jenkins 114 | # 115 | # IP address Jenkins listens on for Ajp13 requests. 116 | # Default is all interfaces (0.0.0.0). 117 | # 118 | JENKINS_AJP_LISTEN_ADDRESS="{{ jenkins_ajp_listen_address }}" 119 | 120 | ## Type: integer(1:9) 121 | ## Default: 5 122 | ## ServiceRestart: jenkins 123 | # 124 | # Debug level for logs -- the higher the value, the more verbose. 125 | # 5 is INFO. 126 | # 127 | JENKINS_DEBUG_LEVEL="{{ jenkins_debug_level }}" 128 | 129 | ## Type: yesno 130 | ## Default: no 131 | ## ServiceRestart: jenkins 132 | # 133 | # Whether to enable access logging or not. 
134 | # 135 | JENKINS_ENABLE_ACCESS_LOG="{{ jenkins_enable_access_log }}" 136 | 137 | ## Type: integer 138 | ## Default: 100 139 | ## ServiceRestart: jenkins 140 | # 141 | # Maximum number of HTTP worker threads. 142 | # 143 | JENKINS_HANDLER_MAX="{{ jenkins_handler_max }}" 144 | 145 | ## Type: integer 146 | ## Default: 20 147 | ## ServiceRestart: jenkins 148 | # 149 | # Maximum number of idle HTTP worker threads. 150 | # 151 | JENKINS_HANDLER_IDLE="{{ jenkins_handler_idle }}" 152 | 153 | ## Type: string 154 | ## Default: "" 155 | ## ServiceRestart: jenkins 156 | # 157 | # Pass arbitrary arguments to Jenkins. 158 | # Full option list: java -jar jenkins.war --help 159 | # 160 | JENKINS_ARGS="{{ _jenkins_args }}" 161 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/templates/user.groovy: -------------------------------------------------------------------------------- 1 | import hudson.model.User; 2 | import hudson.tasks.Mailer.UserProperty; 3 | import hudson.security.HudsonPrivateSecurityRealm; 4 | import hudson.security.HudsonPrivateSecurityRealm.Details; 5 | import org.jenkinsci.main.modules.cli.auth.ssh.UserPropertyImpl; 6 | 7 | void createOrUpdateUser(String nick, 8 | String email, 9 | String password) { 10 | boolean changed = false; 11 | User user = User.getById(nick, false); 12 | // If user is null, then the user does not yet exist 13 | if ( user == null ) { 14 | // Synthetic or virtual users are created against this, even if the jenkins 15 | // is later going to be configured against LDAP or the like 16 | HudsonPrivateSecurityRealm realm = new HudsonPrivateSecurityRealm(false); 17 | user = realm.createAccount(nick, password); 18 | println "CHANGED: Created user " + nick; 19 | changed = true; 20 | } 21 | 22 | // Ensure the user's email is set properly 23 | UserProperty emailProperty = user.getProperty(UserProperty.class); 24 | if ( emailProperty == null || !emailProperty.getAddress().equals(email) ) 
{ 25 | emailProperty = new UserProperty(email); 26 | user.addProperty(emailProperty); 27 | println "CHANGED: Set " + nick + "'s email to " + email; 28 | changed = true; 29 | } 30 | 31 | // Ensure the password is set properly 32 | Details details = user.getProperty(Details.class); 33 | if ( details == null || !details.isPasswordCorrect(password) ) { 34 | details = Details.fromPlainPassword(password); 35 | user.addProperty(details); 36 | println "CHANGED: Updated password for " + nick; 37 | changed = true; 38 | } 39 | 40 | if ( changed ) 41 | user.save() 42 | } 43 | 44 | createOrUpdateUser("{{ jenkins_admin.nickname }}", 45 | "{{ jenkins_admin.email }}", 46 | "{{ jenkins_admin.password }}"); 47 | {% for user in jenkins_local_users %} 48 | createOrUpdateUser("{{ user.nickname }}", "{{ user.email }}", "{{ user.password }}"); 49 | {% endfor %} 50 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master/vars/main.yml: -------------------------------------------------------------------------------- 1 | # A set of command-line Java options to be passed into Jenkins. These values 2 | # can be anything accepted either by Jenkins or by the JVM 3 | jenkins_java_options: 4 | # Tell Java it is running on a headless system 5 | - -Djava.awt.headless=true 6 | # disables SNI extension in case of Java 1.7, which causes problems with SSL 7 | - -Djsse.enableSNIExtension=false 8 | # Allows all parameters to be passed between jobs in trigger pipelines 9 | - -Dhudson.model.ParametersAction.keepUndefinedParameters=true 10 | # Allows creation of synthetic users that exist only in Jenkins and not external auth 11 | - -Dhudson.model.User.allowNonExistentUserToLogin=true 12 | # allows the creation of a synthetic/virtual user to be created via a URL.
13 | - -Dhudson.model.User.allowUserCreationViaUrl=true 14 | # Allows admin users to see the API tokens of all users 15 | - -Djenkins.security.ApiTokenProperty.showTokenToAdmins=true 16 | # adds performance log messages during Jenkins initialization. 17 | - -Djenkins.model.Jenkins.logStartupPerformance=true 18 | # helps in cases where SSH slave startup can block. See JENKINS-20108 19 | - -Djava.security.egd=file:/dev/./urandom 20 | # Needed for Jenkins 2.X to bypass SetupWizard. 21 | - -Djenkins.install.runSetupWizard=false 22 | # Needed to disable DNSMulticast 23 | - -Dhudson.DNSMultiCast.disabled=true 24 | # Increase max Java heap size 25 | - -Xmx{{ java_heap_size }} 26 | 27 | # The baseline RPMs that are installed to every Jenkins master 28 | jmaster_rpms: 29 | - "{{ jenkins_rpm }}" 30 | - libvirt-devel 31 | - python-virtualenv 32 | - libyaml-devel 33 | - openssl-devel 34 | - libffi-devel 35 | # The default access roles that all Jenkins masters should have. Permissions are 36 | # listed according to their Java name, and users/groups/etc that should have 37 | # access to that role are listed in the "sids" array. In addition to this, every 38 | # master will have an "admin" role created that has knowledge of every permission 39 | # available at the role's creation time. To add new roles or to override that 40 | # admin role, use the jenkins_security_extra_roles variable in the defaults folder 41 | # of this Ansible role 42 | jenkins_security_roles: 43 | - name: anonymous 44 | permissions: 45 | - com.synopsys.arc.jenkins.plugins.ownership.OwnershipPlugin.Jobs 46 | - hudson.model.Hudson.Read 47 | - hudson.model.View.Read 48 | sids: 49 | - anonymous 50 | 51 | jenkins_cli_jar: "{{ jenkins_cli_shell_user_home }}/jenkins-cli.jar" 52 | jenkins_cli_jar_src: /var/cache/jenkins/war/WEB-INF/jenkins-cli.jar 53 | # Session timeout doesn't work through java options and has to pass as an 54 | # argument.
To support both session timeout and custom jenkins args without 55 | # mistakenly overriding it, a proxy variable will be used. 56 | _jenkins_args: "--sessionTimeout={{ session_timeout }} {{ jenkins_args }}" 57 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_master_stop/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: stop the Jenkins process 2 | service: 3 | name: jenkins 4 | state: stopped 5 | become: true 6 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_slave/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # These are arguments which you will need to override 2 | # URL for the Jenkins master this slave should attach to 3 | jenkins_url: http://example.com:8080/jenkins/ 4 | 5 | # The Jenkins name for the slave 6 | jslave_name: "{{ inventory_hostname }}" 7 | # The label for this slave 8 | jslave_label: ops-jslave 9 | # Number of executor processes on the slave - modify based on system capacity and load 10 | jswarm_execs: 10 11 | # Extra command line args passed to jswarm 12 | jswarm_extra_args: '' 13 | # Directory where the swarm will live and execute 14 | jswarm_home: "{{ jenkins_user_home }}" 15 | # Path to Java interpreter to use to run the Swarm client 16 | jenkins_java_cmd: /usr/bin/java 17 | # Command line args to the Java executable, such as heap size, etc 18 | java_args: -Xmx2048m 19 | 20 | # These are variables which you probably don't want to 21 | # override, but which you are free to override if you 22 | # feel the need to 23 | jswarm_download: "http://repo.jenkins-ci.org/releases/org/jenkins-ci/plugins/swarm-client/\ 24 | {{ jswarm_version }}/{{ jswarm_filename }}" 25 | jswarm_local_directory: /opt/jswarm 26 | jenkins_slave_username: '' 27 | jenkins_slave_password: '' 28 | 29 | # These are variables that you really should not
override 30 | # unless you really know what you're doing 31 | jslave_rpm_deps: 32 | - wget 33 | 34 | jslave_extra_rpms: [] 35 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_slave/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: restart swarm 2 | service: 3 | name: swarm 4 | state: restarted 5 | when: ansible_connection != 'docker' 6 | 7 | - name: reload systemd files 8 | systemd: 9 | daemon_reload: true 10 | name: swarm 11 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_slave/tasks/check_swarm_errors.yml: -------------------------------------------------------------------------------- 1 | # Reporting errors in a readable, pretty-print format must be done in two steps 2 | # here since the 'fail' module cannot split newlines. 3 | 4 | # The service_swarm_err_output var is set by the check_swarm_systemd.yml or 5 | # check_swarm_upstart.yml playbooks, depending on which init system is in use. 6 | - name: report errors for swarm service 7 | debug: var=service_swarm_err_output.stdout.split('\n') 8 | 9 | - name: exit with failure if swarm service could not connect to the master 10 | fail: 11 | msg: 12 | Swarm failed to connect to the master. See prior debug output for details. 13 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_slave/tasks/check_swarm_systemd.yml: -------------------------------------------------------------------------------- 1 | - name: check if swarm connected to the master with systemd 2 | command: systemctl status swarm 3 | ignore_errors: true 4 | changed_when: false 5 | register: service_swarm_status 6 | tags: 7 | - skip_ansible_lint # systemctl module does not support "status" 8 | 9 | # Some shell magic is required here to grab the log output from *only* the last 10 | # run of the swarm service in systemd. 
To do this, we find the last PID that 11 | # was used to start the service using systemctl, and we extract just the PID 12 | # via 'cut'. Then we pass that PID to journalctl to give us only the log data 13 | # we want. 14 | - name: grab errors for the last run of the systemd swarm service 15 | shell: > 16 | journalctl --no-pager -u swarm -o cat 17 | _PID=$(systemctl show swarm --property=ExecMainPID 18 | | cut -d '=' -f 2) 19 | register: service_swarm_err_output 20 | when: service_swarm_status is failed 21 | 22 | - name: report errors for systemd swarm service 23 | include: check_swarm_errors.yml 24 | when: service_swarm_status is failed 25 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_slave/tasks/check_swarm_upstart.yml: -------------------------------------------------------------------------------- 1 | - name: check if swarm connected to the master with upstart 2 | command: "grep 'INFO: Connected' /var/log/jenkins-swarm.log" 3 | ignore_errors: true 4 | changed_when: false 5 | register: service_swarm_status 6 | 7 | - name: grab errors for the last run of the upstart swarm service 8 | command: cat /var/log/jenkins-swarm.log 9 | register: service_swarm_err_output 10 | when: service_swarm_status|failed 11 | 12 | - name: report errors for upstart swarm service 13 | include: check_swarm_errors.yml 14 | when: service_swarm_status|failed 15 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_slave/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: disable SELinux in Fedora 2 | selinux: 3 | state: disabled 4 | when: ansible_distribution == 'Fedora' and ansible_connection != 'docker' 5 | 6 | - name: install rpm packages 7 | package: 8 | name: "{{ ( jslave_rpm_deps + jslave_extra_rpms ) | join(',') }}" 9 | state: present 10 | retries: 2 11 | register: install_worker 12 | until: install_worker is success 13 | 
14 | - name: add host to hosts file 15 | lineinfile: 16 | line: "{{ ansible_ssh_host }} {{ inventory_hostname }}.localdomain" 17 | dest: /etc/hosts 18 | when: ansible_connection != 'docker' 19 | 20 | - name: give jenkins user sudo access 21 | lineinfile: 22 | dest: /etc/sudoers 23 | line: "{{ jenkins_user }} ALL=(ALL) NOPASSWD: ALL" 24 | 25 | - name: ensure jswarm destination folder exists 26 | file: 27 | path: "{{ jswarm_local_directory }}" 28 | owner: "{{ jenkins_user }}" 29 | mode: 0755 30 | state: directory 31 | 32 | - name: fetch swarm download 33 | get_url: 34 | dest: "{{ jswarm_local_directory }}/{{ jswarm_filename }}" 35 | url: "{{ jswarm_download }}" 36 | owner: "{{ jenkins_user }}" 37 | 38 | 39 | # Inside of a Docker container, we want to be able to pull values from the container's running 40 | # environment. In order to do that, we need to drop values into the configurable fields that 41 | # match the syntax dockerize uses. Unfortunately, that syntax is very similar to Jinja2 syntax, 42 | # and Ansible does not stop evaluation after only one iteration. Putting Jinja2 raw blocks into 43 | # the string values here allow only the inner string (e.g. '{{ .Env.JENKINS_MASTER_URL }}') to 44 | # pass through to the output and prevents Ansible from trying to do template substitution on the 45 | # string value itself. 46 | - name: set variable for docker container 47 | set_fact: 48 | jenkins_master_url: "{{ '{% raw %}{{ .Env.JENKINS_MASTER_URL }}{% endraw %}' }}" 49 | jslave_name: "{{ '{% raw %}{{ .Env.JSLAVE_NAME }}{% endraw %}' }}" 50 | jslave_label: "{{ '{% raw %}{{ .Env.JSLAVE_LABEL }}{% endraw %}' }}" 51 | jswarm_extra_args: "{{ '{% raw %}{{ default .Env.JSWARM_EXTRA_ARGS \"\" }}{% endraw %}' }}" 52 | when: ansible_connection == 'docker' 53 | 54 | # The default swarm client timeout is 10 seconds. This value should never be 55 | # set to less than 10 seconds, and increasing it should not be necessary. 
56 | # It is not recommended to change this value, but it is set here in order to 57 | # define this value in a single location for the systemd and upstart service 58 | # templates. 59 | - name: set variable for swarm service retry timer 60 | set_fact: 61 | swarm_retry_timer: 10 62 | 63 | - name: upload swarm config file 64 | template: 65 | src: sysconfig_jenkins_swarm 66 | dest: /etc/sysconfig/jenkins_swarm 67 | owner: root 68 | mode: 0644 69 | notify: restart swarm 70 | 71 | - name: upload swarm systemd file 72 | template: 73 | src: swarm.service 74 | dest: /etc/systemd/system/swarm.service 75 | owner: root 76 | mode: 0644 77 | notify: restart swarm 78 | when: ansible_service_mgr == 'systemd' 79 | register: systemd_file 80 | 81 | - name: reload systemd files when necessary 82 | systemd: 83 | daemon_reload: true 84 | name: swarm 85 | when: systemd_file is changed 86 | tags: 87 | - skip_ansible_lint 88 | 89 | - name: upload swarm upstart file 90 | template: 91 | src: swarm.upstart.conf 92 | dest: /etc/init/swarm.conf 93 | owner: root 94 | mode: 0644 95 | notify: restart swarm 96 | when: ansible_service_mgr == 'upstart' 97 | 98 | - name: flush handlers to avoid start/restart on initial launch 99 | meta: flush_handlers 100 | 101 | - name: start swarm 102 | service: 103 | name: swarm 104 | state: started 105 | enabled: true 106 | when: ansible_connection != 'docker' 107 | 108 | # There is an unavoidable race condition when running the swarm client within 109 | # an init system. The value of swarm_retry_timer * 1.5 is used to help ensure 110 | # that errors are not caught during a subsequent, and unrelated service 111 | # restart. Essentially, we are checking the status directly in-between service 112 | # restarts. 
113 | - name: wait for the swarm connection timeout 114 | pause: 115 | seconds: "{{ (swarm_retry_timer * 1.5)|int }}" 116 | when: ansible_connection != 'docker' 117 | 118 | - name: check swarm service status when using systemd 119 | include: check_swarm_systemd.yml 120 | when: ansible_connection != 'docker' and ansible_service_mgr == 'systemd' 121 | 122 | - name: check swarm service status when using upstart 123 | include: check_swarm_upstart.yml 124 | when: ansible_connection != 'docker' and ansible_service_mgr == 'upstart' 125 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_slave/templates/swarm.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | After=network-online.target 3 | Wants=network-online.target 4 | [Service] 5 | User={{ jenkins_user }} 6 | EnvironmentFile=/etc/sysconfig/jenkins_swarm 7 | Restart=on-failure 8 | 9 | # Wait for the service to fail 10 | TimeoutStartSec={{ swarm_retry_timer }}s 11 | 12 | # Restart failed service after timer expires 13 | RestartSec={{ swarm_retry_timer }}s 14 | 15 | # -retry 0 in this script disables the built-in retry functionality in swarm so 16 | # the init system can manage it 17 | ExecStart={{ jenkins_java_cmd }} \ 18 | $SWARM_JAVA_ARGS \ 19 | -jar "{{ jswarm_local_directory }}/{{ jswarm_filename }}" \ 20 | -master "$SWARM_MASTER" \ 21 | -name "$SWARM_SLAVE_NAME" \ 22 | -executors "$SWARM_EXECUTORS" \ 23 | -labels "$SWARM_SLAVE_LABEL" \ 24 | -fsroot "$SWARM_ROOT" \ 25 | -retry 0 \ 26 | $SWARM_USERNAME \ 27 | $SWARM_PASSWORD \ 28 | $SWARM_EXTRA_ARGS 29 | 30 | [Install] 31 | WantedBy=multi-user.target 32 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_slave/templates/swarm.upstart.conf: -------------------------------------------------------------------------------- 1 | description "Jenkins slave swarm client" 2 | author "David Roble " 3 | start on runlevel 
[2345] 4 | stop on runlevel [016] 5 | respawn 6 | respawn limit unlimited 7 | 8 | # Wait for the service to fail 9 | kill timeout {{ swarm_retry_timer }} 10 | 11 | # Restart failed service after timer expires 12 | post-stop exec sleep {{ swarm_retry_timer }} 13 | 14 | # -retry 0 in this script disables the built-in retry functionality in swarm so 15 | # the init system can manage it. 16 | # We use 'tee' to write the swarm output to syslog via 'logger' as well as 17 | # writing the output from the *last* run of the swarm client to a separate log 18 | # file that can be read by Ansible. upstart doesn't allow us to easily grab the 19 | # log output of the most recent service restart. 20 | script 21 | exec su -s /bin/sh \ 22 | -c 'exec "$0" "$@"' {{ jenkins_user }} -- \ 23 | "{{ jenkins_java_cmd }}" \ 24 | {{ java_args }} \ 25 | -jar "{{ jswarm_local_directory }}/{{ jswarm_filename }}" \ 26 | -master "{{ jenkins_master_url }}" \ 27 | -name "{{ jslave_name }}" \ 28 | -executors {{ jswarm_execs }} \ 29 | -labels "{{ jslave_label }}" \ 30 | -fsroot "{{ jswarm_home }}" \ 31 | -retry 0 \ 32 | {{ (jenkins_slave_username == '') | ternary('', '-username') }} {{ jenkins_slave_username }} \ 33 | {{ (jenkins_slave_password == '') | ternary('', '-password') }} {{ jenkins_slave_password }} \ 34 | -mode exclusive -disableSslVerification -deleteExistingClients {{ jswarm_extra_args }} \ 35 | 2>&1 | tee /var/log/jenkins-swarm.log | logger -t jenkins-swarm 36 | end script 37 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_slave/templates/sysconfig_jenkins_swarm: -------------------------------------------------------------------------------- 1 | {# Any additions to this or major alterations to this file will need to be accompanied by updates to both 2 | the swarm.service file (used in systemd machines like RHEL/CentOS 7, Fedora, etc), in swarm.upstart.conf 3 | (used in upstart machines like RHEL/CentOS 6) and to the jswarm.sh file
(used by dockerize in the 4 | docker container images). #} 5 | SWARM_JAVA_ARGS="{{ java_args }}" 6 | SWARM_MASTER="{{ jenkins_master_url }}" 7 | SWARM_SLAVE_NAME="{{ jslave_name }}" 8 | SWARM_EXECUTORS="{{ jswarm_execs }}" 9 | SWARM_SLAVE_LABEL="{{ jslave_label }}" 10 | SWARM_ROOT="{{ jswarm_home }}" 11 | SWARM_USERNAME="{{ (jenkins_slave_username == '') | ternary('', '-username') }} {{ jenkins_slave_username }}" 12 | SWARM_PASSWORD="{{ (jenkins_slave_password == '') | ternary('', '-password') }} {{ jenkins_slave_password }}" 13 | SWARM_EXTRA_ARGS="-mode exclusive -disableSslVerification -deleteExistingClients {{ jswarm_extra_args }}" 14 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_slave_container_wrapup/defaults/main.yml: -------------------------------------------------------------------------------- 1 | host: "{{ hostvars[inventory_hostname]['ansible_host'] | default(inventory_hostname) }}" 2 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_slave_container_wrapup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: clean yum cache 2 | file: 3 | dest: /var/cache/{{ ansible_pkg_mgr }} 4 | state: absent 5 | 6 | - name: recreate cache dir 7 | file: 8 | dest: /var/cache/{{ ansible_pkg_mgr }} 9 | state: directory 10 | owner: root 11 | group: root 12 | mode: 0755 13 | 14 | - name: copy in shell script 15 | template: 16 | src: "{{ item }}" 17 | dest: /usr/local/bin 18 | mode: 0755 19 | owner: root 20 | group: root 21 | with_items: "{{ container_bin_files }}" 22 | 23 | - name: state config file 24 | stat: 25 | path: /etc/sysconfig/jenkins_swarm.templated 26 | register: template_file 27 | 28 | - name: create config file 29 | file: 30 | dest: /etc/sysconfig/jenkins_swarm.templated 31 | owner: "{{ jenkins_user }}" 32 | mode: 0644 33 | state: touch 34 | when: not template_file.stat.exists 35 | 36 | - name: 
delete weird file 37 | file: 38 | dest: /var/log/lastlog 39 | state: absent 40 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_slave_container_wrapup/templates/jmaster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -xe 2 | 3 | echo "ERROR: Master containers not yet supported" 4 | exit 1 5 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_slave_container_wrapup/templates/jswarm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -xe 2 | . /etc/sysconfig/jenkins_swarm.templated 3 | exec java ${SWARM_JAVA_ARGS} \ 4 | -jar "{{ jswarm_local_directory }}/{{ jswarm_filename }}" \ 5 | -master "${SWARM_MASTER}" \ 6 | -name "${SWARM_SLAVE_NAME}" \ 7 | -executors "${SWARM_EXECUTORS}" \ 8 | -labels "${SWARM_SLAVE_LABEL}" \ 9 | -fsroot "${SWARM_ROOT}" \ 10 | ${SWARM_USERNAME} \ 11 | ${SWARM_PASSWORD} \ 12 | ${SWARM_EXTRA_ARGS} 13 | - 14 | -------------------------------------------------------------------------------- /cinch/roles/jenkins_slave_teardown/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: stop swarm service to remove Jenkins slave from Jenkins master 2 | service: 3 | name: swarm 4 | state: stopped 5 | -------------------------------------------------------------------------------- /cinch/roles/nginx/defaults/main.yml: -------------------------------------------------------------------------------- 1 | nginx_capable: true 2 | httpd_capable: true 3 | ## set some defaults with the expectation that they will be set in/from calling role 4 | # canonical name for service 5 | service_name: "{{ hostvars[inventory_hostname]['ansible_host'] | default(inventory_hostname) }}" 6 | 7 | ## nginx core configuration defaults 8 | nginx_error_level: "warn" 9 | nginx_worker_processes: 1 10 | nginx_gzip_status: "on" 11 
| # Increase upload limits to allow jenkins plugin uploads 12 | nginx_max_body_size: 100M 13 | nginx_send_timeout: 120s 14 | 15 | ## variables unset by default 16 | httpd_no_error_pages: false 17 | https_enabled: false 18 | # If httpd_keytab_file, httpd_ssl_key_file and httpd_ssl_crt_file 19 | # are already present on the remote Jenkins master being configured by cinch, 20 | # set the following variable to true to copy the files from the filesystem 21 | # of the remote host rather than from the system running Ansible 22 | httpd_ssl_keytab_files_remote_src: false 23 | # local path to use as source for keytab 24 | #httpd_keytab_file: "/THIS/FILE/PROBABLY/DOESNT/EXIST" 25 | # Local path to the SSL certificate to use in configuring HTTPS. Can be full or relative 26 | # to the path from where the main playbook is run. This can be overriden if you want 27 | # secure access to Jenkins using your own SSL certificate. By default, a self signed 28 | # certificate will be created and used. 29 | #httpd_ssl_key_file: "/THIS/FILE/PROBABLY/DOESNT/EXIST" 30 | # Local path to the SSL private key in crt or pem format. For more details see comment above. 31 | # When using own SSL certificates, you need to override either this variable 32 | #httpd_ssl_crt_file: "/THIS/FILE/PROBABLY/DOESNT/EXIST" 33 | # Local path to the SSL private key in PEM format. For more details see comment above. 
34 | #httpd_ssl_pem_file: "/THIS/FILE/PROBABLY/DOESNT/EXIST" 35 | # location and CN settings for the self signed certificate 36 | ssl_self_signed_string: "/C=US/ST=New York/L=New York City/O=My Department/CN={{ service_name }}" 37 | # whether to use a speedy method to generate Diffie Hellman parameters 38 | ssl_fast_dh: false 39 | nginx_ssl_ca_line: "#ssl_client_certificate /path/to/ca/file;" 40 | 41 | httpd_keytab_line: "auth_gss_keytab /etc/nginx/conf.d/httpd.keytab;" 42 | -------------------------------------------------------------------------------- /cinch/roles/nginx/files/etc/logrotate.d/nginx: -------------------------------------------------------------------------------- 1 | /var/log/nginx/*.log { 2 | daily 3 | missingok 4 | rotate 30 5 | compress 6 | delaycompress 7 | notifempty 8 | create 640 nginx adm 9 | sharedscripts 10 | postrotate 11 | [ -f /var/run/nginx.pid ] && kill -USR1 `cat /var/run/nginx.pid` 12 | endscript 13 | } 14 | -------------------------------------------------------------------------------- /cinch/roles/nginx/handlers/main.yml: -------------------------------------------------------------------------------- 1 | # TODO: systemd: daemon_reload=yes can be used in Ansible 2.4 2 | - name: reload systemd 3 | command: systemctl daemon-reload 4 | when: ansible_service_mgr == 'systemd' 5 | tags: 6 | - skip_ansible_lint 7 | 8 | - name: restart nginx service 9 | service: 10 | name: nginx 11 | state: restarted 12 | -------------------------------------------------------------------------------- /cinch/roles/nginx/meta/main.yml: -------------------------------------------------------------------------------- 1 | # Standards: 1.7 2 | galaxy_info: 3 | author: Jakub Paulovic 4 | description: nginx setup 5 | company: Red Hat 6 | 7 | license: BSD 8 | 9 | min_ansible_version: 2.1 10 | 11 | platforms: 12 | - name: EL 13 | versions: 14 | - 6 15 | - 7 16 | 17 | galaxy_tags: [] 18 | 19 | dependencies: [] 20 | 
-------------------------------------------------------------------------------- /cinch/roles/nginx/tasks/kerberos-setup.yml: -------------------------------------------------------------------------------- 1 | - name: copy over keytab 2 | copy: 3 | src: "{{ httpd_keytab_file }}" 4 | dest: "/etc/nginx/conf.d/httpd.keytab" 5 | remote_src: "{{ httpd_ssl_keytab_files_remote_src }}" 6 | when: httpd_keytab_file is defined 7 | no_log: true 8 | -------------------------------------------------------------------------------- /cinch/roles/nginx/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include: nginx.yml 2 | 3 | - include: ssl-setup.yml 4 | when: https_enabled 5 | 6 | - name: setup kerberos 7 | include: kerberos-setup.yml 8 | when: kerberos_capable is defined and kerberos_capable 9 | 10 | - name: setup selinux 11 | include: selinux.yml 12 | when: "'enabled' in ansible_selinux.status" 13 | 14 | - name: enable nginx 15 | service: 16 | name: nginx 17 | enabled: true 18 | -------------------------------------------------------------------------------- /cinch/roles/nginx/tasks/nginx.yml: -------------------------------------------------------------------------------- 1 | - name: install nginx 2 | package: 3 | name: nginx 4 | state: installed 5 | when: "repos_add is not defined or 'epel' not in repos_add" 6 | retries: 2 7 | register: install_nginx 8 | until: install_nginx is success 9 | notify: 10 | - reload systemd 11 | - restart nginx service 12 | 13 | - name: install nginx 14 | package: 15 | name: nginx 16 | state: installed 17 | disablerepo: epel 18 | when: "repos_add is defined and 'epel' in repos_add" 19 | retries: 2 20 | register: install_nginx_no_epel 21 | until: install_nginx_no_epel is success 22 | notify: restart nginx service 23 | 24 | - name: install nginx logrotation file 25 | copy: 26 | src: etc/logrotate.d/nginx 27 | dest: /etc/logrotate.d/nginx 28 | owner: root 29 | group: root 30 | mode: 0644 31 | 32 | 
- name: install /etc/nginx/nginx.conf 33 | template: 34 | src: etc/nginx/nginx.conf 35 | dest: /etc/nginx/nginx.conf 36 | owner: root 37 | group: root 38 | mode: 0644 39 | notify: restart nginx service 40 | 41 | - name: install /etc/nginx/conf.d/monitor.conf.snippet 42 | template: 43 | src: etc/nginx/conf.d/monitor.conf.snippet 44 | dest: /etc/nginx/conf.d/monitor.conf.snippet 45 | owner: nginx 46 | group: nginx 47 | mode: 0640 48 | notify: restart nginx service 49 | 50 | - name: install /etc/nginx/conf.d/errors.conf.snippet 51 | template: 52 | src: etc/nginx/conf.d/errors.conf.snippet 53 | dest: /etc/nginx/conf.d/errors.conf.snippet 54 | owner: nginx 55 | group: nginx 56 | mode: 0640 57 | notify: restart nginx service 58 | when: httpd_no_error_pages is not defined 59 | -------------------------------------------------------------------------------- /cinch/roles/nginx/tasks/selinux.yml: -------------------------------------------------------------------------------- 1 | - name: ensure libsemanage-python is installed 2 | package: 3 | name: libsemanage-python 4 | state: present 5 | retries: 2 6 | register: install_libsemanage 7 | until: install_libsemanage is success 8 | 9 | - name: enable selinux overrides 10 | seboolean: 11 | name: "{{ item }}" 12 | state: true 13 | persistent: true 14 | with_items: 15 | - httpd_can_network_connect 16 | - httpd_can_network_connect_db 17 | - httpd_can_sendmail 18 | -------------------------------------------------------------------------------- /cinch/roles/nginx/tasks/ssl-setup.yml: -------------------------------------------------------------------------------- 1 | - name: copy over ssl key 2 | copy: 3 | src: "{{ httpd_ssl_key_file }}" 4 | dest: "/etc/nginx/conf.d/ssl.key" 5 | remote_src: "{{ httpd_ssl_keytab_files_remote_src }}" 6 | owner: nginx 7 | group: nginx 8 | mode: 0600 9 | register: setup_ssl_key 10 | when: httpd_ssl_key_file is defined 11 | no_log: true 12 | tags: 13 | - update_ssl_certs 14 | 15 | - name: copy over ssl 
crt file 16 | copy: 17 | src: "{{ httpd_ssl_crt_file }}" 18 | dest: "/etc/nginx/conf.d/ssl.pem" 19 | remote_src: "{{ httpd_ssl_keytab_files_remote_src }}" 20 | owner: nginx 21 | group: nginx 22 | mode: 0644 23 | register: setup_ssl_pem 24 | when: setup_ssl_key|success 25 | notify: restart nginx service 26 | tags: 27 | - update_ssl_certs 28 | 29 | # generate our own key/crt if pem is missing 30 | - name: generate self signed ssl certificate 31 | command: > 32 | openssl req -new -nodes -x509 -subj "{{ ssl_self_signed_string }}" 33 | -days 3650 -keyout /etc/nginx/conf.d/ssl.key 34 | -out /etc/nginx/conf.d/ssl.pem -extensions v3_ca 35 | args: 36 | creates: /etc/nginx/conf.d/ssl.pem 37 | when: setup_ssl_key|failed or setup_ssl_pem|failed or setup_ssl_key|skipped 38 | or setup_ssl_pem|skipped 39 | 40 | - name: warn that the next step takes a while 41 | debug: 42 | msg: "the next step can take around 15 minutes if it hasn't already been done" 43 | 44 | - name: create Diffie Hellman ephemeral parameters 45 | # https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html 46 | command: openssl dhparam {{ '-dsaparam' if ssl_fast_dh else '' }} -out dhparam.pem 4096 47 | args: 48 | chdir: /etc/ssl/certs 49 | creates: /etc/ssl/certs/dhparam.pem 50 | -------------------------------------------------------------------------------- /cinch/roles/nginx/templates/etc/nginx/conf.d/errors.conf.snippet: -------------------------------------------------------------------------------- 1 | # redirect server error pages to the static pages from ESO_error_pages RPM 2 | 3 | location /errors {root /var/www;} 4 | error_page 401 /errors/401.html; 5 | error_page 403 /errors/403.html; 6 | error_page 404 /errors/404.html; 7 | error_page 500 /errors/500.html; 8 | error_page 501 /errors/501.html; 9 | error_page 502 /errors/502.html; 10 | error_page 503 /errors/503.html; 11 | error_page 504 /errors/504.html; 12 | -------------------------------------------------------------------------------- 
/cinch/roles/nginx/templates/etc/nginx/conf.d/monitor.conf.snippet: -------------------------------------------------------------------------------- 1 | location /nginx_status { 2 | stub_status on; 3 | access_log off; 4 | allow 127.0.0.1; 5 | deny all; 6 | } 7 | -------------------------------------------------------------------------------- /cinch/roles/nginx/templates/etc/nginx/nginx.conf: -------------------------------------------------------------------------------- 1 | user nginx; 2 | worker_processes {{ nginx_worker_processes }}; 3 | 4 | error_log /var/log/nginx/error.log {{ nginx_error_level }}; 5 | {% if ansible_distribution_major_version == "7" or ansible_distribution == 'Fedora' %} 6 | pid /run/nginx.pid; 7 | {% else %} 8 | pid /var/run/nginx.pid; 9 | {% endif %} 10 | 11 | # Load dynamic modules. See /usr/share/nginx/README.dynamic. 12 | include /usr/share/nginx/modules/*.conf; 13 | 14 | 15 | events { 16 | worker_connections 1024; 17 | } 18 | 19 | 20 | http { 21 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 22 | '$status $body_bytes_sent "$http_referer" ' 23 | '"$http_user_agent" "$http_x_forwarded_for"'; 24 | 25 | access_log /var/log/nginx/access.log main; 26 | 27 | sendfile on; 28 | tcp_nopush on; 29 | tcp_nodelay on; 30 | keepalive_timeout 65; 31 | types_hash_max_size 2048; 32 | #server_names_hash_bucket_size 128; 33 | 34 | client_body_in_file_only clean; 35 | client_body_buffer_size 32K; 36 | client_max_body_size {{ nginx_max_body_size }}; 37 | send_timeout {{ nginx_send_timeout }}; 38 | 39 | include /etc/nginx/mime.types; 40 | default_type application/octet-stream; 41 | 42 | gzip {{ nginx_gzip_status }}; 43 | 44 | include /etc/nginx/conf.d/*.conf; 45 | } 46 | -------------------------------------------------------------------------------- /cinch/roles/nginx/templates/example_ssl.conf: -------------------------------------------------------------------------------- 1 | # HTTPS server 2 | # 3 | #server { 4 | # listen 443; 5 
| # server_name {{ service_name }}; 6 | 7 | # ssl on; 8 | # ssl_certificate /etc/nginx/conf.d/ssl.pem; 9 | # ssl_certificate_key /etc/nginx/conf.d/ssl.key; 10 | # {{ nginx_ssl_ca_line }} 11 | 12 | # ssl_session_timeout 5m; 13 | 14 | # # https://mozilla.github.io/server-side-tls/ssl-config-generator/ 15 | # # modern configuration. tweak to your needs. 16 | # ssl_protocols TLSv1.1 TLSv1.2; 17 | # ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!3DES:!MD5:!PSK'; 18 | # ssl_prefer_server_ciphers on; 19 | # 20 | # # HSTS (ngx_http_headers_module is required) (15768000 seconds = 6 months) 21 | # add_header Strict-Transport-Security max-age=15768000; 22 | 23 | # location / { 24 | # root /usr/share/nginx/html; 25 | # index index.html index.htm; 26 | # 27 | # # optional kerberos configuration 28 | # auth_gss on; 29 | # auth_gss_allow_basic_fallback off; 30 | # auth_gss_realm EXAMPLE.COM; 31 | # # {{ httpd_keytab_line }} 32 | # auth_gss_service_name HTTP/{{ inventory_hostname }}; 33 | # proxy_set_header REMOTE_USER $remote_user; 34 | # 35 | # } 36 | #} 37 | -------------------------------------------------------------------------------- /cinch/roles/ntp/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: install ntp 2 | become: true 3 | package: 4 | name: ntp 5 | state: present 6 | retries: 2 7 | register: install_ntp 8 | until: install_ntp is success 9 | 10 | - name: override ntp servers when ntp_servers is defined 11 | block: 12 | - name: 
remove existing ntp server values 13 | lineinfile: 14 | backup: true 15 | dest: /etc/ntp.conf 16 | state: absent 17 | regexp: "^server" 18 | 19 | - name: add customized ntp servers 20 | blockinfile: 21 | dest: /etc/ntp.conf 22 | insertafter: EOF 23 | block: "{{ item }}" 24 | with_items: "{{ ntp_servers }}" 25 | when: ntp_servers is defined 26 | 27 | - name: start the NTP service 28 | become: true 29 | service: 30 | name: ntpd 31 | state: started 32 | enabled: true 33 | -------------------------------------------------------------------------------- /cinch/roles/repositories/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # Override with an array of objects like you would find in playbook/group_vars/rhel7 2 | # if you want a different set of repositories than this package installs by default. 3 | # It is not recommended that you touch the "repositories" variable in your 4 | # variables, as that value is set to include repositories that are necessary for 5 | # master and slave software installation. extra_repositories is unused by this 6 | # software and is designed to be overridden by the user with any extra repositories 7 | # they want added to the host. 8 | extra_repositories: [] 9 | repositories: [] 10 | 11 | # Override with an array of strings that are URLs to repository files, if you want 12 | # them downloaded into the target /etc/yum.repos.d/ folder so that they can be 13 | # added to the package repositories 14 | download_repositories: [] 15 | 16 | # Override this with a list of objects matching the pattern 17 | # - key: 18 | # validate_certs: 19 | # The path to a key should point towards a valid GPG key, this can either be local 20 | # to the system or a remote URL that can be accessed and downloaded. 21 | # The validate_certs parameter can be set to "false" if the key is hosted at an SSL 22 | # based URL for which the system does not have certificate authorities installed. 
Generally this should be set to "true" and the SSL certificates should be imported
item.skip_if_unavailable | default(repository_defaults.skip_if_unavailable) }} 8 | gpgcheck: '{{ item.gpgcheck | default(repository_defaults.gpgcheck) }}' 9 | description: '{{ item.description | default("Repository installed by Ansible") }}' 10 | mirrorlist: '{{ item.mirrorlist | default(omit) }}' 11 | baseurl: '{{ item.baseurl | default(omit) }}' 12 | register: repository_results 13 | -------------------------------------------------------------------------------- /cinch/roles/repositories/tasks/repository_download.yml: -------------------------------------------------------------------------------- 1 | - name: download repository file 2 | get_url: 3 | dest: /etc/yum.repos.d/ 4 | url: "{{ item }}" 5 | register: repository_download 6 | -------------------------------------------------------------------------------- /cinch/roles/upload_files/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: upload files 2 | copy: 3 | src: "{{ item.src }}" 4 | dest: "{{ item.dest }}" 5 | owner: "{{ item.owner | default(omit) }}" 6 | group: "{{ item.group | default(omit) }}" 7 | mode: "{{ item.mode | default(omit) }}" 8 | become: true 9 | with_items: "{{ upload_files }}" 10 | -------------------------------------------------------------------------------- /cinch/site.yml: -------------------------------------------------------------------------------- 1 | - name: wait for SSH 2 | hosts: all 3 | gather_facts: false 4 | roles: 5 | - check_ssh 6 | 7 | - name: stop Jenkins process when upgrading 8 | hosts: jenkins_master 9 | roles: 10 | - role: jenkins_master_stop 11 | when: jenkins_upgrade is defined and jenkins_upgrade 12 | 13 | - name: upload files before 14 | hosts: all 15 | gather_facts: false 16 | roles: 17 | - role: upload_files 18 | vars: 19 | upload_files: "{{ pre_upload_files | default([]) }}" 20 | 21 | - name: configure certificate authority, if necessary 22 | become: true 23 | hosts: certificate_authority 24 | 
gather_facts: false 25 | roles: 26 | - certificate_authority 27 | 28 | - name: configure repositories 29 | become: true 30 | hosts: repositories 31 | roles: 32 | - repositories 33 | 34 | - name: pre-configure jenkins 35 | become: true 36 | hosts: jenkins_master:jenkins_slave 37 | roles: 38 | - jenkins_common 39 | 40 | - name: configure jenkins masters 41 | become: true 42 | hosts: jenkins_master 43 | roles: 44 | - role: beaker-client 45 | when: jenkins_upgrade is not defined or not jenkins_upgrade 46 | - role: nginx 47 | when: ansible_connection != 'docker' 48 | - jenkins_master 49 | - role: ntp 50 | when: ansible_connection != 'docker' 51 | 52 | - name: configure jenkins slaves 53 | become: true 54 | hosts: jenkins_slave 55 | roles: 56 | - jenkins_slave 57 | 58 | - name: configure docker containers 59 | become: true 60 | hosts: jenkins_master:jenkins_slave 61 | roles: 62 | - role: dockerize 63 | when: ansible_connection == 'docker' 64 | - role: jenkins_slave_container_wrapup 65 | when: ansible_connection == 'docker' 66 | 67 | - name: configure container-based slaves 68 | hosts: jenkins_docker_slave 69 | roles: 70 | - jenkins_docker_slave 71 | 72 | - name: upload files after 73 | hosts: all 74 | gather_facts: false 75 | roles: 76 | - role: upload_files 77 | vars: 78 | upload_files: "{{ post_upload_files | default([]) }}" 79 | -------------------------------------------------------------------------------- /cinch/teardown.yml: -------------------------------------------------------------------------------- 1 | - name: wait for SSH 2 | hosts: all 3 | gather_facts: false 4 | roles: 5 | - check_ssh 6 | 7 | - name: teardown jenkins slaves 8 | become: true 9 | hosts: jenkins_slave 10 | roles: 11 | - jenkins_slave_teardown 12 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You 
can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = cinch 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /docs/source/_static/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatQE/cinch/467e44fb255796a68a72b17f6e30e33170a688fa/docs/source/_static/.gitkeep -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # cinch documentation build configuration file, created by 4 | # sphinx-quickstart on Thu Jan 19 13:09:21 2017. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | # If extensions (or modules to document with autodoc) are in another directory, 16 | # add these directories to sys.path here. If the directory is relative to the 17 | # documentation root, use os.path.abspath to make it absolute, like shown here. 
18 | # 19 | # import os 20 | # import sys 21 | # sys.path.insert(0, os.path.abspath('.')) 22 | 23 | 24 | # -- General configuration ------------------------------------------------ 25 | 26 | # If your documentation needs a minimal Sphinx version, state it here. 27 | # 28 | # needs_sphinx = '1.0' 29 | 30 | # Add any Sphinx extension module names here, as strings. They can be 31 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 32 | # ones. 33 | extensions = [] 34 | 35 | # Add any paths that contain templates here, relative to this directory. 36 | templates_path = ['_templates'] 37 | 38 | # The suffix(es) of source filenames. 39 | # You can specify multiple suffix as a list of string: 40 | # 41 | # source_suffix = ['.rst', '.md'] 42 | source_suffix = '.rst' 43 | 44 | # The master toctree document. 45 | master_doc = 'index' 46 | 47 | # General information about the project. 48 | project = u'cinch' 49 | copyright = u'2017, RedHatQE' 50 | author = u'RedHatQE' 51 | 52 | # The version info for the project you're documenting, acts as replacement for 53 | # |version| and |release|, also used in various other places throughout the 54 | # built documents. 55 | # 56 | # The short X.Y version. 57 | version = u'' 58 | # The full version, including alpha/beta/rc tags. 59 | release = u'' 60 | 61 | # The language for content autogenerated by Sphinx. Refer to documentation 62 | # for a list of supported languages. 63 | # 64 | # This is also used if you do content translation via gettext catalogs. 65 | # Usually you set "language" from the command line for these cases. 66 | language = None 67 | 68 | # List of patterns, relative to source directory, that match files and 69 | # directories to ignore when looking for source files. 70 | # This patterns also effect to html_static_path and html_extra_path 71 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 72 | 73 | # The name of the Pygments (syntax highlighting) style to use. 
74 | pygments_style = 'sphinx' 75 | 76 | # If true, `todo` and `todoList` produce output, else they produce nothing. 77 | todo_include_todos = False 78 | 79 | 80 | # -- Options for HTML output ---------------------------------------------- 81 | 82 | # The theme to use for HTML and HTML Help pages. See the documentation for 83 | # a list of builtin themes. 84 | # 85 | html_theme = 'sphinx_rtd_theme' 86 | 87 | # Theme options are theme-specific and customize the look and feel of a theme 88 | # further. For a list of options available for each theme, see the 89 | # documentation. 90 | # 91 | # html_theme_options = {} 92 | 93 | # Add any paths that contain custom static files (such as style sheets) here, 94 | # relative to this directory. They are copied after the builtin static files, 95 | # so a file named "default.css" will overwrite the builtin "default.css". 96 | html_static_path = ['_static'] 97 | 98 | 99 | # -- Options for HTMLHelp output ------------------------------------------ 100 | 101 | # Output file base name for HTML help builder. 102 | htmlhelp_basename = 'cinchdoc' 103 | 104 | 105 | # -- Options for LaTeX output --------------------------------------------- 106 | 107 | latex_elements = { 108 | # The paper size ('letterpaper' or 'a4paper'). 109 | # 110 | # 'papersize': 'letterpaper', 111 | 112 | # The font size ('10pt', '11pt' or '12pt'). 113 | # 114 | # 'pointsize': '10pt', 115 | 116 | # Additional stuff for the LaTeX preamble. 117 | # 118 | # 'preamble': '', 119 | 120 | # Latex figure (float) alignment 121 | # 122 | # 'figure_align': 'htbp', 123 | } 124 | 125 | # Grouping the document tree into LaTeX files. List of tuples 126 | # (source start file, target name, title, 127 | # author, documentclass [howto, manual, or own class]). 
128 | latex_documents = [ 129 | (master_doc, 'cinch.tex', u'cinch Documentation', 130 | u'RedHatQE', 'manual'), 131 | ] 132 | 133 | 134 | # -- Options for manual page output --------------------------------------- 135 | 136 | # One entry per manual page. List of tuples 137 | # (source start file, name, description, authors, manual section). 138 | man_pages = [ 139 | (master_doc, 'cinch', u'cinch Documentation', 140 | [author], 1) 141 | ] 142 | 143 | 144 | # -- Options for Texinfo output ------------------------------------------- 145 | 146 | # Grouping the document tree into Texinfo files. List of tuples 147 | # (source start file, target name, title, author, 148 | # dir menu entry, description, category) 149 | texinfo_documents = [ 150 | (master_doc, 'cinch', u'cinch Documentation', 151 | author, 'cinch', 'One line description of project.', 152 | 'Miscellaneous'), 153 | ] 154 | -------------------------------------------------------------------------------- /docs/source/config.rst: -------------------------------------------------------------------------------- 1 | Common Config Options 2 | ===================== 3 | 4 | All Options 5 | ----------- 6 | 7 | To get the latest up-to-date list of all availble options in Cinch, consult the 8 | files for each Ansible role in the cinch/roles//defaults/main.yml 9 | files in the code base. Every variable should be documented, along with the 10 | default value given. 11 | 12 | Jenkins Plugins 13 | --------------- 14 | 15 | Cinch configures what has been deemed and tested as a reasonable baseline set 16 | of Jenkins plugins. Typically it will not be necessary to alter or remove 17 | elements from this list. The current list can be found in the file 18 | cinch/files/jenkins-plugin-lists/default.txt. Opening this file will give a 19 | list of plugins, one per line. A specific version of a plugin can be specified 20 | by a line that reads "myplugin==1.2.3" and will install specifically version 21 | 1.2.3 of that plugin. 
22 | 23 | If the set of default plugins is not acceptable to a user, they can override 24 | the list by defining the variable jenkins_plugins in their host or group vars 25 | for a Cinch run to include the items they want. This variable is an array of 26 | strings, each string being the equivalent of one line from the default.txt 27 | file. 28 | 29 | If a user only wants to add some plugins that are not present in the default 30 | set, without completely overriding the set, this can be accomplished by adding 31 | entries to jenkins_extra_plugins in the same format as entries in the 32 | jenkins_plugins variable. This allows the user to install more plugins than 33 | the default, without needing to worry about falling out of sync with the 34 | default set of plugins 35 | -------------------------------------------------------------------------------- /docs/source/development.rst: -------------------------------------------------------------------------------- 1 | Development 2 | =========== 3 | 4 | Environments 5 | ------------ 6 | 7 | Development occurs targeting each of the specific host environments that are 8 | supported. The default development environment and targeted host is the latest 9 | version of CentOS. 10 | 11 | The fastest way to get yourself up and running is to leverage the Vagrant 12 | machines held within the top-level vagrant folder. These are named according to 13 | the roles that each one is designed to exercise. 14 | 15 | Install 16 | ------- 17 | 18 | To run the software locally, you need a few basic pieces of software installed. 19 | The following packages for Fedora need to be installed, minimally, or the 20 | equivalent packages for your distribution: 21 | 22 | - python-virtualenv 23 | - gcc 24 | - redhat-rpm-config 25 | - openssl-devel 26 | - libvirt-devel 27 | - libyaml-devel 28 | - vagrant 29 | 30 | The only software actually required to run the playbooks is Ansible and its 31 | dependencies. 
The other packages listed above are required only to install and 32 | build Ansible and its dependencies, such as PyYAML. Thus, if you are looking 33 | to package Cinch for a new distribution, the above packages, less vagrant, 34 | are a good starting place for build dependencies. 35 | 36 | If installing manually, you can activate your Python virtualenv of choice and 37 | issue the command ``pip install /path/to/cinch``. As a developer, if you plan to make 38 | changes to Cinch, then use pip in the local editable mode by issuing the 39 | command ``pip install -e /path/to/cinch`` instead. 40 | 41 | Execution 42 | --------- 43 | 44 | Once all of these depenencies are fulfilled, there are a number of folders 45 | under the top level vagrant/ directory that contain minimally a Vagrantfile. 46 | The Vagrantfile can be used to issue the command "vagrant up" 47 | from within that directory to spin up a collection of machines, against which 48 | the cinch playbooks will be automatically executed. Consult the README in each 49 | directory for more information about which machines will be created out of 50 | that directory, and for any information that the user might need to supply. 51 | 52 | Some of the Vagrantfile values will need to be supplied by the user, 53 | specifically any values related to RHEL repository URLs as there is no public 54 | version of those repositories available. Other values should all be provided 55 | from within those directories already. 56 | 57 | Merely issuing the command ``vagrant up`` should bring up the VMs for each 58 | environment you configure. For the most part, it should be possible to run 59 | each environment on your local system, but there is the potential that having 60 | multiple environments running at the same time on the same host could result 61 | in collissions between the IP addresses of the hosts. It certainly would lead 62 | to provided URLs in the README files being incorrect. 
63 | -------------------------------------------------------------------------------- /docs/source/docker_image.rst: -------------------------------------------------------------------------------- 1 | Docker Image 2 | ============ 3 | 4 | For users who do not want to provision an entire system to run a Jenkins slave 5 | there exists a Docker image which can quickly get a Jenkins Swarm connected 6 | instance to run. 7 | 8 | Source Image 9 | ------------ 10 | 11 | For every release of cinch that is made, a version of the Docker container is 12 | pushed to Docker Hub. Multiple tags are pushed for each Cinch release. They 13 | are named by combining source information image along with the version of 14 | Cinch used to build them. 15 | 16 | Currently there are images built off of 17 | 18 | - centos:7 19 | - centos:6 20 | 21 | These get tagged into the Cinch image repository as 22 | 23 | - redhatqecinch/jenkins_slave:cent7-0.5.2 24 | - redhatqecinch/jenkins_slave:cent6-0.5.2 25 | 26 | This indicates two images, one based on the centos:7 image and one based off 27 | the centos:6 image. Both of them are built by the version 0.5.2 release of 28 | Cinch. 29 | 30 | Image Options 31 | ------------- 32 | 33 | As with the rest of Cinch, there are some customizable image options that a 34 | user must supply before the image will work with your infrastrucutre. However, 35 | unlike using the Ansible-based solution to create your own system, there are 36 | far fewer options. Other than the following options, all builds of the Cinch 37 | Docker images utilize all default values for a Cinch slave instance. 38 | 39 | There are two variables that the user is required to provide before the image 40 | will run properly. 
Those are 41 | 42 | ==================== =================== 43 | Environment Variable Explanation 44 | ==================== =================== 45 | JENKINS_MASTER_URL The URL to the Jenkins master instance 46 | that this slave should connect to 47 | 48 | JSLAVE_NAME The name this slave will be given on the Master node 49 | JSLAVE_LABEL The Jenkins label this slave will receive, 50 | which will be matched against jobs requiring 51 | certain labels for execution 52 | JSWARM_EXTRA_ARGS Additional command-line arguments to 53 | pass to the JSwarm client in the iamge 54 | ==================== =================== 55 | 56 | If the container image is run directly from the Docker command line, these 57 | options may be passed through `docker`'s -e option. When running the image 58 | in Kubernetes or OpenShift, use that system's methods for passing in 59 | environment variables to the iamge. 60 | 61 | Customizing the Image 62 | --------------------- 63 | 64 | Instead of running the base image provided, a group could choose to use a 65 | Dockerfile to extend the base image provided to do such things as install 66 | custom software, edit configurations, etc. If that is the case, then the 67 | environment variables can absolutely be preset within the Dockerfile using its 68 | ENV command, as with any other environment variable. 69 | 70 | Extending the image in this way could simplify deployment, as the image could 71 | include information such as the Jenkins Master URL already configured to 72 | connect to the organization's Jenkins instance. Likewise, different slave 73 | images could be pre-populated with packages and slave labels for building 74 | different types of software or running different types of tasks. As nothing 75 | more than a standard Docker image, the provided images can be made fully 76 | extensible. 77 | 78 | One note is that the image is set to run all commands as the user "jenkins". 
79 | If the image is being extended, then it might be necessary to set the USER 80 | command in the extending Dockerfile to "USER root" if system software is 81 | being installed. 82 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. cinch documentation master file, created by 2 | sphinx-quickstart on Thu Jan 19 13:09:21 2017. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to cinch's documentation! 7 | ================================= 8 | 9 | .. toctree:: 10 | :maxdepth: 1 11 | 12 | users 13 | development 14 | docker_image 15 | user_files 16 | config 17 | maintainers 18 | 19 | 20 | 21 | Indices and tables 22 | ================== 23 | 24 | * :ref:`genindex` 25 | * :ref:`search` 26 | -------------------------------------------------------------------------------- /docs/source/maintainers.rst: -------------------------------------------------------------------------------- 1 | Maintainers 2 | =========== 3 | 4 | cinch contains automation to aid in the process of creating releases on GitHub 5 | and PyPI, driven by Travis CI. 
6 | 7 | Release Procedure 8 | ----------------- 9 | 10 | As a maintainer, to create a release of cinch, follow this checklist: 11 | 12 | * Ensure version has been bumped in **setup.py**, following `Semantic 13 | Versioning `_ guidelines 14 | * Ensure significant changes are listed in the **CHANGELOG** 15 | * Merge desired changes, including the above checklist items to the **master** 16 | branch 17 | * The release is based on git tags, and the following steps can be followed to 18 | tag the release, given that the upstream remote in your git config is named 19 | **upstream** (adjust git remote and version number as necessary): 20 | 21 | ``git fetch upstream master`` 22 | 23 | ``git merge upstream/master`` 24 | 25 | ``git tag v0.9.0`` 26 | 27 | ``git push --tags upstream master`` 28 | 29 | * It will take about fifteen minutes or so for the automation to add the 30 | release to PyPI, and while you wait, manually copy the release notes from the 31 | **CHANGELOG** to the GitHub releases page. This step is something that could 32 | possibly be automated in the future. 33 | -------------------------------------------------------------------------------- /docs/source/user_files.rst: -------------------------------------------------------------------------------- 1 | User Files 2 | ========== 3 | 4 | Motivation 5 | ---------- 6 | 7 | Anyone using Cinch to provision either a Jenkins master or slave may have the 8 | need to perform configuration to the system that exceeds the ability of Cinch to 9 | reasonably include support for within these playbooks. These could cover nearly 10 | any aspect of system administration, monitoring, configuration, and setup. For 11 | such a case, it is recommended that the user leverage the ability of Ansible to 12 | file a host into multiple different inventory groups, and private configuration 13 | be stored in private playbooks. Then those playbooks can be executed either 14 | before or after (or both) the Cinch playbooks are executed. 
15 | 16 | However, there are a few basic system administration tasks that are general 17 | enough, and simple enough, that Cinch has opted to support those features to 18 | assist in the configuration of a Jenkins master. In addition to supporting 19 | the ability to setup Yum/DNF repositories during configuration and configure 20 | certificate authority chains, both of which are important to installing the 21 | packages required by Cinch and to configure SSL options for Jenkins, another 22 | feature supported by Cinch is the ability to upload arbitrary files from the 23 | local system where Ansible is being hosted to the remote system being 24 | configured. 25 | 26 | Mechanisms 27 | ---------- 28 | 29 | Each Ansible host, or group, can have defined values of files to upload to the 30 | remote hosts. These uploads happen at two different points during the execution 31 | of Cinch. The first set of uploads occurs before any Cinch plays have been 32 | executed except for verifying the host is reachable. This means that none of 33 | the Cinch-related configurations will be available during this upload run, 34 | unless they have previously been configured. This includes things like the 35 | "jenkins" system user, configured repositories, certificate authorities, etc. 36 | The second run happens at the very end - after both the master and any slaves 37 | have been configured and are up and running. However, at this point, all such 38 | configurations, users, etc are already present on the system. 39 | 40 | Thus, it is important to realize a file cannot be uploaded to be owned by the 41 | Jenkins user before the Jenkins user is created. If it is necessary to upload 42 | a file as that user before the Jenkins service starts on a configured host, 43 | then it will be necessary to use external playbooks or other methods to ensure 44 | proper behavior. 
45 | 46 | Configuration 47 | ------------- 48 | 49 | Configuring uploads either before or after a Cinch run is straightforward. 50 | Simply override the values of the arrays "pre_upload_files" and 51 | "post_upload_files" in the Ansible host or group configurations for all hosts 52 | that require such a feature. 53 | 54 | These arrays require identical structures. Each element in the array should 55 | be an object hash with certain values defined. Those values are listed below: 56 | 57 | ========== =============== 58 | value required? 59 | ========== =============== 60 | src yes 61 | dest yes 62 | owner no 63 | group no 64 | mode no 65 | ========== =============== 66 | 67 | Example: 68 | 69 | .. code:: yaml 70 | 71 | pre_upload_files: 72 | - src: /home/deployuser/somehost/ssl.key 73 | dest: /etc/apache2/ssl/ssl.key 74 | mode: 0600 75 | post_upload_files: 76 | - src: /home/deployuser/somehost/ssh 77 | dest: /var/lib/jenkins/.ssh 78 | owner: jenkins 79 | mode: 0600 80 | 81 | Each of these values is passed directly into the Ansible module called 82 | `copy `_. Refer to that 83 | module's documentation for information about the structure and values that 84 | are permitted to be passed into these values. Note, especially, that this 85 | module can be used to upload whole directories in addition to individual files. 86 | 87 | If the need arises to support more of the options of that module, adding that 88 | support to Cinch can be done. Please just open an issue in the `GitHub Issue 89 | Tracker `_ detailing the requested 90 | functionality. 91 | -------------------------------------------------------------------------------- /inventory/cent6_jswarm_docker/group_vars/all: -------------------------------------------------------------------------------- 1 | # For certain OpenShift instances a value of '1000090000' is recommended. 
2 | jenkins_user_uid: 1000090000 3 | -------------------------------------------------------------------------------- /inventory/cent6_jswarm_docker/hosts: -------------------------------------------------------------------------------- 1 | slave ansible_connection=docker ansible_host=jswarm 2 | 3 | [jenkins_slave] 4 | slave 5 | 6 | [cent6] 7 | slave 8 | 9 | [repositories] 10 | slave 11 | -------------------------------------------------------------------------------- /inventory/cent7_jswarm_docker/group_vars/all: -------------------------------------------------------------------------------- 1 | # For certain OpenShift instances a value of '1000090000' is recommended. 2 | jenkins_user_uid: 1000090000 3 | 4 | -------------------------------------------------------------------------------- /inventory/cent7_jswarm_docker/hosts: -------------------------------------------------------------------------------- 1 | slave ansible_connection=docker ansible_host=jswarm 2 | 3 | [jenkins_slave] 4 | slave 5 | 6 | [cent7] 7 | slave 8 | 9 | [repositories] 10 | slave 11 | -------------------------------------------------------------------------------- /inventory/cent7_master_docker/group_vars/all: -------------------------------------------------------------------------------- 1 | # For certain OpenShift instances a value of '1000090000' is recommended. 
2 | jenkins_user_uid: 1000090000 3 | 4 | -------------------------------------------------------------------------------- /inventory/cent7_master_docker/hosts: -------------------------------------------------------------------------------- 1 | jmaster ansible_connection=docker 2 | 3 | [cent7] 4 | jmaster 5 | 6 | [repositories] 7 | jmaster 8 | 9 | [jenkins_master] 10 | jmaster 11 | -------------------------------------------------------------------------------- /inventory/fedora_jswarm_docker/group_vars/all: -------------------------------------------------------------------------------- 1 | # For certain OpenShift instances a value of '1000090000' is recommended. 2 | jenkins_user_uid: 1000090000 3 | 4 | -------------------------------------------------------------------------------- /inventory/fedora_jswarm_docker/hosts: -------------------------------------------------------------------------------- 1 | slave ansible_connection=docker ansible_host=jswarm 2 | 3 | [jenkins_slave] 4 | slave 5 | 6 | [fedora] 7 | slave 8 | 9 | [repositories] 10 | slave 11 | -------------------------------------------------------------------------------- /inventory/fedora_master_docker/group_vars/all: -------------------------------------------------------------------------------- 1 | # For certain OpenShift instances a value of '1000090000' is recommended. 
2 | jenkins_user_uid: 1000090000 3 | 4 | -------------------------------------------------------------------------------- /inventory/fedora_master_docker/hosts: -------------------------------------------------------------------------------- 1 | jmaster ansible_connection=docker 2 | 3 | [fedora] 4 | jmaster 5 | 6 | [repositories] 7 | jmaster 8 | 9 | [jenkins_master] 10 | jmaster 11 | -------------------------------------------------------------------------------- /inventory/sample/hosts: -------------------------------------------------------------------------------- 1 | [jenkins_master] 2 | 10.0.1.2 ansible_ssh_user=root 3 | -------------------------------------------------------------------------------- /jjb/ci-jslave-project-sample-defaults.yaml: -------------------------------------------------------------------------------- 1 | - defaults: 2 | name: cinch-topology-setup 3 | wrappers: 4 | - ansicolor 5 | - workspace-cleanup 6 | - timestamps 7 | scm: 8 | - git: 9 | # replace this URL with a git repo that contains your topology files 10 | url: 'https://example.com/cinch-topology-example.git' 11 | branches: 12 | - origin/master 13 | basedir: cinch-example 14 | -------------------------------------------------------------------------------- /jjb/ci-jslave-project-sample.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - job-template: 3 | name: '{project}-{topology}-provision' 4 | description: '{description}' 5 | defaults: cinch-topology-setup 6 | node: master 7 | parameters: 8 | - choice: 9 | name: PROVIDER 10 | choices: 11 | - openstack-slave 12 | - beaker-slave 13 | - aws_ec2-slave 14 | builders: 15 | - shell: | 16 | #!/bin/bash -ex 17 | source "${{JENKINS_HOME}}/opt/linchpin/bin/activate" 18 | linchpin -v --creds-path {topology_path}/${{PROVIDER}}/credentials -w {topology_path}/${{PROVIDER}} up 19 | deactivate 20 | 21 | source "${{JENKINS_HOME}}/opt/cinch/bin/activate" 22 | cinch 
{topology_path}/${{PROVIDER}}/inventories/cinch-test.inventory 23 | deactivate 24 | publishers: 25 | - archive: 26 | artifacts: '{topology_path}/${{PROVIDER}}/inventories/cinch-test.inventory' 27 | allow-empty: 'false' 28 | - archive: 29 | artifacts: '{topology_path}/${{PROVIDER}}/resources/cinch-test.output' 30 | allow-empty: 'true' 31 | - trigger-parameterized-builds: 32 | - project: '{project}-{topology}-runtest' 33 | current-parameters: true 34 | condition: 'SUCCESS' 35 | fail-on-missing: true 36 | - project: '{project}-{topology}-teardown' 37 | current-parameters: true 38 | condition: 'UNSTABLE_OR_WORSE' 39 | fail-on-missing: true 40 | 41 | - job-template: 42 | name: '{project}-{topology}-runtest' 43 | description: '{description}' 44 | node: '{jslave_name}' 45 | builders: 46 | - shell: | 47 | #!/bin/bash -ex 48 | echo "$JOB_NAME $BUILD_DISPLAY_NAME" > test_artifact.txt 49 | publishers: 50 | - archive: 51 | artifacts: 'test_artifact.txt' 52 | allow-empty: 'false' 53 | - trigger-parameterized-builds: 54 | - project: '{project}-{topology}-teardown' 55 | current-parameters: true 56 | 57 | - job-template: 58 | name: '{project}-{topology}-teardown' 59 | description: '{description}' 60 | defaults: cinch-topology-setup 61 | node: master 62 | builders: 63 | - copyartifact: 64 | project: '{project}-{topology}-provision' 65 | filter: '{topology_path}/${{PROVIDER}}/inventories/cinch-test.inventory' 66 | target: '{topology_path}/${{PROVIDER}}/inventories' 67 | flatten: true 68 | - copyartifact: 69 | project: '{project}-{topology}-provision' 70 | filter: '{topology_path}/${{PROVIDER}}/resources/cinch-test.output' 71 | target: '{topology_path}/${{PROVIDER}}/resources' 72 | flatten: true 73 | - shell: | 74 | #!/bin/bash -ex 75 | source "${{JENKINS_HOME}}/opt/cinch/bin/activate" 76 | # Try to remove the Jenkins slave from the Jenkins master, but do not fail 77 | # the entire teardown job if Jenkins slave disconnection cannot be done. 
78 | # This is for cases where the provision step failed to attach the slave, 79 | # but the instance should still be destroyed by linchpin. 80 | set +e 81 | teardown {topology_path}/${{PROVIDER}}/inventories/cinch-test.inventory 82 | set -e 83 | deactivate 84 | 85 | source "${{JENKINS_HOME}}/opt/linchpin/bin/activate" 86 | linchpin -v --creds-path {topology_path}/${{PROVIDER}}/credentials -w {topology_path}/${{PROVIDER}} destroy 87 | deactivate 88 | 89 | - job-group: 90 | name: provision-runtest-teardown 91 | jobs: 92 | - '{project}-{topology}-provision' 93 | - '{project}-{topology}-runtest' 94 | - '{project}-{topology}-teardown' 95 | 96 | - project: 97 | name: cinch-jobs 98 | project: cinch 99 | topology: 100 | - example 101 | jobs: 102 | - provision-runtest-teardown 103 | jslave_name: cinch-slave 104 | topology_path: 'cinch-example/examples/linchpin-topologies' 105 | description: | 106 | cinch Jenkins slave provisioning example workflow using Jenkins Job Builder 107 | 108 | https://github.com/RedHatQE/cinch/blob/master/jjb/ci-jslave-project-sample.yaml 109 | -------------------------------------------------------------------------------- /jjb/code-coverage.yaml: -------------------------------------------------------------------------------- 1 | - job: 2 | name: cinch-code-coverage 3 | node: master 4 | wrappers: 5 | - ansicolor 6 | - workspace-cleanup 7 | - timestamps 8 | 9 | # git repo to follow, skip-tag to not require auth 10 | scm: 11 | - git: 12 | url: https://github.com/RedHatQE/cinch.git 13 | branches: 14 | - master 15 | skip-tag: true 16 | basedir: cinch 17 | 18 | # git polling trigger set to once an hour 19 | triggers: 20 | - pollscm: 21 | cron: "H */1 * * *" 22 | ignore-post-commit-hooks: True 23 | 24 | builders: 25 | # coverage tests initialization script 26 | - shell: | 27 | #!/bin/bash -ex 28 | virtualenv codecov && source codecov/bin/activate 29 | # TODO find a better way to install test dependencies 30 | # We can't simply 'pip install cinch' here 
if we're on RHEL7 31 | pip install pytest-cov 'ansible>=2.1' 'plumbum>=1.6.0' 32 | pushd "${WORKSPACE}/cinch" 33 | py.test --cov=cinch/bin --cov-config .coveragerc --cov-report term \ 34 | --cov-report xml --cov-report html tests 35 | popd 36 | 37 | # sonar runner parameters, set sources and baseDir to project home 38 | # projectKey (string): SonarQube project identification key (unique) 39 | # projectName (string): SonarQube project name (NOT unique) 40 | # projectVersion (string): SonarQube project version (unique) 41 | # sources (string): source code home directory 42 | # projectBaseDir (string): project home directory (same as sources) 43 | # language (string): project language(ruby) 44 | # inclusions (string): file inclusion pattern 45 | # exclusions (string): file exclusion pattern 46 | - sonar: 47 | sonar-name: sonar 48 | properties: | 49 | sonar.projectKey=cinch 50 | sonar.projectName=cinch 51 | sonar.projectVersion=master 52 | sonar.sources=${WORKSPACE}/cinch 53 | sonar.projectBaseDir=${WORKSPACE}/cinch 54 | sonar.python.coverage.reportPath=${WORKSPACE}/cinch/coverage.xml 55 | sonar.language=py 56 | sonar.inclusions=cinch/bin/*.py 57 | sonar.ws.timeout=180 58 | 59 | publishers: 60 | - cobertura: 61 | report-file: "cinch/coverage.xml" 62 | targets: 63 | - files: 64 | healthy: 10 65 | unhealthy: 20 66 | failing: 30 67 | - method: 68 | healthy: 50 69 | unhealthy: 40 70 | failing: 30 71 | -------------------------------------------------------------------------------- /jjb/install-rhel7.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # This JJB template is intended to provide a way to install 'cinch' on RHEL7 3 | # Jenkins masters without needing shell/SSH access. Just create the job using 4 | # the jenkins-jobs command and run it to install 'cinch'. 
5 | - job: 6 | name: 'install-cinch-rhel7' 7 | node: master 8 | parameters: 9 | - bool: 10 | name: DELETE_VENV 11 | default: false 12 | description: "Delete pre-existing linchpin/cinch virtualenvs and re-install" 13 | wrappers: 14 | - ansicolor 15 | - workspace-cleanup 16 | - timestamps 17 | scm: 18 | - git: 19 | url: 'https://github.com/RedHatQE/cinch.git' 20 | branches: 21 | - origin/master 22 | basedir: cinch 23 | builders: 24 | - shell: | 25 | #!/bin/bash -ex 26 | virtualenv ansible.venv && source ansible.venv/bin/activate 27 | pip install --upgrade setuptools pip 28 | pip install 'ansible==2.4.1.0' 29 | 30 | export PYTHONUNBUFFERED=1 # Enable real-time output for Ansible 31 | ansible-playbook -i localhost, -c local \ 32 | "${WORKSPACE}/cinch/cinch/playbooks/install-rhel7.yml" \ 33 | -e delete_venv="${DELETE_VENV}" 34 | 35 | deactivate 36 | -------------------------------------------------------------------------------- /scripts/centos_jswarm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | cinch="$(readlink -f "$(dirname "$0")/../")" 4 | centos_version="${1}" 5 | inventory="${cinch}/inventory/cent${centos_version}_jswarm_docker" 6 | if [ ! 
-e "${inventory}" ]; then 7 | echo "You must specify a supported CentOS version to continue" 8 | exit 1 9 | fi 10 | "${cinch}/scripts/jswarm.sh" "centos:${centos_version}" "${inventory}" yum 11 | -------------------------------------------------------------------------------- /scripts/centos_master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ve 4 | cinch="$(readlink -f "$(dirname "${0}")/../")" 5 | centos_version="${1}" 6 | inventory="${cinch}/inventory/cent${centos_version}_master_docker/hosts" 7 | "${cinch}/scripts/master.sh" "centos:${centos_version}" "${inventory}" yum 8 | -------------------------------------------------------------------------------- /scripts/deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | CINCH="$(readlink -f "$(dirname "$0")/../")" 4 | # Extracts the version line from the setup.py script, and trims off the rest 5 | # of the line to leave only the expected version 6 | CINCH_VERSION=$(grep "${CINCH}/setup.py" -e 'version=' \ 7 | | sed -e "s/.*version='\(.*\)'.*/\1/") 8 | DISTRO="${1}" 9 | LATEST="${2}" 10 | 11 | echo "Deploying version '${CINCH_VERSION}' to '${DISTRO}'" 12 | 13 | # Login to Docker - these environment variables are stored in TravisCI for 14 | # safe keeping 15 | docker login -p "${DOCKER_PASSWORD}" -u "${DOCKER_USER}" 16 | # Push the latest builds of these images to Docker Hub 17 | docker push "redhatqecinch/jenkins_slave:${DISTRO}-${CINCH_VERSION}" 18 | 19 | # Only one version should be tagged as the latest, so let this be it 20 | if [ x"${LATEST}" = "xtrue" ]; then 21 | echo "Tagging '${DISTRO}' as latest and pushing" 22 | # Tag the image as the "latest" version, for people wanting the 23 | # cutting edge 24 | docker tag "redhatqecinch/jenkins_slave:${DISTRO}-${CINCH_VERSION}" \ 25 | redhatqecinch/jenkins_slave:latest 26 | docker push redhatqecinch/jenkins_slave:latest 27 | fi 28 | 
-------------------------------------------------------------------------------- /scripts/fedora_jswarm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | fedora_version=25 4 | cinch="$(readlink -f "$(dirname "$0")/../")" 5 | inventory="${cinch}/inventory/fedora_jswarm_docker" 6 | "${cinch}/scripts/jswarm.sh" "fedora:${fedora_version}" "${inventory}" dnf 7 | -------------------------------------------------------------------------------- /scripts/fedora_master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ve 4 | cinch="$(readlink -f "$(dirname "${0}")/../")" 5 | fedora_version=25 6 | inventory="${cinch}/inventory/fedora_master_docker/hosts" 7 | "${cinch}/scripts/master.sh" "fedora:${fedora_version}" "${inventory}" dnf 8 | -------------------------------------------------------------------------------- /scripts/jswarm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set-ve 4 | cinch="$(readlink -f "$(dirname "$0")/../")" 5 | base_image="${1}" 6 | inventory="${2}" 7 | module="${3}" 8 | # Extracts the version line from the setup.py script, and trims off the rest of the line to leave 9 | # only the expected version 10 | cinch_version=$(grep "${cinch}/setup.py" -e 'version=' | sed -e "s/.*version='\(.*\)'.*/\1/") 11 | echo "*****************************************************" 12 | echo "Building cinch container for version ${cinch_version}" 13 | echo "*****************************************************" 14 | echo "Starting container" 15 | ansible -i /dev/null \ 16 | localhost \ 17 | -m docker_container \ 18 | -a "image=${base_image} name=jswarm detach=true tty=true command=/bin/bash" 19 | docker exec -it jswarm "${module}" install -y python 20 | ansible -i "${inventory}" all -m "${module}" -a "name=sudo state=present" 21 | ansible -i "${inventory}" all -m "${module}" -a "name=* 
state=latest" 22 | echo "Building container with Ansible" 23 | ansible-playbook -i "${inventory}" \ 24 | "${cinch}/cinch/site.yml" \ 25 | -e jenkins_user_password=some_dummy_value 26 | echo "Committing container at tag ${cinch_version}" 27 | docker commit \ 28 | --change 'USER jenkins' \ 29 | --change 'ENTRYPOINT ["/usr/local/bin/dockerize", "-template", "/etc/sysconfig/jenkins_swarm:/etc/sysconfig/jenkins_swarm.templated", "/usr/local/bin/jswarm.sh"]' \ 30 | jswarm "redhatqecinch/jenkins_slave:${base_image//:/}-${cinch_version}" 31 | -------------------------------------------------------------------------------- /scripts/master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -xe 4 | cinch="$(readlink -f "$(dirname "${0}")/../")" 5 | container_base="${1}" 6 | inventory="${2}" 7 | pkg_mgr="${3}" 8 | container_name=jmaster 9 | # Extracts the version line from the setup.py script, and trims off the rest of the line to leave 10 | # only the expected version 11 | cinch_version=$(grep "${cinch}/setup.py" -e 'version=' | sed -e "s/.*version='\(.*\)'.*/\1/") 12 | if [ ! 
-e "${inventory}" ]; then 13 | echo "You must specify a valid inventory folder" 14 | exit 1 15 | fi 16 | ######################################################## 17 | # Spin up container and get it rolling 18 | ######################################################## 19 | echo "Starting container from image ${container_base}" 20 | ansible -i /dev/null \ 21 | localhost \ 22 | -m docker_container \ 23 | -a "image=${container_base} \ 24 | name=${container_name} \ 25 | tty=true \ 26 | detach=true \ 27 | command='/usr/lib/systemd/systemd \ 28 | --system' \ 29 | capabilities=SYS_ADMIN \ 30 | $([[ $TRAVIS = true ]] && echo privileged=true)" 31 | # Fedora is lacking python in base image 32 | docker exec -it "${container_name}" "${pkg_mgr}" install -y python 33 | ansible -i "${inventory}" \ 34 | all \ 35 | -m "${pkg_mgr}" \ 36 | -a 'name=sudo state=present' 37 | ansible -i "${inventory}" \ 38 | all \ 39 | -m "${pkg_mgr}" \ 40 | -a 'name=* state=latest' 41 | ######################################################## 42 | # Run cinch against the playbook 43 | ######################################################## 44 | echo "Building container into a Jenkins master for Cinch ${cinch_version}" 45 | ansible-playbook -i "${inventory}" \ 46 | "${cinch}/cinch/site.yml" \ 47 | -e jenkins_user_password=somedummyvalue 48 | ######################################################## 49 | # Run inspec against the container 50 | ######################################################## 51 | erb "${cinch}/tests/profile.yml.erb" > "${cinch}/tests/profile.yml" 52 | inspec exec --chef-license=accept-silent "${cinch}/tests/cinch" \ 53 | --attrs "${cinch}/tests/profile.yml" -t "docker://${container_name}" 54 | ######################################################## 55 | # Finish and close up the container 56 | ######################################################## 57 | echo "Saving image" 58 | docker commit \ 59 | --change 'EXPOSE 8080' \ 60 | --change 'EXPOSE 8009' \ 61 | --change 'ENTRYPOINT 
["/usr/lib/systemd/systemd", "--system"]' \ 62 | "${container_name}" "redhatqecinch/jenkins_master:${container_base//:/}-${cinch_version}" 63 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | from os import path 3 | 4 | here = path.abspath(path.dirname(__file__)) 5 | with open(path.join(here, 'README.md')) as f: 6 | description = f.read() 7 | 8 | setup( 9 | name='cinch', 10 | version='1.4.0', 11 | description='Cinch continuous integration setup', 12 | long_description=description, 13 | url='https://github.com/RedHatQE/cinch', 14 | author='RedHatQE', 15 | license='GPLv3', 16 | classifiers=[ 17 | 'Development Status :: 5 - Production/Stable', 18 | 'Intended Audience :: Developers', 19 | 'Intended Audience :: Information Technology', 20 | 'Topic :: Software Development :: Quality Assurance', 21 | 'Topic :: Software Development :: Testing', 22 | 'Topic :: Software Development :: Build Tools', 23 | 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', 24 | 'Programming Language :: Python :: 2.7', 25 | 'Programming Language :: Python :: 3' 26 | ], 27 | keywords='continuous integration, ci, jenkins', 28 | packages=find_packages(exclude=('library', 'bin')), 29 | include_package_data=True, 30 | install_requires=[ 31 | 'ansible>=2.4', 32 | 'plumbum>=1.6.0' 33 | ], 34 | entry_points={ 35 | 'console_scripts': [ 36 | 'cinch=cinch.bin.entry_point:cinch', 37 | 'teardown=cinch.bin.entry_point:teardown' 38 | ] 39 | }, 40 | extras_require={ 41 | 'docs': [ 42 | 'sphinx_rtd_theme', 43 | 'sphinx' 44 | ] 45 | } 46 | ) 47 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | # This file must exist in the /tests directory in order for pytest-cov to 2 | # report results 
properly when run in travis 3 | -------------------------------------------------------------------------------- /tests/ansible_lint_rules/NoFormattingInWhenRule.py: -------------------------------------------------------------------------------- 1 | from ansiblelint import AnsibleLintRule 2 | try: 3 | from types import StringTypes 4 | except ImportError: 5 | # Python3 removed types.StringTypes 6 | StringTypes = str, 7 | 8 | 9 | class NoFormattingInWhenRule(AnsibleLintRule): 10 | id = 'ANSIBLE0019' 11 | shortdesc = 'No Jinja2 in when/until' 12 | description = '"when" or "until" lines should not include Jinja2 variables' 13 | tags = ['deprecated'] 14 | 15 | def _is_valid(self, when): 16 | if not isinstance(when, StringTypes): 17 | return True 18 | return when.find('{{') == -1 and when.find('}}') == -1 19 | 20 | def matchplay(self, file, play): 21 | errors = [] 22 | if isinstance(play, dict): 23 | if 'roles' not in play: 24 | return errors 25 | for role in play['roles']: 26 | if self.matchtask(file, role): 27 | errors.append(({'when': role}, 28 | 'role "when" clause has Jinja2 templates')) 29 | if isinstance(play, list): 30 | for play_item in play: 31 | sub_errors = self.matchplay(file, play_item) 32 | if sub_errors: 33 | errors = errors + sub_errors 34 | return errors 35 | 36 | def matchtask(self, file, task): 37 | return ('when' in task and not self._is_valid(task['when'])) or \ 38 | ('until' in task and not self._is_valid(task['until'])) 39 | -------------------------------------------------------------------------------- /tests/ansible_lint_rules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatQE/cinch/467e44fb255796a68a72b17f6e30e33170a688fa/tests/ansible_lint_rules/__init__.py -------------------------------------------------------------------------------- /tests/cent6_slave.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 
-xe 2 | CINCH="$(readlink -f "$(dirname "$0")/../")" 3 | 4 | "${CINCH}/scripts/centos_jswarm.sh" 6 5 | -------------------------------------------------------------------------------- /tests/cent7_master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ve 4 | cinch="$(readlink -f "$(dirname "${0}")/../")" 5 | "${cinch}/scripts/centos_master.sh" 7 6 | -------------------------------------------------------------------------------- /tests/cent7_slave.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -xe 2 | cinch="$(readlink -f "$(dirname "$0")/../")" 3 | 4 | "${cinch}/scripts/centos_jswarm.sh" 7 5 | -------------------------------------------------------------------------------- /tests/cinch/README.md: -------------------------------------------------------------------------------- 1 | # Cinch InSpec Profile 2 | 3 | This example shows the implementation of an InSpec profile. 
4 | -------------------------------------------------------------------------------- /tests/cinch/controls/cinch.rb: -------------------------------------------------------------------------------- 1 | include_controls "jenkins" 2 | -------------------------------------------------------------------------------- /tests/cinch/inspec.yml: -------------------------------------------------------------------------------- 1 | name: cinch 2 | title: Cinch Profile 3 | maintainer: Alexander Braverman Masis 4 | copyright: Red Hat 5 | copyright_email: abraverm@redhat.com 6 | license: GPL-3.0 7 | summary: Cinch Compliance Profile 8 | version: 0.1.0 9 | depends: 10 | - name: jenkins 11 | git: https://github.com/RedHatQe/jenkins-profile.git 12 | branch: master 13 | -------------------------------------------------------------------------------- /tests/cinch/libraries/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatQE/cinch/467e44fb255796a68a72b17f6e30e33170a688fa/tests/cinch/libraries/.gitkeep -------------------------------------------------------------------------------- /tests/coverage.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | py.test --cov=cinch/bin --cov-config .coveragerc --cov-report term \ 4 | --cov-report xml --cov-report html tests 5 | 6 | codecov 7 | -------------------------------------------------------------------------------- /tests/fedora_master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ve 4 | cinch="$(readlink -f "$(dirname "${0}")/../")" 5 | "${cinch}/scripts/fedora_master.sh" 6 | -------------------------------------------------------------------------------- /tests/fedora_slave.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -xe 2 | cinch="$(readlink -f "$(dirname "$0")/../")" 3 | 4 | 
"${cinch}/scripts/fedora_jswarm.sh" 5 | -------------------------------------------------------------------------------- /tests/inventory.ini: -------------------------------------------------------------------------------- 1 | [all] 2 | localhost ansible_connection=local 3 | -------------------------------------------------------------------------------- /tests/inventory/master: -------------------------------------------------------------------------------- 1 | master ansible_connection=docker 2 | 3 | [cent7] 4 | master 5 | 6 | [jenkins_master] 7 | master 8 | 9 | [repositories] 10 | master 11 | -------------------------------------------------------------------------------- /tests/inventory/slave: -------------------------------------------------------------------------------- 1 | slave ansible_connection=docker ansible_host=jswarm 2 | 3 | [jenkins_slave] 4 | slave 5 | 6 | [cent7] 7 | slave 8 | 9 | [repositories] 10 | slave 11 | -------------------------------------------------------------------------------- /tests/playbook.yml: -------------------------------------------------------------------------------- 1 | - hosts: all 2 | tasks: 3 | - debug: 4 | msg: 'test data' 5 | -------------------------------------------------------------------------------- /tests/profile.yml.erb: -------------------------------------------------------------------------------- 1 | version: 2.121.3-1.1 2 | jenkins_home: /var/lib/jenkins 3 | ports: 4 | - 8080 5 | jenkins_url: http://localhost:8080 6 | jenkins_settings: 7 | - key: "hudson/useSecurity" 8 | value: 9 | - 'true' 10 | - key: "hudson/securityRealm/@class" 11 | value: 12 | - 'hudson.security.HudsonPrivateSecurityRealm' 13 | - key: hudson/slaveAgentPort 14 | value: 15 | - '50000' 16 | 17 | jenkins_plugins: 18 | <%= %x(cat cinch/files/jenkins-plugin-lists/default.txt | awk -F '==' '{print " -", $1}') %> 19 | -------------------------------------------------------------------------------- /tests/test_cli.py: 
-------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | 4 | from cinch.bin.wrappers import call_ansible, command_handler 5 | 6 | 7 | class CinchCLI(unittest.TestCase): 8 | 9 | def test_exit_code_zero(self): 10 | # full path to the playbook and inventory must be specified in order to 11 | # avoid path issues with the BASE variable in wrappers.py 12 | playbook = os.path.join(os.getcwd(), 'tests/playbook.yml') 13 | inventory = os.path.join(os.getcwd(), 'tests/inventory.ini') 14 | 15 | # valid inventory and playbook files 16 | self.assertEqual(call_ansible(inventory, playbook), 0) 17 | 18 | def test_exit_code_one(self): 19 | # inventory and playbook files that do not exist 20 | self.assertEqual(call_ansible('junk.ini', 'junk.yml'), 1) 21 | 22 | def test_exit_code_255(self): 23 | # invalid data given to plumbum which should return exit code 255 24 | self.assertEqual(command_handler('invalid_command', 25 | 'invalid_arg'), 255) 26 | -------------------------------------------------------------------------------- /tests/yamllint.yml: -------------------------------------------------------------------------------- 1 | extends: default 2 | 3 | rules: 4 | line-length: 5 | max: 100 6 | level: error 7 | document-start: 8 | present: false 9 | level: error 10 | comments: 11 | require-starting-space: false 12 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = 3 | docs, 4 | lint, 5 | py27, 6 | cent6_slave, 7 | cent7_master, 8 | cent7_slave, 9 | fedora_slave, 10 | fedora_master 11 | 12 | [testenv] 13 | basepython = 14 | py27: {env:TOXPYTHON:python2.7} 15 | {docs,lint}: {env:TOXPYTHON:python3} 16 | cent{6,7}_slave,cent7_master: {env:TOXPYTHON:python3} 17 | fedora_{master,slave}: {env:TOXPYTHON:python3} 18 | setenv = 19 | PYTHONPATH={toxinidir}/tests 20 | PYTHONUNBUFFERED=yes 21 
| passenv = 22 | * 23 | deps = 24 | pytest 25 | pytest-cov 26 | codecov 27 | commands = 28 | {posargs:py.test -vv --cov=cinch/bin --cov-config .coveragerc \ 29 | --cov-report term --cov-report xml --cov-report html tests} 30 | codecov 31 | whitelist_externals = 32 | /usr/bin/bash 33 | /bin/bash 34 | /usr/bin/make 35 | 36 | [testenv:lint] 37 | deps = 38 | flake8 39 | ansible-lint == 4.0.1 40 | yamllint 41 | ansible 42 | skip_install = true 43 | usedevelop = false 44 | # Needs to interpret through bash in order to use shell expansion 45 | commands = 46 | yamllint -s -c tests/yamllint.yml cinch inventory tests \ 47 | cinch/group_vars/all \ 48 | cinch/group_vars/cent6 \ 49 | cinch/group_vars/cent7 \ 50 | cinch/group_vars/fedora \ 51 | cinch/group_vars/jenkins_docker_slave \ 52 | cinch/group_vars/jenkins_master \ 53 | cinch/group_vars/jenkins_slave \ 54 | cinch/group_vars/rhel6 \ 55 | cinch/group_vars/rhel7 56 | # Basic sanity checking of the playbooks 57 | ansible-playbook --syntax-check -i inventory/sample/hosts cinch/site.yml 58 | ansible-playbook --syntax-check -i inventory/sample/hosts \ 59 | cinch/install-rhel7.yml 60 | # Proper lint-checking of the playbooks 61 | ansible-lint -R -r tests/ansible_lint_rules cinch/site.yml 62 | ansible-lint -R -r tests/ansible_lint_rules cinch/install-rhel7.yml 63 | # Lint the Python code 64 | # jenkins_script.py is pulled in from upstream versions of Ansible 65 | flake8 --exclude jenkins_script.py cinch tests setup.py 66 | 67 | [testenv:docs] 68 | deps = 69 | sphinx 70 | sphinx_rtd_theme 71 | skip_install = true 72 | usedevelop = false 73 | commands = 74 | make -C docs html 75 | make -C docs linkcheck 76 | 77 | [testenv:cent6_slave] 78 | deps = 79 | {[testenv]deps} 80 | docker-py 81 | commands = bash ./tests/cent6_slave.sh 82 | 83 | [testenv:cent7_slave] 84 | deps = 85 | {[testenv]deps} 86 | docker-py 87 | commands = bash ./tests/cent7_slave.sh 88 | 89 | [testenv:cent7_master] 90 | deps = 91 | {[testenv]deps} 92 | docker-py 93 | 
commands = bash ./tests/cent7_master.sh 94 | 95 | [testenv:fedora_slave] 96 | deps = 97 | {[testenv]deps} 98 | docker-py 99 | commands = bash ./tests/fedora_slave.sh 100 | 101 | [testenv:fedora_master] 102 | deps = 103 | {[testenv]deps} 104 | docker-py 105 | commands = bash ./tests/fedora_master.sh 106 | 107 | [testenv:inspec_master] 108 | commands = bash -c "inspec exec tests/cinch -t docker://jmaster" 109 | -------------------------------------------------------------------------------- /vagrant/README.txt: -------------------------------------------------------------------------------- 1 | Each subfolder here contains, minimally, a Vagrantfile and a hosts file that 2 | can be orchestrated together for development and testing, or just for trying 3 | the playbooks in a non-destructive and low-maintenance setting. 4 | 5 | You can choose to manually invoke "vagrant up" if you have options you would 6 | like to pass to Vagrant. Alternatively, ./full_cycle.sh will destroy any 7 | currently running VMs from that machine, spin up fresh ones, and run the 8 | playbooks. 9 | 10 | If you opt to manually spin up through "vagrant up" you can then invoke the 11 | configure.sh script in each directory to run the Ansible playbook against the 12 | included hosts file. Of course it's possible to run that Ansible command by 13 | yourself as well, by just passing in the provided hosts file and pointing to 14 | the site.yml file in the parent of this directory.
15 | 16 | A brief list of the available systems with some of their notable features 17 | 18 | master: 19 | - CentOS 7 20 | - All default options from the playbook 21 | - Guaranteed to work out-of-the-box (if you can spin up Vagrant VMs) 22 | - Please report bugs if this one does not spin up without modification 23 | - Access through http://192.168.8.2/ after spin up 24 | 25 | master_rhel7: 26 | - RHEL 7 27 | - Requires minimally configuring a rhel_base url to point to a valid RHEL 28 | mirror infrastructure 29 | - Will not work without RHEL mirrors, but should spin up just fine with 30 | - Access through http://192.168.8.2/ after spin up 31 | -------------------------------------------------------------------------------- /vagrant/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | host_key_checking = False 3 | -------------------------------------------------------------------------------- /vagrant/docker_slave/Vagrantfile: -------------------------------------------------------------------------------- 1 | require '../shared.rb' 2 | 3 | Vagrant.configure("2") do |config| 4 | vm(config, "master") 5 | vm(config, "docker-slave") do |ansible| 6 | ansible.groups = { 7 | "jenkins_master" => ["master"], 8 | "jenkins_docker_slave" => ["docker-slave"], 9 | "cent7" => ["master", "docker-slave"], 10 | "repositories" => ["master", "docker-slave"], 11 | "jenkins_docker_slave:vars" => { 12 | "jenkins_master_url" => "http://{{ hostvars['master']['ansible_default_ipv4']['address'] }}/", 13 | "jslave_label" => "my label" 14 | } 15 | } 16 | end 17 | end 18 | -------------------------------------------------------------------------------- /vagrant/master/Vagrantfile: -------------------------------------------------------------------------------- 1 | require "../shared.rb" 2 | 3 | Vagrant.configure("2") do |config| 4 | vm(config, "master") do |ansible| 5 | ansible.groups = { 6 | "jenkins_master" => ["master"], 7 | "cent7" 
=> ["master"], 8 | "repositories" => ["master"] 9 | } 10 | end 11 | end 12 | -------------------------------------------------------------------------------- /vagrant/master_rhel7/README: -------------------------------------------------------------------------------- 1 | To run this box, you will need to find and add a Vagrant box for RHEL7. Currently the 2 | version supported is RHEL 7.4. If you have access to provided RHEL7.4 Vagrant box file, 3 | you can add it to your local system with a command like 4 | 5 | ```vagrant box add --name rhel7.4 http://somewhere.com/vagrant/box/files/rhel7.4.box``` 6 | 7 | Since RHEL7 is under license, these images are probably not readily available to public 8 | infrastructure, but this Vagrantfile is provided in the interest of testing and for those 9 | who are in possession of such a box to be able to test. It should provide some basic 10 | information on what types of variables need to be set to configure a RHEL system to 11 | serve as a cinch Jenkins master. 12 | 13 | To run this box, you'll want to update the box address and the path to your local RHEL7 14 | repository base. Do so by editing the lines in the Vagrantfile to point to the appropriate 15 | box name/URL and the example.com URL to point to your RHEL7 repository. 
16 | -------------------------------------------------------------------------------- /vagrant/master_rhel7/Vagrantfile: -------------------------------------------------------------------------------- 1 | require "../shared.rb" 2 | 3 | Vagrant.configure("2") do |config| 4 | vm(config, "master", "generic/rhel7") do |ansible| 5 | ansible.groups = { 6 | "jenkins_master" => ["master"], 7 | "rhel7" => ["master"], 8 | "repositories" => ["master"], 9 | "jenkins_master:vars" => { 10 | "rhel_base" => "http://example.com/content/dist/rhel/server/7/7.4" 11 | } 12 | } 13 | end 14 | end 15 | -------------------------------------------------------------------------------- /vagrant/master_shell_user/Vagrantfile: -------------------------------------------------------------------------------- 1 | require "../shared.rb" 2 | 3 | Vagrant.configure("2") do |config| 4 | vm(config, "master") do |ansible| 5 | ansible.groups = { 6 | "jenkins_master" => ["master"], 7 | "cent7" => ["master"], 8 | "repositories" => ["master"], 9 | "jenkins_master:vars" => { 10 | "jenkins_cli_shell_user" => "derpuser", 11 | "jenkins_cli_shell_user_home" => "/home/derpuser" 12 | } 13 | } 14 | end 15 | end 16 | -------------------------------------------------------------------------------- /vagrant/master_slave_fedora/Vagrantfile: -------------------------------------------------------------------------------- 1 | require "../shared.rb" 2 | 3 | Vagrant.configure("2") do |config| 4 | vm(config, "master", "fedora/26-cloud-base") 5 | vm(config, "slave", "fedora/26-cloud-base") do |ansible| 6 | ansible.groups = { 7 | "jenkins_master" => ["master"], 8 | "jenkins_slave" => ["slave"], 9 | "fedora" => ["master", "slave"], 10 | "repositories" => ["master"], 11 | "jenkins_slave:vars" => { 12 | "jenkins_master_url" => "http://{{ hostvars['master']['ansible_default_ipv4']['address'] }}" 13 | } 14 | } 15 | end 16 | end 17 | -------------------------------------------------------------------------------- 
/vagrant/master_ssl/Vagrantfile: -------------------------------------------------------------------------------- 1 | require "../shared.rb" 2 | 3 | Vagrant.configure("2") do |config| 4 | vm(config, "master") do |ansible| 5 | ansible.groups = { 6 | "jenkins_master" => ["master"], 7 | "cent7" => ["master"], 8 | "repositories" => ["master"], 9 | "jenkins_master:vars" => {"https_enabled" => "true"} 10 | } 11 | end 12 | end 13 | -------------------------------------------------------------------------------- /vagrant/shared.rb: -------------------------------------------------------------------------------- 1 | ENV['VAGRANT_NO_PARALLEL'] = 'yes' 2 | $ip = 2 3 | 4 | def get_username(base_box) 5 | if base_box.start_with?('centos') 6 | return 'centos' 7 | elsif base_box == 'generic/rhel8' 8 | return 'cloud-user' 9 | elsif base_box.include?('rhel') 10 | return 'root' 11 | else 12 | return 'fedora' 13 | end 14 | end 15 | 16 | def get_image(base_box) 17 | if base_box == 'centos/7' 18 | return 'CentOS-7-x86_64-GenericCloud-released-latest' 19 | elsif base_box == 'centos/6' 20 | return 'CentOS-6-x86_64-GenericCloud-1612' 21 | elsif base_box == 'fedora/26-cloud-base' 22 | return 'Fedora-Cloud-Base-26-compose-latest' 23 | elsif base_box == 'generic/rhel7' 24 | return 'rhel-7.6-server-x86_64-updated' 25 | elsif base_box == 'generic/rhel8' 26 | return 'rhel-8.0-x86_64-latest' 27 | elsif base_box == 'rhel6' 28 | return 'rhel-6.9-server-x86_64-updated' 29 | end 30 | end 31 | 32 | def local_setup(node, base_box) 33 | node.vm.box = base_box 34 | node.vm.network "private_network", ip: "192.168.8.#{$ip}", netmask: "255.255.255.0" 35 | $ip += 1 36 | end 37 | 38 | def vm(config, name, base_box="centos/7") 39 | config.vm.define name do |nodeconfig| 40 | nodeconfig.vm.hostname = name + ".box" 41 | # Provider-specific setup 42 | nodeconfig.vm.provider :libvirt do |lv, override| 43 | local_setup(override, base_box) 44 | end 45 | nodeconfig.vm.provider :virtualbox do |vb, override| 46 | 
local_setup(override, base_box) 47 | end 48 | nodeconfig.vm.provider :openstack do |os, override| 49 | os.image = get_image(base_box) 50 | os.server_name = ENV['USER'] + '-' + name 51 | override.ssh.username = get_username(base_box) 52 | override.ssh.pty = true 53 | end 54 | # Setup Ansible configuration, if we're asked 55 | nodeconfig.vm.provision :shell, inline: "sed -E -i /etc/sudoers -e '/Defaults\\s+requiretty/d'" 56 | if base_box.start_with?('fedora') 57 | nodeconfig.vm.provision "shell", inline: "sudo dnf install -y python" 58 | end 59 | if block_given? 60 | nodeconfig.vm.provision "ansible" do |ansible| 61 | ansible.playbook = "../../cinch/site.yml" 62 | ansible.limit = "all" 63 | ansible.verbose = "-v" 64 | yield ansible 65 | end 66 | end 67 | # Disable shared folder, since we want to simulate the real world 68 | nodeconfig.vm.synced_folder "../..", "/vagrant", disabled: true 69 | if Vagrant.has_plugin?('vagrant-cachier') 70 | # Needs to be :machine, because in multi-vm environments, when they both hit a 71 | # download point, then things can get hairy when both boxes try to lock 72 | config.cache.scope = :machine 73 | config.cache.synced_folder_opts = { 74 | type: :sshfs 75 | } 76 | end 77 | end 78 | end 79 | -------------------------------------------------------------------------------- /vagrant/shared.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x 4 | 5 | TOP=$(readlink -f "$(dirname "${0}")/../../") 6 | 7 | function venv { 8 | /bin/bash "${TOP}/bin/ensure_virtualenv.sh" || exit 1 9 | } 10 | 11 | function playbook { 12 | source "${TOP}/.venv/bin/activate" 13 | directory="${1}" 14 | shift 15 | export ANSIBLE_CONFIG="${TOP}/vagrant/ansible.cfg" 16 | cinch "${TOP}/vagrant/${directory}/hosts" \ 17 | -e "vagrant_dir=${TOP}/vagrant/${directory}" \ 18 | "$@" || exit 1 19 | deactivate 20 | } 21 | 22 | function vagrant_cycle { 23 | cd "${TOP}/vagrant/${1}/" || exit 1 24 | vagrant destroy -f || 
exit 1 25 | vagrant up || exit 1 26 | } 27 | -------------------------------------------------------------------------------- /vagrant/slave/README: -------------------------------------------------------------------------------- 1 | The three extra files in this directory - "mode", "owner", and "simple" are 2 | used to test the file upload module in the playbooks. Examples of how to 3 | structure the data going into the system are visible in the Vagrantfile and 4 | elsewhere in the Cinch documentation. 5 | -------------------------------------------------------------------------------- /vagrant/slave/Vagrantfile: -------------------------------------------------------------------------------- 1 | require "../shared.rb" 2 | require 'json' 3 | 4 | base = File.dirname(__FILE__) 5 | 6 | Vagrant.configure("2") do |config| 7 | vm(config, "master") 8 | vm(config, "slave") do |ansible| 9 | ansible.groups = { 10 | "jenkins_master" => ["master"], 11 | "jenkins_slave" => ["slave"], 12 | "cent7" => ["master", "slave"], 13 | "repositories" => ["master", "slave"], 14 | "jenkins_slave:vars" => { 15 | "jenkins_master_url" => "http://{{ hostvars['master']['ansible_default_ipv4']['address'] }}", 16 | "jenkins_user_password" => "vagrant", 17 | "jenkins_user" => "jenkins", 18 | "jenkins_user_home" => "/var/lib/jenkins" 19 | }, 20 | "jenkins_master:vars" => { 21 | "https_enabled" => "False" 22 | }, 23 | "cent7:vars" => { 24 | # These variables need to be converted to JSON objects, like 25 | # all other complex values passed into the inventory file for 26 | Ansible.
We here utilize standard Ruby modules to 27 | # accomplish that 28 | "pre_upload_files" => [ 29 | {"src" => base + "/simple", "dest" => "/simple"}, 30 | {"src" => base + "/mode", "dest" => "/mode", "mode" => "0600"} 31 | ].to_json, 32 | "post_upload_files" => [ 33 | {"src" => base + "/owner", "dest" => "/var/lib/jenkins/owner", "owner" => "jenkins"} 34 | ].to_json 35 | } 36 | } 37 | end 38 | end 39 | -------------------------------------------------------------------------------- /vagrant/slave/mode: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatQE/cinch/467e44fb255796a68a72b17f6e30e33170a688fa/vagrant/slave/mode -------------------------------------------------------------------------------- /vagrant/slave/owner: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatQE/cinch/467e44fb255796a68a72b17f6e30e33170a688fa/vagrant/slave/owner -------------------------------------------------------------------------------- /vagrant/slave/simple: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatQE/cinch/467e44fb255796a68a72b17f6e30e33170a688fa/vagrant/slave/simple -------------------------------------------------------------------------------- /vagrant/slave_cent6/Vagrantfile: -------------------------------------------------------------------------------- 1 | require "../shared.rb" 2 | 3 | Vagrant.configure("2") do |config| 4 | vm(config, "master") 5 | vm(config, "slave", "centos/6") do |ansible| 6 | ansible.groups = { 7 | "jenkins_master" => ["master"], 8 | "jenkins_slave" => ["slave"], 9 | "cent7" => ["master"], 10 | "cent6" => ["slave"], 11 | "repositories" => ["master", "slave"], 12 | "jenkins_slave:vars" => { 13 | "jenkins_master_url" => "http://{{ hostvars['master']['ansible_default_ipv4']['address'] }}", 14 | "jenkins_user_password" => "vagrant", 15 | 
"jenkins_user" => "jenkins", 16 | "jenkins_user_home" => "/var/lib/jenkins" 17 | }, 18 | "jenkins_master:vars" => { 19 | "https_enabled" => "False" 20 | } 21 | } 22 | end 23 | end 24 | -------------------------------------------------------------------------------- /vagrant/slave_rhel6/Vagrantfile: -------------------------------------------------------------------------------- 1 | require "../shared.rb" 2 | 3 | Vagrant.configure("2") do |config| 4 | vm(config, "slave", "davechaitanya123/rhel6") 5 | end 6 | -------------------------------------------------------------------------------- /vagrant/slave_rhel6/configure.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source ../shared.sh 4 | venv || exit 1 5 | playbook slave_rhel6 "$@" || exit 1 6 | -------------------------------------------------------------------------------- /vagrant/slave_rhel6/full_cycle.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source ../shared.sh 4 | vagrant_cycle slave_rhel6 || exit 1 5 | venv || exit 1 6 | playbook slave_rhel6 "$@" || exit 1 7 | -------------------------------------------------------------------------------- /vagrant/slave_rhel6/hosts: -------------------------------------------------------------------------------- 1 | [jenkins_slave] 2 | slave ansible_host=192.168.8.2 3 | 4 | [rhel6] 5 | slave 6 | 7 | [vagrant] 8 | slave 9 | -------------------------------------------------------------------------------- /vagrant/slave_rhel7/README: -------------------------------------------------------------------------------- 1 | This file spins up a pair of Jenkins master and slave instances 2 | running RHEL7. They will be based on a rhel7.4 box. 3 | Since these boxes are not available to the general public, you 4 | will need to either update the Vagrantfile with the name and URL 5 | of your accessible base box or you will need to import the box. 
6 | Importing can be done with the command 7 | 8 | ``vagrant box add --name rhel7.4 <url-to-your-box-file>`` 9 | 10 | You will still need to modify the Vagrantfile to update the value 11 | of the rhel_base variable to point to the base directory of your 12 | available RHEL7 repositories. Since those files are also under 13 | license, you will need to have access to such a repository in order 14 | for the Ansible playbooks to properly install the software. 15 | -------------------------------------------------------------------------------- /vagrant/slave_rhel7/Vagrantfile: -------------------------------------------------------------------------------- 1 | require "../shared.rb" 2 | 3 | Vagrant.configure("2") do |config| 4 | vm(config, "master", "generic/rhel7") 5 | vm(config, "slave", "generic/rhel7") do |ansible| 6 | ansible.groups = { 7 | "jenkins_master" => ["master"], 8 | "jenkins_slave" => ["slave"], 9 | "rhel7" => ["master", "slave"], 10 | "repositories" => ["master", "slave"], 11 | "jenkins_slave:vars" => { 12 | "jenkins_master_url" => "http://{{ hostvars['master']['ansible_default_ipv4']['address'] }}" 13 | }, 14 | "rhel7:vars" => { 15 | "rhel_base" => "http://example.com/content/dist/rhel/server/7/7Server" 16 | } 17 | } 18 | end 19 | end 20 | -------------------------------------------------------------------------------- /vagrant/slave_rhel8/README: -------------------------------------------------------------------------------- 1 | This file spins up a pair of Jenkins master and slave instances 2 | running RHEL8. They will be based on a rhel8.0 box. 3 | 4 | You will still need to modify the Vagrantfile to update the value 5 | of the rhel_base variable to point to the base directory of your 6 | available RHEL8 repositories. Since those files are also under 7 | license, you will need to have access to such a repository in order 8 | for the Ansible playbooks to properly install the software.
9 | -------------------------------------------------------------------------------- /vagrant/slave_rhel8/Vagrantfile: -------------------------------------------------------------------------------- 1 | require "../shared.rb" 2 | 3 | Vagrant.configure("2") do |config| 4 | vm(config, "master", "generic/rhel7") 5 | vm(config, "slave", "generic/rhel8") do |ansible| 6 | ansible.groups = { 7 | "jenkins_master" => ["master"], 8 | "jenkins_slave" => ["slave"], 9 | "rhel8" => ["slave"], 10 | "rhel7" => ["master"], 11 | "repositories" => ["master", "slave"], 12 | "jenkins_slave:vars" => { 13 | "jenkins_master_url" => "http://{{ hostvars['master']['ansible_default_ipv4']['address'] }}" 14 | }, 15 | "rhel7:vars" => { 16 | "rhel_base" => "http://pulp.dist.prod.ext.phx2.redhat.com/content/dist/rhel/server/7/7Server/" 17 | }, 18 | "rhel8:vars" => { 19 | "rhel_base" => "http://pulp.dist.prod.ext.phx2.redhat.com/content/beta/rhel8/8/" 20 | } 21 | } 22 | end 23 | end 24 | -------------------------------------------------------------------------------- /vagrant/slave_security_enabled/Vagrantfile: -------------------------------------------------------------------------------- 1 | require "../shared.rb" 2 | require 'json' 3 | 4 | base = File.dirname(__FILE__) 5 | 6 | Vagrant.configure("2") do |config| 7 | vm(config, "master") 8 | vm(config, "slave") do |ansible| 9 | ansible.groups = { 10 | "jenkins_master" => ["master"], 11 | "jenkins_slave" => ["slave"], 12 | "cent7" => ["master", "slave"], 13 | "repositories" => ["master", "slave"], 14 | "jenkins_slave:vars" => { 15 | "jenkins_master_url" => "http://{{ hostvars['master']['ansible_default_ipv4']['address'] }}", 16 | "jenkins_user" => "jenkins", 17 | "jenkins_user_home" => "/var/lib/jenkins", 18 | "jenkins_slave_username" => "jenkins-admin", 19 | "jenkins_slave_password" => "{{ hostvars['master']['admin_api_key'] }}" 20 | }, 21 | "jenkins_master:vars" => { 22 | "https_enabled" => "False", 23 | "jenkins_security_enabled" => "True", 
24 | "jenkins_ldap" => { 25 | "server" => "ldaps://ldap.corp.example.com", 26 | "root_dn" => "dc=example,dc=com", 27 | "user_search" => "uid={0}", 28 | "group_search_base" => "ou=groups", 29 | "group_search_filter" => "(& (cn={0}) (objectclass=posixGroup))", 30 | "group_membership" => "memberUid={1}", 31 | "display_name_attr" => "displayname", 32 | "email_addr_attr" => "mail" 33 | }.to_json, 34 | "jenkins_admin_sids" => [ 35 | "jenkins-admin" 36 | ].to_json 37 | } 38 | } 39 | #ansible.tags = "totest" 40 | end 41 | end 42 | 43 | --------------------------------------------------------------------------------