├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── Vagrantfile ├── group_vars └── all ├── hosts ├── images └── pipeline.jpg ├── kubernetes_files ├── Jenkinsfile ├── deployments │ └── deployment.yaml ├── playbook.yml └── services │ └── service.yaml ├── roles ├── common │ └── tasks │ │ └── main.yml ├── docker │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── daemon.json ├── helm │ └── tasks │ │ ├── main.yml │ │ └── prometheus-operator.yml ├── java │ └── tasks │ │ └── main.yml ├── jenkins │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── basic-security-groovy.j2 │ │ ├── config-as-code.yaml │ │ └── jenkins-config.j2 ├── kubernetes │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ ├── main.yml │ │ ├── master-setup.yml │ │ └── node-setup.yml │ └── templates │ │ └── kube-flannel.yml ├── postgresql │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── pg_hba.conf └── sonarqube │ ├── tasks │ └── main.yml │ └── templates │ ├── sonar.properties │ └── sonarqube.service ├── scratch.yml └── src ├── main.go └── main_test.go /.gitignore: -------------------------------------------------------------------------------- 1 | join-command 2 | .vagrant 3 | .vscode -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:alpine AS build-env 2 | RUN mkdir /go/src/app && apk update && apk add git 3 | ADD src/main.go /go/src/app/ 4 | WORKDIR /go/src/app 5 | RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o app . 6 | 7 | FROM scratch 8 | WORKDIR /app 9 | COPY --from=build-env /go/src/app/app . 10 | ENTRYPOINT [ "./app" ] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Fatih Koç 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # End-to-End-DevOps 2 | ---- 3 | ## Purpose of the project 4 | 5 | With this project I am trying to create a immutable infrastructure with simple deployment pipeline. 
I hope this project will help you understand the basics and the integration of the tools used in CI/CD pipelines. 6 | 7 | For now the project only includes technologies like Kubernetes, Ansible, Vagrant and Jenkins, but in the near future additional tools will be added and the playbooks will be brought closer to best practices, with some test cases. 8 | 9 | ## Technologies 10 | 11 | - Ansible 12 | - Vagrant 13 | - Jenkins 14 | - Configuration As Code 15 | - Pipeline 16 | - Kubernetes 17 | - Docker 18 | - Helm 19 | - Flannel 20 | - SonarQube 21 | - Sonar-Scanner 22 | - PostgreSQL 23 | - Prometheus-Operator 24 | - Grafana 25 | - AlertManager 26 | - Golang 27 | 28 | 29 | ## What happens after startup 30 | 31 | After the servers are provisioned, Ansible uses the common role to update packages, change DNS settings, disable swap, etc. 32 | 33 | SonarQube is installed together with PostgreSQL and Sonar-Scanner. 34 | 35 | The Jenkins role installs Jenkins with some plugins and adds an admin user, then uses the Configuration as Code plugin to create jobs and credentials. 36 | 37 | The Docker and Kubernetes roles basically install the required packages and start the Kubernetes cluster with Flannel. 38 | 39 | When a commit to the GitHub repository triggers the job, the app inside the src directory is built and tested. After the tests pass, SonarQube scans the code, the Dockerfile builds the image, and the image is pushed to the private repository on DockerHub. The app is then deployed to the Kubernetes cluster with the help of Ansible playbooks. 40 | 41 | ![Example pipeline](./images/pipeline.jpg) 42 | 43 | ---- 44 | ## Requirements 45 | - VirtualBox 46 | - Vagrant 47 | - Ansible 48 | - GitHub account 49 | - DockerHub account 50 | - 8 GB RAM minimum 51 | 52 | ---- 53 | ## Usage 54 | ### Fork repository 55 | 56 | For testing everything I recommend using a private repository. As you can see, the repository stores credentials (for now). This can cause serious security problems. 57 | 58 | Creating a private DockerHub repository is also recommended. 59 | 60 | ### Change variables 61 | 62 | Change the variables in group_vars/all. Don't forget the GitHub and DockerHub ones. 63 | 64 | github_repo: end-to-end-devops 65 | github_username: ###### 66 | github_password: ###### 67 | 68 | dockerhub_repo: end-to-end-devops 69 | dockerhub_username: ##### 70 | dockerhub_password: ##### 71 | dockerhub_email: ##### 72 | 73 | Change the variables in hosts. Make sure that you have configured your SSH keys. Run 74 | 75 | vagrant ssh-config 76 | 77 | to see which SSH keys are used and where they are located. 78 | 79 | ### Provisioning 80 | 81 | vagrant up 82 | 83 | ### Show time 84 | 85 | Use ansible-playbook to install Jenkins, Docker and Kubernetes. You can use the -vvv parameter to see what happens behind the scenes. 86 | 87 | ansible-playbook -i hosts scratch.yml 88 | 89 | ### Testing pipeline 90 | 91 | Just commit to your project and the Jenkins job will be triggered. After the job finishes, check your application from your host. 92 | 93 | curl 192.168.7.2:32000 94 | 95 | ### Access Services 96 | 97 | 192.168.7.2:8080 Jenkins 98 | 192.168.7.2:9000/sonarqube SonarQube 99 | 192.168.7.2:80 Grafana 100 | 192.168.7.2:9090 Prometheus 101 | 192.168.7.2:9093 AlertManager 102 | 103 | ### Better, Faster, Stronger! 104 | 105 | In practice I don't download, build and install everything from scratch every time. Everything-as-Code projects like this take too much time to build. For example, the common and docker roles are used on all virtual machines, so I created a box with those roles already applied and don't have to update and reinstall packages on every run.
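For example, a provisioned machine can be packaged into a reusable box and registered with Vagrant (a minimal sketch; the box name is up to you):

    vagrant package master --output devops-base.box
    vagrant box add devops-base devops-base.box

After that, point IMAGE_NAME in the Vagrantfile at your own box instead of centos/7.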
You can use snapshots for development too, and [this](https://scotch.io/tutorials/how-to-create-a-vagrant-base-box-from-an-existing-one) guide walks through creating your own base box in more detail. 106 | 107 | Everything depends on your commit. If you change little pieces of all the roles, then you need to build the project from scratch. If you change a single line in the Configuration-as-Code file, just reload the file and don't waste your time rebuilding. 108 | 109 | You can also use cloud providers for a better internet connection, but make sure that you configure a proper firewall and other security measures. 110 | 111 | 112 | ## About Firewalld and SELinux 113 | 114 | As you can see, I don't use a firewall for this project. You can enable firewalld and the required ports (they are included in the project, commented out), but for a testing environment I don't want to use them. 115 | 116 | ## TODO 117 | 118 | Change the helm steps after [this](https://github.com/ansible-collections/kubernetes/pull/61) issue is closed. 119 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | IMAGE_NAME = "centos/7" 2 | N = 1 3 | 4 | Vagrant.configure("2") do |config| 5 | 6 | config.vm.define "master" do |master| 7 | master.vm.box = IMAGE_NAME 8 | master.vm.hostname = "master.example.com" 9 | master.vm.network "private_network", ip: "192.168.7.2" 10 | master.ssh.insert_key = false 11 | master.vm.hostname = "master" 12 | master.vm.synced_folder ".", "/vagrant", disabled: true 13 | master.vm.provider "virtualbox" do |v| 14 | v.name = "master" 15 | v.memory = 6000 16 | v.cpus = 2 17 | end 18 | end 19 | 20 | (1..N).each do |i| 21 | config.vm.define "node#{i}" do |node| 22 | node.vm.box = IMAGE_NAME 23 | node.vm.hostname = "node#{i}.example.com" 24 | node.vm.network "private_network", ip: "192.168.7.#{i+2}" 25 | node.vm.hostname = "node#{i}" 26 | node.ssh.insert_key = false 27 | node.vm.synced_folder ".", "/vagrant", disabled: true 28 | node.vm.provider "virtualbox" do |v| 29 | v.name = "node#{i}" 30 | v.memory = 1024 31 | v.cpus = 1 32 | end 33 | end 34 | end 35 | end 36 | -------------------------------------------------------------------------------- /group_vars/all: -------------------------------------------------------------------------------- 1 | #GENERAL VARIABLES 2 | 3 | master_ip_address: 192.168.7.2 4 | 5 | #COMMON VARIABLES 6 | 7 | common_packages: 8 | - curl 9 | - wget 10 | - python3-pip 11 | - yum-utils 12 | - python-setuptools 13 | - initscripts 14 | - git 15 | - unzip 16 | 17 | kernel_modules: 18 | - br_netfilter #For kubeadm 19 | # - overlay 20 | # - ip_vs 21 | # - ip_vs_rr 22 | # - ip_vs_wrr 23 | # - ip_vs_sh 24 | # - nf_conntrack_ipv4 25 | 26 | sysctl_entries: 27 | - {key: net.bridge.bridge-nf-call-ip6tables, value: 1} 28 | - {key: net.bridge.bridge-nf-call-iptables, value: 1} 29 | - {key: net.ipv4.ip_forward, value: 1} 30 | 31 | #JAVA VARIABLES 32 | 33 | java_package: java-11-openjdk-devel 34 | 35 | #POSTGRESQL VARIABLES 36 | 37 | psql_packages: 38 | - postgresql12-server 39 | - postgresql12 40 | - postgresql12-contrib 41 | - postgresql12-libs 42 | - python3-psycopg2 43 | - python-psycopg2 44 | 45 | #SONARQUBE VARIABLES 46 | 47 | sonarqube_version: 8.1.0.31237 48 | sonarqube_port: 9000 49 | sonar_scanner_cli_version: 4.2.0.1873 50 | 51 | #JENKINS VARIABLES 52 | 53 | github_repo: end-to-end-devops 54 | github_username: ****** 55 | github_password: ****** 56 | 57 | dockerhub_repo: end-to-end-devops 58 | dockerhub_username: ****** 59 |
dockerhub_password: ****** 60 | dockerhub_email: ****** 61 | 62 | jenkins_admin_username: fatih 63 | jenkins_admin_password: fatih 64 | jenkins_hostname: localhost 65 | jenkins_port: 8080 66 | 67 | jenkins_init_changes: 68 | - option: "JENKINS_ARGS" 69 | value: "--prefix={{ jenkins_url_prefix }}" 70 | - option: "{{ jenkins_java_options_env_var }}" 71 | value: "{{ jenkins_java_options }}" 72 | 73 | 74 | jenkins_plugin_timeout: 30 75 | 76 | jenkins_plugins: 77 | - timestamper 78 | - credentials-binding 79 | - email-ext 80 | - build-timeout 81 | - workflow-aggregator 82 | - pipeline-stage-view 83 | - pipeline-build-step 84 | - github-branch-source 85 | - ssh-slaves 86 | - pipeline-github-lib 87 | - matrix-auth 88 | - mailer 89 | - antisamy-markup-formatter 90 | - gradle 91 | - pam-auth 92 | - git 93 | - ws-cleanup 94 | - cloudbees-folder 95 | - configuration-as-code 96 | - job-dsl 97 | - sonar 98 | 99 | #KUBERNETES VARIABLES 100 | 101 | pod_network_cidr: 10.244.0.0/16 102 | k8s_interface: eth1 103 | -------------------------------------------------------------------------------- /hosts: -------------------------------------------------------------------------------- 1 | [odin] 2 | master ansible_ssh_host=192.168.7.2 ansible_ssh_user=vagrant ansible_ssh_private_key_file=~/.vagrant.d/insecure_private_key kubernetes_role=master 3 | node1 ansible_ssh_host=192.168.7.3 ansible_ssh_user=vagrant ansible_ssh_private_key_file=~/.vagrant.d/insecure_private_key kubernetes_role=node 4 | 5 | [master] 6 | master ansible_ssh_host=192.168.7.2 ansible_ssh_user=vagrant ansible_ssh_private_key_file=~/.vagrant.d/insecure_private_key kubernetes_role=master 7 | 8 | 9 | [node1] 10 | node1 ansible_ssh_host=192.168.7.3 ansible_ssh_user=vagrant ansible_ssh_private_key_file=~/.vagrant.d/insecure_private_key kubernetes_role=node 11 | 12 | [node2] 13 | node2 ansible_ssh_host=192.168.7.4 ansible_ssh_user=vagrant ansible_ssh_private_key_file=~/.vagrant.d/insecure_private_key kubernetes_role=node 14 | 15 | 16 | [kubernetes] 17 | master ansible_ssh_host=192.168.7.2 ansible_ssh_user=vagrant ansible_ssh_private_key_file=~/.vagrant.d/insecure_private_key kubernetes_role=master 18 | node1 ansible_ssh_host=192.168.7.3 ansible_ssh_user=vagrant ansible_ssh_private_key_file=~/.vagrant.d/insecure_private_key kubernetes_role=node 19 | node2 ansible_ssh_host=192.168.7.4 ansible_ssh_user=vagrant ansible_ssh_private_key_file=~/.vagrant.d/insecure_private_key kubernetes_role=node 20 | -------------------------------------------------------------------------------- /images/pipeline.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fatihkc/end-to-end-devops/33497c6c74fe495dc9cd189362250c6badcfa2cf/images/pipeline.jpg -------------------------------------------------------------------------------- /kubernetes_files/Jenkinsfile: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent any 3 | environment { 4 | registry = "fatihkoc/end-to-end-infra" 5 | GOCACHE = "/tmp" 6 | } 7 | stages { 8 | stage('Build') { 9 | agent { 10 | docker { 11 | image 'golang' 12 | } 13 | } 14 | steps { 15 | // Create our project directory. 16 | sh 'cd ${GOPATH}/src' 17 | sh 'mkdir -p ${GOPATH}/src/hello-world' 18 | // Copy all files in our Jenkins workspace to our project directory. 19 | sh 'cp -r ${WORKSPACE}/* ${GOPATH}/src/hello-world' 20 | // Build the app. 
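// Note: the steps above and below run inside the golang container agent declared for this stage; GOCACHE is pointed at /tmp in the environment block so the Go toolchain has a writable cache directory there.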
21 | sh 'cd src && go build' 22 | } 23 | } 24 | stage('Test') { 25 | agent { 26 | docker { 27 | image 'golang' 28 | } 29 | } 30 | steps { 31 | // Create our project directory. 32 | sh 'cd ${GOPATH}/src' 33 | sh 'mkdir -p ${GOPATH}/src/hello-world' 34 | // Copy all files in our Jenkins workspace to our project directory. 35 | sh 'cp -r ${WORKSPACE}/* ${GOPATH}/src/hello-world' 36 | // Remove cached test results. 37 | sh 'go clean -cache' 38 | // Run Unit Tests. 39 | sh 'go test ./src/ -v -short' 40 | } 41 | } 42 | stage('SonarQube analysis') { 43 | steps { 44 | sh 'sonar-scanner -Dsonar.projectKey=test -Dsonar.sources=. -Dsonar.host.url=http://192.168.7.2:9000/sonarqube -Dsonar.login=admin -Dsonar.password=admin' 45 | } 46 | } 47 | stage('Publish') { 48 | environment { 49 | registryCredential = 'dockerhub' 50 | } 51 | steps{ 52 | script { 53 | def appimage = docker.build registry + ":$BUILD_NUMBER" 54 | docker.withRegistry( '', registryCredential ) { 55 | appimage.push() 56 | appimage.push('latest') 57 | } 58 | } 59 | } 60 | } 61 | stage ('Deploy') { 62 | steps { 63 | script{ 64 | def image_id = registry + ":$BUILD_NUMBER" 65 | sh "/usr/local/bin/ansible-playbook kubernetes_files/playbook.yml --extra-vars \"image_id=${image_id}\" -i ./" 66 | } 67 | } 68 | } 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /kubernetes_files/deployments/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: hello-deployment 5 | labels: 6 | role: app 7 | spec: 8 | replicas: 2 9 | selector: 10 | matchLabels: 11 | role: app 12 | template: 13 | metadata: 14 | labels: 15 | role: app 16 | spec: 17 | containers: 18 | - name: app 19 | image: "{{ image_id }}" 20 | ports: 21 | - containerPort: 80 22 | resources: 23 | requests: 24 | cpu: 10m 25 | imagePullSecrets: 26 | - name: dockerhub -------------------------------------------------------------------------------- /kubernetes_files/playbook.yml: -------------------------------------------------------------------------------- 1 | - hosts: localhost 2 | remote_user: vagrant 3 | tasks: 4 | 5 | - name: 6 | command: kubectl delete secret dockerhub 7 | ignore_errors: yes 8 | 9 | - name: 10 | command: kubectl create secret docker-registry dockerhub --docker-server=docker.io --docker-username={{ dockerhub_username}} --docker-password={{ dockerhub_password }} --docker-email={{ dockerhub_email }} 11 | 12 | - name: Deploy the service 13 | k8s: 14 | state: present 15 | definition: "{{ lookup('template', 'services/service.yaml') | from_yaml }}" 16 | validate_certs: no 17 | namespace: default 18 | - name: Deploy the application 19 | k8s: 20 | state: present 21 | validate_certs: no 22 | namespace: default 23 | definition: "{{ lookup('template', 'deployments/deployment.yaml') | from_yaml }}" -------------------------------------------------------------------------------- /kubernetes_files/services/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hello-svc 5 | spec: 6 | selector: 7 | role: app 8 | ports: 9 | - protocol: TCP 10 | port: 80 11 | targetPort: 80 12 | nodePort: 32000 13 | type: NodePort -------------------------------------------------------------------------------- /roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Update all 
packages 3 | yum: 4 | name: '*' 5 | state: latest 6 | 7 | - name: Install EPEL repository 8 | yum: 9 | name: 10 | - epel-release 11 | state: latest 12 | 13 | - name: Install common packages 14 | yum: 15 | name: "{{ common_packages }}" 16 | state: latest 17 | 18 | - name: Install ansible and openshift packages 19 | pip: 20 | name: 21 | - ansible 22 | - openshift 23 | executable: pip3 24 | extra_args: --user 25 | 26 | #For the testing environment the firewall can be stopped. 27 | - name: Make sure that firewalld is stopped 28 | service: name=firewalld state=stopped 29 | 30 | - name: Add Google DNS to /etc/resolv.conf 31 | blockinfile: 32 | path: /etc/resolv.conf 33 | block: | 34 | nameserver 8.8.8.8 35 | nameserver 8.8.4.4 36 | state: present 37 | 38 | - name: Add domains to /etc/hosts 39 | blockinfile: 40 | path: /etc/hosts 41 | block: | 42 | 192.168.7.2 master.example.com master 43 | 192.168.7.3 node1.example.com node1 44 | 192.168.7.4 node2.example.com node2 45 | state: present 46 | 47 | #For the testing environment SELinux can be disabled. 48 | - name: Disable SELinux 49 | selinux: 50 | state: disabled 51 | 52 | #For Kubernetes we need to disable swap. Otherwise it becomes harder to provision the cluster. 53 | - name: Disable SWAP-1 54 | shell: | 55 | swapoff -a 56 | 57 | - name: Disable SWAP-2 58 | replace: 59 | path: /etc/fstab 60 | regexp: '^(.+ swap .*)$' 61 | replace: '# \1' 62 | 63 | - name: Load required kernel modules 64 | modprobe: 65 | name: "{{ item }}" 66 | state: present 67 | with_items: "{{ kernel_modules }}" 68 | 69 | - name: Modify sysctl entries 70 | sysctl: 71 | name: '{{ item.key }}' 72 | value: '{{ item.value }}' 73 | sysctl_set: yes 74 | state: present 75 | reload: yes 76 | ignore_errors: True 77 | with_items: "{{ sysctl_entries }}" -------------------------------------------------------------------------------- /roles/docker/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: restart docker 2 | service: name=docker state=restarted -------------------------------------------------------------------------------- /roles/docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install required packages for Docker 3 | yum: 4 | name: 5 | - yum-utils 6 | - device-mapper-persistent-data 7 | - lvm2 8 | 9 | - name: Add Docker repo 10 | get_url: 11 | url: https://download.docker.com/linux/centos/docker-ce.repo 12 | dest: /etc/yum.repos.d/docker-ce.repo 13 | become: yes 14 | 15 | - name: Install Docker and dependencies 16 | yum: 17 | name: 18 | - docker-ce 19 | state: latest 20 | 21 | - name: Start Docker Service 22 | service: name=docker state=started enabled=yes 23 | 24 | - name: Add vagrant and jenkins users to the docker group 25 | user: 26 | name: "{{ item }}" 27 | groups: docker 28 | append: true 29 | with_items: 30 | - vagrant 31 | - jenkins 32 | 33 | #Kubernetes needs the systemd cgroup driver.
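#kubeadm expects the container runtime and the kubelet to use the same cgroup driver, which is why the daemon.json template below sets native.cgroupdriver=systemd.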
34 | - name: Copy daemon.json for Docker 35 | template: src=daemon.json dest=/etc/docker/daemon.json 36 | 37 | - name: Restart docker 38 | service: name=docker state=restarted -------------------------------------------------------------------------------- /roles/docker/templates/daemon.json: -------------------------------------------------------------------------------- 1 | { 2 | "exec-opts": ["native.cgroupdriver=systemd"], 3 | "log-driver": "json-file", 4 | "log-opts": { 5 | "max-size": "100m" 6 | }, 7 | "storage-driver": "overlay2", 8 | "storage-opts": [ 9 | "overlay2.override_kernel_check=true" 10 | ] 11 | } -------------------------------------------------------------------------------- /roles/helm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Helm 3.1.2 3 | unarchive: 4 | src: https://get.helm.sh/helm-v3.1.2-linux-amd64.tar.gz 5 | dest: /tmp 6 | remote_src: yes 7 | 8 | - name: Move the helm binary to /usr/bin 9 | command: mv /tmp/linux-amd64/helm /usr/bin/helm 10 | 11 | - name: Add repo 12 | command: helm repo add stable https://kubernetes-charts.storage.googleapis.com 13 | become_user: vagrant 14 | when: kubernetes_role == 'master' 15 | 16 | - name: Update repo 17 | command: helm repo update 18 | become_user: vagrant 19 | when: kubernetes_role == 'master' 20 | 21 | - include_tasks: prometheus-operator.yml 22 | when: kubernetes_role == 'master' -------------------------------------------------------------------------------- /roles/helm/tasks/prometheus-operator.yml: -------------------------------------------------------------------------------- 1 | ###THIS FILE WILL BE REPLACED WHEN ANSIBLE STARTS TO SUPPORT HELM 3. 2 | 3 | - name: Create monitoring namespace 4 | command: kubectl create namespace monitoring 5 | become_user: vagrant 6 | 7 | - name: Install prometheus-operator 8 | command: helm install --namespace monitoring demo stable/prometheus-operator 9 | become_user: vagrant 10 | 11 | - name: Port-forward 12 | command: kubectl port-forward --address 0.0.0.0 svc/demo-grafana 80 --namespace monitoring 13 | become_user: vagrant -------------------------------------------------------------------------------- /roles/java/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Java packages and dependencies 3 | yum: name={{ java_package }} state=present -------------------------------------------------------------------------------- /roles/jenkins/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: restart jenkins 2 | service: name=jenkins state=restarted -------------------------------------------------------------------------------- /roles/jenkins/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add Jenkins repository 3 | get_url: 4 | url: http://pkg.jenkins-ci.org/redhat-stable/jenkins.repo 5 | dest: /etc/yum.repos.d/jenkins.repo 6 | mode: '0640' 7 | 8 | - rpm_key: 9 | state: present 10 | key: https://jenkins-ci.org/redhat/jenkins-ci.org.key 11 | 12 | #Version 2.204.6 is important. For now this project works only with that version. 13 | #Newer versions are crashing due to security issues for now. 14 | - name: Install Jenkins packages and dependencies 15 | yum: name=jenkins-2.204.6 state=present 16 | 17 | #Configure as you want.
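#The jenkins-config.j2 template sets JENKINS_PORT from the jenkins_port variable and passes -Djenkins.install.runSetupWizard=false so the setup wizard is skipped on first boot.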
18 | - name: Create configuration file for Jenkins 19 | template: 20 | src: jenkins-config.j2 21 | dest: /etc/sysconfig/jenkins 22 | owner: root 23 | group: root 24 | mode: 0600 25 | 26 | - name: Create initial script directory 27 | file: 28 | path: "/var/lib/jenkins/init.groovy.d" 29 | state: directory 30 | owner: jenkins 31 | group: jenkins 32 | 33 | - name: Add admin user to Jenkins 34 | template: 35 | src: basic-security-groovy.j2 36 | dest: /var/lib/jenkins/init.groovy.d/basic-security.groovy 37 | owner: jenkins 38 | group: jenkins 39 | mode: 0775 40 | 41 | # - firewalld: 42 | # port: "{{ jenkins_port }}/tcp" 43 | # permanent: yes 44 | # state: enabled 45 | 46 | - name: Start Jenkins Service 47 | service: name=jenkins state=started enabled=yes 48 | 49 | - name: Copy Configuration-as-Code plugin YAML 50 | template: src=config-as-code.yaml dest=/var/lib/jenkins/jenkins.yaml 51 | 52 | - name: Wait for Jenkins to start up before proceeding. 53 | command: > 54 | curl -D - --silent --max-time 5 http://localhost:{{ jenkins_port }}/cli/ 55 | args: 56 | warn: false 57 | register: result 58 | until: > 59 | (result.stdout.find("403 Forbidden") != -1) 60 | or (result.stdout.find("200 OK") != -1) 61 | and (result.stdout.find("Please wait while") == -1) 62 | retries: 60 63 | delay: 5 64 | changed_when: false 65 | check_mode: false 66 | 67 | - name: Get the jenkins-cli jarfile from the Jenkins server. 68 | get_url: 69 | url: "http://localhost:{{ jenkins_port }}/jnlpJars/jenkins-cli.jar" 70 | dest: /opt/jenkins-cli.jar 71 | register: jarfile_get 72 | until: "'OK' in jarfile_get.msg or '304' in jarfile_get.msg or 'file already exists' in jarfile_get.msg" 73 | retries: 5 74 | delay: 10 75 | check_mode: false 76 | 77 | - name: Remove Jenkins security init scripts after first startup. 78 | file: 79 | path: "/var/lib/jenkins/init.groovy.d/basic-security.groovy" 80 | state: absent 81 | 82 | - name: Create Jenkins updates directory 83 | file: 84 | path: "/var/lib/jenkins/updates" 85 | state: directory 86 | owner: jenkins 87 | group: jenkins 88 | 89 | - name: Download current plugin updates from Jenkins update site. 90 | get_url: 91 | url: "https://updates.jenkins.io/update-center.json" 92 | dest: "/var/lib/jenkins/updates/default.json" 93 | owner: jenkins 94 | group: jenkins 95 | mode: 0440 96 | changed_when: false 97 | register: get_result 98 | until: get_result is success 99 | retries: 5 100 | delay: 3 101 | 102 | - name: Remove first and last line (the JSONP wrapper) from json file. 103 | command: sed -i '1d;$d' /var/lib/jenkins/updates/default.json 104 | args: 105 | warn: false 106 | 107 | - name: Install Jenkins plugins using password.
108 | jenkins_plugin: 109 | name: "{{ item.name | default(item) }}" 110 | version: "{{ item.version | default(omit) }}" 111 | jenkins_home: /var/lib/jenkins 112 | url_username: "{{ jenkins_admin_username }}" 113 | url_password: "{{ jenkins_admin_password }}" 114 | state: present 115 | timeout: 30 116 | updates_expiration: 86400 117 | updates_url: https://updates.jenkins.io 118 | url: "http://localhost:{{ jenkins_port }}" 119 | with_dependencies: true 120 | with_items: "{{ jenkins_plugins }}" 121 | notify: restart jenkins 122 | register: plugin_result 123 | until: plugin_result is success 124 | retries: 5 125 | delay: 2 -------------------------------------------------------------------------------- /roles/jenkins/templates/basic-security-groovy.j2: -------------------------------------------------------------------------------- 1 | #!groovy 2 | import hudson.security.* 3 | import jenkins.model.* 4 | 5 | def instance = Jenkins.getInstance() 6 | def hudsonRealm = new HudsonPrivateSecurityRealm(false) 7 | def users = hudsonRealm.getAllUsers() 8 | users_s = users.collect { it.toString() } 9 | 10 | // Create the admin user account if it doesn't already exist. 11 | if ("{{ jenkins_admin_username }}" in users_s) { 12 | println "Admin user already exists - updating password" 13 | 14 | def user = hudson.model.User.get('{{ jenkins_admin_username }}'); 15 | def password = hudson.security.HudsonPrivateSecurityRealm.Details.fromPlainPassword('{{ jenkins_admin_password }}') 16 | user.addProperty(password) 17 | user.save() 18 | } 19 | else { 20 | println "--> creating local admin user" 21 | 22 | hudsonRealm.createAccount('{{ jenkins_admin_username }}', '{{ jenkins_admin_password }}') 23 | instance.setSecurityRealm(hudsonRealm) 24 | 25 | def strategy = new FullControlOnceLoggedInAuthorizationStrategy() 26 | instance.setAuthorizationStrategy(strategy) 27 | instance.save() 28 | } -------------------------------------------------------------------------------- /roles/jenkins/templates/config-as-code.yaml: -------------------------------------------------------------------------------- 1 | credentials: 2 | system: 3 | domainCredentials: 4 | - credentials: 5 | - usernamePassword: 6 | description: "github" 7 | id: "github" 8 | username: "{{ github_username}}" 9 | password: "{{ github_password }}" 10 | scope: GLOBAL 11 | - usernamePassword: 12 | description: "dockerhub" 13 | id: "dockerhub" 14 | username: "{{ dockerhub_username }}" 15 | password: "{{ dockerhub_password }}" 16 | scope: GLOBAL 17 | jenkins: 18 | agentProtocols: 19 | - "JNLP4-connect" 20 | - "Ping" 21 | authorizationStrategy: 22 | loggedInUsersCanDoAnything: 23 | allowAnonymousRead: false 24 | crumbIssuer: 25 | standard: 26 | excludeClientIPFromCrumb: false 27 | disableRememberMe: false 28 | markupFormatter: "plainText" 29 | mode: NORMAL 30 | myViewsTabBar: "standard" 31 | numExecutors: 2 32 | primaryView: 33 | all: 34 | name: "all" 35 | projectNamingStrategy: "standard" 36 | quietPeriod: 5 37 | remotingSecurity: 38 | enabled: true 39 | scmCheckoutRetryCount: 0 40 | slaveAgentPort: -1 41 | updateCenter: 42 | sites: 43 | - id: "default" 44 | url: "https://updates.jenkins.io/update-center.json" 45 | views: 46 | - all: 47 | name: "all" 48 | viewsTabBar: "standard" 49 | security: 50 | apiToken: 51 | creationOfLegacyTokenEnabled: false 52 | tokenGenerationOnCreationEnabled: false 53 | usageStatisticsEnabled: true 54 | sSHD: 55 | port: -1 56 | unclassified: 57 | buildStepOperation: 58 | enabled: false 59 | defaultFolderConfiguration: 60 | healthMetrics: 
61 | - worstChildHealthMetric: 62 | recursive: true 63 | extendedEmailPublisher: 64 | adminRequiredForTemplateTesting: false 65 | allowUnregisteredEnabled: false 66 | charset: "UTF-8" 67 | debugMode: false 68 | defaultBody: |- 69 | $PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS: 70 | 71 | Check console output at $BUILD_URL to view the results. 72 | defaultSubject: "$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS!" 73 | maxAttachmentSize: -1 74 | maxAttachmentSizeMb: 0 75 | precedenceBulk: false 76 | useSsl: false 77 | watchingEnabled: false 78 | gitHubConfiguration: 79 | apiRateLimitChecker: ThrottleForNormalize 80 | gitHubPluginConfig: 81 | hookUrl: "http://{{ master_ip_address }}:{{ jenkins_port }}/github-webhook/" 82 | gitSCM: 83 | createAccountBasedOnEmail: false 84 | showEntireCommitSummaryInChanges: false 85 | useExistingAccountWithSameEmail: false 86 | location: 87 | adminAddress: "address not configured yet " 88 | url: "http://{{ master_ip_address }}:{{ jenkins_port }}/" 89 | mailer: 90 | charset: "UTF-8" 91 | useSsl: false 92 | pollSCM: 93 | pollingThreadCount: 10 94 | timestamperConfig: 95 | allPipelines: false 96 | elapsedTimeFormat: "''HH:mm:ss.S' '" 97 | systemTimeFormat: "''HH:mm:ss' '" 98 | tool: 99 | git: 100 | installations: 101 | - home: "git" 102 | name: "Default" 103 | 104 | jobs: 105 | - script: > 106 | pipelineJob('pipeline') { 107 | definition { 108 | cpsScm { 109 | scriptPath 'kubernetes_files/Jenkinsfile' 110 | scm { 111 | git { 112 | remote { 113 | github('{{ github_username }}/{{ github_repo }}', 'https') 114 | credentials('github') 115 | } 116 | branch '*/master' 117 | } 118 | } 119 | } 120 | } 121 | triggers { 122 | scm('* * * * *') 123 | } 124 | } -------------------------------------------------------------------------------- /roles/jenkins/templates/jenkins-config.j2: -------------------------------------------------------------------------------- 1 | JENKINS_HOME="/var/lib/jenkins" 2 | 3 | JENKINS_JAVA_CMD="" 4 | 5 | JENKINS_USER="jenkins" 6 | 7 | JENKINS_JAVA_OPTIONS="-Djava.awt.headless=true -Djenkins.install.runSetupWizard=false" 8 | 9 | JENKINS_PORT="{{ jenkins_port }}" 10 | 11 | JENKINS_LISTEN_ADDRESS="" 12 | 13 | JENKINS_HTTPS_PORT="" 14 | 15 | JENKINS_HTTPS_KEYSTORE="" 16 | 17 | JENKINS_HTTPS_KEYSTORE_PASSWORD="" 18 | 19 | JENKINS_HTTPS_LISTEN_ADDRESS="" 20 | 21 | JENKINS_HTTP2_PORT="" 22 | 23 | JENKINS_HTTP2_LISTEN_ADDRESS="" 24 | 25 | JENKINS_DEBUG_LEVEL="5" 26 | 27 | JENKINS_ENABLE_ACCESS_LOG="no" 28 | 29 | JENKINS_HANDLER_MAX="100" 30 | 31 | JENKINS_HANDLER_IDLE="20" 32 | 33 | JENKINS_EXTRA_LIB_FOLDER="" 34 | 35 | JENKINS_ARGS="" 36 | -------------------------------------------------------------------------------- /roles/kubernetes/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart kubelet 3 | service: name=kubelet state=restarted 4 | -------------------------------------------------------------------------------- /roles/kubernetes/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure Kubernetes repository exists 3 | yum_repository: 4 | name: kubernetes 5 | description: Kubernetes 6 | enabled: true 7 | gpgcheck: true 8 | repo_gpgcheck: true 9 | baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 10 | gpgkey: 11 | - https://packages.cloud.google.com/yum/doc/yum-key.gpg 12 | - https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg 13 | 14 | - name: Add 
Kubernetes GPG keys 15 | rpm_key: 16 | key: "{{ item }}" 17 | state: present 18 | with_items: 19 | - https://packages.cloud.google.com/yum/doc/yum-key.gpg 20 | - https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg 21 | 22 | - name: Make cache if Kubernetes GPG key changed 23 | command: "yum -q makecache -y --disablerepo='*' --enablerepo='kubernetes'" 24 | 25 | # - firewalld: 26 | # port: "{{ item }}/tcp" 27 | # permanent: yes 28 | # state: enabled 29 | # immediate: yes 30 | # loop: 31 | # - 6443 32 | # - 2379 33 | # - 2380 34 | # - 10250 35 | # - 10251 36 | # - 10252 37 | # when: kubernetes_role == 'master' 38 | 39 | # - firewalld: 40 | # port: "{{ item }}/tcp" 41 | # permanent: yes 42 | # state: enabled 43 | # immediate: yes 44 | # loop: 45 | # - 10250 46 | # - 30000-32767 47 | # when: kubernetes_role == 'node' 48 | 49 | 50 | - name: Install Kubernetes packages 51 | yum: 52 | name: 53 | - kubelet 54 | - kubectl 55 | - kubeadm 56 | state: present 57 | 58 | - name: Configure KUBELET_EXTRA_ARGS 59 | lineinfile: 60 | path: /etc/sysconfig/kubelet 61 | regexp: '^KUBELET_EXTRA_ARGS=' 62 | line: KUBELET_EXTRA_ARGS=--node-ip={{ ansible_host }} 63 | state: present 64 | 65 | - name: Ensure kubelet is started and enabled at boot 66 | service: 67 | name: kubelet 68 | state: started 69 | enabled: true 70 | 71 | - name: Check if Kubernetes has already been initialized 72 | stat: 73 | path: /etc/kubernetes/admin.conf 74 | 75 | - include_tasks: master-setup.yml 76 | when: kubernetes_role == 'master' 77 | 78 | - include_tasks: node-setup.yml 79 | when: kubernetes_role == 'node' 80 | 81 | - name: Setup kubeconfig for jenkins user 82 | command: "{{ item }}" 83 | with_items: 84 | - mkdir -p /var/lib/jenkins/.kube 85 | - cp -i /etc/kubernetes/admin.conf /var/lib/jenkins/.kube/config 86 | - chown jenkins:jenkins /var/lib/jenkins/.kube/config 87 | when: kubernetes_role == 'master' 88 | 89 | - name: Restart docker 90 | service: 91 | name: docker 92 | state: restarted 93 | when: kubernetes_role == 'master' 94 | 95 | - name: Restart jenkins 96 | service: 97 | name: jenkins 98 | state: restarted 99 | when: kubernetes_role == 'master' -------------------------------------------------------------------------------- /roles/kubernetes/tasks/master-setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Initialize Kubernetes master with kubeadm init 3 | command: kubeadm init --apiserver-advertise-address={{ master_ip_address }} --pod-network-cidr={{ pod_network_cidr }} 4 | 5 | - name: Setup kubeconfig for vagrant user 6 | command: "{{ item }}" 7 | with_items: 8 | - mkdir -p /home/vagrant/.kube 9 | - cp -i /etc/kubernetes/admin.conf /home/vagrant/.kube/config 10 | - chown vagrant:vagrant /home/vagrant/.kube/config 11 | 12 | - name: Setup kubeconfig for root user 13 | command: "{{ item }}" 14 | with_items: 15 | - mkdir -p /root/.kube 16 | - cp -i /etc/kubernetes/admin.conf /root/.kube/config 17 | - chown root:root /root/.kube/config 18 | 19 | - name: Copy kube-flannel for iface paramater 20 | template: src=kube-flannel.yml dest=/home/vagrant/kube-flannel.yml 21 | 22 | - name: Configure Flannel networking 23 | command: "{{ item }}" 24 | with_items: 25 | - kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/k8s-manifests/kube-flannel-rbac.yml 26 | - kubectl apply -f /home/vagrant/kube-flannel.yml 27 | 28 | - name: Create token for nodes 29 | command: kubeadm token create --print-join-command 30 | register: join_command 31 | 
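#The join command registered above is saved to ./join-command on the Ansible control machine; node-setup.yml later copies it to each node and runs it to join the cluster.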
32 | - name: Copy join command to local file 33 | become: false 34 | local_action: copy content="{{ join_command.stdout_lines[0] }}" dest="./join-command" -------------------------------------------------------------------------------- /roles/kubernetes/tasks/node-setup.yml: -------------------------------------------------------------------------------- 1 | - name: Copy the join command to server location 2 | copy: src=join-command dest=/tmp/join-command.sh mode=0777 3 | 4 | - name: Join the node to cluster 5 | command: sh /tmp/join-command.sh -------------------------------------------------------------------------------- /roles/kubernetes/templates/kube-flannel.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: policy/v1beta1 3 | kind: PodSecurityPolicy 4 | metadata: 5 | name: psp.flannel.unprivileged 6 | annotations: 7 | seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default 8 | seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default 9 | apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default 10 | apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default 11 | spec: 12 | privileged: false 13 | volumes: 14 | - configMap 15 | - secret 16 | - emptyDir 17 | - hostPath 18 | allowedHostPaths: 19 | - pathPrefix: "/etc/cni/net.d" 20 | - pathPrefix: "/etc/kube-flannel" 21 | - pathPrefix: "/run/flannel" 22 | readOnlyRootFilesystem: false 23 | # Users and groups 24 | runAsUser: 25 | rule: RunAsAny 26 | supplementalGroups: 27 | rule: RunAsAny 28 | fsGroup: 29 | rule: RunAsAny 30 | # Privilege Escalation 31 | allowPrivilegeEscalation: false 32 | defaultAllowPrivilegeEscalation: false 33 | # Capabilities 34 | allowedCapabilities: ['NET_ADMIN'] 35 | defaultAddCapabilities: [] 36 | requiredDropCapabilities: [] 37 | # Host namespaces 38 | hostPID: false 39 | hostIPC: false 40 | hostNetwork: true 41 | hostPorts: 42 | - min: 0 43 | max: 65535 44 | # SELinux 45 | seLinux: 46 | # SELinux is unused in CaaSP 47 | rule: 'RunAsAny' 48 | --- 49 | kind: ClusterRole 50 | apiVersion: rbac.authorization.k8s.io/v1beta1 51 | metadata: 52 | name: flannel 53 | rules: 54 | - apiGroups: ['extensions'] 55 | resources: ['podsecuritypolicies'] 56 | verbs: ['use'] 57 | resourceNames: ['psp.flannel.unprivileged'] 58 | - apiGroups: 59 | - "" 60 | resources: 61 | - pods 62 | verbs: 63 | - get 64 | - apiGroups: 65 | - "" 66 | resources: 67 | - nodes 68 | verbs: 69 | - list 70 | - watch 71 | - apiGroups: 72 | - "" 73 | resources: 74 | - nodes/status 75 | verbs: 76 | - patch 77 | --- 78 | kind: ClusterRoleBinding 79 | apiVersion: rbac.authorization.k8s.io/v1beta1 80 | metadata: 81 | name: flannel 82 | roleRef: 83 | apiGroup: rbac.authorization.k8s.io 84 | kind: ClusterRole 85 | name: flannel 86 | subjects: 87 | - kind: ServiceAccount 88 | name: flannel 89 | namespace: kube-system 90 | --- 91 | apiVersion: v1 92 | kind: ServiceAccount 93 | metadata: 94 | name: flannel 95 | namespace: kube-system 96 | --- 97 | kind: ConfigMap 98 | apiVersion: v1 99 | metadata: 100 | name: kube-flannel-cfg 101 | namespace: kube-system 102 | labels: 103 | tier: node 104 | app: flannel 105 | data: 106 | cni-conf.json: | 107 | { 108 | "name": "cbr0", 109 | "cniVersion": "0.3.1", 110 | "plugins": [ 111 | { 112 | "type": "flannel", 113 | "delegate": { 114 | "hairpinMode": true, 115 | "isDefaultGateway": true 116 | } 117 | }, 118 | { 119 | "type": "portmap", 120 | "capabilities": { 121 | "portMappings": true 122 | } 123 | } 124 | ] 
125 | } 126 | net-conf.json: | 127 | { 128 | "Network": "10.244.0.0/16", 129 | "Backend": { 130 | "Type": "vxlan" 131 | } 132 | } 133 | --- 134 | apiVersion: apps/v1 135 | kind: DaemonSet 136 | metadata: 137 | name: kube-flannel-ds-amd64 138 | namespace: kube-system 139 | labels: 140 | tier: node 141 | app: flannel 142 | spec: 143 | selector: 144 | matchLabels: 145 | app: flannel 146 | template: 147 | metadata: 148 | labels: 149 | tier: node 150 | app: flannel 151 | spec: 152 | affinity: 153 | nodeAffinity: 154 | requiredDuringSchedulingIgnoredDuringExecution: 155 | nodeSelectorTerms: 156 | - matchExpressions: 157 | - key: beta.kubernetes.io/os 158 | operator: In 159 | values: 160 | - linux 161 | - key: beta.kubernetes.io/arch 162 | operator: In 163 | values: 164 | - amd64 165 | hostNetwork: true 166 | tolerations: 167 | - operator: Exists 168 | effect: NoSchedule 169 | serviceAccountName: flannel 170 | initContainers: 171 | - name: install-cni 172 | image: quay.io/coreos/flannel:v0.12.0-amd64 173 | command: 174 | - cp 175 | args: 176 | - -f 177 | - /etc/kube-flannel/cni-conf.json 178 | - /etc/cni/net.d/10-flannel.conflist 179 | volumeMounts: 180 | - name: cni 181 | mountPath: /etc/cni/net.d 182 | - name: flannel-cfg 183 | mountPath: /etc/kube-flannel/ 184 | containers: 185 | - name: kube-flannel 186 | image: quay.io/coreos/flannel:v0.12.0-amd64 187 | command: 188 | - /opt/bin/flanneld 189 | args: 190 | - --ip-masq 191 | - --kube-subnet-mgr 192 | - --iface={{ k8s_interface }} 193 | resources: 194 | requests: 195 | cpu: "100m" 196 | memory: "50Mi" 197 | limits: 198 | cpu: "100m" 199 | memory: "50Mi" 200 | securityContext: 201 | privileged: false 202 | capabilities: 203 | add: ["NET_ADMIN"] 204 | env: 205 | - name: POD_NAME 206 | valueFrom: 207 | fieldRef: 208 | fieldPath: metadata.name 209 | - name: POD_NAMESPACE 210 | valueFrom: 211 | fieldRef: 212 | fieldPath: metadata.namespace 213 | volumeMounts: 214 | - name: run 215 | mountPath: /run/flannel 216 | - name: flannel-cfg 217 | mountPath: /etc/kube-flannel/ 218 | volumes: 219 | - name: run 220 | hostPath: 221 | path: /run/flannel 222 | - name: cni 223 | hostPath: 224 | path: /etc/cni/net.d 225 | - name: flannel-cfg 226 | configMap: 227 | name: kube-flannel-cfg 228 | --- 229 | apiVersion: apps/v1 230 | kind: DaemonSet 231 | metadata: 232 | name: kube-flannel-ds-arm64 233 | namespace: kube-system 234 | labels: 235 | tier: node 236 | app: flannel 237 | spec: 238 | selector: 239 | matchLabels: 240 | app: flannel 241 | template: 242 | metadata: 243 | labels: 244 | tier: node 245 | app: flannel 246 | spec: 247 | affinity: 248 | nodeAffinity: 249 | requiredDuringSchedulingIgnoredDuringExecution: 250 | nodeSelectorTerms: 251 | - matchExpressions: 252 | - key: beta.kubernetes.io/os 253 | operator: In 254 | values: 255 | - linux 256 | - key: beta.kubernetes.io/arch 257 | operator: In 258 | values: 259 | - arm64 260 | hostNetwork: true 261 | tolerations: 262 | - operator: Exists 263 | effect: NoSchedule 264 | serviceAccountName: flannel 265 | initContainers: 266 | - name: install-cni 267 | image: quay.io/coreos/flannel:v0.12.0-arm64 268 | command: 269 | - cp 270 | args: 271 | - -f 272 | - /etc/kube-flannel/cni-conf.json 273 | - /etc/cni/net.d/10-flannel.conflist 274 | volumeMounts: 275 | - name: cni 276 | mountPath: /etc/cni/net.d 277 | - name: flannel-cfg 278 | mountPath: /etc/kube-flannel/ 279 | containers: 280 | - name: kube-flannel 281 | image: quay.io/coreos/flannel:v0.12.0-arm64 282 | command: 283 | - /opt/bin/flanneld 284 | args: 285 | - 
--ip-masq 286 | - --kube-subnet-mgr 287 | resources: 288 | requests: 289 | cpu: "100m" 290 | memory: "50Mi" 291 | limits: 292 | cpu: "100m" 293 | memory: "50Mi" 294 | securityContext: 295 | privileged: false 296 | capabilities: 297 | add: ["NET_ADMIN"] 298 | env: 299 | - name: POD_NAME 300 | valueFrom: 301 | fieldRef: 302 | fieldPath: metadata.name 303 | - name: POD_NAMESPACE 304 | valueFrom: 305 | fieldRef: 306 | fieldPath: metadata.namespace 307 | volumeMounts: 308 | - name: run 309 | mountPath: /run/flannel 310 | - name: flannel-cfg 311 | mountPath: /etc/kube-flannel/ 312 | volumes: 313 | - name: run 314 | hostPath: 315 | path: /run/flannel 316 | - name: cni 317 | hostPath: 318 | path: /etc/cni/net.d 319 | - name: flannel-cfg 320 | configMap: 321 | name: kube-flannel-cfg 322 | --- 323 | apiVersion: apps/v1 324 | kind: DaemonSet 325 | metadata: 326 | name: kube-flannel-ds-arm 327 | namespace: kube-system 328 | labels: 329 | tier: node 330 | app: flannel 331 | spec: 332 | selector: 333 | matchLabels: 334 | app: flannel 335 | template: 336 | metadata: 337 | labels: 338 | tier: node 339 | app: flannel 340 | spec: 341 | affinity: 342 | nodeAffinity: 343 | requiredDuringSchedulingIgnoredDuringExecution: 344 | nodeSelectorTerms: 345 | - matchExpressions: 346 | - key: beta.kubernetes.io/os 347 | operator: In 348 | values: 349 | - linux 350 | - key: beta.kubernetes.io/arch 351 | operator: In 352 | values: 353 | - arm 354 | hostNetwork: true 355 | tolerations: 356 | - operator: Exists 357 | effect: NoSchedule 358 | serviceAccountName: flannel 359 | initContainers: 360 | - name: install-cni 361 | image: quay.io/coreos/flannel:v0.12.0-arm 362 | command: 363 | - cp 364 | args: 365 | - -f 366 | - /etc/kube-flannel/cni-conf.json 367 | - /etc/cni/net.d/10-flannel.conflist 368 | volumeMounts: 369 | - name: cni 370 | mountPath: /etc/cni/net.d 371 | - name: flannel-cfg 372 | mountPath: /etc/kube-flannel/ 373 | containers: 374 | - name: kube-flannel 375 | image: quay.io/coreos/flannel:v0.12.0-arm 376 | command: 377 | - /opt/bin/flanneld 378 | args: 379 | - --ip-masq 380 | - --kube-subnet-mgr 381 | resources: 382 | requests: 383 | cpu: "100m" 384 | memory: "50Mi" 385 | limits: 386 | cpu: "100m" 387 | memory: "50Mi" 388 | securityContext: 389 | privileged: false 390 | capabilities: 391 | add: ["NET_ADMIN"] 392 | env: 393 | - name: POD_NAME 394 | valueFrom: 395 | fieldRef: 396 | fieldPath: metadata.name 397 | - name: POD_NAMESPACE 398 | valueFrom: 399 | fieldRef: 400 | fieldPath: metadata.namespace 401 | volumeMounts: 402 | - name: run 403 | mountPath: /run/flannel 404 | - name: flannel-cfg 405 | mountPath: /etc/kube-flannel/ 406 | volumes: 407 | - name: run 408 | hostPath: 409 | path: /run/flannel 410 | - name: cni 411 | hostPath: 412 | path: /etc/cni/net.d 413 | - name: flannel-cfg 414 | configMap: 415 | name: kube-flannel-cfg 416 | --- 417 | apiVersion: apps/v1 418 | kind: DaemonSet 419 | metadata: 420 | name: kube-flannel-ds-ppc64le 421 | namespace: kube-system 422 | labels: 423 | tier: node 424 | app: flannel 425 | spec: 426 | selector: 427 | matchLabels: 428 | app: flannel 429 | template: 430 | metadata: 431 | labels: 432 | tier: node 433 | app: flannel 434 | spec: 435 | affinity: 436 | nodeAffinity: 437 | requiredDuringSchedulingIgnoredDuringExecution: 438 | nodeSelectorTerms: 439 | - matchExpressions: 440 | - key: beta.kubernetes.io/os 441 | operator: In 442 | values: 443 | - linux 444 | - key: beta.kubernetes.io/arch 445 | operator: In 446 | values: 447 | - ppc64le 448 | hostNetwork: true 449 | 
tolerations: 450 | - operator: Exists 451 | effect: NoSchedule 452 | serviceAccountName: flannel 453 | initContainers: 454 | - name: install-cni 455 | image: quay.io/coreos/flannel:v0.12.0-ppc64le 456 | command: 457 | - cp 458 | args: 459 | - -f 460 | - /etc/kube-flannel/cni-conf.json 461 | - /etc/cni/net.d/10-flannel.conflist 462 | volumeMounts: 463 | - name: cni 464 | mountPath: /etc/cni/net.d 465 | - name: flannel-cfg 466 | mountPath: /etc/kube-flannel/ 467 | containers: 468 | - name: kube-flannel 469 | image: quay.io/coreos/flannel:v0.12.0-ppc64le 470 | command: 471 | - /opt/bin/flanneld 472 | args: 473 | - --ip-masq 474 | - --kube-subnet-mgr 475 | resources: 476 | requests: 477 | cpu: "100m" 478 | memory: "50Mi" 479 | limits: 480 | cpu: "100m" 481 | memory: "50Mi" 482 | securityContext: 483 | privileged: false 484 | capabilities: 485 | add: ["NET_ADMIN"] 486 | env: 487 | - name: POD_NAME 488 | valueFrom: 489 | fieldRef: 490 | fieldPath: metadata.name 491 | - name: POD_NAMESPACE 492 | valueFrom: 493 | fieldRef: 494 | fieldPath: metadata.namespace 495 | volumeMounts: 496 | - name: run 497 | mountPath: /run/flannel 498 | - name: flannel-cfg 499 | mountPath: /etc/kube-flannel/ 500 | volumes: 501 | - name: run 502 | hostPath: 503 | path: /run/flannel 504 | - name: cni 505 | hostPath: 506 | path: /etc/cni/net.d 507 | - name: flannel-cfg 508 | configMap: 509 | name: kube-flannel-cfg 510 | --- 511 | apiVersion: apps/v1 512 | kind: DaemonSet 513 | metadata: 514 | name: kube-flannel-ds-s390x 515 | namespace: kube-system 516 | labels: 517 | tier: node 518 | app: flannel 519 | spec: 520 | selector: 521 | matchLabels: 522 | app: flannel 523 | template: 524 | metadata: 525 | labels: 526 | tier: node 527 | app: flannel 528 | spec: 529 | affinity: 530 | nodeAffinity: 531 | requiredDuringSchedulingIgnoredDuringExecution: 532 | nodeSelectorTerms: 533 | - matchExpressions: 534 | - key: beta.kubernetes.io/os 535 | operator: In 536 | values: 537 | - linux 538 | - key: beta.kubernetes.io/arch 539 | operator: In 540 | values: 541 | - s390x 542 | hostNetwork: true 543 | tolerations: 544 | - operator: Exists 545 | effect: NoSchedule 546 | serviceAccountName: flannel 547 | initContainers: 548 | - name: install-cni 549 | image: quay.io/coreos/flannel:v0.12.0-s390x 550 | command: 551 | - cp 552 | args: 553 | - -f 554 | - /etc/kube-flannel/cni-conf.json 555 | - /etc/cni/net.d/10-flannel.conflist 556 | volumeMounts: 557 | - name: cni 558 | mountPath: /etc/cni/net.d 559 | - name: flannel-cfg 560 | mountPath: /etc/kube-flannel/ 561 | containers: 562 | - name: kube-flannel 563 | image: quay.io/coreos/flannel:v0.12.0-s390x 564 | command: 565 | - /opt/bin/flanneld 566 | args: 567 | - --ip-masq 568 | - --kube-subnet-mgr 569 | resources: 570 | requests: 571 | cpu: "100m" 572 | memory: "50Mi" 573 | limits: 574 | cpu: "100m" 575 | memory: "50Mi" 576 | securityContext: 577 | privileged: false 578 | capabilities: 579 | add: ["NET_ADMIN"] 580 | env: 581 | - name: POD_NAME 582 | valueFrom: 583 | fieldRef: 584 | fieldPath: metadata.name 585 | - name: POD_NAMESPACE 586 | valueFrom: 587 | fieldRef: 588 | fieldPath: metadata.namespace 589 | volumeMounts: 590 | - name: run 591 | mountPath: /run/flannel 592 | - name: flannel-cfg 593 | mountPath: /etc/kube-flannel/ 594 | volumes: 595 | - name: run 596 | hostPath: 597 | path: /run/flannel 598 | - name: cni 599 | hostPath: 600 | path: /etc/cni/net.d 601 | - name: flannel-cfg 602 | configMap: 603 | name: kube-flannel-cfg 604 | 
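# Note: only the amd64 DaemonSet passes --iface={{ k8s_interface }} (eth1 in group_vars/all), so flanneld binds to the Vagrant private network instead of the default NAT interface.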
-------------------------------------------------------------------------------- /roles/postgresql/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add PostgreSQL repo 3 | yum: 4 | name: 'https://download.postgresql.org/pub/repos/yum/reporpms/EL-7-x86_64/pgdg-redhat-repo-latest.noarch.rpm' 5 | state: present 6 | 7 | - name: Install PostgreSQL 12 and Psycopg2 packages 8 | yum: 9 | name: "{{ psql_packages }}" 10 | state: latest 11 | enablerepo: pgdg12 12 | 13 | - name: Initialize PostgreSQL 14 | command: /usr/pgsql-12/bin/postgresql-12-setup initdb 15 | ignore_errors: yes 16 | 17 | - name: Copy pg_hba.conf file 18 | template: src=pg_hba.conf dest=/var/lib/pgsql/12/data/pg_hba.conf 19 | 20 | - name: Start PostgreSQL-12 Service 21 | service: name=postgresql-12 state=started enabled=yes 22 | 23 | - name: Add sonar user to database 24 | become: true 25 | become_user: postgres 26 | postgresql_user: 27 | name: sonar 28 | password: sonar 29 | role_attr_flags: CREATEDB 30 | 31 | - name: Create sonar database 32 | become: true 33 | become_user: postgres 34 | postgresql_db: 35 | name: sonar 36 | encoding: UTF-8 37 | owner: sonar -------------------------------------------------------------------------------- /roles/postgresql/templates/pg_hba.conf: -------------------------------------------------------------------------------- 1 | # TYPE DATABASE USER ADDRESS METHOD 2 | 3 | # "local" is for Unix domain socket connections only 4 | local all all trust 5 | # IPv4 local connections: 6 | host all all 127.0.0.1/32 md5 7 | # IPv6 local connections: 8 | host all all ::1/128 md5 9 | # Allow replication connections from localhost, by a user with the 10 | # replication privilege. 11 | local replication all trust 12 | host replication all 127.0.0.1/32 md5 13 | host replication all ::1/128 md5 -------------------------------------------------------------------------------- /roles/sonarqube/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Make sure that unzip package is installed 3 | yum: name=unzip state=present 4 | 5 | - name: Download and extract SonarQube 8.1 6 | unarchive: 7 | src: https://binaries.sonarsource.com/Distribution/sonarqube/sonarqube-{{ sonarqube_version }}.zip 8 | dest: /opt 9 | remote_src: yes 10 | 11 | - name: Change sonarqube directory name 12 | command: mv /opt/sonarqube-{{ sonarqube_version }} /opt/sonarqube 13 | 14 | - name: Create sonar user 15 | user: 16 | name: sonar 17 | 18 | - name: Change ownership of /opt/sonarqube 19 | file: 20 | path: /opt/sonarqube 21 | state: directory 22 | recurse: yes 23 | owner: sonar 24 | group: sonar 25 | 26 | - name: Create /var/sonarqube/temp 27 | file: 28 | path: /var/sonarqube/temp 29 | state: directory 30 | owner: sonar 31 | group: sonar 32 | mode: '0755' 33 | 34 | - name: Create /var/sonarqube/data 35 | file: 36 | path: /var/sonarqube/data 37 | state: directory 38 | owner: sonar 39 | group: sonar 40 | mode: '0755' 41 | 42 | - name: Copy sonarqube.properties file 43 | template: src=sonar.properties dest=/opt/sonarqube/conf/sonar.properties 44 | 45 | - name: Copy sonarqube.service file 46 | template: src=sonarqube.service dest=/etc/systemd/system/sonarqube.service 47 | 48 | - name: Change sysctl entry for vm.max_map_count 49 | command: sysctl -w vm.max_map_count=262144 50 | 51 | - name: Reload daemon 52 | systemd: 53 | daemon_reload: yes 54 | 55 | - name: Start SonarQube service 56 | service: 57 | name: sonarqube 58 | 
state: started 59 | 60 | # - firewalld: 61 | # port: 9000/tcp 62 | # permanent: yes 63 | # state: enabled 64 | 65 | - name: Install Sonar Scanner 66 | unarchive: 67 | src: https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-{{ sonar_scanner_cli_version }}-linux.zip 68 | dest: /opt/sonarqube 69 | remote_src: yes 70 | 71 | - name: Link binaries to /usr/bin 72 | file: 73 | src: /opt/sonarqube/sonar-scanner-{{ sonar_scanner_cli_version }}-linux/bin/{{ item }} 74 | dest: /usr/bin/{{ item }} 75 | state: link 76 | force: yes 77 | with_items: 78 | - sonar-scanner 79 | - sonar-scanner-debug -------------------------------------------------------------------------------- /roles/sonarqube/templates/sonar.properties: -------------------------------------------------------------------------------- 1 | 2 | sonar.jdbc.username=sonar 3 | sonar.jdbc.password=sonar 4 | sonar.jdbc.url=jdbc:postgresql://localhost:5432/sonar 5 | sonar.web.host=0.0.0.0 6 | sonar.web.context=/sonarqube 7 | sonar.web.port= {{ sonarqube_port }} 8 | sonar.path.data=/var/sonarqube/data 9 | sonar.path.temp=/var/sonarqube/temp -------------------------------------------------------------------------------- /roles/sonarqube/templates/sonarqube.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=SonarQube service 3 | After=syslog.target network.target 4 | 5 | [Service] 6 | Type=forking 7 | ExecStart=/opt/sonarqube/bin/linux-x86-64/sonar.sh start 8 | ExecStop=/opt/sonarqube/bin/linux-x86-64/sonar.sh stop 9 | LimitNOFILE=65536 10 | LimitNPROC=4096 11 | User=sonar 12 | Group=sonar 13 | Restart=on-failure 14 | 15 | [Install] 16 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /scratch.yml: -------------------------------------------------------------------------------- 1 | --- 2 | #First of all configure VM's 3 | - name: Boot Infra 4 | hosts: odin 5 | remote_user: vagrant 6 | become: yes 7 | 8 | roles: 9 | - common 10 | 11 | #Only master machine uses Jenkins and SonarQube 12 | - name: 13 | hosts: master 14 | remote_user: vagrant 15 | become: yes 16 | 17 | roles: 18 | - java 19 | - postgresql 20 | - sonarqube 21 | - jenkins 22 | 23 | #Use all virtual machine. Make sure that you configured hosts file for kubernetes_role. 
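#kubernetes_role (master or node, set per host in the hosts file) controls which tasks run: the kubernetes role includes master-setup.yml or node-setup.yml accordingly, and the helm/monitoring tasks only run on the master.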
24 | - name: Kubernetes 25 | hosts: odin 26 | remote_user: vagrant 27 | become: yes 28 | 29 | roles: 30 | - docker 31 | - kubernetes 32 | - helm -------------------------------------------------------------------------------- /src/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "net/http" 6 | ) 7 | 8 | type Server struct{} 9 | 10 | func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { 11 | // Headers must be set before WriteHeader, otherwise they are ignored. 12 | w.Header().Set("Content-Type", "application/json") 13 | w.WriteHeader(http.StatusOK) 14 | w.Write([]byte(`{"message": "Hello World!"}`)) 15 | } 16 | 17 | func main() { 18 | s := &Server{} 19 | http.Handle("/", s) 20 | log.Fatal(http.ListenAndServe(":80", nil)) 21 | } -------------------------------------------------------------------------------- /src/main_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "io/ioutil" 5 | "net/http" 6 | "net/http/httptest" 7 | "testing" 8 | ) 9 | 10 | func TestServeHTTP(t *testing.T) { 11 | handler := &Server{} 12 | server := httptest.NewServer(handler) 13 | defer server.Close() 14 | 15 | resp, err := http.Get(server.URL) 16 | if err != nil { 17 | t.Fatal(err) 18 | } 19 | if resp.StatusCode != 200 { 20 | t.Fatalf("Received non-200 response: %d\n", resp.StatusCode) 21 | } 22 | expected := `{"message": "Hello World!"}` 23 | actual, err := ioutil.ReadAll(resp.Body) 24 | if err != nil { 25 | t.Fatal(err) 26 | } 27 | if expected != string(actual) { 28 | t.Errorf("Expected the message '%s' but got '%s'\n", expected, actual) 29 | } 30 | } --------------------------------------------------------------------------------