├── .gitignore
├── vendor
│   ├── github.com
│   │   ├── DATA-DOG
│   │   │   └── godog
│   │   │       ├── .gitignore
│   │   │       ├── logo.png
│   │   │       ├── gherkin
│   │   │       │   ├── README.md
│   │   │       │   ├── LICENSE
│   │   │       │   └── dialect.go
│   │   │       ├── colors
│   │   │       │   ├── ansi_others.go
│   │   │       │   ├── colors.go
│   │   │       │   ├── no_colors.go
│   │   │       │   └── writer.go
│   │   │       ├── .travis.yml
│   │   │       ├── utils.go
│   │   │       ├── ast.go
│   │   │       ├── gherkin.go
│   │   │       ├── Makefile
│   │   │       ├── LICENSE
│   │   │       ├── godog.go
│   │   │       └── options.go
│   │   ├── ghodss
│   │   │   └── yaml
│   │   │       ├── .travis.yml
│   │   │       └── .gitignore
│   │   ├── stretchr
│   │   │   └── testify
│   │   │       ├── require
│   │   │       │   ├── require_forward.go.tmpl
│   │   │       │   ├── require.go.tmpl
│   │   │       │   ├── forward_requirements.go
│   │   │       │   ├── doc.go
│   │   │       │   └── requirements.go
│   │   │       ├── assert
│   │   │       │   ├── assertion_format.go.tmpl
│   │   │       │   ├── assertion_forward.go.tmpl
│   │   │       │   ├── errors.go
│   │   │       │   ├── forward_assertions.go
│   │   │       │   └── doc.go
│   │   │       └── LICENSE
│   │   ├── davecgh
│   │   │   └── go-spew
│   │   │       ├── LICENSE
│   │   │       └── spew
│   │   │           └── bypasssafe.go
│   │   └── pmezard
│   │       └── go-difflib
│   │           └── LICENSE
│   └── gopkg.in
│       └── yaml.v2
│           ├── go.mod
│           ├── .travis.yml
│           ├── NOTICE
│           ├── writerc.go
│           └── LICENSE.libyaml
├── executor
│   ├── ansible
│   │   ├── roles
│   │   │   └── k8s-local-pv
│   │   │       ├── defaults
│   │   │       │   └── main.yml
│   │   │       └── tasks
│   │   │           └── main.yml
│   │   ├── inventory
│   │   │   ├── hosts
│   │   │   ├── host_vars
│   │   │   │   └── localhost.yml
│   │   │   └── group_vars
│   │   │       └── all.yml
│   │   ├── provider
│   │   │   ├── local-pv
│   │   │   │   ├── templates
│   │   │   │   │   ├── storage_class.yaml
│   │   │   │   │   └── pv.yaml
│   │   │   │   ├── setup-local-pv.yaml
│   │   │   │   └── README.md
│   │   │   └── README.md
│   │   ├── plugins
│   │   │   └── callback
│   │   │       └── README.md
│   │   ├── utils
│   │   │   ├── getFiles.yaml
│   │   │   └── runTest.yaml
│   │   └── litmus_playbook.yml
│   └── README.md
├── apps
│   ├── fio
│   │   └── tests
│   │       ├── performance
│   │       │   ├── test_vars.yml
│   │       │   ├── fio.yml
│   │       │   ├── test_cleanup.yml
│   │       │   └── run_litmus_test.yml
│   │       └── data-integrity
│   │           ├── test_vars.yml
│   │           ├── fio-read.yml
│   │           ├── fio-write.yml
│   │           └── run_litmus_test.yml
│   ├── percona
│   │   ├── liveness
│   │   │   ├── db-cred.cnf
│   │   │   └── run_litmus_test.yml
│   │   ├── deployers
│   │   │   ├── test_vars.yml
│   │   │   ├── run_litmus_test.yml
│   │   │   └── percona.yml
│   │   ├── workload
│   │   │   ├── tpcc.conf
│   │   │   ├── test_vars.yml
│   │   │   ├── tpcc_bench.yml
│   │   │   └── run_litmus_test.yml
│   │   ├── tests
│   │   │   ├── mysql_storage_benchmark
│   │   │   │   ├── tpcc.conf
│   │   │   │   ├── test_vars.yml
│   │   │   │   ├── test_cleanup.yml
│   │   │   │   └── run_litmus_test.yml
│   │   │   └── mysql_data_persistence
│   │   │       ├── test_vars.yml
│   │   │       ├── test_cleanup.yml
│   │   │       ├── README.md
│   │   │       ├── mysql.yml
│   │   │       └── run_litmus_test.yml
│   │   ├── chaos
│   │   │   ├── openebs_target_failure
│   │   │   │   ├── chaosutil.j2
│   │   │   │   ├── test_vars.yml
│   │   │   │   └── test_prerequisites.yml
│   │   │   ├── openebs_volume_replica_failure
│   │   │   │   ├── chaosutil.j2
│   │   │   │   ├── test_vars.yml
│   │   │   │   └── test_prerequisites.yml
│   │   │   ├── openebs_replica_network_delay
│   │   │   │   ├── chaosutil.j2
│   │   │   │   ├── test_vars.yml
│   │   │   │   └── test_prerequisites.yml
│   │   │   └── openebs_target_network_delay
│   │   │       ├── chaosutil.j2
│   │   │       ├── test_vars.yml
│   │   │       └── test_prerequisites.yml
│   │   └── functional
│   │       └── snapshot
│   │           ├── test_vars.yml
│   │           └── run_litmus_test.yml
│   ├── memleak
│   │   └── tests
│   │       ├── test_vars.yml
│   │       ├── memleak.yml
│   │       ├── run_litmus_test.yml
│   │       └── test-mem.py
│   ├── crunchy-postgres
│   │   ├── deployers
│   │   │   ├── test_vars.yml
│   │   │   └── run_litmus_test.yml
│   │   ├── workload
│   │   │   ├── test_vars.yml
│   │   │   ├── crunchy_postgres_loadgen.yml
│   │   │   └── run_litmus_test.yml
│   │   └── liveness
│   │       └── run_litmus_test.yml
│   ├── jenkins
│   │   ├── deployers
│   │   │   ├── test_vars.yml
│   │   │   ├── jenkins.yml
│   │   │   └── run_litmus_test.yml
│   │   ├── upgrade
│   │   │   ├── test_vars.yml
│   │   │   └── run_litmus_test.yml
│   │   └── job-simulator
│   │       └── run_litmus_test.yml
│   ├── jupyter
│   │   └── deployers
│   │       ├── test_vars.yml
│   │       ├── run_litmus_test.yml
│   │       └── jupyter_openebs.yml
│   ├── mongodb
│   │   ├── deployers
│   │   │   ├── test_vars.yml
│   │   │   ├── run_litmus_test.yml
│   │   │   └── mongo_statefulset.yml
│   │   ├── workload
│   │   │   ├── test_vars.yml
│   │   │   ├── mongo_loadgen.yml
│   │   │   └── run_litmus_test.yml
│   │   └── liveness
│   │       └── run_litmus_test.yml
│   ├── redis
│   │   └── deployers
│   │       ├── test_vars.yml
│   │       └── run_litmus_test.yml
│   ├── cassandra
│   │   ├── deployers
│   │   │   ├── test_vars.yml
│   │   │   └── run_litmus_test.yml
│   │   ├── workload
│   │   │   ├── test_vars.yml
│   │   │   ├── cassandra_loadgen.yml
│   │   │   └── run_litmus_test.yml
│   │   ├── functional
│   │   │   └── scale_replicas
│   │   │       ├── test_vars.yml
│   │   │       └── run_litmus_test.yml
│   │   └── liveness
│   │       └── run_litmus_test.yml
│   ├── cockroachdb
│   │   ├── deployers
│   │   │   ├── test_vars.yml
│   │   │   └── run_litmus_test.yml
│   │   └── workload
│   │       ├── test_vars.yml
│   │       ├── cockroachdb_loadgen.yml
│   │       └── run_litmus_test.yml
│   └── minio
│       └── tests
│           └── deploy_minio
│               ├── e2e.feature
│               └── README.md
├── k8s
│   ├── aws
│   │   ├── ebs-volumes
│   │   │   ├── vars.yml
│   │   │   └── README.md
│   │   └── k8s-installer
│   │       ├── vars.yml
│   │       ├── delete-aws-cluster.yml
│   │       └── README.md
│   ├── gcp
│   │   ├── k8s-installer
│   │   │   ├── check-cluster-availability.yml
│   │   │   ├── create-vpc.yml
│   │   │   ├── delete-vpc.yml
│   │   │   ├── README.md
│   │   │   └── delete-k8s-cluster.yml
│   │   └── gpd-disks
│   │       ├── README.md
│   │       └── delete-gpd.yml
│   ├── eks
│   │   └── k8s-installer
│   │       ├── vars.yml
│   │       ├── delete-eks-cluster.yml
│   │       └── README.md
│   ├── utils
│   │   ├── name_generator
│   │   │   └── test_namesgenerator.py
│   │   └── health
│   │       └── test_health.py
│   ├── gke
│   │   └── k8s-installer
│   │       ├── vars.yml
│   │       ├── delete-gke-cluster.yml
│   │       └── README.md
│   ├── packet
│   │   ├── k8s-installer
│   │   │   ├── vars.yml
│   │   │   ├── pre_requisite.sh
│   │   │   └── delete_packet_cluster.yml
│   │   └── packet-storage
│   │       ├── vars.yml
│   │       └── README.md
│   └── azure
│       └── k8s-installer
│           ├── README.md
│           ├── delete-k8s-cluster.yml
│           └── create-k8s-cluster.yml
├── hack
│   ├── godog.sh
│   ├── litmus-result.j2
│   ├── litmus-prerequisites.sh
│   ├── push
│   └── rbac.yaml
├── providers
│   └── openebs
│       └── installers
│           ├── storageclass
│           │   ├── 0.6
│           │   │   ├── ansible
│           │   │   │   ├── vars.yaml
│           │   │   │   ├── sp.j2
│           │   │   │   └── sc.j2
│           │   │   └── litmusbook
│           │   │       ├── storageclass_setup.yaml
│           │   │       └── storageclass_cleanup.yaml
│           │   └── 0.7
│           │       ├── ansible
│           │       │   ├── vars.yaml
│           │       │   └── storageclass-jiva.j2
│           │       └── litmusbook
│           │           └── storageclass_cleanup.yaml
│           └── operator
│               ├── 0.6
│               │   ├── ansible
│               │   │   └── vars.yaml
│               │   └── litmusbook
│               │       ├── openebs_cleanup.yaml
│               │       └── openebs_setup.yaml
│               └── master
│                   ├── ansible
│                   │   └── vars.yaml
│                   └── litmusbook
│                       └── openebs_cleanup.yaml
├── .travis.yml
├── tools
│   ├── README.md
│   ├── godog-runner
│   │   └── Dockerfile
│   └── ansible-runner
│       └── Dockerfile
├── .github
│   ├── PULL_REQUEST_TEMPLATE.md
│   └── ISSUE_TEMPLATE.md
├── NOTICE.md
├── common
│   └── utils
│       ├── deploy_application.yml
│       ├── check_deployment_status.yml
│       ├── check_statefulset_status.yml
│       ├── update_litmus_result_resource.yml
│       └── scale_statefulset_replicas.yml
├── chaoslib
│   ├── chaoskube
│   │   ├── rbac.yaml
│   │   └── chaoskube.yaml
│   ├── kubectl
│   │   ├── pod_evict_by_taint.yaml
│   │   └── cordon_drain_node.yaml
│   ├── pumba
│   │   └── pumba_kube.yaml
│   └── openebs
│       ├── openebs_cstor_target_failure.yaml
│       ├── jiva_replica_pod_failure.yaml
│       └── jiva_controller_pod_failure.yaml
├── Gopkg.toml
├── docs
│   ├── running_test_suite.md
│   └── litmus_deep_dive.md
├── pkg
│   ├── time
│   │   └── time.go
│   └── exec
│       └── exec.go
└── funclib
    └── kubectl
        ├── scale_replicas.yml
        └── app_upgrade.yml

/.gitignore:
--------------------------------------------------------------------------------
.idea
--------------------------------------------------------------------------------
/vendor/github.com/DATA-DOG/godog/.gitignore:
--------------------------------------------------------------------------------
/cmd/godog/godog
/example/example
--------------------------------------------------------------------------------
/executor/ansible/roles/k8s-local-pv/defaults/main.yml:
--------------------------------------------------------------------------------
---
local_storageclass_yaml: local-storage-sc.yaml
--------------------------------------------------------------------------------
/vendor/github.com/DATA-DOG/godog/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Anil-matcha/litmus/master/vendor/github.com/DATA-DOG/godog/logo.png
--------------------------------------------------------------------------------
/vendor/github.com/ghodss/yaml/.travis.yml:
--------------------------------------------------------------------------------
language: go
go:
  - 1.3
  - 1.4
script:
  - go test
  - go build
--------------------------------------------------------------------------------
/apps/fio/tests/performance/test_vars.yml:
--------------------------------------------------------------------------------
---
## TEST-SPECIFIC PARAMS

test_name: fio-benchmark
pod_yaml_alias: fio.yml
--------------------------------------------------------------------------------
/apps/percona/liveness/db-cred.cnf:
--------------------------------------------------------------------------------
{
  "db_server_ip": "10.105.68.234",
  "db_user": "root",
  "db_password": "k8sDem0"
}
--------------------------------------------------------------------------------
/executor/ansible/inventory/hosts:
--------------------------------------------------------------------------------
localhost ansible_connection=local
ansible_become_pass="{{ lookup('env','LOCAL_USER_PASSWORD') }}"
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/go.mod:
--------------------------------------------------------------------------------
module "gopkg.in/yaml.v2"

require (
  "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
)
--------------------------------------------------------------------------------
/executor/ansible/inventory/host_vars/localhost.yml:
--------------------------------------------------------------------------------
---
ansible_connection: local
ansible_become_pass: "{{ lookup('env','LOCAL_USER_PASSWORD') }}"
--------------------------------------------------------------------------------
/k8s/aws/ebs-volumes/vars.yml:
--------------------------------------------------------------------------------
---
zone: eu-west-2a
region: eu-west-2
volume_size: 50
volume_type: gp2
device_name: /dev/xvdb
mount_path: /mnt/openebs
--------------------------------------------------------------------------------
/apps/memleak/tests/test_vars.yml:
--------------------------------------------------------------------------------
---
## TEST-SPECIFIC PARAMS

test_name: memleak-test
memleak_yml: memleak.yml
app_ns: "{{ lookup('env','APP_NAMESPACE') }}"
--------------------------------------------------------------------------------
/hack/godog.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env sh

set -o errexit
set -o nounset

CURDIR=`pwd`

cd "$1" && godog --stop-on-failure e2e.feature

cd ${CURDIR}
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/.travis.yml:
--------------------------------------------------------------------------------
language: go

go:
  - 1.4
  - 1.5
  - 1.6
  - 1.7
  - 1.8
  - 1.9
  - tip

go_import_path: gopkg.in/yaml.v2
--------------------------------------------------------------------------------
/apps/crunchy-postgres/deployers/test_vars.yml:
--------------------------------------------------------------------------------
pg_statefulset: postgres_statefulset.yml
app_ns: "{{ lookup('env','APP_NAMESPACE') }}"
app_label: "{{ lookup('env','APP_LABEL') }}"
test_name: postgres-deployment
--------------------------------------------------------------------------------
/k8s/gcp/k8s-installer/check-cluster-availability.yml:
--------------------------------------------------------------------------------
- hosts: localhost
  tasks:
    - name: Checking Cluster availability
      shell: python ../../utils/health/cluster_health_check.py -n {{ nodes | int + 1 }}
--------------------------------------------------------------------------------
/apps/fio/tests/data-integrity/test_vars.yml:
--------------------------------------------------------------------------------
---
## TEST-SPECIFIC PARAMS

test_name: fio-data-integrity
fio_write_yml: fio-write.yml
fio_read_yml: fio-read.yml
app_ns: "{{ lookup('env','FIO_NAMESPACE') }}"
--------------------------------------------------------------------------------
/apps/jenkins/deployers/test_vars.yml:
--------------------------------------------------------------------------------
# Test-specific parameters
test_name: jenkins-deployment
app_ns: "{{ lookup('env','APP_NAMESPACE') }}"
app_label: "{{ lookup('env','APP_LABEL') }}"
jenkins_deployment: jenkins.yml
--------------------------------------------------------------------------------
/providers/openebs/installers/storageclass/0.6/ansible/vars.yaml:
--------------------------------------------------------------------------------
kubeapply: kubectl
storage_pool: sp.yaml
storage_class: sc.yaml
installation_test_name: storageclassinstallerv0.6
cleanup_test_name: storageclasscleanupv0.6
--------------------------------------------------------------------------------
/apps/percona/deployers/test_vars.yml:
--------------------------------------------------------------------------------
# Test-specific parameters

percona_deployment: percona.yml
app_ns: "{{ lookup('env','APP_NAMESPACE') }}"
app_label: "{{ lookup('env','APP_LABEL') }}"
test_name: percona-deployment
--------------------------------------------------------------------------------
/apps/percona/workload/tpcc.conf:
--------------------------------------------------------------------------------
{
  "db_user": "testuser",
  "db_password": "password",
  "warehouses": "1",
  "connections": "18",
  "warmup_period": "10",
  "run_duration": "60",
  "interval": "10"
}
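
Editor's note: the tpcc.conf above is consumed by the tpcc-bench load-generation job (tpcc_bench.yml, later in this listing) through a ConfigMap mounted at /tpcc-mysql/tpcc.conf. A minimal sketch of how such a ConfigMap could be declared; the name tpcc-config matches the volume reference in tpcc_bench.yml, while the namespace is an assumption:

```yaml
# Hypothetical ConfigMap wrapping tpcc.conf; tpcc_bench.yml mounts a
# ConfigMap named "tpcc-config" at /tpcc-mysql/tpcc.conf via subPath.
apiVersion: v1
kind: ConfigMap
metadata:
  name: tpcc-config
  namespace: litmus        # assumption: the namespace the test job runs in
data:
  tpcc.conf: |
    {
      "db_user": "testuser",
      "db_password": "password",
      "warehouses": "1",
      "connections": "18",
      "warmup_period": "10",
      "run_duration": "60",
      "interval": "10"
    }
```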
--------------------------------------------------------------------------------
/apps/jupyter/deployers/test_vars.yml:
--------------------------------------------------------------------------------
# Test-specific parameters

jupyter_deployment: jupyter_openebs.yml
app_ns: "{{ lookup('env','APP_NAMESPACE') }}"
app_label: "{{ lookup('env','APP_LABEL') }}"
test_name: jupyter-deployment
--------------------------------------------------------------------------------
/apps/mongodb/deployers/test_vars.yml:
--------------------------------------------------------------------------------
# Test-specific parameters

mongodb_deployment: mongo_statefulset.yml
app_ns: "{{ lookup('env','APP_NAMESPACE') }}"
app_label: "{{ lookup('env','APP_LABEL') }}"
test_name: mongodb-deployment
--------------------------------------------------------------------------------
/executor/ansible/provider/local-pv/templates/storage_class.yaml:
--------------------------------------------------------------------------------
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
--------------------------------------------------------------------------------
/providers/openebs/installers/storageclass/0.7/ansible/vars.yaml:
--------------------------------------------------------------------------------
kubeapply: kubectl
storage_class_jiva: sc_jiva.yaml
storage_class_cstor: sc_cstor.yaml
installation_test_name: storageclassinstallerv0.7
cleanup_test_name: storageclasscleanupv0.7
--------------------------------------------------------------------------------
/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl:
--------------------------------------------------------------------------------
{{.CommentWithoutT "a"}}
func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) {
    if h, ok := a.t.(tHelper); ok { h.Helper() }
    {{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
}
--------------------------------------------------------------------------------
/apps/percona/tests/mysql_storage_benchmark/tpcc.conf:
--------------------------------------------------------------------------------
{
  "db_user": "root",
  "db_password": "k8sDem0",
  "warehouses": "1",
  "connections": "16",
  "warmup_period": "10",
  "run_duration": "120",
  "interval": "2"
}
--------------------------------------------------------------------------------
/apps/redis/deployers/test_vars.yml:
--------------------------------------------------------------------------------
test_name: redis-deployment
redis_deployment: redis_statefulset.yml
app_ns: "{{ lookup('env','APP_NAMESPACE') }}"
app_label: "{{ lookup('env','APP_LABEL') }}"
app_replica: "{{ lookup('env','APP_REPLICA') }}"
--------------------------------------------------------------------------------
/providers/openebs/installers/storageclass/0.6/ansible/sp.j2:
--------------------------------------------------------------------------------
---
apiVersion: openebs.io/v1alpha1
kind: StoragePool
metadata:
  name: {{ lookup('env','STORAGE_POOL_NAME') }}
  type: hostdir
spec:
  path: "{{ lookup('env','STORAGE_PATH') }}"
--------------------------------------------------------------------------------
/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl:
--------------------------------------------------------------------------------
{{.CommentFormat}}
func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool {
    if h, ok := t.(tHelper); ok { h.Helper() }
    return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}})
}
--------------------------------------------------------------------------------
/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl:
--------------------------------------------------------------------------------
{{.CommentWithoutT "a"}}
func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool {
    if h, ok := a.t.(tHelper); ok { h.Helper() }
    return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
}
--------------------------------------------------------------------------------
/vendor/github.com/stretchr/testify/require/require.go.tmpl:
--------------------------------------------------------------------------------
{{.Comment}}
func {{.DocInfo.Name}}(t TestingT, {{.Params}}) {
    if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return }
    if h, ok := t.(tHelper); ok { h.Helper() }
    t.FailNow()
}
--------------------------------------------------------------------------------
/apps/cassandra/deployers/test_vars.yml:
--------------------------------------------------------------------------------
cassandra_deployment: cassandra_statefulset.yml
app_ns: "{{ lookup('env','APP_NAMESPACE') }}"
app_label: "{{ lookup('env','APP_LABEL') }}"
test_name: cassandra-deployment
app_replica: "{{ lookup('env','APP_REPLICA') }}"
--------------------------------------------------------------------------------
/executor/ansible/provider/local-pv/setup-local-pv.yaml:
--------------------------------------------------------------------------------
---
- hosts: kubernetes-kubemasters
  roles:
    - role: k8s-local-pv
      template_path: "{{ playbook_dir }}/templates"
      local_storage_class: storage_class.yaml
      local_pv: pv.yaml
--------------------------------------------------------------------------------
/vendor/github.com/DATA-DOG/godog/gherkin/README.md:
--------------------------------------------------------------------------------
[![Build Status](https://secure.travis-ci.org/cucumber/gherkin-go.svg)](http://travis-ci.org/cucumber/gherkin-go)

Gherkin parser/compiler for Go. Please see [Gherkin](https://github.com/cucumber/gherkin) for details.
--------------------------------------------------------------------------------
/k8s/eks/k8s-installer/vars.yml:
--------------------------------------------------------------------------------
---
initial_node_count: 3
disk_size_gb: 80
region: us-west-2 # currently EKS is mostly available in the us-west-2 region
ssh_user: ec2-user # default user for Amazon Linux images (AMIs)
node_type: m5.large
nodes_min: 3
nodes_max: 5
--------------------------------------------------------------------------------
/apps/mongodb/workload/test_vars.yml:
--------------------------------------------------------------------------------
mongodb_loadgen: mongo_loadgen.yml

namespace: "{{ lookup('env','APP_NAMESPACE') }}"

test_name: mongodb-loadgen

loadgen_label: "{{ lookup('env','LOADGEN_LABEL') }}"

app_label: "{{ lookup('env','APP_LABEL') }}"
--------------------------------------------------------------------------------
/k8s/utils/name_generator/test_namesgenerator.py:
--------------------------------------------------------------------------------
import namesgenerator
import unittest

class MyTest(unittest.TestCase):
    def test_namesgenerator(self):
        self.assertEqual(type(namesgenerator.get_random_name()), str)

if __name__ == "__main__":
    unittest.main()
--------------------------------------------------------------------------------
/apps/cassandra/workload/test_vars.yml:
--------------------------------------------------------------------------------
cassandra_loadgen: cassandra_loadgen.yml
namespace: "{{ lookup('env','LOADGEN_NS') }}"
test_name: cassandra-loadgen
loadgen_label: "{{ lookup('env','LOADGEN_LABEL') }}"
io_minutes: 3
application_label: "{{ lookup('env','APPLICATION_LABEL') }}"
--------------------------------------------------------------------------------
/apps/percona/tests/mysql_data_persistence/test_vars.yml:
--------------------------------------------------------------------------------
---
## TEST-SPECIFIC PARAMS

test_name: mysql-data-persistence
pod_yaml_alias: mysql.yml
supported_chaos_types:
  - APP_POD_EVICT/KUBECTL
  - APP_NODE_DRAIN/KUBECTL
  - APP_POD_KILL/PUMBA
--------------------------------------------------------------------------------
/executor/ansible/plugins/callback/README.md:
--------------------------------------------------------------------------------
# Ansible Plugins
-----------------

Contains custom plugins that can be integrated into Ansible. Currently holds the callback plugin, which has been modified
to hide/suppress certain messages during failed retries in a loop-execution.
--------------------------------------------------------------------------------
/k8s/aws/k8s-installer/vars.yml:
--------------------------------------------------------------------------------
---
region: eu-west-2
available_zone: eu-west-2a
cidr_block: 10.0.0.0/16
cidr_block2: 10.0.1.0/24
destination_cidr: 0.0.0.0/0
image: ami-6b3fd60c
master_size: t2.xlarge
node_size: t2.xlarge
networking: kube-router
node_count: 3
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
dist: trusty
sudo: required
install: true
language: go
go:
  - 1.9.1
env:
  global:
    - GOARCH=amd64
before_install:
  - sleep 15
  - sudo apt-get install -y
  - sudo apt-get install -y curl
script:
  - make godeps
  - make
--------------------------------------------------------------------------------
/apps/cockroachdb/deployers/test_vars.yml:
--------------------------------------------------------------------------------
---
cockroachdb_deployment: cockroachdb_sts.yml

cockroachdb_svc: cockroachdb_svc.yml

app_ns: "{{ lookup('env','APP_NAMESPACE') }}"

app_label: "{{ lookup('env','APP_LABEL') }}"

test_name: cockroachdb-deployment
--------------------------------------------------------------------------------
/k8s/gke/k8s-installer/vars.yml:
--------------------------------------------------------------------------------
---
user: "{{ lookup('env','GKEUSER') }}"
initial_node_count: 3
machine_type: n1-standard-2
disk_size_gb: 80
image_type: Ubuntu
zone: us-central1-a
project: openebs-ci
auth_kind: serviceaccount
ssh_key_fn: gke_ssh_key
ssh_user: gkeuser
--------------------------------------------------------------------------------
/apps/cockroachdb/workload/test_vars.yml:
--------------------------------------------------------------------------------
---
test_name: cockroachdb-load-generation

namespace: "{{ lookup('env','APP_NS') }}"

cockroachdb_loadgen: cockroachdb_loadgen.yml

app_label: "{{ lookup('env','APP_LABEL') }}"

loadgen_label: "{{ lookup('env','LOADGEN_LABEL') }}"
--------------------------------------------------------------------------------
/apps/jenkins/upgrade/test_vars.yml:
--------------------------------------------------------------------------------
test_name: jenkins-upgrade-test
app_namespace: "{{ lookup('env','APP_NAMESPACE') }}"
label: "{{ lookup('env','APP_LABEL') }}"
funcutil: kubectl/app_upgrade.yml
deploy_type: "{{ lookup('env','DEPLOY_TYPE') }}"
image_name: "{{ lookup('env','IMAGE_NAME') }}"
--------------------------------------------------------------------------------
/apps/crunchy-postgres/workload/test_vars.yml:
--------------------------------------------------------------------------------
---
crunchy_loadgen: crunchy_postgres_loadgen.yml

namespace: "{{ lookup('env','APP_NAMESPACE') }}"

test_name: crunchy-loadgen

loadgen_label: "{{ lookup('env','LOADGEN_LABEL') }}"

app_label: "{{ lookup('env','APP_LABEL') }}"
--------------------------------------------------------------------------------
/apps/cassandra/functional/scale_replicas/test_vars.yml:
--------------------------------------------------------------------------------
---

test_name: scale-app-replicas

app_ns: "{{ lookup('env','APP_NAMESPACE') }}"

app_label: "{{ lookup('env','APP_LABEL') }}"

app_replica_count: "{{ lookup('env','REPLICA_COUNT') }}"

deploy_type: "{{ lookup('env','DEPLOY_TYPE') }}"
--------------------------------------------------------------------------------
/k8s/packet/k8s-installer/vars.yml:
--------------------------------------------------------------------------------
---
project_id: bdbd5cf0-7453-434b-99f8-0c3e3f83dbe2
workers_count: 3
master_count: 1 # kubeadm supports a single master only
location: ams1
os: ubuntu_16_04
network_cidr: 10.1.0.0/16
master_config: t1.small.x86
workers_config: t1.small.x86
default_k8s_version: 1.10.0-00
--------------------------------------------------------------------------------
/tools/README.md:
--------------------------------------------------------------------------------
As much as possible, Litmus reuses standard containers. However, in certain cases, the workloads may need to be instrumented with additional tooling to help with simulating different test conditions. This folder holds the extensions or helper containers, which will be used by Litmus for running storage performance benchmark tests.
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
**What this PR does / why we need it**:

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #

**Special notes for your reviewer**:
--------------------------------------------------------------------------------
/apps/percona/chaos/openebs_target_failure/chaosutil.j2:
--------------------------------------------------------------------------------
{% if stg_prov is defined and stg_prov == 'openebs.io/provisioner-iscsi' %}
{% if stg_engine is defined and stg_engine == 'cstor' %}
chaosutil: openebs/openebs_cstor_target_failure.yaml
{% else %}
chaosutil: openebs/jiva_controller_pod_failure.yaml
{% endif %}
{% endif %}
--------------------------------------------------------------------------------
/apps/percona/chaos/openebs_volume_replica_failure/chaosutil.j2:
--------------------------------------------------------------------------------
{% if stg_prov is defined and stg_prov == 'openebs.io/provisioner-iscsi' %}
{% if stg_engine is defined and stg_engine == 'cstor' %}
chaosutil: openebs/cstor_pool_deploy_failure.yaml
{% else %}
chaosutil: openebs/jiva_replica_pod_failure.yaml
{% endif %}
{% endif %}
--------------------------------------------------------------------------------
/apps/percona/tests/mysql_storage_benchmark/test_vars.yml:
--------------------------------------------------------------------------------
---
## TEST-SPECIFIC PARAMS

test_name: mysql-benchmark
pod_yaml_alias: mysql.yml

## PROVIDER-SPECIFIC PARAMS

# OpenEBS

openebs_operator:
  - maya-apiserver
  - openebs-provisioner

# Local Volume

local_pv_name: local-pv
--------------------------------------------------------------------------------
/providers/openebs/installers/operator/0.6/ansible/vars.yaml:
--------------------------------------------------------------------------------
kubeapply: kubectl --kubeconfig /root/admin.conf
namespace: openebs
openebs_operator_link: https://raw.githubusercontent.com/openebs/openebs/v0.6/k8s/openebs-operator.yaml
openebs_operator: openebs-operator.yaml
installation_test_name: openebsinstallerv0.6
cleanup_test_name: openebscleanupv0.6
--------------------------------------------------------------------------------
/vendor/github.com/ghodss/yaml/.gitignore:
--------------------------------------------------------------------------------
# OSX leaves these everywhere on SMB shares
._*

# Eclipse files
.classpath
.project
.settings/**

# Emacs save files
*~

# Vim-related files
[._]*.s[a-w][a-z]
[._]s[a-w][a-z]
*.un~
Session.vim
.netrwhist

# Go test binaries
*.test
--------------------------------------------------------------------------------
/apps/percona/chaos/openebs_replica_network_delay/chaosutil.j2:
--------------------------------------------------------------------------------
{% if stg_prov is defined and stg_prov == 'openebs.io/provisioner-iscsi' %}
{% if stg_engine is defined and stg_engine == 'cstor' %}
chaosutil: openebs/cstor_replica_network_delay.yaml
{% else %}
chaosutil: openebs/jiva_replica_network_delay.yaml
{% endif %}
{% endif %}
--------------------------------------------------------------------------------
/apps/percona/chaos/openebs_target_network_delay/chaosutil.j2:
--------------------------------------------------------------------------------
{% if stg_prov is defined and stg_prov == 'openebs.io/provisioner-iscsi' %}
{% if stg_engine is defined and stg_engine == 'cstor' %}
chaosutil: openebs/cstor_target_network_delay.yaml
{% else %}
chaosutil: openebs/jiva_controller_network_delay.yaml
{% endif %}
{% endif %}
--------------------------------------------------------------------------------
/apps/percona/chaos/openebs_volume_replica_failure/test_vars.yml:
--------------------------------------------------------------------------------
test_name: openebs-volume-replica-failure
namespace: "{{ lookup('env','APP_NAMESPACE') }}"
label: "{{ lookup('env','APP_LABEL') }}"
pvc: "{{ lookup('env','APP_PVC') }}"
liveness_label: "{{ lookup('env','LIVENESS_APP_LABEL') }}"
liveness_namespace: "{{ lookup('env','LIVENESS_APP_NAMESPACE') }}"
--------------------------------------------------------------------------------
/executor/README.md:
--------------------------------------------------------------------------------
This folder contains the code that helps execute all or a subset of tests. There can be many
different ways of executing a series of tests; for example, Ansible is one of the ways to execute the tests.

The executor helps with performing batch execution of Litmus tests, with the ability to select/skip tests and
consolidate results for dashboarding purposes.
--------------------------------------------------------------------------------
/apps/percona/chaos/openebs_replica_network_delay/test_vars.yml:
--------------------------------------------------------------------------------
test_name: openebs-replica-network-delay
namespace: "{{ lookup('env','APP_NAMESPACE') }}"
label: "{{ lookup('env','APP_LABEL') }}"
pvc: "{{ lookup('env','APP_PVC') }}"
n_delay: "{{ lookup('env','NETWORK_DELAY') }}"
c_duration: "{{ lookup('env','CHAOS_DURATION') }}"
operator_ns: "{{ lookup('env','OPERATOR_NAMESPACE') }}"
--------------------------------------------------------------------------------
/NOTICE.md:
--------------------------------------------------------------------------------
The source code developed for the OpenEBS Project is licensed
under Apache 2.0.

However, the OpenEBS project contains unmodified/modified
subcomponents from other OpenSource Projects with separate
copyright notices and license terms.

Your use of the source code for these subcomponents is subject
to the terms and conditions as defined by those source projects.
--------------------------------------------------------------------------------
/vendor/github.com/stretchr/testify/assert/errors.go:
--------------------------------------------------------------------------------
package assert

import (
    "errors"
)

// AnError is an error instance useful for testing. If the code does not care
// about error specifics, and only needs to return the error for example, this
// error should be used to make the test code more readable.
var AnError = errors.New("assert.AnError general error for testing")
--------------------------------------------------------------------------------
/common/utils/deploy_application.yml:
--------------------------------------------------------------------------------
---
# This util deploys an application in a K8s cluster.
# The parameters required are
# - app_yml (application spec)
# - app_ns (namespace where the application needs to be deployed)

- name: Deploy Application
  shell: kubectl apply -f {{ item }} -n {{ app_ns }}
  args:
    executable: /bin/bash
  with_items: "{{ app_yml }}"
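
Editor's note: a sketch of how this util might be invoked from a calling playbook, using the two parameters documented above. The file list and namespace are illustrative, not taken from this repository, and `include_tasks` is the modern spelling of the older `include:` style used elsewhere in this repo:

```yaml
# Hypothetical invocation of common/utils/deploy_application.yml
- name: Deploy percona using the common util
  include_tasks: common/utils/deploy_application.yml
  vars:
    app_yml:
      - percona.yml        # illustrative application spec list
    app_ns: app-percona    # illustrative target namespace
```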
3 | # The parameters required are 4 | # - app_yml ( Application spec) 5 | # - app_ns ( namespace where application needs to be deployed) 6 | 7 | - name: Deploy Application 8 | shell: kubectl apply -f {{ item }} -n {{ app_ns }} 9 | args: 10 | executable: /bin/bash 11 | with_items: "{{ app_yml }}" 12 | 13 | 14 | -------------------------------------------------------------------------------- /apps/percona/functional/snapshot/test_vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | test_name: openebs-snapshot-restore 4 | 5 | snapshot_name: percona-snapshot 6 | 7 | operator_ns: "{{ lookup('env','OPERATOR_NAMESPACE') }}" 8 | 9 | app_ns: "{{ lookup('env','APP_NAMESPACE') }}" 10 | 11 | user_name: "{{ lookup('env','DB_USERNAME') }}" 12 | 13 | password: "{{ lookup('env','DB_PASSWORD') }}" 14 | 15 | app_label: "{{ lookup('env','APP_LABEL') }}" 16 | -------------------------------------------------------------------------------- /apps/percona/chaos/openebs_target_failure/test_vars.yml: -------------------------------------------------------------------------------- 1 | test_name: openebs-target-failure 2 | namespace: "{{ lookup('env','APP_NAMESPACE') }}" 3 | target_namespace: "{{ lookup('env','TARGET_NAMESPACE') }}" 4 | label: "{{ lookup('env','APP_LABEL') }}" 5 | pvc: "{{ lookup('env','APP_PVC') }}" 6 | liveness_label: "{{ lookup('env','LIVENESS_APP_LABEL') }}" 7 | liveness_namespace: "{{ lookup('env','LIVENESS_APP_NAMESPACE') }}" 8 | 9 | 10 | -------------------------------------------------------------------------------- /executor/ansible/provider/local-pv/README.md: -------------------------------------------------------------------------------- 1 | ### Setting up local disk resources 2 | 3 | - Manually discover, format and mount the disk on desired node 4 | - Update templates/pv.yaml with appropriate node and disk mount location 5 | 6 | #### Note: 7 | 8 | - The local PV is beta in Kubernetes 1.10. 9 | - The standard PersistentVolumeReclaim policy is "Retain", "Delete" is yet to be supported in all types of clusters 10 | 11 | 12 | -------------------------------------------------------------------------------- /apps/percona/workload/test_vars.yml: -------------------------------------------------------------------------------- 1 | percona_loadgen: tpcc_bench.yml 2 | namespace: "{{ lookup('env','APP_NAMESPACE') }}" 3 | test_name: percona-loadgen 4 | app_service_label: "{{ lookup('env','APP_SERVICE_LABEL') }}" 5 | loadgen_label: "{{ lookup('env','LOADGEN_LABEL') }}" 6 | db_user: "{{ lookup('env','DB_USER') }}" 7 | db_password: "{{ lookup('env','DB_PASSWORD') }}" 8 | app_label: "{{ lookup('env','APP_LABEL') }}" 9 | tpcc_conf: tpcc.conf 10 | -------------------------------------------------------------------------------- /executor/ansible/provider/README.md: -------------------------------------------------------------------------------- 1 | The provider contains playbooks to setup storage providers on the Kubernetes cluster. This may involve installing Operators, 2 | static provisioning of disk resources (say, Kubernetes Local Persistent Volume), or cloud storage (Google persistent disks). 
--------------------------------------------------------------------------------
/apps/percona/workload/test_vars.yml:
--------------------------------------------------------------------------------
percona_loadgen: tpcc_bench.yml
namespace: "{{ lookup('env','APP_NAMESPACE') }}"
test_name: percona-loadgen
app_service_label: "{{ lookup('env','APP_SERVICE_LABEL') }}"
loadgen_label: "{{ lookup('env','LOADGEN_LABEL') }}"
db_user: "{{ lookup('env','DB_USER') }}"
db_password: "{{ lookup('env','DB_PASSWORD') }}"
app_label: "{{ lookup('env','APP_LABEL') }}"
tpcc_conf: tpcc.conf
--------------------------------------------------------------------------------
/executor/ansible/provider/README.md:
--------------------------------------------------------------------------------
The provider contains playbooks to set up storage providers on the Kubernetes cluster. This may involve installing Operators,
static provisioning of disk resources (say, Kubernetes Local Persistent Volumes), or cloud storage (Google persistent disks).

The playbook may be self-contained, i.e., consist of all the steps required to set up the storage provider, or it can invoke a role
created for that purpose.
--------------------------------------------------------------------------------
/vendor/github.com/DATA-DOG/godog/colors/ansi_others.go:
--------------------------------------------------------------------------------
// Copyright 2014 shiena Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

// +build !windows

package colors

import "io"

type ansiColorWriter struct {
    w    io.Writer
    mode outputMode
}

func (cw *ansiColorWriter) Write(p []byte) (int, error) {
    return cw.w.Write(p)
}
--------------------------------------------------------------------------------
/k8s/packet/packet-storage/vars.yml:
--------------------------------------------------------------------------------
base_url: https://api.packet.net # base url for accessing the packet api
performance_tier: storage_1 # storage_1 = Performance tier: Standard & storage_2 = Performance tier: Performance
location: ams1 # locations available: ams1, ewr1, nrt1 & sjc1
billing_cycle: hourly
size: 50 # size in GB
device_id_location: /tmp/packet/device_id # file location where the cluster device id is stored
project_id: bdbd5cf0-7453-434b-99f8-0c3e3f83dbe2 # project id of packet cloud
--------------------------------------------------------------------------------
/vendor/github.com/stretchr/testify/assert/forward_assertions.go:
--------------------------------------------------------------------------------
package assert

// Assertions provides assertion methods around the
// TestingT interface.
type Assertions struct {
    t TestingT
}

// New makes a new Assertions object for the specified TestingT.
func New(t TestingT) *Assertions {
    return &Assertions{
        t: t,
    }
}

//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs
--------------------------------------------------------------------------------
/vendor/github.com/stretchr/testify/require/forward_requirements.go:
--------------------------------------------------------------------------------
package require

// Assertions provides assertion methods around the
// TestingT interface.
type Assertions struct {
    t TestingT
}

// New makes a new Assertions object for the specified TestingT.
func New(t TestingT) *Assertions {
    return &Assertions{
        t: t,
    }
}

//go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl -include-format-funcs
--------------------------------------------------------------------------------
/common/utils/check_deployment_status.yml:
--------------------------------------------------------------------------------
---
# This util checks the application status in a k8s cluster.
# The parameters required are
# - app_ns (namespace where the application is deployed)
# - app_label (label of the application in the form 'key=value')

- name: Check the pod status
  shell: kubectl get pods -n {{ app_ns }} -l {{ app_label }}
  args:
    executable: /bin/bash
  register: result
  until: "'Running' in result.stdout"
  delay: 30
  retries: 15
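
Editor's note: usage follows the same include pattern as the deploy util sketched earlier; values below are illustrative, not taken from this repository:

```yaml
# Hypothetical invocation of common/utils/check_deployment_status.yml;
# app_ns and app_label are the parameters documented above.
- name: Wait for the application pod to come up
  include_tasks: common/utils/check_deployment_status.yml
  vars:
    app_ns: app-percona          # illustrative namespace
    app_label: name=percona      # illustrative 'key=value' label
```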
--------------------------------------------------------------------------------
/providers/openebs/installers/storageclass/0.6/ansible/sc.j2:
--------------------------------------------------------------------------------
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: "{{ lookup('env','STORAGECLASS_NAME') }}"
provisioner: openebs.io/provisioner-iscsi
parameters:
  openebs.io/storage-pool: "{{ lookup('env','STORAGE_POOL_NAME') }}"
  openebs.io/jiva-replica-count: "{{ lookup('env','JIVA_REPLICA_COUNT') }}"
  openebs.io/volume-monitor: "{{ lookup('env','VOLUME_MONITOR') }}"
  openebs.io/capacity: {{ lookup('env','CAPACITY') }}
--------------------------------------------------------------------------------
/apps/percona/chaos/openebs_target_network_delay/test_vars.yml:
--------------------------------------------------------------------------------
test_name: openebs-target-network-delay
namespace: "{{ lookup('env','APP_NAMESPACE') }}"
label: "{{ lookup('env','APP_LABEL') }}"
pvc: "{{ lookup('env','APP_PVC') }}"
n_delay: "{{ lookup('env','NETWORK_DELAY') }}"
c_duration: "{{ lookup('env','CHAOS_DURATION') }}"
operator_ns: "{{ lookup('env','OPERATOR_NAMESPACE') }}"
liveness_label: "{{ lookup('env','LIVENESS_APP_LABEL') }}"
liveness_namespace: "{{ lookup('env','LIVENESS_APP_NAMESPACE') }}"
--------------------------------------------------------------------------------
/chaoslib/chaoskube/rbac.yaml:
--------------------------------------------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: chaoskube
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["list", "delete"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: chaoskube
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: chaoskube
subjects:
  - kind: ServiceAccount
    name: chaoskube
    namespace: default
--------------------------------------------------------------------------------
/executor/ansible/utils/getFiles.yaml:
--------------------------------------------------------------------------------
---
- name: Obtain list of Kubernetes test job specifications
  find:
    paths: "{{ dir }}"
    patterns: "{{ expr }}"
    recurse: yes
    use_regex: yes
  register: result

- debug:
    msg: "{{ item.path }}"
  with_items: "{{ result.files }}"

- name: Create test path list
  lineinfile:
    path: "{{ tfile }}"
    line: "{{ item.path }}"
    state: present
    create: yes
  with_items: "{{ result.files }}"
--------------------------------------------------------------------------------
/hack/litmus-result.j2:
--------------------------------------------------------------------------------
---
apiVersion: litmus.io/v1alpha1
kind: LitmusResult
metadata:

  # name of the litmus testcase
  name: {{ test }}
spec:

  # holds information on the testcase
  testMetadata:
    app: {{ app }}
    chaostype: {{ chaostype }}

  # holds the state of testcase, manually updated by json merge patch
  # result is the useful value today, but anticipate phase use in future
  testStatus:
    phase: {{ phase }}
    result: {{ verdict }}
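
Editor's note: rendered with concrete values, the litmus-result.j2 template above would produce a LitmusResult custom resource such as the following; the field values are illustrative:

```yaml
# Illustrative rendering of hack/litmus-result.j2
apiVersion: litmus.io/v1alpha1
kind: LitmusResult
metadata:
  name: mysql-data-persistence
spec:
  testMetadata:
    app: percona
    chaostype: openebs-target-failure
  testStatus:
    phase: completed
    result: Pass
```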
--------------------------------------------------------------------------------
/apps/cassandra/workload/cassandra_loadgen.yml:
--------------------------------------------------------------------------------
---
apiVersion: batch/v1
kind: Job
metadata:
  name: cassandra-loadgen
spec:
  template:
    metadata:
      name: cassandra-loadgen
      labels:
        loadgen_lkey: loadgen_lvalue
    spec:
      restartPolicy: Never
      containers:
        - name: cassandra-loadgen
          image: cassandra
          command: ["/bin/bash"]
          args: ["-c", "cassandra-stress write duration=5m no-warmup -node cassandra-0.cassandra"]
          tty: true
--------------------------------------------------------------------------------
/executor/ansible/provider/local-pv/templates/pv.yaml:
--------------------------------------------------------------------------------
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: local-pv
spec:
  capacity:
    storage: 5G
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  local:
    path: /mnt/disks/vol1
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - kubeminion-01
--------------------------------------------------------------------------------
/apps/cockroachdb/workload/cockroachdb_loadgen.yml:
--------------------------------------------------------------------------------
apiVersion: batch/v1
kind: Job
metadata:
  name: cockroachdb-lg
spec:
  template:
    metadata:
      labels:
        loadgen_lkey: loadgen_lvalue
    spec:
      restartPolicy: Never
      containers:
        - name: cockroachdb-lg
          image: cockroachdb/loadgen-kv:0.1
          imagePullPolicy: IfNotPresent
          command:
            - "/kv"
            - "--duration"
            - "time_duration"
            - "postgres://root@fqdn:26257/kv?sslmode=disable"
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/NOTICE:
--------------------------------------------------------------------------------
Copyright 2011-2016 Canonical Ltd.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------
/executor/ansible/inventory/group_vars/all.yml:
--------------------------------------------------------------------------------
---
#########################################
#    Ansible Runtime Specifications     #
#########################################

# Provider storage class
# Supported providers (openebs, local-pv)
storage_class: openebs-standard

# Option to enable slack notifications to specified channel
# Accepted entries (true, false); default: true
slack_notify: true

# Select the desired application
# Supported applications (percona, tba, all)
application:
  - percona
  - fio

litmus_dir: "{{ ansible_env.HOME }}/git/litmus"
--------------------------------------------------------------------------------
/vendor/github.com/DATA-DOG/godog/.travis.yml:
--------------------------------------------------------------------------------
language: go
go:
  - 1.5.x
  - 1.6.x
  - 1.7.x
  - 1.8.x
  - 1.9.x
  - 1.10.x

go_import_path: github.com/DATA-DOG/godog

install: go install github.com/DATA-DOG/godog/cmd/godog

script:
  - go vet github.com/DATA-DOG/godog
  - go vet github.com/DATA-DOG/godog/gherkin
  - go vet github.com/DATA-DOG/godog/colors
  - test -z "$(go fmt ./...)" # fail if not formatted properly
  - godog -f progress
  - go test -v -race -coverprofile=coverage.txt -covermode=atomic

after_success:
  - bash <(curl -s https://codecov.io/bash)
--------------------------------------------------------------------------------
/tools/godog-runner/Dockerfile:
--------------------------------------------------------------------------------
FROM golang:latest

MAINTAINER AmitD

# Install kubectl
ENV KUBE_LATEST_VERSION="v1.12.0"

RUN curl -L https://storage.googleapis.com/kubernetes-release/release/${KUBE_LATEST_VERSION}/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl \
 && chmod +x /usr/local/bin/kubectl \
 && kubectl version --client

# Install go tools
RUN go get github.com/DATA-DOG/godog/cmd/godog
RUN go get -u github.com/golang/dep/cmd/dep

# Add source code
RUN mkdir -p /go/src/github.com/openebs/litmus
ADD . /go/src/github.com/openebs/litmus/
WORKDIR /go/src/github.com/openebs/litmus/

# Go dep
RUN dep ensure
--------------------------------------------------------------------------------
/vendor/github.com/DATA-DOG/godog/utils.go:
--------------------------------------------------------------------------------
package godog

import (
    "strings"
    "time"

    "github.com/DATA-DOG/godog/colors"
)

// empty struct value takes no space allocation
type void struct{}

var red = colors.Red
var redb = colors.Bold(colors.Red)
var green = colors.Green
var black = colors.Black
var blackb = colors.Bold(colors.Black)
var yellow = colors.Yellow
var cyan = colors.Cyan
var cyanb = colors.Bold(colors.Cyan)
var whiteb = colors.Bold(colors.White)

// repeats a space n times
func s(n int) string {
    return strings.Repeat(" ", n)
}

var timeNowFunc = func() time.Time {
    return time.Now()
}
--------------------------------------------------------------------------------
/executor/ansible/litmus_playbook.yml:
--------------------------------------------------------------------------------
---
- hosts: localhost
  gather_facts: yes

  vars:
    testfile: "{{ playbook_dir }}/tests.out"

  tasks:

    - name: Clear existing test lists
      file:
        path: "{{ testfile }}"
        state: absent

    - name: Obtain list of Kubernetes test job specifications
      include: utils/getFiles.yaml
        dir="{{ litmus_dir }}/apps/{{ item }}/tests"
        expr="^run_litmus"
        tfile="{{ testfile }}"
      with_items: "{{ application }}"

    - name: Run the Kubernetes test jobs on selected storage providers
      include: utils/runTest.yaml
      with_lines: cat {{ testfile }}
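
Editor's note: the playbook above reads `application` and `litmus_dir` from inventory/group_vars/all.yml (shown earlier) and includes utils/runTest.yaml for each discovered job spec. A typical invocation, assuming it is run from the executor/ansible directory with the bundled inventory:

```bash
# Assumed invocation; inventory/ is the directory shown earlier
ansible-playbook -i inventory litmus_playbook.yml
```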
--------------------------------------------------------------------------------
/apps/memleak/tests/memleak.yml:
--------------------------------------------------------------------------------
---
apiVersion: v1
kind: Pod
metadata:
  name: memleak-test
  labels:
    name: memleak
spec:
  containers:
    - resources:
        limits:
          cpu: 0.5
      name: memleak-test
      image: openebs/tests-dd-client
      tty: true
      volumeMounts:
        - mountPath: /datadir
          name: demo-vol1
  volumes:
    - name: demo-vol1
      persistentVolumeClaim:
        claimName: demo-vol1-claim
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: demo-vol1-claim
spec:
  storageClassName: testclass
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: "4G"
--------------------------------------------------------------------------------
/apps/percona/workload/tpcc_bench.yml:
--------------------------------------------------------------------------------
---
apiVersion: batch/v1
kind: Job
metadata:
  name: tpcc-bench
spec:
  template:
    metadata:
      name: tpcc-bench
      labels:
        loadgen_lkey: loadgen_lvalue
    spec:
      restartPolicy: Never
      containers:
        - name: tpcc-bench
          image: openebs/tests-tpcc-client
          command: ["/bin/bash"]
          args: ["-c", "./tpcc-runner.sh service_ip tpcc.conf; exit 0"]
          volumeMounts:
            - name: tpcc-configmap
              mountPath: /tpcc-mysql/tpcc.conf
              subPath: tpcc.conf
          tty: true
      volumes:
        - name: tpcc-configmap
          configMap:
            name: tpcc-config
--------------------------------------------------------------------------------
/k8s/azure/k8s-installer/README.md:
--------------------------------------------------------------------------------
## Create Kubernetes Cluster for CI in Azure Cloud (AKS)

> **Pre-requisites**
> - Azure CLI
> - Ansible

### **Steps**:

To create a cluster, you first need to authenticate the Azure CLI.

```bash
az login -u <username> -p <password>
```

- To create a cluster:

```bash
ansible-playbook create-k8s-cluster.yml --extra-vars "nodes=3 node_vm_size=Standard_D3"
```

- To delete the cluster:

```bash
ansible-playbook delete-k8s-cluster.yml
```

> Optionally, you can also pass the cluster name in the `extra-vars` while running the Creation/Deletion playbook.

**NOTE**: Currently the total node count is 3 and the VM size is set to `Standard_D3`.
--------------------------------------------------------------------------------
/vendor/gopkg.in/yaml.v2/writerc.go:
--------------------------------------------------------------------------------
package yaml

// Set the writer error and return false.
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
    emitter.error = yaml_WRITER_ERROR
    emitter.problem = problem
    return false
}

// Flush the output buffer.
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
    if emitter.write_handler == nil {
        panic("write handler not set")
    }

    // Check if the buffer is empty.
    if emitter.buffer_pos == 0 {
        return true
    }

    if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
        return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
    }
    emitter.buffer_pos = 0
    return true
}
--------------------------------------------------------------------------------
/apps/minio/tests/deploy_minio/e2e.feature:
--------------------------------------------------------------------------------
Feature: Test deployment of Minio on Kubernetes PV
  In order to test deployment of Minio on Kubernetes PV
  As an end user
  I need to be able to launch Minio on Kubernetes PV

  Scenario: launch Minio on PV
    Given I have a kubernetes cluster with volume operator installed
    When I launch minio application on volume
    Then wait for "180s"
    And verify minio application is launched successfully on volume
    And verify PVC is bound
    And verify PV is deployed

  Scenario: delete Minio instance
    Given minio application is launched successfully on volume
    When I delete minio instance along with volume
    Then wait for "60s"
    And verify minio application is deleted
    And verify PV is deleted
--------------------------------------------------------------------------------
/providers/openebs/installers/operator/master/ansible/vars.yaml:
--------------------------------------------------------------------------------
kubeapply: kubectl --kubeconfig /root/admin.conf
namespace: openebs
#openebs_operator_link: https://raw.githubusercontent.com/openebs/openebs/master/k8s/openebs-operator.yaml
#storageclass_link: https://raw.githubusercontent.com/openebs/openebs/master/k8s/openebs-storageclasses.yaml
openebs_operator_link: "https://raw.githubusercontent.com/openebs/openebs/{{ lookup('env','OPENEBS_VERSION') }}/k8s/openebs-operator.yaml"
storageclass_link: "https://raw.githubusercontent.com/openebs/openebs/{{ lookup('env','OPENEBS_VERSION') }}/k8s/openebs-storageclasses.yaml" 7 | openebs_operator: openebs-operator.yaml 8 | storageclass: storageclass.yaml 9 | selector_name: openebs-ndm 10 | installation_test_name: openebsinstaller 11 | cleanup_test_name: openebscleanup 12 | -------------------------------------------------------------------------------- /vendor/github.com/DATA-DOG/godog/ast.go: -------------------------------------------------------------------------------- 1 | package godog 2 | 3 | import "go/ast" 4 | 5 | func astContexts(f *ast.File) []string { 6 | var contexts []string 7 | for _, d := range f.Decls { 8 | switch fun := d.(type) { 9 | case *ast.FuncDecl: 10 | for _, param := range fun.Type.Params.List { 11 | switch expr := param.Type.(type) { 12 | case *ast.StarExpr: 13 | switch x := expr.X.(type) { 14 | case *ast.Ident: 15 | if x.Name == "Suite" { 16 | contexts = append(contexts, fun.Name.Name) 17 | } 18 | case *ast.SelectorExpr: 19 | switch t := x.X.(type) { 20 | case *ast.Ident: 21 | if t.Name == "godog" && x.Sel.Name == "Suite" { 22 | contexts = append(contexts, fun.Name.Name) 23 | } 24 | } 25 | } 26 | } 27 | } 28 | } 29 | } 30 | return contexts 31 | } 32 | -------------------------------------------------------------------------------- /apps/minio/tests/deploy_minio/README.md: -------------------------------------------------------------------------------- 1 | ### Test Minio deployment in Kubernetes 2 | 3 | #### Use-Case 4 | Feature: Test deployment of Minio on Kubernetes PV 5 | In order to test deployment of Minio on Kubernetes PV 6 | As an end user 7 | I need to be able to launch Minio on Kubernetes PV 8 | 9 | #### Implementation 10 | - Step 1: Describe the scenario(s) in the **e2e.feature** file 11 | - Step 2: Run **godog e2e.feature** 12 | - Step 3: Implement undefined steps (also referred to as snippets) in the **e2e_test.go** file 13 | - Step 4: Re-run **godog e2e.feature** 14 | 15 | #### Best Practices 16 | - 1: Make use of standard Go practices 17 | - 2: Transform the use case into structure(s) and their properties 18 | - 3: Now fit the godog-generated function snippets into the above structures' methods 19 | 20 | #### References 21 | - https://github.com/DATA-DOG/godog 22 | -------------------------------------------------------------------------------- /vendor/github.com/davecgh/go-spew/LICENSE: -------------------------------------------------------------------------------- 1 | ISC License 2 | 3 | Copyright (c) 2012-2016 Dave Collins 4 | 5 | Permission to use, copy, modify, and/or distribute this software for any 6 | purpose with or without fee is hereby granted, provided that the above 7 | copyright notice and this permission notice appear in all copies. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 | WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 | MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 | ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 | WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 | ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 | OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 | -------------------------------------------------------------------------------- /Gopkg.toml: -------------------------------------------------------------------------------- 1 | # Gopkg.toml example 2 | # 3 | # Refer to https://golang.github.io/dep/docs/Gopkg.toml.html 4 | # for detailed Gopkg.toml documentation. 5 | # 6 | # required = ["github.com/user/thing/cmd/thing"] 7 | # ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] 8 | # 9 | # [[constraint]] 10 | # name = "github.com/user/project" 11 | # version = "1.0.0" 12 | # 13 | # [[constraint]] 14 | # name = "github.com/user/project2" 15 | # branch = "dev" 16 | # source = "github.com/myfork/project2" 17 | # 18 | # [[override]] 19 | # name = "github.com/x/y" 20 | # version = "2.4.0" 21 | # 22 | # [prune] 23 | # non-go = false 24 | # go-tests = true 25 | # unused-packages = true 26 | 27 | 28 | [[constraint]] 29 | name = "github.com/DATA-DOG/godog" 30 | version = "0.7.6" 31 | 32 | [prune] 33 | go-tests = true 34 | unused-packages = true 35 | -------------------------------------------------------------------------------- /apps/cassandra/liveness/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: cassandra-liveness- 6 | namespace: litmus 7 | 8 | spec: 9 | template: 10 | metadata: 11 | name: cassandra-liveness 12 | 13 | spec: 14 | restartPolicy: Never 15 | containers: 16 | - name: cassandra-liveness 17 | image: openebs/cassandra-liveness 18 | imagePullPolicy: Always 19 | 20 | env: 21 | 22 | # Namespace in which application is running 23 | - name: NAMESPACE 24 | value: litmus 25 | 26 | # Service name of application 27 | - name: SERVICE 28 | value: cassandra 29 | 30 | # Port on which application is listening 31 | - name: PORT 32 | value: "9042" 33 | 34 | command: ["/bin/bash"] 35 | args: ["-c", "python ./liveness.py ; exit 0"] 36 | -------------------------------------------------------------------------------- /common/utils/check_statefulset_status.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This utility checks if all the replicas in a statefulset are running. 3 | # The parameters required are 4 | # - app_ns (Namespace in which the application is deployed) 5 | # - app_label (Label of the application in the form 'key=value') 6 | # 7 | - name: Obtain the number of replicas. 8 | shell: kubectl get statefulset -n {{ app_ns }} -l {{ app_label }} -o custom-columns=:spec.replicas 9 | args: 10 | executable: /bin/bash 11 | register: rep_count 12 | until: "rep_count.rc == 0" 13 | delay: 60 14 | retries: 15 15 | 16 | - name: Obtain the ready replica count and compare with the replica count.
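# (':..readyReplicas' below is a header-less custom column; its JSONPath
# '..readyReplicas' descends recursively to match status.readyReplicas.)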
17 | shell: kubectl get statefulset -n {{ app_ns }} -l {{ app_label }} -o custom-columns=:..readyReplicas 18 | args: 19 | executable: /bin/bash 20 | register: ready_rep 21 | until: "ready_rep.rc == 0 and ready_rep.stdout|int == rep_count.stdout|int" 22 | delay: 60 23 | retries: 15 24 | -------------------------------------------------------------------------------- /apps/mongodb/workload/mongo_loadgen.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | name: mongo-loadgen 6 | spec: 7 | template: 8 | metadata: 9 | name: mongo-loadgen 10 | labels: 11 | loadgen_lkey: loadgen_lvalue 12 | spec: 13 | restartPolicy: Never 14 | containers: 15 | - name: mongo-loadgen 16 | image: openebs/tests-mongo-client 17 | command: ["/bin/bash"] 18 | args: ["-c", "./sysbench/sysbench --mongo-write-concern=1 --mongo-url='mongodb://podname.servicename' --mongo-database-name=database_name --test=./sysbench/tests/mongodb/oltp.lua --oltp_table_size=100 --oltp_tables_count=10 --num-threads=10 --rand-type=pareto --report-interval=10 --max-requests=0 --max-time=600 --oltp-point-selects=10 --oltp-simple-ranges=1 --oltp-sum-ranges=1 --oltp-order-ranges=1 --oltp-distinct-ranges=1 --oltp-index-updates=1 --oltp-non-index-updates=1 --oltp-inserts=1 run"] 19 | tty: true 20 | 21 | -------------------------------------------------------------------------------- /vendor/github.com/DATA-DOG/godog/gherkin.go: -------------------------------------------------------------------------------- 1 | package godog 2 | 3 | import "github.com/DATA-DOG/godog/gherkin" 4 | 5 | // examples is a helper func to cast gherkin.Examples 6 | // or gherkin.BaseExamples if its empty 7 | // @TODO: this should go away with gherkin update 8 | func examples(ex interface{}) (*gherkin.Examples, bool) { 9 | t, ok := ex.(*gherkin.Examples) 10 | return t, ok 11 | } 12 | 13 | // means there are no scenarios or they do not have steps 14 | func isEmptyFeature(ft *gherkin.Feature) bool { 15 | for _, def := range ft.ScenarioDefinitions { 16 | if !isEmptyScenario(def) { 17 | return false 18 | } 19 | } 20 | return true 21 | } 22 | 23 | // means scenario dooes not have steps 24 | func isEmptyScenario(def interface{}) bool { 25 | switch t := def.(type) { 26 | case *gherkin.Scenario: 27 | if len(t.Steps) > 0 { 28 | return false 29 | } 30 | case *gherkin.ScenarioOutline: 31 | if len(t.Steps) > 0 { 32 | return false 33 | } 34 | } 35 | return true 36 | } 37 | -------------------------------------------------------------------------------- /apps/jenkins/job-simulator/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: jenkins-job-simulator- 6 | 7 | spec: 8 | template: 9 | metadata: 10 | name: jenkins-job-simulator 11 | 12 | spec: 13 | restartPolicy: Never 14 | containers: 15 | - name: jenkins-job-simulator 16 | image: openebs/jenkins-simulator 17 | imagePullPolicy: Always 18 | 19 | env: 20 | 21 | # Time in min. 
for which simulator will run 22 | - name: MINUTES 23 | value: "1" 24 | 25 | - name: SERVICE 26 | value: jenkins-svc 27 | 28 | - name: NAMESPACE 29 | value: litmus 30 | 31 | - name: PASSWORD 32 | value: b3a0e3050cc0461aa17755f2537e0608 33 | 34 | - name: USER 35 | value: Admin 36 | 37 | command: ["/bin/bash"] 38 | args: ["-c", "python ./job.py ; exit 0"] 39 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/require/doc.go: -------------------------------------------------------------------------------- 1 | // Package require implements the same assertions as the `assert` package but 2 | // stops test execution when a test fails. 3 | // 4 | // Example Usage 5 | // 6 | // The following is a complete example using require in a standard test function: 7 | // import ( 8 | // "testing" 9 | // "github.com/stretchr/testify/require" 10 | // ) 11 | // 12 | // func TestSomething(t *testing.T) { 13 | // 14 | // var a string = "Hello" 15 | // var b string = "Hello" 16 | // 17 | // require.Equal(t, a, b, "The two words should be the same.") 18 | // 19 | // } 20 | // 21 | // Assertions 22 | // 23 | // The `require` package have same global functions as in the `assert` package, 24 | // but instead of returning a boolean result they call `t.FailNow()`. 25 | // 26 | // Every assertion function also takes an optional string message as the final argument, 27 | // allowing custom error messages to be appended to the message the assertion method outputs. 28 | package require 29 | -------------------------------------------------------------------------------- /apps/fio/tests/performance/fio.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | name: fio 6 | spec: 7 | template: 8 | metadata: 9 | name: fio 10 | labels: 11 | name: fio 12 | spec: 13 | restartPolicy: Never 14 | nodeSelector: 15 | kubernetes.io/hostname: testNode 16 | containers: 17 | - name: perfrunner 18 | image: openebs/tests-fio 19 | command: ["/bin/bash"] 20 | args: ["-c", "./fio_runner.sh --template file/basic-rw --size 256m --duration 60; exit 0"] 21 | volumeMounts: 22 | - mountPath: /datadir 23 | name: fio-vol 24 | tty: true 25 | volumes: 26 | - name: fio-vol 27 | persistentVolumeClaim: 28 | claimName: testClaim 29 | --- 30 | kind: PersistentVolumeClaim 31 | apiVersion: v1 32 | metadata: 33 | name: testClaim 34 | spec: 35 | storageClassName: testClass 36 | accessModes: 37 | - ReadWriteOnce 38 | resources: 39 | requests: 40 | storage: "5G" 41 | -------------------------------------------------------------------------------- /hack/litmus-prerequisites.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | error_handler() 4 | { 5 | rc=$1; message=$(echo $2 | cut -d "=" -f 2); act=$(echo $3 | cut -d "=" -f 2) 6 | if [ $rc -ne 0 ]; then 7 | echo "$message" 8 | if [ "$act" == "exit" ]; then 9 | exit 1 10 | fi 11 | fi 12 | } 13 | 14 | default_kube_config_path="$HOME/.kube/config" 15 | read -p "Provide the KUBECONFIG path: [default=$default_kube_config_path] " answer 16 | : ${answer:=$default_kube_config_path} 17 | 18 | echo "Selected kubeconfig file: $answer" 19 | 20 | echo "Applying the litmus RBAC.." 21 | kubectl apply -f rbac.yaml; retcode=$? 22 | error_handler $retcode msg="Unable to setup litmus RBAC, exiting" action="exit" 23 | 24 | cp $answer admin.conf; retcode=$? 
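# admin.conf is then packaged into the 'kubeconfig' configmap and mounted into
# litmus job pods at /root/admin.conf (see the litmusbook specs in this repo).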
25 | error_handler $retcode msg="Unable to find the kubeconfig file, exiting" action="exit" 26 | 27 | echo "Creating configmap.." 28 | kubectl create configmap kubeconfig --from-file=admin.conf -n litmus; retcode=$? 29 | error_handler $retcode msg="Unable to create kubeconfig configmap, exiting" action="exit" 30 | 31 | -------------------------------------------------------------------------------- /apps/cassandra/workload/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: litmus-cassandra-loadgen- 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | name: litmus 11 | labels: 12 | loadgen: cassandra-loadjob 13 | 14 | spec: 15 | serviceAccountName: litmus 16 | restartPolicy: Never 17 | containers: 18 | - name: ansibletest 19 | image: openebs/ansible-runner:ci 20 | imagePullPolicy: Always 21 | env: 22 | - name: ANSIBLE_STDOUT_CALLBACK 23 | #value: log_plays 24 | value: default 25 | 26 | - name: LOADGEN_NS 27 | value: app-cass-ns 28 | 29 | - name: LOADGEN_LABEL 30 | value: 'loadgen=cassandra-loadgen' 31 | 32 | - name: APPLICATION_LABEL 33 | value: 'app=cassandra' 34 | 35 | command: ["/bin/bash"] 36 | args: ["-c", "ansible-playbook ./cassandra/workload/test.yml -i /etc/ansible/hosts -v; exit 0"] 37 | -------------------------------------------------------------------------------- /apps/cockroachdb/workload/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: cockroachdb-loadgen- 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | name: litmus 11 | labels: 12 | loadgen: cockroachdb-loadjob 13 | spec: 14 | serviceAccountName: litmus 15 | restartPolicy: Never 16 | containers: 17 | - name: loadgen 18 | image: openebs/ansible-runner:ci 19 | imagePullPolicy: Always 20 | env: 21 | - name: ANSIBLE_STDOUT_CALLBACK 22 | value: default 23 | 24 | - name: APP_NS 25 | value: app-cdb-ns 26 | 27 | - name: APP_LABEL 28 | value: 'app=cockroachdb' 29 | 30 | - name: LOADGEN_LABEL 31 | value: 'loadgen=cockroachdb-load' 32 | 33 | - name: TIME_INTERVAL 34 | value: 5m 35 | 36 | command: ["/bin/bash"] 37 | args: ["-c", "ansible-playbook ./cockroachdb/workload/test.yml -i /etc/ansible/hosts -v; exit 0"] 38 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | ## Is this a BUG REPORT or FEATURE REQUEST? 5 | 6 | Choose one: BUG REPORT or FEATURE REQUEST 7 | 8 | 20 | 21 | 22 | 23 | **What happened**: 24 | 25 | **What you expected to happen**: 26 | 27 | **How to reproduce it (as minimally and precisely as possible)**: 28 | 29 | 30 | **Anything else we need to know?**: 31 | 32 | -------------------------------------------------------------------------------- /docs/running_test_suite.md: -------------------------------------------------------------------------------- 1 | # Running a Complete Test Suite 2 | 3 | The Litmus test suite can be run on a kubernetes cluster using an ansible-based executor framework. 
4 | This involves: 5 | 6 | - Setting up Ansible on any Linux machine (the ansible test harness), with SSH access to the Kubernetes cluster 7 | - Generating the Ansible inventory file with host information (master/control node & hosts) 8 | - Modifying a global variables file to: 9 | 10 | - Set the provider and storage class 11 | - Select the test category (all or a subset) 12 | - Enable/disable services like log collection, notifications, etc. 13 | 14 | Follow the executor/README for detailed instructions on how to perform the above steps. Once these pre-requisites 15 | have been met, execute the following on the ansible test harness: 16 | 17 | ``` 18 | ./litmus/executor/ansible/run-litmus.sh 19 | ``` 20 | 21 | The above script verifies that it has all the details required to proceed and provides you with the 22 | test task execution status. 23 | 24 | *Litmus may take a while to show a reaction as it puts the system through rigorous scrutiny!* 25 | -------------------------------------------------------------------------------- /k8s/aws/ebs-volumes/README.md: -------------------------------------------------------------------------------- 1 | 2 | # AWS platform specific code and scripts 3 | 4 | ## Amazon Web Services: attaching EBS volumes in a provided Kubernetes cluster 5 | 6 | ### Pre-requisites 7 | 8 | - AWS cluster is up and running 9 | - Disable Ansible host key checking: `export ANSIBLE_HOST_KEY_CHECKING=False` 10 | 11 | #### Execution and Inventory Requirements 12 | 13 | - Public IPs of VMs/nodes 14 | - SSH connection to VMs/nodes 15 | - The `delegate_to` and `with_items` Ansible directives are used to SSH into the VMs/nodes and to loop over each of them, respectively 16 | 17 | ### Create, attach and mount an EBS volume in the AWS cluster 18 | 19 | - `create-ebs-volume.yml` will create, attach and mount an EBS volume on each node of the named cluster. 20 | 21 | ```bash 22 | ansible-playbook create-ebs-volume.yml -vv --extra-vars "cluster_name=<cluster-name>" 23 | ``` 24 | 25 | ### Unmount, detach and delete the EBS volume in the AWS cluster 26 | 27 | - `delete-ebs-volume.yml` will unmount, detach and delete the EBS volume from the provided cluster. 28 | 29 | ```bash 30 | ansible-playbook delete-ebs-volume.yml -vv --extra-vars "cluster_name=<cluster-name>" 31 | ``` -------------------------------------------------------------------------------- /k8s/gcp/gpd-disks/README.md: -------------------------------------------------------------------------------- 1 | 2 | # GCE platform specific code and scripts 3 | 4 | ## Attaching and Mounting Google Persistent Disks to Cluster's Nodes 5 | 6 | ### Pre-requisites 7 | 8 | - Cluster in GCE is up, running and accessible. 9 | - A service account JSON key and email with sufficient privileges for Disks and Compute Engine access 10 | 11 | 12 | ### Creating, attaching and mounting GPDs in worker nodes 13 | 14 | - `create-gpd.yml` will create, attach and mount a GPD on each worker node of the given cluster, sequentially. 15 | 16 | ```bash 17 | ansible-playbook create-gpd.yml --extra-vars "cluster_name=<cluster-name> json=<service-account-json> email=<service-account-email> project=<project-id>" 18 | ``` 19 | ** If you don't specify the cluster name, it will be read from `~logs/cluster` by default. 20 | 21 | ### Unmount, detach and delete GPDs in worker nodes 22 | 23 | - `delete-gpd.yml` will unmount, detach and delete the disks from each worker node of the specified cluster.
24 | 25 | ```bash 26 | ansible-playbook delete-gpd.yml --extra-vars "cluster_name=<cluster-name> json=<service-account-json> email=<service-account-email> project=<project-id>" 27 | ``` -------------------------------------------------------------------------------- /apps/percona/functional/snapshot/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: litmus-openebs-snapshot-restore- 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | labels: 11 | name: openebs-snapshot-restore 12 | spec: 13 | serviceAccountName: litmus 14 | restartPolicy: Never 15 | containers: 16 | - name: ansibletest 17 | image: openebs/ansible-runner:ci 18 | imagePullPolicy: Always 19 | env: 20 | - name: ANSIBLE_STDOUT_CALLBACK 21 | value: default 22 | 23 | - name: OPERATOR_NAMESPACE 24 | value: openebs 25 | 26 | - name: APP_NAMESPACE 27 | value: app-percona-ns 28 | 29 | - name: APP_LABEL 30 | value: 'name=percona' 31 | 32 | - name: DB_USERNAME 33 | value: root 34 | 35 | - name: DB_PASSWORD 36 | value: k8sDem0 37 | 38 | command: ["/bin/bash"] 39 | args: ["-c", "ansible-playbook ./percona/functional/snapshot/test.yml -i /etc/ansible/hosts -vv; exit 0"] 40 | 41 | -------------------------------------------------------------------------------- /apps/jenkins/deployers/jenkins.yml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: testclaim 5 | spec: 6 | storageClassName: testclass 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 5G 12 | --- 13 | apiVersion: extensions/v1beta1 14 | kind: Deployment 15 | metadata: 16 | name: jenkins 17 | spec: 18 | replicas: 1 19 | template: 20 | metadata: 21 | labels: 22 | lkey: lvalue 23 | spec: 24 | securityContext: 25 | fsGroup: 1000 26 | containers: 27 | - name: jenkins 28 | imagePullPolicy: IfNotPresent 29 | image: jenkins/jenkins:lts 30 | ports: 31 | - containerPort: 8080 32 | volumeMounts: 33 | - mountPath: /var/jenkins_home 34 | name: jenkins-home 35 | volumes: 36 | - name: jenkins-home 37 | persistentVolumeClaim: 38 | claimName: testclaim 39 | --- 40 | apiVersion: v1 41 | kind: Service 42 | metadata: 43 | name: jenkins-svc 44 | labels: 45 | lkey: lvalue 46 | spec: 47 | ports: 48 | - port: 80 49 | targetPort: 8080 50 | selector: 51 | lkey: lvalue 52 | type: NodePort 53 | -------------------------------------------------------------------------------- /apps/percona/tests/mysql_data_persistence/test_cleanup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Get the PV name to verify successful volume deletion 3 | shell: > 4 | kubectl get pvc {{ test_name }} 5 | -o custom-columns=:spec.volumeName -n litmus 6 | --no-headers 7 | args: 8 | executable: /bin/bash 9 | register: pv 10 | 11 | - name: Delete percona mysql pod 12 | shell: > 13 | source ~/.profile; kubectl delete -f {{ pod_yaml_alias }} 14 | -n litmus 15 | args: 16 | executable: /bin/bash 17 | 18 | - name: Confirm percona pod has been deleted 19 | shell: source ~/.profile; kubectl get pods -n litmus 20 | args: 21 | executable: /bin/bash 22 | register: result 23 | until: "'percona' not in result.stdout" 24 | delay: 30 25 | retries: 12 26 | 27 | - block: 28 | - name: Confirm the volume (pvc-*) pods have been deleted 29 | shell: > 30 | kubectl get pods -n litmus | grep {{ pv.stdout }} 31 | args: 32 | executable: /bin/bash 33 | register: result 34 | failed_when: "'pvc' in result.stdout and 'Running' in
result.stdout" 35 | delay: 30 36 | retries: 12 37 | when: "'openebs-standard' in lookup('env','PROVIDER_STORAGE_CLASS')" 38 | 39 | -------------------------------------------------------------------------------- /k8s/packet/k8s-installer/pre_requisite.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | apt-get update && apt-get install -y docker.io apt-transport-https curl 3 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - 4 | touch /etc/apt/sources.list.d/kubernetes.list 5 | sh -c 'echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list' 6 | apt-get update && apt-get install -y kubelet=$1 kubeadm=$1 kubectl=$1 7 | swapoff -a 8 | 9 | master() { 10 | sed -i "s/cgroup-driver=systemd/cgroup-driver=cgroupfs/g" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 11 | systemctl daemon-reload 12 | systemctl restart kubelet 13 | sleep 10 14 | kubeadm init --apiserver-advertise-address=$1 --apiserver-cert-extra-sans=10.0.2.15 --pod-network-cidr=$2 15 | mkdir -p $HOME/.kube 16 | cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 17 | chown $(id -u):$(id -g) $HOME/.kube/config 18 | sysctl net.bridge.bridge-nf-call-iptables=1 19 | kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml 20 | } 21 | 22 | if [ "$4" == "master" ];then 23 | master $2 $3 24 | fi -------------------------------------------------------------------------------- /vendor/github.com/DATA-DOG/godog/gherkin/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014-2016 Cucumber Ltd, Gaspar Nagy 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 
22 | -------------------------------------------------------------------------------- /common/utils/update_litmus_result_resource.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - block: 3 | - name: Generate the litmus result CR to reflect SOT (Start of Test) 4 | template: 5 | src: /litmus-result.j2 6 | dest: litmus-result.yaml 7 | vars: 8 | test: "{{ test_name }}" 9 | app: "" 10 | chaostype: "" 11 | phase: in-progress 12 | verdict: "{{ flag }}" 13 | 14 | - name: Apply the litmus result CR 15 | shell: kubectl apply -f litmus-result.yaml 16 | args: 17 | executable: /bin/bash 18 | register: lr_status 19 | failed_when: "lr_status.rc != 0" 20 | 21 | when: status == "SOT" 22 | 23 | - block: 24 | - name: Generate the litmus result CR to reflect EOT (End of Test) 25 | template: 26 | src: /litmus-result.j2 27 | dest: litmus-result.yaml 28 | vars: 29 | test: "{{ test_name }}" 30 | app: "" 31 | chaostype: "" 32 | phase: completed 33 | verdict: "{{ flag }}" 34 | 35 | - name: Apply the litmus result CR 36 | shell: kubectl apply -f litmus-result.yaml 37 | args: 38 | executable: /bin/bash 39 | register: lr_status 40 | failed_when: "lr_status.rc != 0" 41 | 42 | when: status == "EOT" 43 | -------------------------------------------------------------------------------- /vendor/github.com/DATA-DOG/godog/colors/colors.go: -------------------------------------------------------------------------------- 1 | package colors 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | ) 7 | 8 | const ansiEscape = "\x1b" 9 | 10 | // a color code type 11 | type color int 12 | 13 | // some ansi colors 14 | const ( 15 | black color = iota + 30 16 | red 17 | green 18 | yellow 19 | blue 20 | magenta 21 | cyan 22 | white 23 | ) 24 | 25 | func colorize(s interface{}, c color) string { 26 | return fmt.Sprintf("%s[%dm%v%s[0m", ansiEscape, c, s, ansiEscape) 27 | } 28 | 29 | type ColorFunc func(interface{}) string 30 | 31 | func Bold(fn ColorFunc) ColorFunc { 32 | return ColorFunc(func(input interface{}) string { 33 | return strings.Replace(fn(input), ansiEscape+"[", ansiEscape+"[1;", 1) 34 | }) 35 | } 36 | 37 | func Green(s interface{}) string { 38 | return colorize(s, green) 39 | } 40 | 41 | func Red(s interface{}) string { 42 | return colorize(s, red) 43 | } 44 | 45 | func Cyan(s interface{}) string { 46 | return colorize(s, cyan) 47 | } 48 | 49 | func Black(s interface{}) string { 50 | return colorize(s, black) 51 | } 52 | 53 | func Yellow(s interface{}) string { 54 | return colorize(s, yellow) 55 | } 56 | 57 | func White(s interface{}) string { 58 | return colorize(s, white) 59 | } 60 | -------------------------------------------------------------------------------- /vendor/github.com/DATA-DOG/godog/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: test gherkin bump cover 2 | 3 | VERS := $(shell grep 'const Version' -m 1 godog.go | awk -F\" '{print $$2}') 4 | 5 | test: 6 | @echo "running all tests" 7 | @go install ./... 8 | @go fmt ./... 9 | @golint github.com/DATA-DOG/godog 10 | @golint github.com/DATA-DOG/godog/cmd/godog 11 | go vet ./... 
12 | go test -race 13 | godog -f progress -c 4 14 | 15 | gherkin: 16 | @if [ -z "$(VERS)" ]; then echo "Provide gherkin version like: 'VERS=commit-hash'"; exit 1; fi 17 | @rm -rf gherkin 18 | @mkdir gherkin 19 | @curl -s -L https://github.com/cucumber/gherkin-go/tarball/$(VERS) | tar -C gherkin -zx --strip-components 1 20 | @rm -rf gherkin/{.travis.yml,.gitignore,*_test.go,gherkin-generate*,*.razor,*.jq,Makefile,CONTRIBUTING.md} 21 | 22 | bump: 23 | @if [ -z "$(VERSION)" ]; then echo "Provide version like: 'VERSION=$(VERS) make bump'"; exit 1; fi 24 | @echo "bumping version from: $(VERS) to $(VERSION)" 25 | @sed -i.bak 's/$(VERS)/$(VERSION)/g' godog.go 26 | @sed -i.bak 's/$(VERS)/$(VERSION)/g' examples/api/version.feature 27 | @find . -name '*.bak' | xargs rm 28 | 29 | cover: 30 | go test -race -coverprofile=coverage.txt 31 | go tool cover -html=coverage.txt 32 | rm coverage.txt 33 | -------------------------------------------------------------------------------- /apps/fio/tests/data-integrity/fio-read.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: basic-read 6 | data: 7 | 8 | basic-rw : |- 9 | 10 | [global] 11 | directory=/datadir 12 | 13 | [basic-fio] 14 | rw=read 15 | bs=4k 16 | verify=crc32c 17 | verify=pattern 18 | verify_pattern=%o 19 | --- 20 | apiVersion: batch/v1 21 | kind: Job 22 | metadata: 23 | name: fio-read 24 | spec: 25 | template: 26 | metadata: 27 | name: fio-read 28 | labels: 29 | name: fio-read 30 | spec: 31 | restartPolicy: Never 32 | containers: 33 | - name: perfrunner 34 | image: openebs/tests-fio 35 | command: ["/bin/bash"] 36 | args: ["-c", "./fio_runner.sh --size 128m; exit 0"] 37 | volumeMounts: 38 | - mountPath: /datadir 39 | name: demo-vol1 40 | - mountPath: templates/file/basic-rw 41 | subPath: basic-rw 42 | name: basic-configmap-read 43 | tty: true 44 | 45 | volumes: 46 | - name: demo-vol1 47 | persistentVolumeClaim: 48 | claimName: demo-vol1-claim 49 | - name: basic-configmap-read 50 | configMap: 51 | name: basic-read 52 | -------------------------------------------------------------------------------- /providers/openebs/installers/storageclass/0.7/ansible/storageclass-jiva.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: openebs.io/v1alpha1 3 | kind: StoragePool 4 | metadata: 5 | name: {{ lookup('env','JIVA_POOL_NAME') }} 6 | type: hostdir 7 | spec: 8 | path: "{{ lookup('env','STORAGE_PATH') }}" 9 | --- 10 | apiVersion: storage.k8s.io/v1 11 | kind: StorageClass 12 | metadata: 13 | name: {{ lookup('env','JIVA_STORAGECLASS_NAME') }} 14 | annotations: 15 | cas.openebs.io/create-volume-template: jiva-volume-create-default-0.7.0 16 | cas.openebs.io/delete-volume-template: jiva-volume-delete-default-0.7.0 17 | cas.openebs.io/read-volume-template: jiva-volume-read-default-0.7.0 18 | cas.openebs.io/config: | 19 | - name: ControllerImage 20 | value: {{ lookup('env','OPENEBS_IO_JIVA_CONTROLLER_IMAGE') }} 21 | - name: ReplicaImage 22 | value: {{ lookup('env','OPENEBS_IO_JIVA_REPLICA_IMAGE') }} 23 | - name: VolumeMonitorImage 24 | value: {{ lookup('env','OPENEBS_IO_VOLUME_MONITOR_IMAGE') }} 25 | - name: ReplicaCount 26 | value: "{{ lookup('env','OPENEBS_IO_JIVA_REPLICA_COUNT') }}" 27 | - name: StoragePool 28 | value: {{ lookup('env','JIVA_POOL_NAME') }} 29 | provisioner: openebs.io/provisioner-iscsi 30 | --- -------------------------------------------------------------------------------- 
/vendor/github.com/DATA-DOG/godog/colors/no_colors.go: -------------------------------------------------------------------------------- 1 | package colors 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "io" 7 | ) 8 | 9 | type noColors struct { 10 | out io.Writer 11 | lastbuf bytes.Buffer 12 | } 13 | 14 | func Uncolored(w io.Writer) io.Writer { 15 | return &noColors{out: w} 16 | } 17 | 18 | func (w *noColors) Write(data []byte) (n int, err error) { 19 | er := bytes.NewBuffer(data) 20 | loop: 21 | for { 22 | c1, _, err := er.ReadRune() 23 | if err != nil { 24 | break loop 25 | } 26 | if c1 != 0x1b { 27 | fmt.Fprint(w.out, string(c1)) 28 | continue 29 | } 30 | c2, _, err := er.ReadRune() 31 | if err != nil { 32 | w.lastbuf.WriteRune(c1) 33 | break loop 34 | } 35 | if c2 != 0x5b { 36 | w.lastbuf.WriteRune(c1) 37 | w.lastbuf.WriteRune(c2) 38 | continue 39 | } 40 | 41 | var buf bytes.Buffer 42 | for { 43 | c, _, err := er.ReadRune() 44 | if err != nil { 45 | w.lastbuf.WriteRune(c1) 46 | w.lastbuf.WriteRune(c2) 47 | w.lastbuf.Write(buf.Bytes()) 48 | break loop 49 | } 50 | if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { 51 | break 52 | } 53 | buf.Write([]byte(string(c))) 54 | } 55 | } 56 | return len(data) - w.lastbuf.Len(), nil 57 | } 58 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/require/requirements.go: -------------------------------------------------------------------------------- 1 | package require 2 | 3 | // TestingT is an interface wrapper around *testing.T 4 | type TestingT interface { 5 | Errorf(format string, args ...interface{}) 6 | FailNow() 7 | } 8 | 9 | type tHelper interface { 10 | Helper() 11 | } 12 | 13 | // ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful 14 | // for table driven tests. 15 | type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) 16 | 17 | // ValueAssertionFunc is a common function prototype when validating a single value. Can be useful 18 | // for table driven tests. 19 | type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) 20 | 21 | // BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful 22 | // for table driven tests. 23 | type BoolAssertionFunc func(TestingT, bool, ...interface{}) 24 | 25 | // ValuesAssertionFunc is a common function prototype when validating an error value. Can be useful 26 | // for table driven tests. 27 | type ErrorAssertionFunc func(TestingT, error, ...interface{}) 28 | 29 | //go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl -include-format-funcs 30 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell 2 | 3 | Please consider promoting this project if you find it useful. 
4 | 5 | Permission is hereby granted, free of charge, to any person 6 | obtaining a copy of this software and associated documentation 7 | files (the "Software"), to deal in the Software without restriction, 8 | including without limitation the rights to use, copy, modify, merge, 9 | publish, distribute, sublicense, and/or sell copies of the Software, 10 | and to permit persons to whom the Software is furnished to do so, 11 | subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included 14 | in all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 18 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 19 | IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, 20 | DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT 21 | OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 22 | OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 | -------------------------------------------------------------------------------- /apps/percona/deployers/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: litmus-percona- 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | name: litmus 11 | labels: 12 | app: percona-deployment 13 | 14 | spec: 15 | serviceAccountName: litmus 16 | restartPolicy: Never 17 | containers: 18 | - name: ansibletest 19 | image: openebs/ansible-runner:ci 20 | imagePullPolicy: Always 21 | 22 | env: 23 | - name: ANSIBLE_STDOUT_CALLBACK 24 | #value: log_plays, actionable, default 25 | value: default 26 | 27 | - name: PROVIDER_STORAGE_CLASS 28 | # Supported values: openebs-standard, local-storage 29 | value: openebs-standard 30 | 31 | - name: APP_PVC 32 | value: percona-mysql-claim 33 | 34 | # Application label 35 | - name: APP_LABEL 36 | value: 'name=percona' 37 | 38 | # Application namespace 39 | - name: APP_NAMESPACE 40 | value: app-percona-ns 41 | 42 | command: ["/bin/bash"] 43 | args: ["-c", "ansible-playbook ./percona/deployers/test.yml -i /etc/ansible/hosts -v; exit 0"] 44 | 45 | -------------------------------------------------------------------------------- /apps/cassandra/functional/scale_replicas/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: litmus-replica-scale- 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | name: litmus 11 | labels: 12 | app: cassandra-scaleup-litmus 13 | 14 | spec: 15 | serviceAccountName: litmus 16 | restartPolicy: Never 17 | containers: 18 | - name: ansibletest 19 | image: openebs/ansible-runner:ci 20 | imagePullPolicy: Always 21 | 22 | env: 23 | - name: ANSIBLE_STDOUT_CALLBACK 24 | value: default 25 | 26 | # Application label 27 | - name: APP_LABEL 28 | value: 'app=cassandra' 29 | 30 | # Application namespace 31 | - name: APP_NAMESPACE 32 | value: app-cass-ns 33 | 34 | # Deployment type either statefulset or deployment 35 | - name: DEPLOY_TYPE 36 | value: statefulset 37 | 38 | # The total number of replicas 39 | - name: REPLICA_COUNT 40 | value: '3' 41 | 42 | command: ["/bin/bash"] 43 | args: ["-c", "ansible-playbook ./cassandra/functional/scale_replicas/test.yml -i /etc/ansible/hosts -v; exit 0"] 44 | 
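# Usage sketch (hypothetical invocation, not part of the litmusbook): Jobs that
# use metadata.generateName must be launched with 'kubectl create', not
# 'kubectl apply', e.g.:
#   kubectl create -f run_litmus_test.yml
#   kubectl logs -n litmus -l app=cassandra-scaleup-litmus -f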
-------------------------------------------------------------------------------- /apps/jenkins/deployers/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: litmus-jenkins- 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | name: litmus 11 | labels: 12 | app: jenkins-deployment-litmus 13 | 14 | spec: 15 | serviceAccountName: litmus 16 | restartPolicy: Never 17 | containers: 18 | - name: ansibletest 19 | image: openebs/ansible-runner:ci 20 | imagePullPolicy: Always 21 | 22 | env: 23 | - name: ANSIBLE_STDOUT_CALLBACK 24 | #value: log_plays, actionable, default 25 | value: default 26 | 27 | - name: PROVIDER_STORAGE_CLASS 28 | # Supported values: openebs-standard, local-storage 29 | value: openebs-standard 30 | 31 | - name: APP_PVC 32 | value: jenkins-claim 33 | 34 | # Application label 35 | - name: APP_LABEL 36 | value: 'app=jenkins-app' 37 | 38 | # Application namespace 39 | - name: APP_NAMESPACE 40 | value: app-jenkins-ns 41 | 42 | command: ["/bin/bash"] 43 | args: ["-c", "ansible-playbook ./jenkins/deployers/test.yml -i /etc/ansible/hosts -v; exit 0"] 44 | 45 | -------------------------------------------------------------------------------- /providers/openebs/installers/operator/0.6/litmusbook/openebs_cleanup.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | name: litmus-openebs-cleanup-v0.6 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | name: litmus 11 | spec: 12 | serviceAccountName: litmus 13 | restartPolicy: Never 14 | containers: 15 | - name: ansibletest 16 | image: openebs/ansible-runner:ci 17 | imagePullPolicy: Always 18 | env: 19 | - name: mountPath 20 | value: /mnt/openebs 21 | - name: ANSIBLE_STDOUT_CALLBACK 22 | value: actionable 23 | - name: RUN_ID 24 | value: 25 | command: ["/bin/bash"] 26 | args: ["-c", "ansible-playbook ./operator/0.6/ansible/openebs_cleanup.yaml -i /etc/ansible/hosts -vv; exit 0"] 27 | volumeMounts: 28 | - name: kubeconfig 29 | mountPath: /root/admin.conf 30 | subPath: admin.conf 31 | - name: logs 32 | mountPath: /var/log/ansible 33 | volumes: 34 | - name: kubeconfig 35 | configMap: 36 | name: kubeconfig 37 | - name: logs 38 | hostPath: 39 | path: /mnt/openebs 40 | type: "" 41 | -------------------------------------------------------------------------------- /apps/jupyter/deployers/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: litmus-jupyter- 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | name: litmus 11 | labels: 12 | app: jupyter-deployment-litmus 13 | 14 | spec: 15 | serviceAccountName: litmus 16 | restartPolicy: Never 17 | containers: 18 | - name: ansibletest 19 | image: openebs/ansible-runner:ci 20 | imagePullPolicy: Always 21 | 22 | env: 23 | - name: ANSIBLE_STDOUT_CALLBACK 24 | #value: log_plays, actionable, default 25 | value: default 26 | 27 | - name: PROVIDER_STORAGE_CLASS 28 | # Supported values: openebs-standard, local-storage 29 | value: openebs-standard 30 | 31 | - name: APP_PVC 32 | value: jupyter-data-vol-claim 33 | 34 | # Application label 35 | - name: APP_LABEL 36 | value: 'app=jupyter-server' 37 | 38 | # Application namespace 39 | - name: APP_NAMESPACE 40 | value: app-jupyter-ns 41 | 42 | command: ["/bin/bash"] 43 | args: ["-c", 
"ansible-playbook ./jupyter/deployers/test.yml -i /etc/ansible/hosts -v; exit 0"] 44 | 45 | -------------------------------------------------------------------------------- /providers/openebs/installers/storageclass/0.6/litmusbook/storageclass_setup.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | name: litmus-storageclass-setup-jiva-v0.6 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | name: litmus 11 | spec: 12 | serviceAccountName: litmus 13 | restartPolicy: Never 14 | containers: 15 | - name: ansibletest 16 | image: openebs/ansible-runner:ci 17 | imagePullPolicy: Always 18 | env: 19 | - name: mountPath 20 | value: /mnt/openebs 21 | - name: ANSIBLE_STDOUT_CALLBACK 22 | value: actionable 23 | - name: STORAGE_POOL_NAME 24 | value: openebs-mntdir 25 | - name: STORAGECLASS_NAME 26 | value: openebs-storageclass 27 | - name: STORAGE_PATH 28 | value: /var/openebs 29 | - name: JIVA_REPLICA_COUNT 30 | value: "3" 31 | - name: CAPACITY 32 | value: 5G 33 | - name: VOLUME_MONITOR 34 | value: "true" 35 | - name: RUN_ID 36 | value: 37 | command: ["/bin/bash"] 38 | args: ["-c", "ansible-playbook ./storageclass/0.6/ansible/storageclass_setup.yaml -i /etc/ansible/hosts -vv; exit 0"] 39 | -------------------------------------------------------------------------------- /providers/openebs/installers/storageclass/0.6/litmusbook/storageclass_cleanup.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | name: litmus-storageclass-cleanup-jiva-v0.6 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | name: litmus 11 | spec: 12 | serviceAccountName: litmus 13 | restartPolicy: Never 14 | containers: 15 | - name: ansibletest 16 | image: openebs/ansible-runner:ci 17 | imagePullPolicy: Always 18 | env: 19 | - name: mountPath 20 | value: /mnt/openebs 21 | - name: ANSIBLE_STDOUT_CALLBACK 22 | value: actionable 23 | - name: STORAGE_POOL_NAME 24 | value: openebs-mntdir 25 | - name: STORAGECLASS_NAME 26 | value: openebs-storageclass 27 | - name: STORAGE_PATH 28 | value: /var/openebs 29 | - name: JIVA_REPLICA_COUNT 30 | value: "3" 31 | - name: CAPACITY 32 | value: 5G 33 | - name: VOLUME_MONITOR 34 | value: "true" 35 | - name: RUN_ID 36 | value: 37 | command: ["/bin/bash"] 38 | args: ["-c", "ansible-playbook ./storageclass/0.6/ansible/storageclass_cleanup.yaml -i /etc/ansible/hosts -vv; exit 0"] 39 | -------------------------------------------------------------------------------- /apps/jupyter/deployers/jupyter_openebs.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: jupyter-server 5 | spec: 6 | replicas: 1 7 | template: 8 | metadata: 9 | labels: 10 | lkey: lvalue 11 | spec: 12 | containers: 13 | - name: jupyter-server 14 | imagePullPolicy: Always 15 | image: satyamz/docker-jupyter:v0.4 16 | ports: 17 | - containerPort: 8888 18 | env: 19 | - name: GIT_REPO 20 | value: https://github.com/vharsh/plot-demo.git 21 | volumeMounts: 22 | - name: data-vol 23 | mountPath: /mnt/data 24 | volumes: 25 | - name: data-vol 26 | persistentVolumeClaim: 27 | claimName: testclaim 28 | --- 29 | kind: PersistentVolumeClaim 30 | apiVersion: v1 31 | metadata: 32 | name: testclaim 33 | spec: 34 | storageClassName: testclass 35 | accessModes: 36 | - ReadWriteOnce 37 | resources: 38 | requests: 39 | storage: 5G 40 | 
--- 41 | apiVersion: v1 42 | kind: Service 43 | metadata: 44 | name: jupyter-service 45 | labels: 46 | lkey: lvalue 47 | spec: 48 | ports: 49 | - name: ui 50 | port: 8888 51 | nodePort: 32424 52 | protocol: TCP 53 | selector: 54 | lkey: lvalue 55 | sessionAffinity: None 56 | type: NodePort 57 | 58 | -------------------------------------------------------------------------------- /apps/mongodb/deployers/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: litmus-mongodb- 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | name: litmus 11 | labels: 12 | app: mongodb-deployment-litmus 13 | 14 | spec: 15 | serviceAccountName: litmus 16 | restartPolicy: Never 17 | containers: 18 | - name: ansibletest 19 | image: openebs/ansible-runner:ci 20 | imagePullPolicy: Always 21 | 22 | env: 23 | - name: ANSIBLE_STDOUT_CALLBACK 24 | #value: log_plays 25 | value: default 26 | 27 | - name: PROVIDER_STORAGE_CLASS 28 | # Supported values: openebs-standard, local-storage 29 | value: openebs-mongodb 30 | 31 | - name: APP_PVC 32 | value: openebs-mongo 33 | 34 | # Application label 35 | - name: APP_LABEL 36 | value: 'app=mongo' 37 | 38 | # Application namespace 39 | - name: APP_NAMESPACE 40 | value: app-mongo-ns 41 | 42 | - name: DEPLOY_TYPE 43 | value: statefulset 44 | 45 | command: ["/bin/bash"] 46 | args: ["-c", "ansible-playbook ./mongodb/deployers/test.yml -i /etc/ansible/hosts -v; exit 0"] 47 | 48 | -------------------------------------------------------------------------------- /apps/mongodb/workload/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: mongodb-loadgen- 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | name: mongodb-loadgen 11 | namespace: litmus 12 | labels: 13 | loadgen: mongodb-loadjob 14 | spec: 15 | serviceAccountName: litmus 16 | restartPolicy: Never 17 | containers: 18 | - name: ansibletest 19 | image: openebs/ansible-runner:ci 20 | imagePullPolicy: Always 21 | env: 22 | - name: ANSIBLE_STDOUT_CALLBACK 23 | #value: log_plays 24 | value: default 25 | 26 | - name: POD_NAME 27 | value: mongo-0 28 | 29 | - name: SERVICE_NAME 30 | value: mongo 31 | 32 | - name: LOADGEN_LABEL 33 | value: loadgen=mongo-loadgen 34 | 35 | - name: DATABASE_NAME 36 | value: sbtest 37 | 38 | # Application label 39 | - name: APP_LABEL 40 | value: 'app=mongo' 41 | 42 | # Application namespace 43 | - name: APP_NAMESPACE 44 | value: app-mongo-ns 45 | 46 | command: ["/bin/bash"] 47 | args: ["-c", "ansible-playbook ./mongodb/workload/test.yml -i /etc/ansible/hosts -v; exit 0"] 48 | -------------------------------------------------------------------------------- /hack/push: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | if [ -z "${REPONAME}" ] 5 | then 6 | REPONAME="openebs" 7 | fi 8 | 9 | if [ -z "${IMGNAME}" ] || [ -z "${IMGTAG}" ]; 10 | then 11 | echo "Image details are missing. Nothing to push."; 12 | exit 1 13 | fi 14 | 15 | IMAGEID=$( sudo docker images -q ${REPONAME}/${IMGNAME}:${IMGTAG} ) 16 | 17 | if [ ! -z "${DNAME}" ] && [ ! -z "${DPASS}" ]; 18 | then 19 | sudo docker login -u "${DNAME}" -p "${DPASS}"; 20 | # Push image to docker hub 21 | echo "Pushing ${REPONAME}/${IMGNAME}:${IMGTAG} ..."; 22 | sudo docker push ${REPONAME}/${IMGNAME}:${IMGTAG} ; 23 | if [ ! 
-z "${TRAVIS_TAG}" ] ; 24 | then 25 | # Push with different tags if tagged as a release 26 | # When github is tagged with a release, then Travis will 27 | # set the release tag in env TRAVIS_TAG 28 | echo "Pushing ${REPONAME}/${IMGNAME}:${TRAVIS_TAG} ..."; 29 | sudo docker tag ${IMAGEID} ${REPONAME}/${IMGNAME}:${TRAVIS_TAG} 30 | sudo docker push ${REPONAME}/${IMGNAME}:${TRAVIS_TAG}; 31 | echo "Pushing ${REPONAME}/${IMGNAME}:latest ..."; 32 | sudo docker tag ${IMAGEID} ${REPONAME}/${IMGNAME}:latest 33 | sudo docker push ${REPONAME}/${IMGNAME}:latest; 34 | fi; 35 | else 36 | echo "No docker credentials provided. Skip uploading ${REPONAME}/${IMGNAME}:${IMGTAG} to docker hub"; 37 | fi; 38 | -------------------------------------------------------------------------------- /providers/openebs/installers/operator/master/litmusbook/openebs_cleanup.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | name: litmus-openebs-cleanup 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | name: litmus 11 | labels: 12 | provider: openebs-cleanup 13 | spec: 14 | serviceAccountName: litmus 15 | restartPolicy: Never 16 | containers: 17 | - name: ansibletest 18 | image: openebs/ansible-runner:ci 19 | imagePullPolicy: Always 20 | env: 21 | - name: mountPath 22 | value: /mnt/openebs 23 | - name: ANSIBLE_STDOUT_CALLBACK 24 | value: actionable 25 | - name: RUN_ID 26 | value: 27 | command: ["/bin/bash"] 28 | args: ["-c", "ansible-playbook ./operator/master/ansible/openebs_cleanup.yaml -i /etc/ansible/hosts -vv; exit 0"] 29 | volumeMounts: 30 | - name: kubeconfig 31 | mountPath: /root/admin.conf 32 | subPath: admin.conf 33 | - name: logs 34 | mountPath: /var/log/ansible 35 | volumes: 36 | - name: kubeconfig 37 | configMap: 38 | name: kubeconfig 39 | - name: logs 40 | hostPath: 41 | path: /mnt/openebs 42 | type: "" 43 | -------------------------------------------------------------------------------- /chaoslib/kubectl/pod_evict_by_taint.yaml: -------------------------------------------------------------------------------- 1 | - block: 2 | 3 | - name: Identify the application node 4 | shell: > 5 | kubectl get pod {{ app }} -n {{ app_ns }} 6 | --no-headers -o custom-columns=:spec.nodeName 7 | args: 8 | executable: /bin/bash 9 | register: result 10 | 11 | - name: Record the application node name 12 | set_fact: 13 | app_node: "{{ result.stdout }}" 14 | 15 | - name: Force eviction of pods by tainting the app node 16 | shell: > 17 | kubectl taint node {{ app_node }} 18 | {{ taint }}=:NoExecute 19 | args: 20 | executable: /bin/bash 21 | register: result 22 | until: "'tainted' in result.stdout" 23 | delay: 20 24 | retries: 12 25 | 26 | - name: Wait for application pod reschedule (evict) 27 | # Do not untaint until evict occurs 28 | wait_for: 29 | timeout: 30 30 | 31 | when: action == "taint" 32 | 33 | - block: 34 | 35 | - name: Untaint the application node 36 | shell: > 37 | kubectl taint node {{ app_node }} 38 | {{ taint }}:NoExecute- 39 | args: 40 | executable: /bin/bash 41 | register: result 42 | failed_when: "'untainted' not in result.stdout" 43 | 44 | when: action == "untaint" 45 | 46 | -------------------------------------------------------------------------------- /apps/jenkins/upgrade/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: litmus-jenkins-upgrade- 6 | namespace: litmus 7 | 
spec: 8 | template: 9 | metadata: 10 | name: litmus 11 | labels: 12 | app: upgrade-jenkins 13 | 14 | spec: 15 | serviceAccountName: litmus 16 | restartPolicy: Never 17 | containers: 18 | - name: ansibletest 19 | image: openebs/ansible-runner:ci 20 | imagePullPolicy: Always 21 | 22 | env: 23 | - name: ANSIBLE_STDOUT_CALLBACK 24 | #value: log_plays, actionable, default 25 | value: default 26 | 27 | # Application label 28 | - name: APP_LABEL 29 | value: 'app=jenkins-app' 30 | 31 | # Application namespace 32 | - name: APP_NAMESPACE 33 | value: app-jenkins-ns 34 | 35 | # Either deployment or statefulset 36 | - name: DEPLOY_TYPE 37 | value: deployment 38 | 39 | # Target image for the upgrade (the app is currently deployed with a different image) 40 | - name: IMAGE_NAME 41 | value: jenkins/jenkins:2.125 42 | 43 | command: ["/bin/bash"] 44 | args: ["-c", "ansible-playbook ./jenkins/upgrade/test.yml -i /etc/ansible/hosts -v; exit 0"] 45 | 46 | -------------------------------------------------------------------------------- /vendor/github.com/DATA-DOG/godog/gherkin/dialect.go: -------------------------------------------------------------------------------- 1 | package gherkin 2 | 3 | type GherkinDialect struct { 4 | Language string 5 | Name string 6 | Native string 7 | Keywords map[string][]string 8 | } 9 | 10 | func (g *GherkinDialect) FeatureKeywords() []string { 11 | return g.Keywords["feature"] 12 | } 13 | 14 | func (g *GherkinDialect) ScenarioKeywords() []string { 15 | return g.Keywords["scenario"] 16 | } 17 | 18 | func (g *GherkinDialect) StepKeywords() []string { 19 | result := g.Keywords["given"] 20 | result = append(result, g.Keywords["when"]...) 21 | result = append(result, g.Keywords["then"]...) 22 | result = append(result, g.Keywords["and"]...) 23 | result = append(result, g.Keywords["but"]...) 24 | return result 25 | } 26 | 27 | func (g *GherkinDialect) BackgroundKeywords() []string { 28 | return g.Keywords["background"] 29 | } 30 | 31 | func (g *GherkinDialect) ScenarioOutlineKeywords() []string { 32 | return g.Keywords["scenarioOutline"] 33 | } 34 | 35 | func (g *GherkinDialect) ExamplesKeywords() []string { 36 | return g.Keywords["examples"] 37 | } 38 | 39 | type GherkinDialectProvider interface { 40 | GetDialect(language string) *GherkinDialect 41 | } 42 | 43 | type gherkinDialectMap map[string]*GherkinDialect 44 | 45 | func (g gherkinDialectMap) GetDialect(language string) *GherkinDialect { 46 | return g[language] 47 | } 48 | -------------------------------------------------------------------------------- /common/utils/scale_statefulset_replicas.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This utility task file scales up a statefulset application deployed in a K8s cluster. 3 | # The parameters required are 4 | # - app_ns (Namespace in which the application is deployed) 5 | # - app_label (Application's label in the form key=value) 6 | # - app_replica_count (Required number of application replicas) 7 | 8 | - name: Identifying the statefulset deployed. 9 | shell: kubectl get statefulset -n {{ app_ns }} --no-headers -l {{ app_label }} -o custom-columns=:metadata.name 10 | args: 11 | executable: /bin/bash 12 | register: result 13 | 14 | - name: Recording the statefulset name. 15 | set_fact: 16 | sts_name: "{{ result.stdout }}" 17 | 18 | - name: Scaling up the statefulset application.
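# ('kubectl scale' prints '<name> scaled' on success; the failed_when below
# keys off that output.)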
19 | shell: kubectl scale statefulsets {{ sts_name }} --replicas={{ app_replica_count }} -n {{ app_ns }} 20 | args: 21 | executable: /bin/bash 22 | register: result 23 | failed_when: "'scaled' not in result.stdout" 24 | 25 | - name: Check if all the application replicas are running. 26 | shell: kubectl get statefulsets -n {{ app_ns }} --no-headers -l {{ app_label }} -o custom-columns=:..readyReplicas 27 | args: 28 | executable: /bin/bash 29 | register: running_replicas 30 | until: "running_replicas.stdout|int == app_replica_count|int" 31 | delay: 60 32 | retries: 15 33 | 34 | -------------------------------------------------------------------------------- /tools/ansible-runner/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu 2 | 3 | LABEL maintainer="OpenEBS" 4 | 5 | #Installing necessary ubuntu packages 6 | RUN rm -rf /var/lib/apt/lists/* && \ 7 | apt-get clean && \ 8 | apt-get update --fix-missing || true && \ 9 | apt-get install -y python-minimal python-pip \ 10 | curl openssh-client 11 | 12 | #Installing ansible 13 | RUN pip install ansible 14 | 15 | #Installing Kubectl 16 | ENV KUBE_LATEST_VERSION="v1.12.0" 17 | RUN curl -L https://storage.googleapis.com/kubernetes-release/release/${KUBE_LATEST_VERSION}/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl && \ 18 | chmod +x /usr/local/bin/kubectl && \ 19 | curl -o /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-07-26/bin/linux/amd64/aws-iam-authenticator && \chmod +x /usr/local/bin/aws-iam-authenticator 20 | 21 | #Adding hosts entries and making ansible folders 22 | RUN mkdir /etc/ansible/ /ansible && \ 23 | echo "[local]" >> /etc/ansible/hosts && \ 24 | echo "127.0.0.1" >> /etc/ansible/hosts 25 | 26 | #Copying Necessary Files 27 | COPY providers/openebs/installers/ ./ 28 | COPY ./apps/ ./ 29 | COPY ./chaoslib ./chaoslib/ 30 | COPY ./funclib ./funclib/ 31 | COPY ./executor/ansible/plugins/callback/actionable.py /usr/local/lib/python2.7/dist-packages/ansible/plugins/callback/ 32 | COPY ./hack/litmus-result.j2 ./ 33 | -------------------------------------------------------------------------------- /apps/crunchy-postgres/deployers/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: litmus-pgset- 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | name: litmus 11 | labels: 12 | app: postgres-deployment-litmus 13 | 14 | spec: 15 | serviceAccountName: litmus 16 | restartPolicy: Never 17 | containers: 18 | - name: ansibletest 19 | image: openebs/ansible-runner:ci 20 | imagePullPolicy: Always 21 | 22 | env: 23 | - name: ANSIBLE_STDOUT_CALLBACK 24 | #value: log_plays, actionable, default 25 | value: default 26 | 27 | - name: PROVIDER_STORAGE_CLASS 28 | # Supported values: openebs-standard,openebs-standalone,local-storage 29 | value: openebs-standalone 30 | 31 | - name: APP_PVC 32 | value: pgdata-claim 33 | 34 | # Application label 35 | - name: APP_LABEL 36 | value: 'app=pgset' 37 | 38 | # Application namespace 39 | - name: APP_NAMESPACE 40 | value: app-pgres-ns 41 | 42 | - name: DEPLOY_TYPE 43 | value: statefulset 44 | 45 | command: ["/bin/bash"] 46 | args: ["-c", "ansible-playbook ./crunchy-postgres/deployers/test.yml -i /etc/ansible/hosts -v; exit 0"] 47 | -------------------------------------------------------------------------------- /pkg/time/time.go: 
-------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 The OpenEBS Authors 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package time 18 | 19 | import ( 20 | "time" 21 | ) 22 | 23 | // WaitFor pauses the current goroutine for the provided time. 24 | // This function is modelled using time.ParseDuration. 25 | // 26 | // time.ParseDuration parses a duration string. 27 | // A duration string is a possibly signed sequence of 28 | // decimal numbers, each with optional fraction and a unit suffix, 29 | // such as "300ms", "-1.5h" or "2h45m". 30 | // 31 | // Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". 32 | func WaitFor(duration string) (err error) { 33 | // sleep interval 34 | interval, err := time.ParseDuration(duration) 35 | if err != nil { 36 | return 37 | } 38 | 39 | sleep(interval) 40 | return 41 | } 42 | 43 | var sleep = time.Sleep 44 | -------------------------------------------------------------------------------- /apps/cockroachdb/deployers/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: litmus-cockroachdb- 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | name: litmus 11 | labels: 12 | apps: cockroachdb-deployment-litmus 13 | 14 | spec: 15 | serviceAccountName: litmus 16 | restartPolicy: Never 17 | containers: 18 | - name: ansibletest 19 | image: openebs/ansible-runner:ci 20 | imagePullPolicy: Always 21 | 22 | env: 23 | - name: ANSIBLE_STDOUT_CALLBACK 24 | #value: log_plays, actionable, default 25 | value: default 26 | 27 | - name: PROVIDER_STORAGE_CLASS 28 | # Supported values: openebs-standard,openebs-standalone,local-storage 29 | value: openebs-standalone 30 | 31 | - name: APP_PVC 32 | value: cockroachdb-claim 33 | 34 | # Application label 35 | - name: APP_LABEL 36 | value: 'app=cockroachdb' 37 | 38 | # Application namespace 39 | - name: APP_NAMESPACE 40 | value: app-cdb-ns 41 | 42 | - name: DEPLOY_TYPE 43 | value: statefulset 44 | 45 | command: ["/bin/bash"] 46 | args: ["-c", "ansible-playbook ./cockroachdb/deployers/test.yml -i /etc/ansible/hosts -v; exit 0"] 47 | -------------------------------------------------------------------------------- /apps/percona/workload/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: percona-loadgen- 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | name: percona-loadgen 11 | namespace: litmus 12 | labels: 13 | loadgen: percona-loadjob 14 | spec: 15 | serviceAccountName: litmus 16 | restartPolicy: Never 17 | containers: 18 | - name: ansibletest 19 | image: openebs/ansible-runner:ci 20 | imagePullPolicy: Always 21 | env: 22 | - name: ANSIBLE_STDOUT_CALLBACK 23 | #value: log_plays 24 | value: default 25 | 26 | - name: APP_LABEL 27 | value: name=percona 28 | 29 
| #Namespace in which loadgen pod will be deployed 30 | - name: APP_NAMESPACE 31 | value: app-percona-ns 32 | 33 | - name: APP_SERVICE_LABEL 34 | value: name=percona 35 | 36 | - name: LOADGEN_LABEL 37 | value: loadgen=percona-loadgen 38 | 39 | # Database user name 40 | - name: DB_USER 41 | value: root 42 | 43 | - name: DB_PASSWORD 44 | value: k8sDem0 45 | 46 | command: ["/bin/bash"] 47 | args: ["-c", "ansible-playbook ./percona/workload/test.yml -i /etc/ansible/hosts -v; exit 0"] 48 | -------------------------------------------------------------------------------- /k8s/packet/k8s-installer/delete_packet_cluster.yml: -------------------------------------------------------------------------------- 1 | # delete_packet_cluster.yml 2 | # Description: This will delete a cluster in packet 3 | ############################################################################################### 4 | #Test Steps: 5 | 6 | #1. Deleting instances in packet 7 | #2. Deleting device Id from file 8 | #3. Deleting ssh key from packet 9 | 10 | ############################################################################################### 11 | 12 | --- 13 | - hosts: localhost 14 | 15 | vars_files: 16 | - vars.yml 17 | 18 | tasks: 19 | - block: 20 | - name: Deleting instances in packet 21 | packet_device: 22 | project_id: "{{ project_id }}" 23 | state: absent 24 | device_ids: "{{ item }}" 25 | with_lines: cat /tmp/packet/device_id 26 | 27 | - name: Deleting device Id from file 28 | lineinfile: 29 | state: absent 30 | path: "/tmp/packet/device_id" 31 | regexp: "" 32 | mode: 0755 33 | 34 | - name: Deleting ssh key from packet 35 | packet_sshkey: 36 | key_file: ~/.ssh/id_rsa.pub 37 | state: absent 38 | 39 | - name: Set Test Status 40 | set_fact: 41 | flag: "Test Passed" 42 | 43 | rescue: 44 | - name: Set Test Status 45 | set_fact: 46 | flag: "Test Failed" -------------------------------------------------------------------------------- /k8s/packet/packet-storage/README.md: -------------------------------------------------------------------------------- 1 | 2 | # Packet platform specific code and scripts 3 | 4 | #### Creating storage on Packet Cloud which provides `multi-tenant block storage` service (backed by the fine folks at Datera) https://www.packet.net/cloud/storage/ 5 | 6 | ### Pre-requisites 7 | 8 | - Packet Cluster is up and running and device Ids stored in a file `/tmp/packet/device_id` 9 | - `packet` directory present at location `/tmp` 10 | 11 | **Example**: Format of device_id stored in file 12 | 13 | ```bash 14 | $ cat /tmp/packet/device_id 15 | 52793aab-02b6-4c7e-a6d8-5f7893660b18 16 | 0477161f-90f2-463f-a6f7-db63c19dbda8 17 | 07668999-37c5-4e0e-be6f-786ba8eef574 18 | c5bb635c-2fd8-4394-bbd3-10ceb81c0876 19 | ``` 20 | 21 | #### Execution and Inventory Requirements 22 | 23 | - Packet API token required: (https://app.packet.net/users/<`project-id`>/api-keys) 24 | 25 | ### Creating and attaching packet storage to cluster 26 | 27 | - `create-volume` will create and attach a `Datera` volume to each of the nodes listed in the file (/tmp/packet/device_id). 28 | 29 | ```bash 30 | ansible-playbook create-volume.yml -vv --extra-vars "packet_api=" 31 | ``` 32 | 33 | ### Detach and delete Packet volume 34 | 35 | - `delete-volume` will detach and delete the `Datera` volumes using the volume ids present in (/tmp/packet/volume_id).
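The volume ids are expected one per line, in the same format as the device id file shown above (the values below are purely illustrative):

```bash
$ cat /tmp/packet/volume_id
6b9c4f2e-1a3d-4c5b-8e7f-2d1a0b9c8e7f
9f1e2d3c-4b5a-6978-8a7b-6c5d4e3f2a1b
```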
36 | 37 | ```bash 38 | ansible-playbook delete-volume.yml -vv --extra-vars "packet_api=" 39 | ``` -------------------------------------------------------------------------------- /k8s/gcp/k8s-installer/create-vpc.yml: -------------------------------------------------------------------------------- 1 | # Description: Creates a Virtual Private Cloud (its name is randomly generated) that is shared 2 | # with all other clusters. Using --subnet-mode=auto will create a subnet for every zone. This file 3 | # is run at the beginning of e2e tests 4 | # Author: Harshvardhan Karn 5 | ############################################################################################### 6 | #Steps: 7 | #1. Create a VPC with a randomly generated name in Google Cloud 8 | ############################################################################################### 9 | 10 | --- 11 | - hosts: localhost 12 | tasks: 13 | - block: 14 | 15 | - name: Generating Random VPC Name 16 | shell: python ../../utils/name_generator/namesgenerator.py 17 | register: vpc 18 | - set_fact: 19 | vpc: "{{ vpc.stdout }}" 20 | - name: Creating VPC 21 | shell: gcloud compute --project={{ project }} networks create {{ vpc }} --subnet-mode=auto 22 | 23 | - lineinfile: 24 | create: yes 25 | state: present 26 | path: "~/logs/vpc" 27 | line: "{{ vpc }}" 28 | 29 | - name: Test Passed 30 | set_fact: 31 | flag: "Test Passed" 32 | rescue: 33 | - name: Test Failed 34 | set_fact: 35 | flag: "Test Failed" 36 | -------------------------------------------------------------------------------- /vendor/gopkg.in/yaml.v2/LICENSE.libyaml: -------------------------------------------------------------------------------- 1 | The following files were ported to Go from C files of libyaml, and thus 2 | are still covered by their original copyright and license: 3 | 4 | apic.go 5 | emitterc.go 6 | parserc.go 7 | readerc.go 8 | scannerc.go 9 | writerc.go 10 | yamlh.go 11 | yamlprivateh.go 12 | 13 | Copyright (c) 2006 Kirill Simonov 14 | 15 | Permission is hereby granted, free of charge, to any person obtaining a copy of 16 | this software and associated documentation files (the "Software"), to deal in 17 | the Software without restriction, including without limitation the rights to 18 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 19 | of the Software, and to permit persons to whom the Software is furnished to do 20 | so, subject to the following conditions: 21 | 22 | The above copyright notice and this permission notice shall be included in all 23 | copies or substantial portions of the Software. 24 | 25 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 26 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 27 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 28 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 29 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 30 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 | SOFTWARE. 32 | -------------------------------------------------------------------------------- /vendor/github.com/DATA-DOG/godog/colors/writer.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014 shiena Authors. All rights reserved. 2 | // Use of this source code is governed by a MIT-style 3 | // license that can be found in the LICENSE file.
4 | 5 | package colors 6 | 7 | import "io" 8 | 9 | type outputMode int 10 | 11 | // DiscardNonColorEscSeq supports the divided color escape sequence. 12 | // But non-color escape sequence is not output. 13 | // Please use the OutputNonColorEscSeq If you want to output a non-color 14 | // escape sequences such as ncurses. However, it does not support the divided 15 | // color escape sequence. 16 | const ( 17 | _ outputMode = iota 18 | discardNonColorEscSeq 19 | outputNonColorEscSeq 20 | ) 21 | 22 | // Colored creates and initializes a new ansiColorWriter 23 | // using io.Writer w as its initial contents. 24 | // In the console of Windows, which change the foreground and background 25 | // colors of the text by the escape sequence. 26 | // In the console of other systems, which writes to w all text. 27 | func Colored(w io.Writer) io.Writer { 28 | return createModeAnsiColorWriter(w, discardNonColorEscSeq) 29 | } 30 | 31 | // NewModeAnsiColorWriter create and initializes a new ansiColorWriter 32 | // by specifying the outputMode. 33 | func createModeAnsiColorWriter(w io.Writer, mode outputMode) io.Writer { 34 | if _, ok := w.(*ansiColorWriter); !ok { 35 | return &ansiColorWriter{ 36 | w: w, 37 | mode: mode, 38 | } 39 | } 40 | return w 41 | } 42 | -------------------------------------------------------------------------------- /executor/ansible/utils/runTest.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Get the Test item from Test Job List 3 | set_fact: 4 | test: "{{ item }}" 5 | 6 | - block: 7 | 8 | - name: Replace the storage class based on provider 9 | replace: 10 | path: "{{ test }}" 11 | regexp: "openebs-standard" 12 | replace: "{{ storage_class }}" 13 | 14 | - name: Run the test Kubernetes job YAML 15 | shell: source ~/.profile; kubectl create -f run_litmus_test.yaml 16 | args: 17 | executable: /bin/bash 18 | delegate_to: "{{groups['kubernetes-kubemasters'].0}}" 19 | 20 | - name: Verify the test Kubernetes job is Successful 21 | # This is a placeholder task that waits 30m for job complete 22 | shell: > 23 | source ~/.profile; 24 | kubectl get job litmus --no-headers 25 | -n litmus -o custom-columns=:status.succeeded 26 | args: 27 | executable: /bin/bash 28 | register: result 29 | delegate_to: "{{groups['kubernetes-kubemasters'].0}}" 30 | until: "result.stdout|int == 1" 31 | delay: 120 32 | retries: 15 33 | 34 | ## TODO: Result CR parse, slack notify if applicable 35 | 36 | rescue: 37 | - name: Handle job failure 38 | debug: 39 | msg: "Unable to complete test, please examine the job spec for {{ test }}" 40 | 41 | always: 42 | - name: Message b/w test job runs 43 | debug: 44 | msg: "Moving to next test..." 
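# A hedged sketch for the result-CR TODO above (the CR kind and the
# jsonpath below are assumptions, not verified against the actual
# litmusresult schema rendered from hack/litmus-result.j2):
#
#   - name: Read verdict from the litmus result custom resource
#     shell: >
#       source ~/.profile;
#       kubectl get litmusresult <result-name>
#       -o jsonpath='{.spec.testStatus.result}'
#     args:
#       executable: /bin/bash
#     delegate_to: "{{groups['kubernetes-kubemasters'].0}}"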
45 | -------------------------------------------------------------------------------- /apps/cassandra/deployers/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: litmus-cassandra- 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | name: litmus 11 | labels: 12 | app: cassandra-deployment-litmus 13 | 14 | spec: 15 | serviceAccountName: litmus 16 | restartPolicy: Never 17 | containers: 18 | - name: ansibletest 19 | image: openebs/ansible-runner:ci 20 | imagePullPolicy: Always 21 | 22 | env: 23 | - name: ANSIBLE_STDOUT_CALLBACK 24 | #value: log_plays 25 | value: default 26 | 27 | - name: PROVIDER_STORAGE_CLASS 28 | # Supported values: openebs-standard,openebs-standalone,local-storage 29 | value: openebs-standalone 30 | 31 | # Application pvc 32 | - name: APP_PVC 33 | value: openebs-cassandra 34 | 35 | # Application label 36 | - name: APP_LABEL 37 | value: 'app=cassandra' 38 | 39 | # Application namespace 40 | - name: APP_NAMESPACE 41 | value: app-cass-ns 42 | 43 | - name: DEPLOY_TYPE 44 | value: statefulset 45 | 46 | - name: APP_REPLICA 47 | value: 'replicas=2' 48 | 49 | command: ["/bin/bash"] 50 | args: ["-c", "ansible-playbook ./cassandra/deployers/test.yml -i /etc/ansible/hosts -v; exit 0"] 51 | -------------------------------------------------------------------------------- /apps/fio/tests/performance/test_cleanup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Get pvc name to verify successful pvc deletion 3 | shell: > 4 | kubectl get pvc {{ test_name }} 5 | -o custom-columns=:spec.volumeName -n litmus 6 | --no-headers 7 | args: 8 | executable: /bin/bash 9 | register: pv 10 | 11 | - name: Delete fio job 12 | shell: > 13 | source ~/.profile; kubectl delete -f {{ pod_yaml_alias }} 14 | -n litmus 15 | args: 16 | executable: /bin/bash 17 | 18 | - name: Confirm fio pod has been deleted 19 | shell: source ~/.profile; kubectl get pods -n litmus 20 | args: 21 | executable: /bin/bash 22 | register: result 23 | until: "'fio' not in result.stdout" 24 | delay: 30 25 | retries: 12 26 | 27 | - block: 28 | - name: Confirm pvc pod has been deleted 29 | shell: > 30 | kubectl get pods -n litmus | grep {{ pv.stdout }} 31 | args: 32 | executable: /bin/bash 33 | register: result 34 | failed_when: "'Running' in result.stdout" 35 | delay: 30 36 | retries: 12 37 | when: "'openebs-standard' in lookup('env','PROVIDER_STORAGE_CLASS')" 38 | 39 | - block: 40 | - name: Remove the local persistent volume 41 | shell: kubectl delete pv {{ pv.stdout }} 42 | args: 43 | executable: /bin/bash 44 | register: result 45 | failed_when: "'deleted' not in result.stdout" 46 | when: "'local-storage' in lookup('env','PROVIDER_STORAGE_CLASS')" 47 | -------------------------------------------------------------------------------- /k8s/azure/k8s-installer/delete-k8s-cluster.yml: -------------------------------------------------------------------------------- 1 | # Description: Deletes the Resource group as well as the AKS Cluster associated 2 | # Author: Harshvardhan Karn 3 | ############################################################################################### 4 | # Steps: 5 | # 1. Delete the Cluster 6 | # 2. Remove the cluster entry from the log file
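#    (for example, a cluster named 'litmus-aks' maps to resource group 'aks-litmus-aks-rg' — name purely illustrative)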
7 | ############################################################################################### 8 | --- 9 | - hosts: localhost 10 | vars: 11 | cluster_name: 12 | tasks: 13 | - block: 14 | - name: Fetching Cluster Name 15 | shell: cat ~/logs/azure_clusters 16 | register: cluster_name 17 | when: not cluster_name 18 | - set_fact: 19 | cluster_name: "{{ cluster_name.stdout }}" 20 | when: cluster_name.stdout is defined 21 | 22 | - name: Deleting Resource Group & Cluster 23 | shell: az group delete -n aks-{{ cluster_name }}-rg -y 24 | 25 | - name: Removing Cluster Name entry from log 26 | lineinfile: 27 | path: ~/logs/azure_clusters 28 | state: absent 29 | regexp: '{{ cluster_name }}' 30 | 31 | - name: Test Passed 32 | set_fact: 33 | flag: "Test Passed" 34 | rescue: 35 | - name: Test Failed 36 | set_fact: 37 | flag: "Test Failed" -------------------------------------------------------------------------------- /k8s/gcp/k8s-installer/delete-vpc.yml: -------------------------------------------------------------------------------- 1 | # Description: Deletes the Virtual Private Cloud created for the e2e run, runs when all e2e tests 2 | # have finished running 3 | # Author: Harshvardhan Karn 4 | ############################################################################################### 5 | #Steps: 6 | #1. Delete the routes (if any exist) and the VPC in Google Cloud 7 | ############################################################################################### 8 | 9 | --- 10 | - hosts: localhost 11 | tasks: 12 | - block: 13 | - name: Fetch VPC Name 14 | shell: cat ~/logs/vpc 15 | register: vpc 16 | - set_fact: 17 | vpc: "{{ vpc.stdout }}" 18 | - name: Deleting Routes, if exists 19 | shell: gcloud compute routes delete $(gcloud compute routes list --filter="network:{{ vpc }}" --format="get(name)") -q 20 | ignore_errors: yes 21 | - name: Deleting VPC 22 | shell: gcloud compute --project={{ project }} networks delete {{ vpc }} -q 23 | 24 | - name: Removing VPC Name entry from log 25 | lineinfile: 26 | path: ~/logs/vpc 27 | state: absent 28 | regexp: '{{ vpc }}' 29 | - name: Test Passed 30 | set_fact: 31 | flag: "Test Passed" 32 | rescue: 33 | - name: Test Failed 34 | set_fact: 35 | flag: "Test Failed" 36 | -------------------------------------------------------------------------------- /apps/mongodb/liveness/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: liveness-mongo- 6 | 7 | spec: 8 | template: 9 | metadata: 10 | name: liveness 11 | 12 | spec: 13 | restartPolicy: Never 14 | containers: 15 | - name: liveness 16 | image: openebs/mongodb-client 17 | imagePullPolicy: Always 18 | 19 | env: 20 | 21 | # Time period (in sec) b/w retries for DB init check 22 | - name: INIT_WAIT_DELAY 23 | value: "30" 24 | 25 | # No of retries for DB init check 26 | - name: INIT_RETRY_COUNT 27 | value: "10" 28 | 29 | # Time period (in sec) b/w liveness checks 30 | - name: LIVENESS_PERIOD_SECONDS 31 | value: "10" 32 | 33 | # Time period (in sec) b/w retries for db_connect failure 34 | - name: LIVENESS_TIMEOUT_SECONDS 35 | value: "10" 36 | 37 | # No of retries after a db_connect failure before declaring liveness fail 38 | - name: LIVENESS_RETRY_COUNT 39 | value: "6" 40 | 41 | # Namespace in which mongo is running 42 | - name: NAMESPACE 43 | value: litmus 44 | 45 | # Service name of mongodb 46 | - name: SERVICE_NAME 47 | value: mongo 48 |
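# Worked example from the values above (interpretation assumed from the
# comments, not verified against the client image):
# DB init may take up to INIT_RETRY_COUNT x INIT_WAIT_DELAY = 10 x 30s = 300s,
# while a liveness failure is declared after LIVENESS_RETRY_COUNT consecutive
# failed probes, i.e. roughly 6 x 10s = 60s of unreachable DB.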
command: ["/bin/bash"] 50 | args: ["-c", "python ./server.py ; exit 0"] 51 | -------------------------------------------------------------------------------- /apps/redis/deployers/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: litmus-redis- 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | name: litmus 11 | labels: 12 | app: redis-deployment-litmus 13 | 14 | spec: 15 | serviceAccountName: litmus 16 | restartPolicy: Never 17 | containers: 18 | - name: ansibletest 19 | image: openebs/ansible-runner:ci 20 | imagePullPolicy: Always 21 | 22 | env: 23 | - name: ANSIBLE_STDOUT_CALLBACK 24 | #value: log_plays 25 | value: default 26 | 27 | - name: PROVIDER_STORAGE_CLASS 28 | # Supported values: openebs-standard, local-storage,openebs-standalone 29 | value: openebs-standalone 30 | 31 | # Application pvc 32 | - name: APP_PVC 33 | value: openebs-redis 34 | 35 | # Application label 36 | - name: APP_LABEL 37 | value: 'app=redis' 38 | 39 | # Application namespace 40 | - name: APP_NAMESPACE 41 | value: app-redis-ns 42 | 43 | # Supported values: Deployment ,statefulset. 44 | - name: DEPLOY_TYPE 45 | value: statefulset 46 | 47 | - name: APP_REPLICA 48 | value: 'replicas=3' 49 | 50 | command: ["/bin/bash"] 51 | args: ["-c", "ansible-playbook ./redis/deployers/test.yml -i /etc/ansible/hosts -vv; exit 0"] 52 | -------------------------------------------------------------------------------- /funclib/kubectl/scale_replicas.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This utilty task file can scale up the statefulset or deployment replicas in K8s cluster. 3 | # The parameters required are 4 | # - app_ns ( namespace in which applicaion is deployed) 5 | # - app_label( Applications's label in the form key=value) 6 | # - app_replica_count( Required number of application replicas) 7 | # - deploy_type (Either 'deployment' or 'statefulset') 8 | # The above parameters should be obtained as environmental variables from the litmus-book. 9 | 10 | - name: Obtaining the application pod name. 11 | shell: kubectl get {{ deploy_type }} -n {{ app_ns }} --no-headers -l {{ app_label }} -o custom-columns=:metadata.name 12 | args: 13 | executable: /bin/bash 14 | register: result 15 | 16 | - name: Recording the application pod name. 17 | set_fact: 18 | app_name: "{{ result.stdout }}" 19 | 20 | - name: scaling up the replicas. 21 | shell: kubectl scale {{ deploy_type}} {{ app_name }} --replicas={{ app_replica_count }} -n {{ app_ns }} 22 | args: 23 | executable: /bin/bash 24 | register: result 25 | failed_when: "'scaled' not in result.stdout" 26 | 27 | - name: Check if all the application replicas are running. 
28 | shell: kubectl get {{ deploy_type }} -n {{ app_ns }} --no-headers -l {{ app_label }} -o custom-columns=:..readyReplicas 29 | args: 30 | executable: /bin/bash 31 | register: running_replicas 32 | until: "running_replicas.stdout|int == app_replica_count|int" 33 | delay: 60 34 | retries: 15 35 | 36 | -------------------------------------------------------------------------------- /apps/fio/tests/data-integrity/fio-write.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: basic 6 | data: 7 | 8 | basic-rw : |- 9 | 10 | [global] 11 | directory=/datadir 12 | 13 | [basic-fio] 14 | rw=write 15 | bs=4k 16 | time_based=1 17 | runtime=60 18 | verify=crc32c 19 | verify=pattern 20 | verify_pattern=%o 21 | --- 22 | apiVersion: batch/v1 23 | kind: Job 24 | metadata: 25 | name: fio 26 | spec: 27 | template: 28 | metadata: 29 | name: fio 30 | labels: 31 | name: fio-write 32 | spec: 33 | restartPolicy: Never 34 | containers: 35 | - name: perfrunner 36 | image: openebs/tests-fio 37 | command: ["/bin/bash"] 38 | args: ["-c", "./fio_runner.sh --size 256m; exit 0"] 39 | volumeMounts: 40 | - mountPath: /datadir 41 | name: demo-vol1 42 | - mountPath: templates/file/basic-rw 43 | subPath: basic-rw 44 | name: basic-configmap 45 | tty: true 46 | 47 | volumes: 48 | - name: demo-vol1 49 | persistentVolumeClaim: 50 | claimName: demo-vol1-claim 51 | - name: basic-configmap 52 | configMap: 53 | name: basic 54 | --- 55 | kind: PersistentVolumeClaim 56 | apiVersion: v1 57 | metadata: 58 | name: demo-vol1-claim 59 | spec: 60 | storageClassName: testclass 61 | accessModes: 62 | - ReadWriteOnce 63 | resources: 64 | requests: 65 | storage: "5G" 66 | -------------------------------------------------------------------------------- /apps/percona/deployers/percona.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: percona 6 | labels: 7 | name: percona 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | lkey: lvalue 13 | template: 14 | metadata: 15 | labels: 16 | lkey: lvalue 17 | spec: 18 | containers: 19 | - resources: 20 | limits: 21 | cpu: 0.5 22 | name: percona 23 | image: percona 24 | imagePullPolicy: IfNotPresent 25 | args: 26 | - "--ignore-db-dir" 27 | - "lost+found" 28 | env: 29 | - name: MYSQL_ROOT_PASSWORD 30 | value: k8sDem0 31 | ports: 32 | - containerPort: 3306 33 | name: percona 34 | volumeMounts: 35 | - mountPath: /var/lib/mysql 36 | name: data-vol 37 | volumes: 38 | - name: data-vol 39 | persistentVolumeClaim: 40 | claimName: testclaim 41 | --- 42 | kind: PersistentVolumeClaim 43 | apiVersion: v1 44 | metadata: 45 | name: testclaim 46 | spec: 47 | storageClassName: testclass 48 | accessModes: 49 | - ReadWriteOnce 50 | resources: 51 | requests: 52 | storage: 5G 53 | --- 54 | apiVersion: v1 55 | kind: Service 56 | metadata: 57 | name: percona-mysql 58 | labels: 59 | lkey: lvalue 60 | spec: 61 | ports: 62 | - port: 3306 63 | targetPort: 3306 64 | selector: 65 | name: percona 66 | 67 | -------------------------------------------------------------------------------- /k8s/eks/k8s-installer/delete-eks-cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: yes 4 | 5 | vars_files: 6 | - vars.yml 7 | vars: 8 | cluster_name: 9 | test_case_name: "EKS_DELETE_CLUSTER" 10 | 11 | tasks: 12 | - block: 13 | - 
name: Fetch Cluster Name 14 | shell: cat ~/logs/clusters 15 | register: cluster_name 16 | when: not cluster_name 17 | 18 | - set_fact: 19 | cluster_name: "{{ cluster_name.stdout }}" 20 | when: cluster_name.stdout is defined 21 | 22 | - name: Delete EKS cluster 23 | shell: eksctl delete cluster --name={{cluster_name}} --region={{region}} 24 | args: 25 | executable: /bin/bash 26 | no_log: true 27 | 28 | - name: Delete the ec2 key pair 29 | ec2_key: 30 | name: "{{cluster_name}}" 31 | region: "{{region}}" 32 | state: absent 33 | no_log: true 34 | 35 | - name: Delete the generated PEM file 36 | file: 37 | path: "$HOME/{{cluster_name}}.pem" 38 | state: absent 39 | 40 | - name: Remove Cluster Info 41 | lineinfile: 42 | state: absent 43 | path: "~/logs/clusters" 44 | regexp: "{{ cluster_name }}" 45 | 46 | - name: Set Test Status 47 | set_fact: 48 | flag: "Test Passed" 49 | 50 | rescue: 51 | - name: Set Test Status 52 | set_fact: 53 | flag: "Test Failed" 54 | -------------------------------------------------------------------------------- /apps/percona/tests/mysql_data_persistence/README.md: -------------------------------------------------------------------------------- 1 | ## Checking MySQL data persistence upon forced reschedule (eviction) 2 | 3 | ### Objective 4 | 5 | - This test checks MySQL data persistence with a specified storage solution after the application is subjected to 6 | different types of failures, induced via "chaos" operations. Currently, the following chaos types are supported by the test job: 7 | 8 | - APP_POD_KILL/PUMBA : The MySQL pod is terminated abruptly (via SIGKILL), multiple times, over a period of 120s using Pumba 9 | - APP_POD_EVICT/KUBECTL : The MySQL and other pods on the application node are forcefully evicted by Kubernetes via resource taints 10 | - APP_NODE_DRAIN/KUBECTL : The application node is taken down gracefully via cordon & drain process 11 | 12 | ### Considerations 13 | 14 | - This test requires a multi-node Kubernetes cluster 15 | *Note:* The min. count depends on the individual storage solution's HA policies. For example, OpenEBS needs a 3-node cluster 16 | - This test simulates node loss, with the original cluster state being restored at the end of the test 17 | - The application reschedule time is also impacted by the amount of delay between disk attach and mount attempts by Kubernetes 18 | 19 | ### Steps to Run 20 | 21 | [Pre-Requisites](https://github.com/openebs/litmus#running-a-specific-test) 22 | - View the following test info on the litmus node at /mnt/mysql_data_persistence: 23 | 24 | - Pod logs at "Logstash__.tar 25 | - Playbook run logs at "hosts/127.0.0.1" 26 | - Result at "result.json" 27 | 28 | 29 | -------------------------------------------------------------------------------- /vendor/github.com/pmezard/go-difflib/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2013, Patrick Mezard 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are 6 | met: 7 | 8 | Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | Redistributions in binary form must reproduce the above copyright 11 | notice, this list of conditions and the following disclaimer in the 12 | documentation and/or other materials provided with the distribution.
13 | The names of its contributors may not be used to endorse or promote 14 | products derived from this software without specific prior written 15 | permission. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 18 | IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 19 | TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 20 | PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21 | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 23 | TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 24 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 25 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 26 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 27 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /apps/mongodb/deployers/mongo_statefulset.yml: -------------------------------------------------------------------------------- 1 | # Headless service for stable DNS entries of StatefulSet members. 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: mongo 6 | labels: 7 | lkey: lvalue 8 | spec: 9 | ports: 10 | - port: 27017 11 | targetPort: 27017 12 | clusterIP: None 13 | selector: 14 | role: mongo 15 | --- 16 | apiVersion: apps/v1beta1 17 | kind: StatefulSet 18 | metadata: 19 | name: mongo 20 | spec: 21 | serviceName: "mongo" 22 | replicas: 3 23 | template: 24 | metadata: 25 | labels: 26 | lkey: lvalue 27 | role: mongo 28 | environment: test 29 | spec: 30 | terminationGracePeriodSeconds: 10 31 | containers: 32 | - name: mongo 33 | image: mongo 34 | command: 35 | # - mongod 36 | # - "--replSet" 37 | # - rs0 38 | # - "--smallfiles" 39 | # - "--noprealloc" 40 | # - "--bind_ip_all" 41 | ports: 42 | - containerPort: 27017 43 | volumeMounts: 44 | - name: testclaim 45 | mountPath: /data/db 46 | - name: mongo-sidecar 47 | image: cvallance/mongo-k8s-sidecar 48 | env: 49 | - name: MONGO_SIDECAR_POD_LABELS 50 | value: "role=mongo,environment=test" 51 | volumeClaimTemplates: 52 | - metadata: 53 | name: testclaim 54 | spec: 55 | storageClassName: testclass 56 | accessModes: 57 | - ReadWriteOnce 58 | resources: 59 | requests: 60 | storage: 5G 61 | -------------------------------------------------------------------------------- /chaoslib/kubectl/cordon_drain_node.yaml: -------------------------------------------------------------------------------- 1 | - block: 2 | 3 | - name: Identify the application node 4 | shell: > 5 | kubectl get pod {{ app }} -n {{ app_ns }} 6 | --no-headers -o custom-columns=:spec.nodeName 7 | args: 8 | executable: /bin/bash 9 | register: result 10 | 11 | - name: Record the application node name 12 | set_fact: 13 | app_node: "{{ result.stdout }}" 14 | 15 | - name: Cordon the application node 16 | shell: > 17 | kubectl cordon {{ app_node }} 18 | args: 19 | executable: /bin/bash 20 | register: result 21 | until: "'cordoned' in result.stdout" 22 | delay: 20 23 | retries: 12 24 | 25 | - name: Drain the application node 26 | shell: > 27 | kubectl drain {{ app_node }} 28 | --ignore-daemonsets --force 29 | args: 30 | executable: /bin/bash 31 | register: result 32 | until: "'drained' in result.stdout" 33 | delay: 20 34 | retries: 12 35 | 36 | - name: Wait for application pod reschedule (evict) 37 | # Do not untaint until evict occurs 38 
| wait_for: 39 | timeout: 30 40 | 41 | when: action == "drain" 42 | 43 | - block: 44 | 45 | - name: Uncordon the application node 46 | shell: > 47 | kubectl uncordon {{ app_node }} 48 | args: 49 | executable: /bin/bash 50 | register: result 51 | until: "'uncordoned' in result.stdout" 52 | delay: 20 53 | retries: 12 54 | 55 | when: action == "uncordon" 56 | 57 | -------------------------------------------------------------------------------- /docs/litmus_deep_dive.md: -------------------------------------------------------------------------------- 1 | # Litmus Deep Dive 2 | 3 | Litmus tests range from initial setup and configuration validation to deploying and running workloads under various conditions and failures. 4 | 5 | Litmus comprises the following major components: 6 | - **Deployments** that help in setting up different types of Kubernetes Clusters like on-premise, cloud, OpenShift, etc. By default, the deployment scripts provision and configure OpenEBS storage; however, these deployments are easily extended to support other storage. 7 | - **Framework** for test execution that includes: 8 | * Defining and running test suites 9 | * Capturing logs and generating reports about the test runs 10 | * Fault/Error injection tools that help to perform chaos tests 11 | * Examples that demonstrate how to integrate these test pipelines with Slack notifications 12 | - **Test modules** that can be triggered from within a Kubernetes cluster. Think of these as containerized tests. For instance, the **_mysql-client_** can be launched as a pod to validate MySQL resiliency while the underlying nodes and the connected storage are subjected to chaos engineering. 13 | - **Tests** that themselves are written in easy-to-understand formats, either in plain English (thanks Godog!) or in Ansible Playbooks. These tests primarily interact with the Kubernetes cluster via **_kubectl_**, making them highly portable. 14 | 15 | Litmus can be used to test a given workload in a variety of Kubernetes environments, for example, a developer minikube or a GKE cluster with a specific storage solution or as a part of a full-fledged CI setup. 16 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/assert/doc.go: -------------------------------------------------------------------------------- 1 | // Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. 2 | // 3 | // Example Usage 4 | // 5 | // The following is a complete example using assert in a standard test function: 6 | // import ( 7 | // "testing" 8 | // "github.com/stretchr/testify/assert" 9 | // ) 10 | // 11 | // func TestSomething(t *testing.T) { 12 | // 13 | // var a string = "Hello" 14 | // var b string = "Hello" 15 | // 16 | // assert.Equal(t, a, b, "The two words should be the same.") 17 | // 18 | // } 19 | // 20 | // if you assert many times, use the format below: 21 | // 22 | // import ( 23 | // "testing" 24 | // "github.com/stretchr/testify/assert" 25 | // ) 26 | // 27 | // func TestSomething(t *testing.T) { 28 | // assert := assert.New(t) 29 | // 30 | // var a string = "Hello" 31 | // var b string = "Hello" 32 | // 33 | // assert.Equal(a, b, "The two words should be the same.") 34 | // } 35 | // 36 | // Assertions 37 | // 38 | // Assertions allow you to easily write test code, and are global funcs in the `assert` package.
39 | // All assertion functions take, as the first argument, the `*testing.T` object provided by the 40 | // testing framework. This allows the assertion funcs to write the failings and other details to 41 | // the correct place. 42 | // 43 | // Every assertion function also takes an optional string message as the final argument, 44 | // allowing custom error messages to be appended to the message the assertion method outputs. 45 | package assert 46 | -------------------------------------------------------------------------------- /vendor/github.com/DATA-DOG/godog/LICENSE: -------------------------------------------------------------------------------- 1 | The three clause BSD license (http://en.wikipedia.org/wiki/BSD_licenses) 2 | 3 | Copyright (c) 2015-2018, DATA-DOG team 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | * The name DataDog.lt may not be used to endorse or promote products 17 | derived from this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. IN NO EVENT SHALL MICHAEL BOSTOCK BE LIABLE FOR ANY DIRECT, 23 | INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 24 | BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 26 | OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 27 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, 28 | EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | -------------------------------------------------------------------------------- /k8s/gcp/gpd-disks/delete-gpd.yml: -------------------------------------------------------------------------------- 1 | # delete-gpd.yml 2 | # Description: This Ansible playbook will remove the previously attached GPDs to the Cluster 3 | # Author: Harshvardhan Karn 4 | ############################################################################################### 5 | #Test Steps: 6 | # 7 | # 1. Get Cluster name, if not provided 8 | # 2. Get the nodes name from the specified cluster 9 | # 3. 
Use the fetched nodes to get the GPDs, then detach and delete them 10 | # 11 | ############################################################################################### 12 | --- 13 | - hosts: localhost 14 | vars: 15 | files: 16 | email: 17 | project: 18 | cluster_name: 19 | tasks: 20 | - name: Fetching Cluster Name 21 | shell: cat ~/logs/clusters 22 | register: cluster_name 23 | when: not cluster_name 24 | 25 | - set_fact: 26 | cluster_name: "{{ cluster_name.stdout }}" 27 | when: cluster_name.stdout is defined 28 | 29 | - name: Get nodes 30 | command: gcloud compute instances list --filter="metadata.items.key['cluster-name']['value']~'{{cluster_name}}' name~'nodes'" --format json 31 | register: response 32 | 33 | - set_fact: 34 | data: "{{ response.stdout }}" 35 | 36 | - local_action: 37 | module: gce_pd 38 | state: deleted 39 | credentials_file: "{{ files }}" 40 | service_account_email: "{{ email }}" 41 | project_id: "{{ project }}" 42 | instance_name: "{{item.name }}" 43 | zone: us-central1-a 44 | name: "{{ item.name + '-' + 'gpd' }}" 45 | with_items: "{{ data }}" 46 | 47 | -------------------------------------------------------------------------------- /apps/memleak/tests/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: litmus-memleak-test- 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | labels: 11 | name: litmus 12 | app: memleak-test 13 | spec: 14 | serviceAccountName: litmus 15 | restartPolicy: Never 16 | containers: 17 | - name: ansibletest 18 | image: openebs/ansible-runner:ci 19 | imagePullPolicy: Always 20 | env: 21 | - name: ANSIBLE_STDOUT_CALLBACK 22 | value: default 23 | 24 | - name: PROVIDER_STORAGE_CLASS 25 | value: openebs-standard 26 | 27 | - name: APP_NAMESPACE 28 | value: memleak 29 | 30 | command: ["/bin/bash"] 31 | args: ["-c", "ansible-playbook ./memleak/tests/test.yml -i /etc/ansible/hosts -v; exit 0"] 32 | volumeMounts: 33 | - name: logs 34 | mountPath: /var/log/ansible 35 | tty: true 36 | - name: logger 37 | image: openebs/logger 38 | command: ["/bin/bash"] 39 | args: ["-c", "./logger.sh -d 10 -r maya,openebs,pvc,memleak; exit 0"] 40 | volumeMounts: 41 | - name: kubeconfig 42 | mountPath: /root/admin.conf 43 | subPath: admin.conf 44 | - name: logs 45 | mountPath: /mnt 46 | tty: true 47 | volumes: 48 | - name: kubeconfig 49 | configMap: 50 | name: kubeconfig 51 | - name: logs 52 | hostPath: 53 | path: /mnt/memleak 54 | type: "" 55 | -------------------------------------------------------------------------------- /executor/ansible/roles/k8s-local-pv/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Get kubernetes master name 3 | shell: hostname 4 | args: 5 | executable: /bin/bash 6 | register: result 7 | 8 | - name: Get kubernetes master status 9 | shell: source ~/.profile; kubectl get nodes | grep {{ result.stdout.lower()}} | awk '{print $2}' 10 | args: 11 | executable: /bin/bash 12 | register: result 13 | until: result.stdout == 'Ready' 14 | delay: 60 15 | retries: 10 16 | ignore_errors: true 17 | 18 | - name: Report that the K8S master is not ready 19 | debug: 20 | msg: "Ending play, K8S Master NOT READY" 21 | when: result.stdout != "Ready" 22 | 23 | - name: Ending Playbook Run - K8S master is NOT READY 24 | meta: end_play 25 | when: result.stdout != "Ready" 26 | 27 | - name: Copy local-storage artifacts to kube-master 28 | copy: 29 | src: "{{ item }}" 30 | dest: "{{ ansible_env.HOME }}" 31 | with_items: 32 | - "{{ template_path }}/{{ local_storage_class }}" 33 | - "{{ template_path }}/{{ local_pv }}" 34 |
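# template_path, local_storage_class and local_pv are expected from the role
# defaults/inventory; an illustrative set (placeholder values, not verified
# defaults):
#
#   template_path: ../provider/local-pv/templates
#   local_storage_class: storage_class.yaml
#   local_pv: pv.yaml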
35 | - name: Deploy the local-pv storageclass 36 | shell: > 37 | source ~/.profile; 38 | kubectl apply -f {{ ansible_env.HOME }}/{{ local_storage_class }} 39 | args: 40 | executable: /bin/bash 41 | register: result 42 | failed_when: "'created' not in result.stdout and 'configured' not in result.stdout" 43 | 44 | - name: Create the local persistent volume 45 | shell: > 46 | source ~/.profile; 47 | kubectl apply -f {{ ansible_env.HOME }}/{{ local_pv }} 48 | args: 49 | executable: /bin/bash 50 | register: result 51 | failed_when: "'created' not in result.stdout and 'configured' not in result.stdout" 52 | -------------------------------------------------------------------------------- /apps/crunchy-postgres/workload/crunchy_postgres_loadgen.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: crunchy-loadgen- 6 | spec: 7 | template: 8 | metadata: 9 | name: crunchy-loadgen 10 | labels: 11 | loadgen_lkey: loadgen_lvalue 12 | 13 | spec: 14 | restartPolicy: Never 15 | containers: 16 | - name: postgres-loadgen 17 | image: openebs/tests-postgresql-client 18 | imagePullPolicy: Always 19 | 20 | env: 21 | # Namespace in which the postgres application is deployed 22 | - name: NAMESPACE 23 | value: name_space 24 | 25 | # Service name of postgres application 26 | - name: SERVICE_NAME 27 | value: servicename 28 | 29 | # Database Name 30 | - name: DATABASE_NAME 31 | value: dbname 32 | 33 | # Password to access Database 34 | - name: PASSWORD 35 | value: database_password 36 | 37 | # Database user 38 | - name: DATABASE_USER 39 | value: dbuser 40 | 41 | #Port on which the crunchy database is listening 42 | - name: PORT 43 | value: "port" 44 | 45 | #Number of parallel transactions to perform 46 | - name: PARALLEL_TRANSACTION 47 | value: "paralleltransaction" 48 | 49 | #Number of transactions to perform 50 | - name: TRANSACTIONS 51 | value: "database_transaction" 52 | 53 | command: ["/bin/bash"] 54 | args: ["-c", "./test.sh ; exit 0"] 55 | -------------------------------------------------------------------------------- /apps/percona/tests/mysql_storage_benchmark/test_cleanup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Get pvc name to verify successful pvc deletion 3 | shell: > 4 | kubectl get pvc {{ test_name }} 5 | -o custom-columns=:spec.volumeName -n litmus 6 | --no-headers 7 | args: 8 | executable: /bin/bash 9 | register: pv 10 | 11 | - name: Delete percona mysql pod 12 | shell: > 13 | source ~/.profile; kubectl delete -f {{ pod_yaml_alias }} 14 | -n litmus 15 | args: 16 | executable: /bin/bash 17 | 18 | - name: Delete the TPCC config map 19 | shell: kubectl delete cm tpcc-config -n litmus 20 | args: 21 | executable: /bin/bash 22 | 23 | - name: Confirm percona pod has been deleted 24 | shell: source ~/.profile; kubectl get pods -n litmus 25 | args: 26 | executable: /bin/bash 27 | register: result 28 | until: "'percona' not in result.stdout" 29 | delay: 30 30 | retries: 12 31 | 32 | - block: 33 | - name: Confirm pvc pod has been deleted 34 | shell: > 35 | kubectl get pods -n litmus | grep {{ pv.stdout }} 36 | args: 37 | executable: /bin/bash 38 | register: result 39 | failed_when: "'Running' in result.stdout" 40 | delay: 30 41 | retries: 12 42 | when: "'openebs-standard' in lookup('env','PROVIDER_STORAGE_CLASS')" 43 |
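# Note: this block and the one above are alternatives selected via the
# PROVIDER_STORAGE_CLASS environment variable (openebs-standard vs
# local-storage); with any other value, both are skipped and the cleanup
# ends after the pod checks.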
44 | - block: 45 | - name: Remove the local persistent volume 46 | shell: kubectl delete pv {{ pv.stdout }} 47 | args: 48 | executable: /bin/bash 49 | register: result 50 | failed_when: "'deleted' not in result.stdout" 51 | when: "'local-storage' in lookup('env','PROVIDER_STORAGE_CLASS')" 52 | -------------------------------------------------------------------------------- /k8s/gcp/k8s-installer/README.md: -------------------------------------------------------------------------------- 1 | # GCP platform specific code and scripts 2 | ## Google Cloud provisioning and setting up Kubernetes cluster using KOPS 3 | These playbooks act as wrappers around the `kops`, `gsutil` & `gcloud` commands. 4 | 5 | ### Prerequisites 6 | - kubectl 7 | - gcloud 8 | - kops 9 | 10 | ### Setting up 11 | 12 | - Run `gcloud init`, and authenticate into the Google account linked with your Google Cloud project 13 | 14 | --- 15 | 16 | ### Running 17 | 18 | - Run `create-vpc.yml` using ansible-playbook; it will create a Virtual Private Cloud 19 | ```bash 20 | ansible-playbook create-vpc.yml --extra-vars "project= vpc_name=" 21 | ``` 22 | 23 | - Run `create-k8s-cluster`; this will create a bucket and the cluster 24 | 25 | ```bash 26 | ansible-playbook create-k8s-cluster.yml -vv --extra-vars "project= nodes=1 vpc_name=" 27 | ``` 28 | **Optional --extra-vars** 29 | 30 | > k8s_version=1.11.1 31 | > cluster_name=my-Cluster 32 | 33 | --- 34 | 35 | ### Deleting the cluster 36 | 37 | - Run `delete-k8s-cluster`; this will delete the cluster as well as the associated bucket 38 | 39 | ```bash 40 | ansible-playbook delete-k8s-cluster.yml 41 | ``` 42 | **Optional --extra-vars** 43 | 44 | > cluster_name=my-Cluster 45 | 46 | It will delete the specified cluster; otherwise it deletes the last created cluster. 47 | If you have created **multiple** clusters, you have to specify the name of the existing cluster to be deleted in extra-vars.
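For example, deleting a specific cluster by name (the cluster name below is illustrative):

```bash
ansible-playbook delete-k8s-cluster.yml -vv --extra-vars "cluster_name=my-Cluster"
```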
48 | 49 | - Run `delete-vpc` to delete the existing VPC (if required) 50 | ```bash 51 | ansible-playbook delete-vpc.yml --extra-vars "project= vpc_name=" 52 | ``` 53 | 54 | -------------------------------------------------------------------------------- /apps/percona/liveness/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | generateName: mysql-liveness-check- 6 | labels: 7 | name: mysql-liveness-check 8 | spec: 9 | 10 | restartPolicy: Never 11 | 12 | #affinity: 13 | # podAffinity: # (/podAntiAffinity) 14 | # requiredDuringSchedulingIgnoredDuringExecution: 15 | # - labelSelector: 16 | # matchExpressions: 17 | # - key: name 18 | # operator: In 19 | # values: 20 | # - litmus # (/percona) 21 | # topologyKey: kubernetes.io/hostname 22 | 23 | containers: 24 | - name: mysql-liveness-check 25 | image: openebs/tests-mysql-client 26 | env: 27 | # Time period (in sec) b/w retries for DB init check 28 | - name: INIT_WAIT_DELAY 29 | value: "30" 30 | 31 | # No of retries for DB init check 32 | - name: INIT_RETRY_COUNT 33 | value: "10" 34 | 35 | # Time period (in sec) b/w liveness checks 36 | - name: LIVENESS_PERIOD_SECONDS 37 | value: "10" 38 | 39 | # Time period (in sec) b/w retries for db_connect failure 40 | - name: LIVENESS_TIMEOUT_SECONDS 41 | value: "10" 42 | 43 | # No of retries after a db_connect failure before declaring liveness fail 44 | - name: LIVENESS_RETRY_COUNT 45 | value: "6" 46 | 47 | command: ["/bin/bash"] 48 | args: ["-c", "bash mysql-liveness-check.sh db-cred.cnf ; exit 0"] 49 | tty: true 50 | volumeMounts: 51 | - mountPath: /db-cred.cnf 52 | subPath: db-cred.cnf 53 | name: db-cred 54 | volumes: 55 | - name: db-cred 56 | configMap: 57 | name: db-cred 58 | -------------------------------------------------------------------------------- /k8s/aws/k8s-installer/delete-aws-cluster.yml: -------------------------------------------------------------------------------- 1 | #Delete-aws-cluster.yml 2 | #Description: This will delete aws cluster, bucket, tags from subnets and cluster name file 3 | 4 | ######################################################################################################################################################################## 5 | 6 | #Steps: 7 | #1. Deleting AWS cluster 8 | #2. Deleting AWS bucket 9 | #3. 
Deleting cluster name from file 10 | ######################################################################################################################################################################## 11 | 12 | --- 13 | 14 | - hosts: localhost 15 | 16 | vars_files: 17 | - vars.yml 18 | 19 | vars: 20 | cluster_name: "{{ lookup('lines', 'grep cluster_name /tmp/aws/cluster_name.csv | cut -d: -f2') }}" 21 | 22 | tasks: 23 | - block: 24 | - name: Deleting aws cluster 25 | command: kops delete cluster --name k8s-{{ cluster_name }}.k8s.local --state=s3://k8s-bucket-{{ cluster_name }} --yes 26 | ignore_errors: True 27 | 28 | - s3_bucket: 29 | name: "k8s-bucket-{{ cluster_name }}" 30 | region: "{{ region }}" 31 | state: absent 32 | force: yes 33 | 34 | - name: Deleting cluster name from csv file 35 | lineinfile: 36 | state: absent 37 | path: "/tmp/aws/cluster_name.csv" 38 | regexp: "" 39 | mode: 0755 40 | 41 | - name: Set Test Status 42 | set_fact: 43 | flag: "Test Passed" 44 | 45 | rescue: 46 | - name: Set Test Status 47 | set_fact: 48 | flag: "Test Failed" -------------------------------------------------------------------------------- /k8s/gke/k8s-installer/delete-gke-cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: yes 4 | 5 | vars_files: 6 | - vars.yml 7 | vars: 8 | cluster_name: 9 | test_case_name: "GKE_DELETE_CLUSTER" 10 | 11 | tasks: 12 | - block: 13 | - name: Fetch Cluster Name 14 | shell: cat ~/logs/clusters 15 | register: cluster_name 16 | when: not cluster_name 17 | 18 | - set_fact: 19 | cluster_name: "{{ cluster_name.stdout }}" 20 | when: cluster_name.stdout is defined 21 | 22 | - name: Delete GKE cluster 23 | gcp_container_cluster: 24 | name: "{{ cluster_name }}" 25 | initial_node_count: "{{ initial_node_count }}" 26 | monitoring_service: none 27 | node_config: 28 | machine_type: "{{ machine_type }}" 29 | disk_size_gb: "{{ disk_size_gb }}" 30 | image_type: "{{ image_type }}" 31 | oauth_scopes: 32 | - https://www.googleapis.com/auth/cloud-platform 33 | zone: "{{ zone }}" 34 | project: "{{ project }}" 35 | auth_kind: "{{ auth_kind }}" 36 | scopes: 37 | - https://www.googleapis.com/auth/cloud-platform 38 | state: absent 39 | 40 | - name: Remove Cluster Info 41 | lineinfile: 42 | state: absent 43 | path: "~/logs/clusters" 44 | regexp: "{{ cluster_name }}" 45 | 46 | - name: Set Test Status 47 | set_fact: 48 | flag: "Test Passed" 49 | 50 | rescue: 51 | - name: Set Test Status 52 | set_fact: 53 | flag: "Test Failed" 54 | -------------------------------------------------------------------------------- /vendor/github.com/DATA-DOG/godog/godog.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package godog is the official Cucumber BDD framework for Golang, it merges specification 3 | and test documentation into one cohesive whole. 4 | 5 | Godog does not intervene with the standard "go test" command and it's behavior. 6 | You can leverage both frameworks to functionally test your application while 7 | maintaining all test related source code in *_test.go files. 8 | 9 | Godog acts similar compared to go test command. It uses go 10 | compiler and linker tool in order to produce test executable. Godog 11 | contexts needs to be exported same as Test functions for go test. 12 | 13 | For example, imagine you’re about to create the famous UNIX ls command. 14 | Before you begin, you describe how the feature should work, see the example below.. 
15 | 16 | Example: 17 | Feature: ls 18 | In order to see the directory structure 19 | As a UNIX user 20 | I need to be able to list the current directory's contents 21 | 22 | Scenario: 23 | Given I am in a directory "test" 24 | And I have a file named "foo" 25 | And I have a file named "bar" 26 | When I run ls 27 | Then I should get output: 28 | """ 29 | bar 30 | foo 31 | """ 32 | 33 | Now, wouldn’t it be cool if something could read this sentence and use it to actually 34 | run a test against the ls command? Hey, that’s exactly what this package does! 35 | As you’ll see, Godog is easy to learn, quick to use, and will put the fun back into tests. 36 | 37 | Godog was inspired by Behat and Cucumber the above description is taken from it's documentation. 38 | */ 39 | package godog 40 | 41 | // Version of package - based on Semantic Versioning 2.0.0 http://semver.org/ 42 | const Version = "v0.7.6" 43 | -------------------------------------------------------------------------------- /apps/percona/chaos/openebs_target_failure/test_prerequisites.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Identify the storage class used by the PVC 3 | shell: > 4 | kubectl get pvc {{ pvc }} -n {{ namespace }} 5 | --no-headers -o custom-columns=:spec.storageClassName 6 | args: 7 | executable: /bin/bash 8 | register: storage_class 9 | 10 | - name: Identify the storage provisioner used by the SC 11 | shell: > 12 | kubectl get sc {{ storage_class.stdout }} 13 | --no-headers -o custom-columns=:provisioner 14 | args: 15 | executable: /bin/bash 16 | register: provisioner 17 | 18 | - name: Record the storage class name 19 | set_fact: 20 | sc: "{{ storage_class.stdout }}" 21 | 22 | - name: Record the storage provisioner name 23 | set_fact: 24 | stg_prov: "{{ provisioner.stdout }}" 25 | 26 | - block: 27 | - name: Derive PV name from PVC to query storage engine type (openebs) 28 | shell: > 29 | kubectl get pvc {{ pvc }} -n {{ namespace }} 30 | --no-headers -o custom-columns=:spec.volumeName 31 | args: 32 | executable: /bin/bash 33 | register: pv 34 | 35 | - name: Check for presence & value of cas type annotation 36 | shell: > 37 | kubectl get pv {{ pv.stdout }} --no-headers 38 | -o jsonpath="{.metadata.annotations.openebs\\.io/cas-type}" 39 | args: 40 | executable: /bin/bash 41 | register: openebs_stg_engine 42 | 43 | - name: Record the storage engine name 44 | set_fact: 45 | stg_engine: "{{ openebs_stg_engine.stdout }}" 46 | when: stg_prov == "openebs.io/provisioner-iscsi" 47 | 48 | - name: Identify the chaos util to be invoked 49 | template: 50 | src: chaosutil.j2 51 | dest: chaosutil.yml 52 | 53 | -------------------------------------------------------------------------------- /apps/percona/tests/mysql_storage_benchmark/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: litmus-mysql-storage-benchmark- 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | labels: 11 | name: litmus 12 | spec: 13 | serviceAccountName: litmus 14 | restartPolicy: Never 15 | containers: 16 | - name: ansibletest 17 | image: openebs/ansible-runner:ci 18 | env: 19 | - name: ANSIBLE_STDOUT_CALLBACK 20 | #value: log_plays 21 | value: default 22 | 23 | - name: PROVIDER_STORAGE_CLASS 24 | value: openebs-standard 25 | #value: local-storage 26 | 27 | - name: APP_NODE_SELECTOR 28 | value: kubeminion-01 29 | 30 | command: ["/bin/bash"] 31 | args: ["-c", 
"ansible-playbook ./percona/tests/mysql_storage_benchmark/test.yml -i /etc/ansible/hosts -v; exit 0"] 32 | volumeMounts: 33 | - name: logs 34 | mountPath: /var/log/ansible 35 | tty: true 36 | - name: logger 37 | image: openebs/logger 38 | command: ["/bin/bash"] 39 | args: ["-c", "./logger.sh -d 10 -r maya,openebs,pvc,percona; exit 0"] 40 | volumeMounts: 41 | - name: kubeconfig 42 | mountPath: /root/admin.conf 43 | subPath: admin.conf 44 | - name: logs 45 | mountPath: /mnt 46 | tty: true 47 | volumes: 48 | - name: kubeconfig 49 | configMap: 50 | name: kubeconfig 51 | - name: logs 52 | hostPath: 53 | path: /mnt/mysql_storage_benchmark 54 | type: "" 55 | 56 | -------------------------------------------------------------------------------- /apps/percona/chaos/openebs_replica_network_delay/test_prerequisites.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Identify the storage class used by the PVC 3 | shell: > 4 | kubectl get pvc {{ pvc }} -n {{ namespace }} 5 | --no-headers -o custom-columns=:spec.storageClassName 6 | args: 7 | executable: /bin/bash 8 | register: storage_class 9 | 10 | - name: Identify the storage provisioner used by the SC 11 | shell: > 12 | kubectl get sc {{ storage_class.stdout }} 13 | --no-headers -o custom-columns=:provisioner 14 | args: 15 | executable: /bin/bash 16 | register: provisioner 17 | 18 | - name: Record the storage class name 19 | set_fact: 20 | sc: "{{ storage_class.stdout }}" 21 | 22 | - name: Record the storage provisioner name 23 | set_fact: 24 | stg_prov: "{{ provisioner.stdout }}" 25 | 26 | - block: 27 | - name: Derive PV name from PVC to query storage engine type (openebs) 28 | shell: > 29 | kubectl get pvc {{ pvc }} -n {{ namespace }} 30 | --no-headers -o custom-columns=:spec.volumeName 31 | args: 32 | executable: /bin/bash 33 | register: pv 34 | 35 | - name: Check for presence & value of cas type annotation 36 | shell: > 37 | kubectl get pv {{ pv.stdout }} --no-headers 38 | -o jsonpath="{.metadata.annotations.openebs\\.io/cas-type}" 39 | args: 40 | executable: /bin/bash 41 | register: openebs_stg_engine 42 | 43 | - name: Record the storage engine name 44 | set_fact: 45 | stg_engine: "{{ openebs_stg_engine.stdout }}" 46 | when: stg_prov == "openebs.io/provisioner-iscsi" 47 | 48 | - name: Identify the chaos util to be invoked 49 | template: 50 | src: chaosutil.j2 51 | dest: chaosutil.yml 52 | 53 | -------------------------------------------------------------------------------- /apps/percona/chaos/openebs_target_network_delay/test_prerequisites.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Identify the storage class used by the PVC 3 | shell: > 4 | kubectl get pvc {{ pvc }} -n {{ namespace }} 5 | --no-headers -o custom-columns=:spec.storageClassName 6 | args: 7 | executable: /bin/bash 8 | register: storage_class 9 | 10 | - name: Identify the storage provisioner used by the SC 11 | shell: > 12 | kubectl get sc {{ storage_class.stdout }} 13 | --no-headers -o custom-columns=:provisioner 14 | args: 15 | executable: /bin/bash 16 | register: provisioner 17 | 18 | - name: Record the storage class name 19 | set_fact: 20 | sc: "{{ storage_class.stdout }}" 21 | 22 | - name: Record the storage provisioner name 23 | set_fact: 24 | stg_prov: "{{ provisioner.stdout }}" 25 | 26 | - block: 27 | - name: Derive PV name from PVC to query storage engine type (openebs) 28 | shell: > 29 | kubectl get pvc {{ pvc }} -n {{ namespace }} 30 | --no-headers -o 
custom-columns=:spec.volumeName 31 | args: 32 | executable: /bin/bash 33 | register: pv 34 | 35 | - name: Check for presence & value of cas type annotation 36 | shell: > 37 | kubectl get pv {{ pv.stdout }} --no-headers 38 | -o jsonpath="{.metadata.annotations.openebs\\.io/cas-type}" 39 | args: 40 | executable: /bin/bash 41 | register: openebs_stg_engine 42 | 43 | - name: Record the storage engine name 44 | set_fact: 45 | stg_engine: "{{ openebs_stg_engine.stdout }}" 46 | when: stg_prov == "openebs.io/provisioner-iscsi" 47 | 48 | - name: Identify the chaos util to be invoked 49 | template: 50 | src: chaosutil.j2 51 | dest: chaosutil.yml 52 | 53 | -------------------------------------------------------------------------------- /apps/percona/chaos/openebs_volume_replica_failure/test_prerequisites.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Identify the storage class used by the PVC 3 | shell: > 4 | kubectl get pvc {{ pvc }} -n {{ namespace }} 5 | --no-headers -o custom-columns=:spec.storageClassName 6 | args: 7 | executable: /bin/bash 8 | register: storage_class 9 | 10 | - name: Identify the storage provisioner used by the SC 11 | shell: > 12 | kubectl get sc {{ storage_class.stdout }} 13 | --no-headers -o custom-columns=:provisioner 14 | args: 15 | executable: /bin/bash 16 | register: provisioner 17 | 18 | - name: Record the storage class name 19 | set_fact: 20 | sc: "{{ storage_class.stdout }}" 21 | 22 | - name: Record the storage provisioner name 23 | set_fact: 24 | stg_prov: "{{ provisioner.stdout }}" 25 | 26 | - block: 27 | - name: Derive PV name from PVC to query storage engine type (openebs) 28 | shell: > 29 | kubectl get pvc {{ pvc }} -n {{ namespace }} 30 | --no-headers -o custom-columns=:spec.volumeName 31 | args: 32 | executable: /bin/bash 33 | register: pv 34 | 35 | - name: Check for presence & value of cas type annotation 36 | shell: > 37 | kubectl get pv {{ pv.stdout }} --no-headers 38 | -o jsonpath="{.metadata.annotations.openebs\\.io/cas-type}" 39 | args: 40 | executable: /bin/bash 41 | register: openebs_stg_engine 42 | 43 | - name: Record the storage engine name 44 | set_fact: 45 | stg_engine: "{{ openebs_stg_engine.stdout }}" 46 | when: stg_prov == "openebs.io/provisioner-iscsi" 47 | 48 | - name: Identify the chaos util to be invoked 49 | template: 50 | src: chaosutil.j2 51 | dest: chaosutil.yml 52 | 53 | -------------------------------------------------------------------------------- /apps/fio/tests/data-integrity/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: litmus-di-fio- 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | labels: 11 | name: litmus 12 | app: fio-di-litmus 13 | spec: 14 | serviceAccountName: litmus 15 | restartPolicy: Never 16 | containers: 17 | - name: ansibletest 18 | image: openebs/ansible-runner:ci 19 | env: 20 | - name: ANSIBLE_STDOUT_CALLBACK 21 | value: default 22 | 23 | - name: PROVIDER_STORAGE_CLASS 24 | value: openebs-standard 25 | 26 | - name: FIO_NAMESPACE 27 | value: fio 28 | 29 | - name: FIO_SAMPLE_SIZE 30 | value: "128m" 31 | 32 | - name: FIO_TESTRUN_PERIOD 33 | value: "60" 34 | 35 | command: ["/bin/bash"] 36 | args: ["-c", "ansible-playbook ./fio/tests/data-integrity/test.yml -i /etc/ansible/hosts -v; exit 0"] 37 | volumeMounts: 38 | - name: logs 39 | mountPath: /var/log/ansible 40 | tty: true 41 | - name: logger 42 | image: 
openebs/logger 43 | command: ["/bin/bash"] 44 | args: ["-c", "./logger.sh -d 10 -r maya,openebs,pvc,fio; exit 0"] 45 | volumeMounts: 46 | - name: kubeconfig 47 | mountPath: /root/admin.conf 48 | subPath: admin.conf 49 | - name: logs 50 | mountPath: /mnt 51 | tty: true 52 | volumes: 53 | - name: kubeconfig 54 | configMap: 55 | name: kubeconfig 56 | - name: logs 57 | hostPath: 58 | path: /mnt/fio 59 | type: "" 60 | -------------------------------------------------------------------------------- /chaoslib/pumba/pumba_kube.yaml: -------------------------------------------------------------------------------- 1 | # If you are running Kubernetes >= 1.1.0, you can take advantage of DaemonSets to automatically deploy Pumba on all your nodes. 2 | # On 1.1.x you'll need to explicitly enable the DaemonSets extension, see http://kubernetes.io/v1.1/docs/admin/daemons.html#caveats. 3 | 4 | # You'll then be able to deploy the DaemonSet with the command 5 | # `kubectl create -f pumba_kube.yaml` 6 | 7 | # If you are not running Kubernetes >= 1.1.0 or do not want to use DaemonSets, you can also run Pumba as a regular docker container on each node where you want to create chaos. 8 | # `docker run -d -v /var/run/docker.sock:/var/run/docker.sock gaiaadm/pumba pumba --random --interval 3m kill --signal SIGKILL` 9 | 10 | apiVersion: extensions/v1beta1 11 | kind: DaemonSet 12 | metadata: 13 | name: pumba 14 | spec: 15 | template: 16 | metadata: 17 | labels: 18 | app: pumba 19 | com.gaiaadm.pumba: "true" # prevent pumba from killing itself 20 | name: pumba 21 | spec: 22 | containers: 23 | - image: gaiaadm/pumba:0.4.8 24 | imagePullPolicy: Always 25 | name: pumba 26 | # Pumba command: modify it to suit your needs 27 | # Dry run: Randomly try to kill some container every 3 minutes 28 | command: ["pumba", "--dry", "--random", "--interval", "3m", "kill", "--signal", "SIGTERM"] 29 | resources: 30 | requests: 31 | cpu: 10m 32 | memory: 5M 33 | limits: 34 | cpu: 100m 35 | memory: 20M 36 | volumeMounts: 37 | - name: dockersocket 38 | mountPath: /var/run/docker.sock 39 | volumes: 40 | - hostPath: 41 | path: /var/run/docker.sock 42 | name: dockersocket 43 | -------------------------------------------------------------------------------- /hack/rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: litmus 5 | --- 6 | apiVersion: v1 7 | kind: ServiceAccount 8 | metadata: 9 | name: litmus 10 | namespace: litmus 11 | labels: 12 | name: litmus 13 | --- 14 | # Source: openebs/templates/clusterrole.yaml 15 | apiVersion: rbac.authorization.k8s.io/v1beta1 16 | kind: ClusterRole 17 | metadata: 18 | name: litmus 19 | labels: 20 | name: litmus 21 | rules: 22 | - apiGroups: ["*"] 23 | resources: ["*"] 24 | verbs: ["*"] 25 | --- 26 | apiVersion: rbac.authorization.k8s.io/v1beta1 27 | kind: ClusterRoleBinding 28 | metadata: 29 | name: litmus 30 | labels: 31 | name: litmus 32 | roleRef: 33 | apiGroup: rbac.authorization.k8s.io 34 | kind: ClusterRole 35 | name: litmus 36 | subjects: 37 | - kind: ServiceAccount 38 | name: litmus 39 | namespace: litmus 40 | --- 41 | apiVersion: apiextensions.k8s.io/v1beta1 42 | kind: CustomResourceDefinition 43 | metadata: 44 | # name must match the spec fields below, and be in the form: <plural>.<group> 45 | name: litmusresults.litmus.io 46 | spec: 47 | # group name to use for REST API: /apis/<group>/<version> 48 | group: litmus.io 49 | # version name to use for REST API: /apis/<group>/<version> 50 | version: v1alpha1 51 | # either Namespaced or Cluster 52 | scope: Cluster 53 | names: 54 | # plural name to be used in the URL: /apis/<group>/<version>/<plural> 55 | plural: litmusresults 56 | # singular name to be used as an alias on the CLI and for display 57 | singular: litmusresult 58 | # kind is normally the CamelCased singular type. Your resource manifests use this. 59 | kind: LitmusResult 60 | # shortNames allow shorter string to match your resource on the CLI 61 | shortNames: 62 | - lr 63 | -------------------------------------------------------------------------------- /k8s/gke/k8s-installer/README.md: -------------------------------------------------------------------------------- 1 | # GKE platform specific code and scripts 2 | ## Google Cloud provisioning and setting up Kubernetes cluster using Ansible 3 | The playbook uses Google's Ansible module `gcp_container_cluster` to bring up a cluster on GKE. 4 | 5 | ### Prerequisites 6 | - kubectl 7 | - gcloud 8 | - Python Library: requests >= 2.18.4 9 | - Python Library: google-auth >= 1.3.0 10 | 11 | ### Setting up 12 | 13 | - Run `gcloud init` and authenticate with your Google account linked to Google Cloud 14 | - Set up the following environment variables. 15 | 16 | ``` 17 | export GCP_SERVICE_ACCOUNT_FILE= 18 | export GKEUSER= 19 | ``` 20 | --- 21 | 22 | ### Running 23 | 24 | - Run `create-gke-cluster.yml` to bring up a 3-node Kubernetes cluster. 25 | 26 | ```bash 27 | ansible-playbook create-gke-cluster.yml 28 | ``` 29 | **Optional --extra-vars** 30 | 31 | > initial_node_count=3 32 | > cluster_name=my-Cluster 33 | 34 | ```bash 35 | ansible-playbook create-gke-cluster.yml --extra-vars "cluster_name=my-Cluster initial_node_count=3" 36 | ``` 37 | --- 38 | 39 | ### Deleting the cluster 40 | 41 | - Run `delete-gke-cluster.yml` to delete the cluster. 42 | 43 | ```bash 44 | ansible-playbook delete-gke-cluster.yml 45 | ``` 46 | This will delete the cluster that was last created using `create-gke-cluster.yml`. 47 | 48 | **Optional --extra-vars** 49 | 50 | > cluster_name=my-Cluster 51 | 52 | ```bash 53 | ansible-playbook delete-gke-cluster.yml --extra-vars "cluster_name=my-Cluster" 54 | ``` 55 | --- 56 | 57 | It will delete the cluster whose name was provided in extra-vars. 58 | If you have created **multiple** clusters, you will have to explicitly specify the name of the existing cluster to be deleted in extra-vars. -------------------------------------------------------------------------------- /k8s/eks/k8s-installer/README.md: -------------------------------------------------------------------------------- 1 | # EKS platform specific code and scripts 2 | ## Setting up Amazon Elastic Container Service for Kubernetes using Ansible 3 | The playbook uses Weaveworks' `eksctl` to bring up a cluster on EKS. 4 | 5 | ### Prerequisites 6 | - kubectl 7 | - eksctl >= 0.1.5 8 | - Python Library: boto 9 | - Python Library: boto3 10 | 11 | ### Setting up 12 | 13 | - Create `.aws` directory in user home and drop the `credentials` file in the directory. 14 | - Run `aws configure` and authenticate your credentials with AWS. 15 | - Set the region to us-west-2. 16 | - Set up the following environment variables.
17 | 18 | ``` 19 | export AWS_ACCESS_KEY_ID= 20 | export AWS_SECRET_ACCESS_KEY= 21 | ``` 22 | --- 23 | 24 | ### Running 25 | 26 | - Run `create-eks-cluster.yml` to bring up a 3-node Kubernetes cluster. 27 | 28 | ```bash 29 | ansible-playbook create-eks-cluster.yml 30 | ``` 31 | **Optional --extra-vars** 32 | 33 | > initial_node_count=3 34 | > cluster_name=my-Cluster 35 | 36 | ```bash 37 | ansible-playbook create-eks-cluster.yml --extra-vars "cluster_name=my-Cluster initial_node_count=3" 38 | ``` 39 | --- 40 | 41 | ### Deleting the cluster 42 | 43 | - Run `delete-eks-cluster.yml` to delete the cluster. 44 | 45 | ```bash 46 | ansible-playbook delete-eks-cluster.yml 47 | ``` 48 | This will delete the cluster that was last created using `create-eks-cluster.yml`. 49 | 50 | **Optional --extra-vars** 51 | 52 | > cluster_name=my-Cluster 53 | 54 | ```bash 55 | ansible-playbook delete-eks-cluster.yml --extra-vars "cluster_name=my-Cluster" 56 | ``` 57 | --- 58 | 59 | It will delete the cluster whose name was provided in extra-vars. 60 | If you have created **multiple** clusters, you will have to explicitly specify the name of the existing cluster to be deleted in extra-vars. -------------------------------------------------------------------------------- /apps/percona/tests/mysql_data_persistence/mysql.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: percona 6 | labels: 7 | name: percona 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | name: percona 13 | template: 14 | metadata: 15 | labels: 16 | name: percona 17 | spec: 18 | affinity: 19 | podAntiAffinity: 20 | requiredDuringSchedulingIgnoredDuringExecution: 21 | - labelSelector: 22 | matchLabels: 23 | name: litmus 24 | topologyKey: kubernetes.io/hostname 25 | containers: 26 | - resources: 27 | limits: 28 | cpu: 0.5 29 | name: percona 30 | image: percona 31 | imagePullPolicy: IfNotPresent 32 | args: 33 | - "--ignore-db-dir" 34 | - "lost+found" 35 | env: 36 | - name: MYSQL_ROOT_PASSWORD 37 | value: k8sDem0 38 | ports: 39 | - containerPort: 3306 40 | name: percona 41 | volumeMounts: 42 | - mountPath: /var/lib/mysql 43 | name: data-vol 44 | volumes: 45 | - name: data-vol 46 | persistentVolumeClaim: 47 | claimName: testClaim 48 | --- 49 | kind: PersistentVolumeClaim 50 | apiVersion: v1 51 | metadata: 52 | name: testClaim 53 | spec: 54 | storageClassName: testClass 55 | accessModes: 56 | - ReadWriteOnce 57 | resources: 58 | requests: 59 | storage: 5G 60 | --- 61 | apiVersion: v1 62 | kind: Service 63 | metadata: 64 | name: percona-mysql 65 | labels: 66 | name: percona-mysql 67 | spec: 68 | ports: 69 | - port: 3306 70 | targetPort: 3306 71 | selector: 72 | name: percona 73 | 74 | -------------------------------------------------------------------------------- /vendor/github.com/DATA-DOG/godog/options.go: -------------------------------------------------------------------------------- 1 | package godog 2 | 3 | import ( 4 | "io" 5 | ) 6 | 7 | // Options are suite run options 8 | // flags are mapped to these options. 9 | // 10 | // It can also be used together with godog.RunWithOptions 11 | // to run test suite from go source directly 12 | // 13 | // See the flags for more details 14 | type Options struct { 15 | // Print step definitions found and exit 16 | ShowStepDefinitions bool 17 | 18 | // Randomize, if not `0`, will be used to run scenarios in a random order. 19 | // 20 | // Randomizing scenario order is especially helpful for detecting 21 | // situations where you have state leaking between scenarios, which can 22 | // cause flickering or fragile tests. 23 | // 24 | // The default value of `0` means "do not randomize". 25 | // 26 | // The magic value of `-1` means "pick a random seed for me", and godog will 27 | // assign a seed on its own during the `RunWithOptions` phase, similar to if 28 | // you specified `--random` on the command line. 29 | // 30 | // Any other value will be used as the random seed for shuffling. Re-using the 31 | // same seed will allow you to reproduce the shuffle order of a previous run 32 | // to isolate an error condition. 33 | Randomize int64 34 | 35 | // Stops on the first failure 36 | StopOnFailure bool 37 | 38 | // Fail suite when there are pending or undefined steps 39 | Strict bool 40 | 41 | // Forces ansi color stripping 42 | NoColors bool 43 | 44 | // Various filters for scenarios parsed 45 | // from feature files 46 | Tags string 47 | 48 | // The formatter name 49 | Format string 50 | 51 | // Concurrency rate, not all formatters accept this 52 | Concurrency int 53 | 54 | // All feature file paths 55 | Paths []string 56 | 57 | // Where it should print formatter output 58 | Output io.Writer 59 | } 60 | -------------------------------------------------------------------------------- /k8s/aws/k8s-installer/README.md: -------------------------------------------------------------------------------- 1 | # AWS platform specific code and scripts 2 | 3 | ## Amazon Web Services and setting up a Kubernetes cluster using KOPS 4 | 5 | These playbooks act as a wrapper around the `kops` and `aws` commands. 6 | 7 | ### Prerequisites 8 | 9 | - kubectl 10 | - aws 11 | - kops 12 | 13 | ### Setting up 14 | 15 | - Run `aws configure` and authenticate with your AWS account linked to the AWS Cloud 16 | 17 | ### Setup pre-requisite 18 | 19 | - Run `pre-requisite.yml` using ansible-playbook; this will create a Virtual Private Cloud, Subnet, Internet-Gateway and route-table. 20 | 21 | ```bash 22 | ansible-playbook pre-requisite.yml -vv 23 | ``` 24 | 25 | **Optional** 26 | 27 | - User can also provide the VPC name at the time of creation in `--extra-vars` 28 | 29 | ```bash 30 | ansible-playbook pre-requisite.yml -vv --extra-vars "vpc_name=" 31 | ``` 32 | 33 | ### Creating AWS Cluster 34 | 35 | - Run `create-aws-cluster`; this will create an SSH public key, a Bucket and the AWS cluster. 36 | 37 | ```bash 38 | ansible-playbook create-aws-cluster.yml -vv --extra-vars "k8s_version=" 39 | ``` 40 | 41 | **Optional** 42 | 43 | - User can also provide the Cluster name at the time of creation in `--extra-vars` 44 | 45 | ```bash 46 | ansible-playbook create-aws-cluster.yml -vv --extra-vars "k8s_version= cluster_name=" 47 | ``` 48 | 49 | ### Deleting AWS cluster 50 | 51 | - Run `delete-aws-cluster`; this will delete the cluster as well as the Bucket associated with it. 52 | 53 | ```bash 54 | ansible-playbook delete-aws-cluster.yml -vv 55 | ``` 56 | 57 | ### Deleting Pre-requisite 58 | 59 | - Run `delete-pre-requisite` to delete the existing VPC, Subnets, Internet-Gateway and route-table.
60 | 61 | ```bash 62 | ansible-playbook delete-pre-requisite.yml -vv 63 | ``` -------------------------------------------------------------------------------- /chaoslib/chaoskube/chaoskube.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: chaoskube 5 | labels: 6 | app: chaoskube 7 | spec: 8 | replicas: 1 9 | template: 10 | metadata: 11 | labels: 12 | app: chaoskube 13 | spec: 14 | serviceAccountName: chaoskube 15 | containers: 16 | - name: chaoskube 17 | image: quay.io/linki/chaoskube:v0.8.0 18 | args: 19 | # Provided below are possible flags to set in chaoskube. 20 | # This deployment will create a vanilla chaoskube w/o policies 21 | # which will be configured by a test playbook 22 | 23 | # kill a pod every 10 minutes 24 | #- --interval=10m 25 | 26 | # only target pods in the test environment 27 | #- --labels=openebs.io/controller=jiva-controller 28 | 29 | # only consider pods with this annotation 30 | #- --annotations=chaos.alpha.kubernetes.io/enabled=true 31 | 32 | # exclude all pods in the kube-system namespace 33 | #- --namespaces=!kube-system 34 | 35 | # don't kill anything on weekends 36 | #- --excluded-weekdays=Sat,Sun 37 | 38 | # don't kill anything during the night or at lunchtime 39 | #- --excluded-times-of-day=22:00-08:00,11:00-13:00 40 | 41 | # don't kill anything as a joke or on Christmas Eve 42 | #- --excluded-days-of-year=Apr1,Dec24 43 | 44 | # let's make sure we all agree on what the above times mean 45 | #- --timezone=UTC 46 | 47 | # terminate pods for real: this disables dry-run mode which is on by default 48 | #- --no-dry-run 49 | --- 50 | apiVersion: v1 51 | kind: ServiceAccount 52 | metadata: 53 | name: chaoskube 54 | labels: 55 | app: chaoskube 56 | -------------------------------------------------------------------------------- /apps/memleak/tests/test-mem.py: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------------ 2 | #!/usr/bin/env python 3 | #description :Test to verify the memory consumed with sample workloads. 4 | #============================================================================== 5 | 6 | from __future__ import division 7 | import subprocess 8 | import time, os, sys 9 | list = [] 10 | namespace = sys.argv[1] 11 | cmd_cntrl_name = "kubectl get pod -n %s -l openebs.io/controller=jiva-controller --no-headers | awk '{print $1}'" %(namespace) 12 | print cmd_cntrl_name 13 | out = subprocess.Popen(cmd_cntrl_name,stdout=subprocess.PIPE,shell=True) 14 | cntrl_name = out.communicate() 15 | cntrl_pod_name = cntrl_name[0].strip('\n') 16 | n = cntrl_pod_name.split('-') 17 | lst = n[:len(n)-2] 18 | lst.append("con") 19 | container_name = "-".join(lst) 20 | print container_name 21 | used_mem_process = "kubectl exec %s -c %s -n %s -- pmap -x 1 | awk '/total/ {print $3}'" %(cntrl_pod_name,container_name,namespace) 22 | print used_mem_process 23 | n = 10 24 | count = 0 25 | #Obtaining memory consumed by longhorn process from the controller pod. 26 | while count < n: 27 | count = count + 1 28 | out = subprocess.Popen(used_mem_process,stdout=subprocess.PIPE,shell=True) 29 | used_mem = out.communicate() 30 | mem_in_mb = int(used_mem[0])/1024 31 | print mem_in_mb, "MB" 32 | if mem_in_mb < 800: 33 | time.sleep(20) 34 | else: 35 | print "Fail" 36 | #break 37 | quit() 38 | list.append(mem_in_mb) 39 | print list 40 | # A watermark of 800MB (re-calibrated based on results observed from latest sanity run) 41 | # profile chosen in this test 42 | # TODO: Identify better mem consumption strategies 43 | if all(i <= 800 for i in list): 44 | print "Pass" 45 | else: 46 | print "Fail" 47 | 48 | -------------------------------------------------------------------------------- /apps/percona/tests/mysql_data_persistence/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: litmus-mysql-data-persistence- 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | labels: 11 | name: litmus 12 | spec: 13 | serviceAccountName: litmus 14 | restartPolicy: Never 15 | containers: 16 | - name: ansibletest 17 | image: openebs/ansible-runner:ci 18 | env: 19 | - name: ANSIBLE_STDOUT_CALLBACK 20 | #value: log_plays 21 | value: default 22 | 23 | - name: PROVIDER_STORAGE_CLASS 24 | # Supported values: openebs-standard, local-storage 25 | value: openebs-standard 26 | 27 | - name: CHAOS_TYPE 28 | # Supported values : APP_POD_EVICT/KUBECTL, APP_NODE_DRAIN/KUBECTL, APP_POD_KILL/PUMBA 29 | value: "APP_POD_KILL/PUMBA" 30 | 31 | command: ["/bin/bash"] 32 | args: ["-c", "ansible-playbook ./percona/tests/mysql_data_persistence/test.yml -i /etc/ansible/hosts -v; exit 0"] 33 | volumeMounts: 34 | - name: logs 35 | mountPath: /var/log/ansible 36 | tty: true 37 | - name: logger 38 | image: openebs/logger 39 | command: ["/bin/bash"] 40 | args: ["-c", "./logger.sh -d 10 -r maya,openebs,pvc,percona; exit 0"] 41 | volumeMounts: 42 | - name: kubeconfig 43 | mountPath: /root/admin.conf 44 | subPath: admin.conf 45 | - name: logs 46 | mountPath: /mnt 47 | tty: true 48 | volumes: 49 | - name: kubeconfig 50 | configMap: 51 | name: kubeconfig 52 | - name: logs 53 | hostPath: 54 | path: /mnt/mysql_data_persistence 55 | type: "" 56 | 57 | -------------------------------------------------------------------------------- /chaoslib/openebs/openebs_cstor_target_failure.yaml: -------------------------------------------------------------------------------- 1 | - name: Derive PV from application PVC 2 | shell: > 3 | kubectl get pvc {{ app_pvc }} 4 | -o custom-columns=:spec.volumeName -n {{ app_ns }} 5 | --no-headers 6 | args: 7 | executable: /bin/bash 8 | register: pv 9 | 10 | - name: Record the cstor target deployment of the PV 11 | set_fact: 12 | # Depends on the naming convention in maya-apiserver (<pv-name>-target) 13 | cstor_target_deploy: "{{ pv.stdout }}-target" 14 | 15 | - name: Get the resourceVersion of the target deploy before fault injection 16 | shell: > 17 | kubectl get deploy {{ cstor_target_deploy }} -n {{ target_ns }} 18 | -o yaml | grep resourceVersion | awk '{print $2}' | sed 's|"||g' 19 | args: 20 | executable: /bin/bash 21 | register: rv_bef 22 | 23 | - name: Pick a cStor target pod belonging to the PV 24 | shell: > 25 | kubectl get pods -l openebs.io/target=cstor-target 26 | -n {{ target_ns }} --no-headers | grep {{ pv.stdout }} 27 | | shuf -n1 | awk '{print $1}' 28 | args: 29 | executable: /bin/bash 30 | register: cstor_target_pod 31 | 32 | - name: Kill the
cstor target pod 33 | shell: kubectl delete pod {{ cstor_target_pod.stdout }} -n {{ target_ns }} --grace-period=0 --force 34 | 35 | - name: Wait for 10s post fault injection 36 | wait_for: 37 | timeout: 10 38 | 39 | - name: Get the resourceVersion of the target deploy after fault injection 40 | shell: > 41 | kubectl get deploy {{ cstor_target_deploy }} -n {{ target_ns }} 42 | -o yaml | grep resourceVersion | awk '{print $2}' | sed 's|"||g' 43 | args: 44 | executable: /bin/bash 45 | register: rv_aft 46 | 47 | - name: Compare resourceVersions of target deployment 48 | debug: 49 | msg: "Verified target pods were restarted by fault injection" 50 | failed_when: "rv_bef.stdout | int == rv_aft.stdout | int" 51 | 52 | -------------------------------------------------------------------------------- /providers/openebs/installers/operator/0.6/litmusbook/openebs_setup.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | name: litmus-openebs-setup-v0.6 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | name: litmus 11 | spec: 12 | serviceAccountName: litmus 13 | restartPolicy: Never 14 | containers: 15 | - name: ansibletest 16 | image: openebs/ansible-runner:ci 17 | imagePullPolicy: Always 18 | env: 19 | - name: mountPath 20 | value: /mnt/openebs 21 | - name: ANSIBLE_STDOUT_CALLBACK 22 | value: actionable 23 | - name: MAYA_APISERVER_IMAGE 24 | value: 25 | - name: OPENEBS_PROVISIONER_IMAGE 26 | value: 27 | - name: OPENEBS_SNAPSHOT_CONTROLLER_IMAGE 28 | value: 29 | - name: OPENEBS_SNAPSHOT_PROVISIONER_IMAGE 30 | value: 31 | - name: OPENEBS_IO_JIVA_CONTROLLER_IMAGE 32 | value: 33 | - name: OPENEBS_IO_JIVA_REPLICA_IMAGE 34 | value: 35 | - name: OPENEBS_IO_VOLUME_MONITOR_IMAGE 36 | value: 37 | - name: OPENEBS_IO_JIVA_REPLICA_COUNT 38 | value: 39 | - name: RUN_ID 40 | value: 41 | command: ["/bin/bash"] 42 | args: ["-c", "ansible-playbook ./operator/0.6/ansible/openebs_setup.yaml -i /etc/ansible/hosts -vv; exit 0"] 43 | volumeMounts: 44 | - name: kubeconfig 45 | mountPath: /root/admin.conf 46 | subPath: admin.conf 47 | - name: logs 48 | mountPath: /var/log/ansible 49 | volumes: 50 | - name: kubeconfig 51 | configMap: 52 | name: kubeconfig 53 | - name: logs 54 | hostPath: 55 | path: /mnt/openebs 56 | type: "" 57 | -------------------------------------------------------------------------------- /apps/fio/tests/performance/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: litmus-fio- 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | name: litmus 11 | labels: 12 | app: fio-benchmark-litmus 13 | spec: 14 | serviceAccountName: litmus 15 | restartPolicy: Never 16 | containers: 17 | - name: ansibletest 18 | image: openebs/ansible-runner:ci 19 | env: 20 | - name: ANSIBLE_STDOUT_CALLBACK 21 | value: default 22 | 23 | - name: PROVIDER_STORAGE_CLASS 24 | value: openebs-standard 25 | #value: local-storage 26 | 27 | - name: APP_NODE_SELECTOR 28 | value: kubeminion-01 29 | 30 | - name: FIO_TEST_PROFILE 31 | value: standard-ssd 32 | 33 | - name: FIO_SAMPLE_SIZE 34 | value: "128m" 35 | 36 | - name: FIO_TESTRUN_PERIOD 37 | value: "60" 38 | 39 | command: ["/bin/bash"] 40 | args: ["-c", "ansible-playbook ./fio/tests/performance/test.yml -i /etc/ansible/hosts -v; exit 0"] 41 | volumeMounts: 42 | - name: logs 43 | mountPath: /var/log/ansible 44 | tty: true 45 | - name: logger 
46 | image: openebs/logger 47 | command: ["/bin/bash"] 48 | args: ["-c", "./logger.sh -d 10 -r maya,openebs,pvc,fio; exit 0"] 49 | volumeMounts: 50 | - name: kubeconfig 51 | mountPath: /root/admin.conf 52 | subPath: admin.conf 53 | - name: logs 54 | mountPath: /mnt 55 | tty: true 56 | volumes: 57 | - name: kubeconfig 58 | configMap: 59 | name: kubeconfig 60 | - name: logs 61 | hostPath: 62 | path: /mnt/fio 63 | type: "" 64 | 65 | -------------------------------------------------------------------------------- /chaoslib/openebs/jiva_replica_pod_failure.yaml: -------------------------------------------------------------------------------- 1 | - name: Derive PV from application PVC 2 | shell: > 3 | kubectl get pvc {{ app_pvc }} 4 | -o custom-columns=:spec.volumeName -n {{ app_ns }} 5 | --no-headers 6 | args: 7 | executable: /bin/bash 8 | register: pv 9 | 10 | - name: Record the jiva replica deployment of the PV 11 | set_fact: 12 | # Depends on the naming convention in maya-apiserver (-rep) 13 | jiva_replica_deploy: "{{ pv.stdout }}-rep" 14 | 15 | - name: Get the resourceVersion of the replica deploy before fault injection 16 | shell: > 17 | kubectl get deploy {{ jiva_replica_deploy }} -n {{ app_ns }} 18 | -o yaml | grep resourceVersion | awk '{print $2}' | sed 's|"||g' 19 | args: 20 | executable: /bin/bash 21 | register: rv_bef 22 | 23 | - name: Randomly pick a jiva replica pod belonging to the PV 24 | shell: > 25 | kubectl get pods -l openebs.io/replica=jiva-replica 26 | -n {{ app_ns }} --no-headers | grep {{ pv.stdout }} 27 | | shuf -n1 | awk '{print $1}' 28 | args: 29 | executable: /bin/bash 30 | register: jiva_replica_pod 31 | 32 | - name: Kill the jiva replica pod 33 | shell: > 34 | kubectl delete pod {{ jiva_replica_pod.stdout }} -n {{ app_ns }} 35 | args: 36 | executable: /bin/bash 37 | 38 | - name: Wait for 10s post fault injection 39 | wait_for: 40 | timeout: 10 41 | 42 | - name: Get the resourceVersion of the replica deploy after fault injection 43 | shell: > 44 | kubectl get deploy {{ jiva_replica_deploy }} -n {{ app_ns }} 45 | -o yaml | grep resourceVersion | awk '{print $2}' | sed 's|"||g' 46 | args: 47 | executable: /bin/bash 48 | register: rv_aft 49 | 50 | - name: Compare resourceVersions of replica deployment 51 | debug: 52 | msg: "Verified replica pods were restarted by fault injection" 53 | failed_when: "rv_bef.stdout | int == rv_aft.stdout | int" 54 | -------------------------------------------------------------------------------- /apps/crunchy-postgres/workload/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: crunchy-loadgen-litmus- 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | name: crunchy-loadgen 11 | namespace: litmus 12 | labels: 13 | loadgen: postgres-loadjob 14 | spec: 15 | serviceAccountName: litmus 16 | restartPolicy: Never 17 | containers: 18 | - name: ansibletest 19 | image: openebs/ansible-runner:ci 20 | imagePullPolicy: Always 21 | env: 22 | 23 | - name: ANSIBLE_STDOUT_CALLBACK 24 | #value: log_plays 25 | value: default 26 | 27 | - name: APP_LABEL 28 | value: 'app=pgset' 29 | 30 | - name: LOADGEN_LABEL 31 | value: 'loadgen=crunchy-loadgen' 32 | 33 | #namespace in which loadgen will run 34 | - name: APP_NAMESPACE 35 | value: app-pgres-ns 36 | 37 | # Database Name 38 | - name: DATABASE_NAME 39 | value: postgres 40 | 41 | # Password to access Database 42 | - name: PASSWORD 43 | value: password 44 | 45 | # Database 
user 46 | - name: DATABASE_USER 47 | value: postgres 48 | 49 | #Port on which crunchy database is listening 50 | - name: PORT 51 | value: "5432" 52 | 53 | #Number of parallel transactions to perform 54 | - name: PARALLEL_TRANSACTION 55 | value: "5" 56 | 57 | #Number of transactions to perform 58 | - name: TRANSACTIONS 59 | value: "200" 60 | 61 | command: ["/bin/bash"] 62 | args: ["-c", "ansible-playbook ./crunchy-postgres/workload/test.yml -i /etc/ansible/hosts -v; exit 0"] 63 | -------------------------------------------------------------------------------- /k8s/azure/k8s-installer/create-k8s-cluster.yml: -------------------------------------------------------------------------------- 1 | # Description: Generates a random name, creates a Resource Group, and brings up an AKS cluster 2 | # with the specified node count and VM size using the Azure CLI 3 | # Author: Harshvardhan Karn 4 | ############################################################################################### 5 | #Steps: 6 | #1. Generate a random Name Infix 7 | #2. Create a Resource Group 8 | #3. Create the Clusters inside the resource group 9 | #4. Generate the kubeconfig file and merge it in ~/.kube/config 10 | #5. Log the name of the Cluster inside ~/logs/azure_clusters 11 | ############################################################################################### 12 | 13 | --- 14 | - hosts: localhost 15 | vars: 16 | cluster_name: 17 | tasks: 18 | - name: Generating Random Cluster Name 19 | shell: python ../../utils/name_generator/namesgenerator.py 20 | register: cluster_name 21 | when: not cluster_name 22 | - set_fact: 23 | cluster_name: "{{ cluster_name.stdout }}" 24 | when: cluster_name.stdout is defined 25 | 26 | - name: Creating Resource Group 27 | shell: az group create -l eastus -n aks-{{ cluster_name }}-rg 28 | 29 | - name: Creating AKS Cluster 30 | shell: az aks create -n aks-{{ cluster_name }}-cluster -g aks-{{ cluster_name }}-rg --node-count {{ nodes }} --node-vm-size {{ node_size }} --generate-ssh-key 31 | 32 | - name: Getting Kubeconfig for the AKS Cluster 33 | shell: az aks get-credentials --resource-group aks-{{ cluster_name }}-rg --name aks-{{ cluster_name }}-cluster 34 | 35 | - name: Logging Cluster Name 36 | lineinfile: 37 | create: yes 38 | state: present 39 | path: "~/logs/azure_clusters" 40 | line: "{{ cluster_name }}" 41 | 42 | - name: Test Passed 43 | set_fact: 44 | flag: "Test Passed" -------------------------------------------------------------------------------- /k8s/utils/health/test_health.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import cluster_health_check 3 | from mock import patch, Mock 4 | 5 | class Test_Object(object): 6 | pass 7 | 8 | class TestHealthCheck(unittest.TestCase): 9 | 10 | def setUp(self): 11 | self.node_count = 3 12 | self.conditions = ["Ready", "Not Ready", "Maybe"] 13 | 14 | def add_condition(self, item): 15 | item.status = Test_Object() 16 | item.status.conditions = [] 17 | for condition in self.conditions: 18 | new_condition = Test_Object() 19 | new_condition.type = condition 20 | item.status.conditions.append(new_condition) 21 | 22 | def get_node_list_with_conditions(self): 23 | test_object = Test_Object() 24 | item_array = [Test_Object() for i in range(self.node_count)] 25 | for item in item_array: 26 | self.add_condition(item) 27 | 28 | test_object.items = item_array 29 | return test_object 30 | 31 | def test_create_api(self): 32 |
self.assertNotEqual(cluster_health_check.create_api(), None) 33 | 34 | @patch('cluster_health_check.client.CoreV1Api') 35 | def test_get_nodes(self, v1_mock): 36 | test_object = Test_Object() 37 | test_object.items = [i for i in range(self.node_count)] 38 | v1_mock.return_value.list_node.return_value = test_object 39 | self.assertEqual(len(cluster_health_check.get_nodes(self.node_count)), self.node_count) 40 | 41 | @patch('cluster_health_check.client.CoreV1Api') 42 | def test_get_node_status(self, v1_mock): 43 | v1_mock.return_value.list_node.return_value = self.get_node_list_with_conditions() 44 | self.assertEqual(cluster_health_check.get_node_status(self.node_count), self.node_count) 45 | 46 | if __name__ == '__main__': 47 | unittest.main() -------------------------------------------------------------------------------- /vendor/github.com/davecgh/go-spew/spew/bypasssafe.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2016 Dave Collins 2 | // 3 | // Permission to use, copy, modify, and distribute this software for any 4 | // purpose with or without fee is hereby granted, provided that the above 5 | // copyright notice and this permission notice appear in all copies. 6 | // 7 | // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 8 | // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 9 | // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 10 | // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 11 | // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 12 | // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 13 | // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 | 15 | // NOTE: Due to the following build constraints, this file will only be compiled 16 | // when the code is running on Google App Engine, compiled by GopherJS, or 17 | // "-tags safe" is added to the go build command line. The "disableunsafe" 18 | // tag is deprecated and thus should not be used. 19 | // +build js appengine safe disableunsafe !go1.4 20 | 21 | package spew 22 | 23 | import "reflect" 24 | 25 | const ( 26 | // UnsafeDisabled is a build-time constant which specifies whether or 27 | // not access to the unsafe package is available. 28 | UnsafeDisabled = true 29 | ) 30 | 31 | // unsafeReflectValue typically converts the passed reflect.Value into a one 32 | // that bypasses the typical safety restrictions preventing access to 33 | // unaddressable and unexported data. However, doing this relies on access to 34 | // the unsafe package. This is a stub version which simply returns the passed 35 | // reflect.Value when the unsafe package is not available. 36 | func unsafeReflectValue(v reflect.Value) reflect.Value { 37 | return v 38 | } 39 | -------------------------------------------------------------------------------- /pkg/exec/exec.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 The OpenEBS Authors 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package exec 18 | 19 | import ( 20 | "bytes" 21 | "fmt" 22 | osexec "os/exec" 23 | "strings" 24 | ) 25 | 26 | // Executor acts as a contract for various execution based logic 27 | type Executor interface { 28 | Output(args []string) (output string, err error) 29 | } 30 | 31 | // ShellExec is a shell based struct that implements Executor interface 32 | type ShellExec struct { 33 | binary string 34 | } 35 | 36 | // NewShellExec returns a new instance of shellExec 37 | // based on the provided binary name 38 | func NewShellExec(binary string) *ShellExec { 39 | return &ShellExec{ 40 | binary: binary, 41 | } 42 | } 43 | 44 | // Output executes the shell command and returns the output or error 45 | func (e *ShellExec) Output(args []string) (output string, err error) { 46 | var out bytes.Buffer 47 | var stderr bytes.Buffer 48 | 49 | cmd := osexec.Command(e.binary, args...) 50 | cmd.Stdout = &out 51 | cmd.Stderr = &stderr 52 | 53 | err = cmd.Run() 54 | if err != nil { 55 | err = fmt.Errorf("failed to run cmd '%s': %s: %s", cmd.Args, fmt.Sprint(err), stderr.String()) 56 | return 57 | } 58 | 59 | // This removes the beginning & trailing single quotes from the output 60 | // It has been observed that kubectl execution results in such single quotes 61 | output = strings.Trim(out.String(), "'") 62 | return 63 | } 64 | -------------------------------------------------------------------------------- /chaoslib/openebs/jiva_controller_pod_failure.yaml: -------------------------------------------------------------------------------- 1 | - name: Derive PV from application PVC 2 | shell: > 3 | kubectl get pvc {{ app_pvc }} 4 | -o custom-columns=:spec.volumeName -n {{ app_ns }} 5 | --no-headers 6 | args: 7 | executable: /bin/bash 8 | register: pv 9 | 10 | - name: Record the jiva controller deployment of the PV 11 | set_fact: 12 | # Depends on the naming convention in maya-apiserver (<pv-name>-ctrl) 13 | jiva_controller_deploy: "{{ pv.stdout }}-ctrl" 14 | 15 | - name: Get the resourceVersion of the controller deploy before fault injection 16 | shell: > 17 | kubectl get deploy {{ jiva_controller_deploy }} -n {{ app_ns }} 18 | -o=custom-columns=NAME:".metadata.resourceVersion" --no-headers 19 | args: 20 | executable: /bin/bash 21 | register: rv_bef 22 | 23 | - name: Get jiva controller pod belonging to the PV 24 | shell: > 25 | kubectl get pods --no-headers -l openebs.io/controller=jiva-controller -n {{ app_ns }} 26 | -o jsonpath="{.items[?(@.metadata.labels.openebs\\.io/persistent-volume==\"{{pv.stdout}}\")].metadata.name}" 27 | args: 28 | executable: /bin/bash 29 | register: jiva_controller_pod 30 | 31 | - name: Kill the jiva controller pod 32 | shell: > 33 | kubectl delete pod {{ jiva_controller_pod.stdout }} -n {{ app_ns }} 34 | args: 35 | executable: /bin/bash 36 | 37 | - name: Wait for 10s post fault injection 38 | wait_for: 39 | timeout: 10 40 | 41 | - name: Get the resourceVersion of the controller deploy after fault injection 42 | shell: > 43 | kubectl get deploy {{ jiva_controller_deploy }} -n {{ app_ns }} 44 | -o=custom-columns=NAME:".metadata.resourceVersion" --no-headers 45 | args: 46 | executable: /bin/bash 47 | register: rv_aft 48 | 49 | - name: Compare resourceVersions of controller deployment 50 | debug: 51 | msg: "Verified controller pods were restarted by fault injection" 52 | failed_when: "rv_bef.stdout | int == rv_aft.stdout | int" 53 | 54 | -------------------------------------------------------------------------------- /apps/crunchy-postgres/liveness/run_litmus_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: liveness-pg- 6 | namespace: litmus 7 | 8 | spec: 9 | template: 10 | metadata: 11 | name: liveness 12 | 13 | spec: 14 | restartPolicy: Never 15 | containers: 16 | - name: liveness 17 | image: openebs/postgres-client 18 | imagePullPolicy: Always 19 | 20 | env: 21 | 22 | # Time period (in sec) b/w retries for DB init check 23 | - name: INIT_WAIT_DELAY 24 | value: "30" 25 | 26 | # No of retries for DB init check 27 | - name: INIT_RETRY_COUNT 28 | value: "10" 29 | 30 | # Time period (in sec) b/w liveness checks 31 | - name: LIVENESS_PERIOD_SECONDS 32 | value: "10" 33 | 34 | # Time period (in sec) b/w retries for db_connect failure 35 | - name: LIVENESS_TIMEOUT_SECONDS 36 | value: "10" 37 | 38 | # No of retries after a db_connect failure before declaring liveness fail 39 | - name: LIVENESS_RETRY_COUNT 40 | value: "6" 41 | 42 | # Namespace in which Postgres is running 43 | - name: NAMESPACE 44 | value: postgres 45 | 46 | # Service Name of postgres 47 | - name: SERVICE_NAME 48 | value: pgset 49 | 50 | #Database name of postgres 51 | - name: DATABASE 52 | value: userdb 53 | 54 | # User name of Postgres 55 | - name: USER 56 | value: testuser 57 | 58 | # Password for the Postgres 59 | - name: PASSWORD 60 | value: password 61 | 62 | # Port on which application is listening 63 | - name: PORT 64 | value: "5432" 65 | 66 | command: ["/bin/bash"] 67 | args: ["-c", "python ./liveness.py ; exit 0"] 68 | -------------------------------------------------------------------------------- /providers/openebs/installers/storageclass/0.7/litmusbook/storageclass_cleanup.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | name: litmus-storageclass-cleanup-v0.7 6 | namespace: litmus 7 | spec: 8 | template: 9 | metadata: 10 | name: litmus 11 | spec: 12 | serviceAccountName: litmus 13 | restartPolicy: Never 14 | containers: 15 | - name: ansibletest 16 | image: openebs/ansible-runner:ci 17 | imagePullPolicy: Always 18 | env: 19 | - name: mountPath 20 | value: /mnt/openebs 21 | - name: ANSIBLE_STDOUT_CALLBACK 22 | value: actionable 23 | - name: RUN_ID 24 | value: 25 | # Enable storageclass 26 | - name: DELETE_JIVA 27 | value: "true" 28 | - name: DELETE_CSTOR 29 | value: "false" 30 | # Jiva configuration 31 | - name: JIVA_POOL_NAME 32 | value: openebs-mntdir 33 | - name: JIVA_STORAGECLASS_NAME 34 | value: openebs-storageclass 35 | - name: STORAGE_PATH 36 | value: /var/openebs 37 | - name: OPENEBS_IO_JIVA_REPLICA_COUNT 38 | value: "3" 39 | - name: OPENEBS_IO_JIVA_CONTROLLER_IMAGE 40 | value: openebs/jiva:0.7.0 41 | - name: OPENEBS_IO_JIVA_REPLICA_IMAGE 42 | value: openebs/jiva:0.7.0 43 | - name: OPENEBS_IO_VOLUME_MONITOR_IMAGE 44 | value: openebs/m-exporter:ci 45 | # Cstor configuration 46 | - name: CSTOR_POOL_NAME 47 | value: cstor-pool-default-0.7.0 48 | - name: MAX_POOLS 49 | value: "3" 50 | - name: CSTOR_STORAGECLASS_NAME 51 | value: openebs-cstor-default-0.7.0 52 | - name: DISK_LIST 53 | value: 54 | command: ["/bin/bash"] 55 | args: ["-c", "ansible-playbook ./storageclass/0.7/ansible/storageclass_cleanup.yaml -i /etc/ansible/hosts -vv; exit 0"] 56 | -------------------------------------------------------------------------------- /k8s/gcp/k8s-installer/delete-k8s-cluster.yml: -------------------------------------------------------------------------------- 1 | # Description: Deletes the Cluster and the Bucket associated with the NAME passed as the extra-var 2 | # in playbook, e.g. --extra-vars="NAME=" 3 | # Author: Harshvardhan Karn 4 | ############################################################################################### 5 | # Steps: 6 | # 1. Delete the Cluster 7 | # 2. Delete the Bucket 8 | ############################################################################################### 9 | --- 10 | - hosts: localhost 11 | vars: 12 | cluster_name: 13 | tasks: 14 | - block: 15 | - name: Fetching Cluster Name 16 | shell: cat ~/logs/clusters 17 | register: cluster_name 18 | when: not cluster_name 19 | - set_fact: 20 | cluster_name: "{{ cluster_name.stdout }}" 21 | when: cluster_name.stdout is defined 22 | 23 | - name: Deleting Cluster 24 | shell: kops delete cluster --yes --name {{ cluster_name }}.k8s.local --state gs://{{ cluster_name }}/ 25 | 26 | - name: Deleting Bucket 27 | shell: gsutil rm -r gs://{{ cluster_name }}/ 28 | 29 | - name: Deleting SSD/Disks 30 | shell: | 31 | gcloud compute disks delete a-etcd-events-{{ cluster_name }}-k8s-local --zone=us-central1-a -q 32 | gcloud compute disks delete a-etcd-main-{{ cluster_name }}-k8s-local --zone=us-central1-a -q 33 | ignore_errors: yes 34 | 35 | - name: Removing Cluster Name entry from log 36 | lineinfile: 37 | path: ~/logs/clusters 38 | state: absent 39 | regexp: '{{ cluster_name }}' 40 | 41 | - name: Test Passed 42 | set_fact: 43 | flag: "Test Passed" 44 | rescue: 45 | - name: Test Failed 46 | set_fact: 47 | flag: "Test Failed" -------------------------------------------------------------------------------- /funclib/kubectl/app_upgrade.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This utility task file can upgrade a statefulset or deployment application in a K8s cluster. 3 | # The parameters required are 4 | # - app_ns (namespace in which the application is deployed) 5 | # - app_label (Application's label in the form key=value) 6 | # - deploy_type (Either 'deployment' or 'statefulset') 7 | # The above parameters should be obtained as environment variables from the litmus-book. 8 | - name: Obtaining the deployment name. 9 | shell: kubectl get {{ deploy_type }} -n {{ app_ns }} --no-headers -l {{ app_label }} -o custom-columns=:metadata.name 10 | args: 11 | executable: /bin/bash 12 | register: result 13 | 14 | - name: Recording the application deployment name. 15 | set_fact: 16 | app_name: "{{ result.stdout }}" 17 | 18 | - name: Get the image of the application pre-upgrade. 19 | shell: kubectl get {{ deploy_type }} {{ app_name }} -n {{ app_ns }} -o jsonpath="{..image}" 20 | args: 21 | executable: /bin/bash 22 | register: pre_image 23 | 24 | - name: Record the image name of the existing running application. 25 | set_fact: 26 | image: "{{ pre_image.stdout }}" 27 | 28 | - block: 29 | 30 | - name: Updating the application image to the required version.
31 | shell: kubectl set image {{ deploy_type}}/{{ app_name }} {{ app_name }}={{ image_name }} -n {{ app_ns }} 32 | args: 33 | executable: /bin/bash 34 | register: result 35 | failed_when: "'image updated' not in result.stdout" 36 | 37 | - name: Check if the application pod is running after upgrade. 38 | shell: kubectl get pod -n {{ app_ns }} --no-headers -l {{ app_label }} -o jsonpath='{.items[0].status.phase}' 39 | args: 40 | executable: /bin/bash 41 | register: Pod_status 42 | until: "'Running' in Pod_status.stdout" 43 | delay: 30 44 | retries: 15 45 | when: lookup('env','IMAGE_NAME') != image 46 | 47 | - debug: 48 | msg: Provided image is already present 49 | when: lookup('env','IMAGE_NAME') == image 50 | --------------------------------------------------------------------------------
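The `funclib/kubectl/app_upgrade.yml` task file above is meant to be included from a litmus book rather than run directly. A minimal sketch of a wrapper playbook that supplies its required parameters, assuming the env-variable convention used by the other litmus books in this repo; the wrapper itself and the env-variable names are illustrative, not part of the repo:

```yaml
---
# Hypothetical wrapper playbook (illustrative only) that invokes the
# app_upgrade.yml utility with the parameters its header documents.
- hosts: localhost
  gather_facts: no

  vars:
    # Required by app_upgrade.yml; sourced from the litmus book's env,
    # mirroring the pattern used elsewhere in this repo.
    app_ns: "{{ lookup('env', 'APP_NAMESPACE') }}"
    app_label: "{{ lookup('env', 'APP_LABEL') }}"
    deploy_type: "{{ lookup('env', 'DEPLOY_TYPE') }}"
    image_name: "{{ lookup('env', 'IMAGE_NAME') }}"

  tasks:
    - name: Upgrade the application image via the funclib utility
      include_tasks: /funclib/kubectl/app_upgrade.yml
```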