├── .gitignore
├── .travis.yml
├── EXTRA
│   ├── configtx.yaml
│   ├── crypto-config.yaml
│   ├── crypto-config
│   │   ├── ordererOrganizations
│   │   │   └── test.svc.cluster.local
│   │   │       ├── ca
│   │   │       │   ├── 974cf60c6277a10ca880599a7ef8dcd2ede3789de72be59a6168e39aef097075_sk
│   │   │       │   └── ca.test.svc.cluster.local-cert.pem
│   │   │       ├── msp
│   │   │       │   ├── admincerts
│   │   │       │   │   └── Admin@test.svc.cluster.local-cert.pem
│   │   │       │   ├── cacerts
│   │   │       │   │   └── ca.test.svc.cluster.local-cert.pem
│   │   │       │   └── tlscacerts
│   │   │       │       └── tlsca.test.svc.cluster.local-cert.pem
│   │   │       ├── orderers
│   │   │       │   └── ord0-hlf-ord.test.svc.cluster.local
│   │   │       │       ├── msp
│   │   │       │       │   ├── admincerts
│   │   │       │       │   │   └── Admin@test.svc.cluster.local-cert.pem
│   │   │       │       │   ├── cacerts
│   │   │       │       │   │   └── ca.test.svc.cluster.local-cert.pem
│   │   │       │       │   ├── keystore
│   │   │       │       │   │   └── 4ca789b7ba945262b58f873358ef32947fe450d949084a887995a7aa89a95ee1_sk
│   │   │       │       │   ├── signcerts
│   │   │       │       │   │   └── ord0-hlf-ord.test.svc.cluster.local-cert.pem
│   │   │       │       │   └── tlscacerts
│   │   │       │       │       └── tlsca.test.svc.cluster.local-cert.pem
│   │   │       │       └── tls
│   │   │       │           ├── ca.crt
│   │   │       │           ├── server.crt
│   │   │       │           └── server.key
│   │   │       ├── tlsca
│   │   │       │   ├── 6e040ed78d0c432ee8664642d67828ba1a5f228e257d0b4e3abc0920c2d4a9e0_sk
│   │   │       │   └── tlsca.test.svc.cluster.local-cert.pem
│   │   │       └── users
│   │   │           └── Admin@test.svc.cluster.local
│   │   │               ├── msp
│   │   │               │   ├── admincerts
│   │   │               │   │   └── Admin@test.svc.cluster.local-cert.pem
│   │   │               │   ├── cacerts
│   │   │               │   │   └── ca.test.svc.cluster.local-cert.pem
│   │   │               │   ├── keystore
│   │   │               │   │   └── f6711dc9fde313abc6ee14ed6193939662947b3b713317ad04d98c2d7318d6a1_sk
│   │   │               │   ├── signcerts
│   │   │               │   │   └── Admin@test.svc.cluster.local-cert.pem
│   │   │               │   └── tlscacerts
│   │   │               │       └── tlsca.test.svc.cluster.local-cert.pem
│   │   │               └── tls
│   │   │                   ├── ca.crt
│   │   │                   ├── client.crt
│   │   │                   └── client.key
│   │   └── peerOrganizations
│   │       └── test.svc.cluster.local
│   │           ├── ca
│   │           │   ├── 1b0e12cfa08fd0bbd05e19d67dbd0ebede2f363487e877b97a68a60c67b10bb2_sk
│   │           │   └── ca.test.svc.cluster.local-cert.pem
│   │           ├── msp
│   │           │   ├── admincerts
│   │           │   │   └── Admin@test.svc.cluster.local-cert.pem
│   │           │   ├── cacerts
│   │           │   │   └── ca.test.svc.cluster.local-cert.pem
│   │           │   └── tlscacerts
│   │           │       └── tlsca.test.svc.cluster.local-cert.pem
│   │           ├── peers
│   │           │   └── peer0-hlf-peer.test.svc.cluster.local
│   │           │       ├── msp
│   │           │       │   ├── admincerts
│   │           │       │   │   └── Admin@test.svc.cluster.local-cert.pem
│   │           │       │   ├── cacerts
│   │           │       │   │   └── ca.test.svc.cluster.local-cert.pem
│   │           │       │   ├── keystore
│   │           │       │   │   └── 22de23eeb2b0259b6b739a6954408780beb9e614858c879c75df2e6d04325bf4_sk
│   │           │       │   ├── signcerts
│   │           │       │   │   └── peer0-hlf-peer.test.svc.cluster.local-cert.pem
│   │           │       │   └── tlscacerts
│   │           │       │       └── tlsca.test.svc.cluster.local-cert.pem
│   │           │       └── tls
│   │           │           ├── ca.crt
│   │           │           ├── server.crt
│   │           │           └── server.key
│   │           ├── tlsca
│   │           │   ├── 8ac0bc9f7882528bf7a611abfaa86332141fa0dcec6ea20e242e18345b282f72_sk
│   │           │   └── tlsca.test.svc.cluster.local-cert.pem
│   │           └── users
│   │               └── Admin@test.svc.cluster.local
│   │                   ├── msp
│   │                   │   ├── admincerts
│   │                   │   │   └── Admin@test.svc.cluster.local-cert.pem
│   │                   │   ├── cacerts
│   │                   │   │   └── ca.test.svc.cluster.local-cert.pem
│   │                   │   ├── keystore
│   │                   │   │   └── 478bb28c12190a41a8eb361c6815cf91a0d0ec291f0977f562661a67b1c250dd_sk
│   │                   │   ├── signcerts
│   │                   │   │   └── Admin@test.svc.cluster.local-cert.pem
│   │                   │   └── tlscacerts
│   │                   │       └── tlsca.test.svc.cluster.local-cert.pem
│   │                   └── tls
│   │                       ├── ca.crt
│   │                       ├── client.crt
│   │                       └── client.key
│   └── mychannel.tx
├── chart_museum.sh
├── hl-composer
│   ├── .helmignore
│   ├── Chart.yaml
│   ├── OWNERS
│   ├── README.md
│   ├── templates
│   │   ├── NOTES.txt
│   │   ├── _helpers.tpl
│   │   ├── hl-composer-cli-deployment.yaml
│   │   ├── hl-composer-pg-deployment.yaml
│   │   ├── hl-composer-pg-ingress.yaml
│   │   ├── hl-composer-pg-service.yaml
│   │   ├── hl-composer-pvc.yaml
│   │   ├── hl-composer-rest-deployment.yaml
│   │   ├── hl-composer-rest-ingress.yaml
│   │   ├── hl-composer-rest-secret.yaml
│   │   └── hl-composer-rest-service.yaml
│   └── values.yaml
├── hlf-ca
│   ├── .helmignore
│   ├── Chart.yaml
│   ├── FABRIC_UPGRADE.md
│   ├── OWNERS
│   ├── README.md
│   ├── requirements.lock
│   ├── requirements.yaml
│   ├── templates
│   │   ├── NOTES.txt
│   │   ├── _helpers.tpl
│   │   ├── configmap--ca.yaml
│   │   ├── configmap--config.yaml
│   │   ├── configmap--db.yaml
│   │   ├── deployment.yaml
│   │   ├── ingress.yaml
│   │   ├── pvc.yaml
│   │   ├── secret--ca.yaml
│   │   ├── secret--db.yaml
│   │   └── service.yaml
│   ├── tests
│   │   ├── README.md
│   │   └── values
│   │       ├── intermediate.yaml
│   │       └── root.yaml
│   └── values.yaml
├── hlf-couchdb
│   ├── .helmignore
│   ├── Chart.yaml
│   ├── FABRIC_UPGRADE.md
│   ├── OWNERS
│   ├── README.md
│   ├── templates
│   │   ├── NOTES.txt
│   │   ├── _helpers.tpl
│   │   ├── deployment.yaml
│   │   ├── ingress.yaml
│   │   ├── pvc.yaml
│   │   ├── secret.yaml
│   │   └── service.yaml
│   └── values.yaml
├── hlf-ord
│   ├── .helmignore
│   ├── Chart.yaml
│   ├── FABRIC_UPGRADE.md
│   ├── OWNERS
│   ├── README.md
│   ├── UPGRADE_1-1-x.md
│   ├── templates
│   │   ├── NOTES.txt
│   │   ├── _helpers.tpl
│   │   ├── configmap--ord.yaml
│   │   ├── deployment.yaml
│   │   ├── ingress.yaml
│   │   ├── pvc.yaml
│   │   └── service.yaml
│   ├── tests
│   │   ├── README.md
│   │   ├── fixtures
│   │   │   └── crypto
│   │   │       ├── admin
│   │   │       │   └── Admin@test.svc.cluster.local-cert.pem
│   │   │       ├── ca
│   │   │       │   └── ca.test.svc.cluster.local-cert.pem
│   │   │       ├── genesis.block
│   │   │       └── orderer
│   │   │           ├── 4ca789b7ba945262b58f873358ef32947fe450d949084a887995a7aa89a95ee1_sk
│   │   │           └── ord0-hlf-ord.test.svc.cluster.local-cert.pem
│   │   └── values
│   │       └── orderer.yaml
│   └── values.yaml
├── hlf-peer
│   ├── .helmignore
│   ├── Chart.yaml
│   ├── FABRIC_UPGRADE.md
│   ├── OWNERS
│   ├── README.md
│   ├── UPGRADE_1-1-x.md
│   ├── templates
│   │   ├── NOTES.txt
│   │   ├── _helpers.tpl
│   │   ├── configmap--peer.yaml
│   │   ├── deployment.yaml
│   │   ├── ingress.yaml
│   │   ├── pvc.yaml
│   │   └── service.yaml
│   ├── tests
│   │   ├── README.md
│   │   ├── fixtures
│   │   │   └── crypto
│   │   │       ├── admin
│   │   │       │   ├── 478bb28c12190a41a8eb361c6815cf91a0d0ec291f0977f562661a67b1c250dd_sk
│   │   │       │   └── Admin@test.svc.cluster.local-cert.pem
│   │   │       ├── ca
│   │   │       │   └── ca.test.svc.cluster.local-cert.pem
│   │   │       ├── mychannel.tx
│   │   │       └── peer
│   │   │           ├── 22de23eeb2b0259b6b739a6954408780beb9e614858c879c75df2e6d04325bf4_sk
│   │   │           └── peer0-hlf-peer.test.svc.cluster.local-cert.pem
│   │   └── values
│   │       └── peer.yaml
│   └── values.yaml
├── integration
│   ├── __init__.py
│   └── test_qa.py
├── networks
│   ├── ca-nephos-local.crt
│   ├── ca-nephos-local.key
│   ├── ca-nephos-local.pem
│   └── qa
│       ├── config
│       │   ├── configtx.yaml
│       │   └── fabric-ca-client-config.yaml
│       ├── helm_values
│       │   ├── AlphaMSP
│       │   │   └── hlf-ord
│       │   │       └── ord1.yaml
│       │   ├── BetaMSP
│       │   │   ├── hlf-couchdb
│       │   │   │   └── cdb-beta-peer1.yaml
│       │   │   └── hlf-peer
│       │   │       └── beta-peer1.yaml
│       │   ├── hl-composer
│       │   │   └── hlc.yaml
│       │   ├── hlf-ca
│       │   │   └── ca.yaml
│       │   └── postgres-ca
│       │       └── ca-pg.yaml
│       └── nephos_config.yaml
├── package.sh
├── requirements.txt
├── sonar-project.properties
└── stable
    └── prometheus
        ├── .helmignore
        ├── Chart.yaml
        ├── OWNERS
        ├── README.md
        ├── requirements.yaml
        ├── templates
        │   ├── NOTES.txt
        │   ├── _helpers.tpl
        │   ├── alertmanager-configmap.yaml
        │   ├── alertmanager-deployment.yaml
        │   ├── alertmanager-ingress.yaml
        │   ├── alertmanager-networkpolicy.yaml
        │   ├── alertmanager-pvc.yaml
        │   ├── alertmanager-service-headless.yaml
        │   ├── alertmanager-service.yaml
        │   ├── alertmanager-serviceaccount.yaml
        │   ├── alertmanager-statefulset.yaml
        │   ├── kube-state-metrics-clusterrole.yaml
        │   ├── kube-state-metrics-clusterrolebinding.yaml
        │   ├── kube-state-metrics-deployment.yaml
        │   ├── kube-state-metrics-networkpolicy.yaml
        │   ├── kube-state-metrics-serviceaccount.yaml
        │   ├── kube-state-metrics-svc.yaml
        │   ├── node-exporter-daemonset.yaml
        │   ├── node-exporter-podsecuritypolicy.yaml
        │   ├── node-exporter-role.yaml
        │   ├── node-exporter-rolebinding.yaml
        │   ├── node-exporter-service.yaml
        │   ├── node-exporter-serviceaccount.yaml
        │   ├── pushgateway-deployment.yaml
        │   ├── pushgateway-ingress.yaml
        │   ├── pushgateway-pvc.yaml
        │   ├── pushgateway-service.yaml
        │   ├── pushgateway-serviceaccount.yaml
        │   ├── server-clusterrole.yaml
        │   ├── server-clusterrolebinding.yaml
        │   ├── server-configmap.yaml
        │   ├── server-deployment.yaml
        │   ├── server-ingress.yaml
        │   ├── server-networkpolicy.yaml
        │   ├── server-pvc.yaml
        │   ├── server-service-headless.yaml
        │   ├── server-service.yaml
        │   ├── server-serviceaccount.yaml
        │   └── server-statefulset.yaml
        └── values.yaml

/.gitignore:
--------------------------------------------------------------------------------
1 | # Python
2 | **/__pycache__/**
3 | 
4 | # Python environment
5 | venv
6 | .envrc
7 | 
8 | # JetBrains
9 | .idea
10 | 
11 | # Packages
12 | *.tgz
13 | 
14 | # Crypto
15 | **/crypto/**
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | # Based on: https://github.com/LiliC/travis-minikube/blob/minikube-30-kube-1.12/.travis.yml
2 | 
3 | sudo: required
4 | 
5 | # We need systemd for kubeadm, and it is the default from 16.04+
6 | dist: xenial
7 | 
8 | env:
9 | - CHANGE_MINIKUBE_NONE_USER=true
10 | 
11 | language: python
12 | python:
13 | - "3.7-dev"
14 | 
15 | addons:
16 |   sonarcloud:
17 |     organization: "aidtechnology"
18 | 
19 | 
20 | install:
21 | - pip install -r requirements.txt
22 | 
23 | # Set up Minikube, needed for testing
24 | before_script:
25 | - sudo mount --make-rshared /
26 | # Download kubectl, which is a requirement for using minikube.
27 | - curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/
28 | # Download minikube.
29 | - curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.30.0/minikube-linux-amd64 && chmod +x minikube && sudo mv minikube /usr/local/bin/
30 | - sudo minikube start --vm-driver=none --bootstrapper=kubeadm --kubernetes-version=v1.12.0
31 | # Fix the kubectl context, as it's often stale.
32 | - minikube update-context
33 | # Wait for Kubernetes to be up and ready.
34 | - JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'; until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do sleep 1; done
35 | # Start the ingress addon
36 | - sudo minikube addons enable ingress
37 | # Add the minikube IP to the hosts file
38 | - echo "$(minikube ip) ca.nephos.local" | sudo tee -a /etc/hosts
39 | # Install Helm
40 | - sudo apt-get install socat
41 | - curl -Lo /tmp/helm.tar.gz https://kubernetes-helm.storage.googleapis.com/helm-${HELM_VERSION}-linux-amd64.tar.gz
42 | - tar -zxvf /tmp/helm.tar.gz -C /tmp
43 | - sudo mv /tmp/linux-amd64/helm /usr/local/bin/helm
44 | - helm init --wait
45 | # Install Hyperledger Fabric tools
46 | - curl -sSL https://raw.githubusercontent.com/hyperledger/fabric/master/scripts/bootstrap.sh | bash -s ${FABRIC_VERSION} -ds
47 | - export PATH=$(pwd)/bin:$PATH
48 | 
49 | script:
50 | # Integration testing
51 | - bash ./package.sh
52 | - pytest . -s
53 | - sonar-scanner
54 | after_success:
55 | - bash ./chart_museum.sh
56 | 
--------------------------------------------------------------------------------
/EXTRA/crypto-config.yaml:
--------------------------------------------------------------------------------
1 | # Copyright IBM Corp. All Rights Reserved.
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 | #
5 | 
6 | # ---------------------------------------------------------------------------
7 | # "OrdererOrgs" - Definition of organizations managing orderer nodes
8 | # ---------------------------------------------------------------------------
9 | OrdererOrgs:
10 |   # ---------------------------------------------------------------------------
11 |   # Orderer
12 |   # ---------------------------------------------------------------------------
13 |   - Name: Orderer
14 |     Domain: test.svc.cluster.local
15 |     # ---------------------------------------------------------------------------
16 |     # "Specs" - See PeerOrgs below for complete description
17 |     # ---------------------------------------------------------------------------
18 |     Specs:
19 |       - Hostname: ord0-hlf-ord
20 | # ---------------------------------------------------------------------------
21 | # "PeerOrgs" - Definition of organizations managing peer nodes
22 | # ---------------------------------------------------------------------------
23 | PeerOrgs:
24 |   # ---------------------------------------------------------------------------
25 |   # Org1
26 |   # ---------------------------------------------------------------------------
27 |   - Name: Org1
28 |     Domain: test.svc.cluster.local
29 |     # ---------------------------------------------------------------------------
30 |     # "Specs"
31 |     # ---------------------------------------------------------------------------
32 |     # Uncomment this section to enable the explicit definition of hosts in your
33 |     # configuration. Most users will want to use Template, below
34 |     #
35 |     # Specs is an array of Spec entries. Each Spec entry consists of two fields:
36 |     #   - Hostname:   (Required) The desired hostname, sans the domain.
37 |     #   - CommonName: (Optional) Specifies the template or explicit override for
38 |     #                 the CN. By default, this is the template:
39 |     #
40 |     #                 "{{.Hostname}}.{{.Domain}}"
41 |     #
42 |     #                 which obtains its values from the Spec.Hostname and
43 |     #                 Org.Domain, respectively.
44 |     # ---------------------------------------------------------------------------
45 |     # Specs:
46 |     #   - Hostname: foo # implicitly "foo.org1.example.com"
47 |     #     CommonName: foo27.org5.example.com # overrides Hostname-based FQDN set above
48 |     #   - Hostname: bar
49 |     #   - Hostname: baz
50 |     # ---------------------------------------------------------------------------
51 |     # "Template"
52 |     # ---------------------------------------------------------------------------
53 |     # Allows for the definition of 1 or more hosts that are created sequentially
54 |     # from a template. By default, this looks like "peer%d" from 0 to Count-1.
55 |     # You may override the number of nodes (Count), the starting index (Start)
56 |     # or the template used to construct the name (Hostname).
57 |     #
58 |     # Note: Template and Specs are not mutually exclusive. You may define both
59 |     # sections and the aggregate nodes will be created for you. Take care with
60 |     # name collisions
61 |     # ---------------------------------------------------------------------------
62 |     Template:
63 |       Count: 1
64 |       # Start: 5
65 |       Hostname: "{{.Prefix}}{{.Index}}-hlf-peer" # default
66 |     # ---------------------------------------------------------------------------
67 |     # "Users"
68 |     # ---------------------------------------------------------------------------
69 |     # Count: The number of user accounts _in addition_ to Admin
70 |     # ---------------------------------------------------------------------------
71 |     Users:
72 |       Count: 0
73 | 
--------------------------------------------------------------------------------
/EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/ca/974cf60c6277a10ca880599a7ef8dcd2ede3789de72be59a6168e39aef097075_sk:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgMwV2LZmJEz6WK/vO
3 | JsZw9NUeJg+K//PX2t7jo3XshhOhRANCAAQ2v4m4C9nzWCaOv2mxhFUsUN/hYs/e
4 | wZCE50p8JcTR9oIjQRhCZ77iTtyyJ4Hyi0YJ7oSO/bvn19bvPnJPpG2X
5 | -----END PRIVATE KEY-----
6 | 
--------------------------------------------------------------------------------
/EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/ca/ca.test.svc.cluster.local-cert.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIICXDCCAgKgAwIBAgIRAJtr236LoQiamWimE10C6/UwCgYIKoZIzj0EAwIwfzEL
3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG
4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxIjAgBgNV
5 | BAMTGWNhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAwWhcN
6 | MjgxMTEzMTYzNjAwWjB/MQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5p
7 | YTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEfMB0GA1UEChMWdGVzdC5zdmMuY2x1
8 | c3Rlci5sb2NhbDEiMCAGA1UEAxMZY2EudGVzdC5zdmMuY2x1c3Rlci5sb2NhbDBZ
9 | MBMGByqGSM49AgEGCCqGSM49AwEHA0IABDa/ibgL2fNYJo6/abGEVSxQ3+Fiz97B
10 | kITnSnwlxNH2giNBGEJnvuJO3LIngfKLRgnuhI79u+fX1u8+ck+kbZejXzBdMA4G
11 | A1UdDwEB/wQEAwIBpjAPBgNVHSUECDAGBgRVHSUAMA8GA1UdEwEB/wQFMAMBAf8w
12 | KQYDVR0OBCIEIJdM9gxid6EMqIBZmn743NLt43id5yvlmmFo45rvCXB1MAoGCCqG
13 | SM49BAMCA0gAMEUCIQDF5R5JN8TyxkXou2rnwQVb2L05lEfqXLBdWfgRJfFYWAIg
14 | YNd2FfDj5h8r0yUm1TsIYm8R4zubIcSUYlXWYIEM7ng=
15 | -----END CERTIFICATE-----
16 | 
--------------------------------------------------------------------------------
/EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/msp/admincerts/Admin@test.svc.cluster.local-cert.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIICLDCCAdKgAwIBAgIRAK1vx1TifWv50bGNHoqKeq4wCgYIKoZIzj0EAwIwfzEL
3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG
4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxIjAgBgNV
5 | BAMTGWNhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAwWhcN
6 | MjgxMTEzMTYzNjAwWjBhMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5p
7 | YTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzElMCMGA1UEAwwcQWRtaW5AdGVzdC5z
8 | dmMuY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABO1mivkH
9 | RD4pFuWrJJnxG9FVAC9vGXWnq7RehPVLNYQMFZfC0o7zfrh24BPl5QorqhnfjPWS
10 | npN9gi+76goBJWKjTTBLMA4GA1UdDwEB/wQEAwIHgDAMBgNVHRMBAf8EAjAAMCsG
11 | A1UdIwQkMCKAIJdM9gxid6EMqIBZmn743NLt43id5yvlmmFo45rvCXB1MAoGCCqG
12 | SM49BAMCA0gAMEUCIQCmKozEPPRC4UXCNWQDXL6bz164btrY/kqSZGRpYOHD+AIg
13 | MHwiwlFArGH+JHDCwUzpUjMhqUlBuy5aIRMt3pSJ/ZM=
14 | -----END CERTIFICATE-----
15 | 
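The EXTRA/crypto-config tree that this and the following certificate and key files belong to is the output layout of Fabric's cryptogen tool, run against the EXTRA/crypto-config.yaml spec above. As a minimal sketch — assuming the Fabric binaries fetched by the bootstrap.sh step in .travis.yml are on the PATH — equivalent material can be regenerated with:

    # Regenerate the orderer and peer MSP trees from the spec above; the
    # Template hostname "{{.Prefix}}{{.Index}}-hlf-peer" is what yields
    # peer0-hlf-peer.test.svc.cluster.local throughout this tree.
    cryptogen generate --config=./EXTRA/crypto-config.yaml --output=./EXTRA/crypto-config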
-------------------------------------------------------------------------------- /EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/msp/cacerts/ca.test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICXDCCAgKgAwIBAgIRAJtr236LoQiamWimE10C6/UwCgYIKoZIzj0EAwIwfzEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxIjAgBgNV 5 | BAMTGWNhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAwWhcN 6 | MjgxMTEzMTYzNjAwWjB/MQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5p 7 | YTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEfMB0GA1UEChMWdGVzdC5zdmMuY2x1 8 | c3Rlci5sb2NhbDEiMCAGA1UEAxMZY2EudGVzdC5zdmMuY2x1c3Rlci5sb2NhbDBZ 9 | MBMGByqGSM49AgEGCCqGSM49AwEHA0IABDa/ibgL2fNYJo6/abGEVSxQ3+Fiz97B 10 | kITnSnwlxNH2giNBGEJnvuJO3LIngfKLRgnuhI79u+fX1u8+ck+kbZejXzBdMA4G 11 | A1UdDwEB/wQEAwIBpjAPBgNVHSUECDAGBgRVHSUAMA8GA1UdEwEB/wQFMAMBAf8w 12 | KQYDVR0OBCIEIJdM9gxid6EMqIBZmn743NLt43id5yvlmmFo45rvCXB1MAoGCCqG 13 | SM49BAMCA0gAMEUCIQDF5R5JN8TyxkXou2rnwQVb2L05lEfqXLBdWfgRJfFYWAIg 14 | YNd2FfDj5h8r0yUm1TsIYm8R4zubIcSUYlXWYIEM7ng= 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/msp/tlscacerts/tlsca.test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICYjCCAgmgAwIBAgIQEHO2jBPpk2xriW1grJyzpjAKBggqhkjOPQQDAjCBgjEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxJTAjBgNV 5 | BAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAw 6 | WhcNMjgxMTEzMTYzNjAwWjCBgjELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm 7 | b3JuaWExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3Zj 8 | LmNsdXN0ZXIubG9jYWwxJTAjBgNVBAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIu 9 | bG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAATekiayGb4Bimsg1hI9fjou 10 | lhaeZ3Jkt1hClxArH3DTjomruMDnfwwK7cNhYaZlFhTboJY87y5yFK7VL+jcn//6 11 | o18wXTAOBgNVHQ8BAf8EBAMCAaYwDwYDVR0lBAgwBgYEVR0lADAPBgNVHRMBAf8E 12 | BTADAQH/MCkGA1UdDgQiBCBuBA7XjQxDLuhmRkLWeCi6Gl8ijiV9C046vAkgwtSp 13 | 4DAKBggqhkjOPQQDAgNHADBEAiAzA9LiXfg9JpV2caN/DT4RAK/n4i4BtDAFQz1p 14 | s8UqwgIgBC2an3rAkHtBPcfUW2jTe8ndpuVU5kCkNChzcOHFNi8= 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/orderers/ord0-hlf-ord.test.svc.cluster.local/msp/admincerts/Admin@test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICLDCCAdKgAwIBAgIRAK1vx1TifWv50bGNHoqKeq4wCgYIKoZIzj0EAwIwfzEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxIjAgBgNV 5 | BAMTGWNhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAwWhcN 6 | MjgxMTEzMTYzNjAwWjBhMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5p 7 | YTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzElMCMGA1UEAwwcQWRtaW5AdGVzdC5z 8 | dmMuY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABO1mivkH 9 | RD4pFuWrJJnxG9FVAC9vGXWnq7RehPVLNYQMFZfC0o7zfrh24BPl5QorqhnfjPWS 10 | npN9gi+76goBJWKjTTBLMA4GA1UdDwEB/wQEAwIHgDAMBgNVHRMBAf8EAjAAMCsG 11 | A1UdIwQkMCKAIJdM9gxid6EMqIBZmn743NLt43id5yvlmmFo45rvCXB1MAoGCCqG 12 | 
SM49BAMCA0gAMEUCIQCmKozEPPRC4UXCNWQDXL6bz164btrY/kqSZGRpYOHD+AIg 13 | MHwiwlFArGH+JHDCwUzpUjMhqUlBuy5aIRMt3pSJ/ZM= 14 | -----END CERTIFICATE----- 15 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/orderers/ord0-hlf-ord.test.svc.cluster.local/msp/cacerts/ca.test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICXDCCAgKgAwIBAgIRAJtr236LoQiamWimE10C6/UwCgYIKoZIzj0EAwIwfzEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxIjAgBgNV 5 | BAMTGWNhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAwWhcN 6 | MjgxMTEzMTYzNjAwWjB/MQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5p 7 | YTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEfMB0GA1UEChMWdGVzdC5zdmMuY2x1 8 | c3Rlci5sb2NhbDEiMCAGA1UEAxMZY2EudGVzdC5zdmMuY2x1c3Rlci5sb2NhbDBZ 9 | MBMGByqGSM49AgEGCCqGSM49AwEHA0IABDa/ibgL2fNYJo6/abGEVSxQ3+Fiz97B 10 | kITnSnwlxNH2giNBGEJnvuJO3LIngfKLRgnuhI79u+fX1u8+ck+kbZejXzBdMA4G 11 | A1UdDwEB/wQEAwIBpjAPBgNVHSUECDAGBgRVHSUAMA8GA1UdEwEB/wQFMAMBAf8w 12 | KQYDVR0OBCIEIJdM9gxid6EMqIBZmn743NLt43id5yvlmmFo45rvCXB1MAoGCCqG 13 | SM49BAMCA0gAMEUCIQDF5R5JN8TyxkXou2rnwQVb2L05lEfqXLBdWfgRJfFYWAIg 14 | YNd2FfDj5h8r0yUm1TsIYm8R4zubIcSUYlXWYIEM7ng= 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/orderers/ord0-hlf-ord.test.svc.cluster.local/msp/keystore/4ca789b7ba945262b58f873358ef32947fe450d949084a887995a7aa89a95ee1_sk: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgdvsu8Mj4jO5+taw7 3 | nJq09sPBi+SaoLe2KhEdOSsOExKhRANCAATeLiGC3f7NVWiZJJuRBhtOrl4mXPVX 4 | wYSyQlqBatljxZK0jRmSPQ/qd8mBBSkYaBEE5qKMbWJXlMxtm+m9+9Ot 5 | -----END PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/orderers/ord0-hlf-ord.test.svc.cluster.local/msp/signcerts/ord0-hlf-ord.test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICMjCCAdigAwIBAgIQeSG4ubdKA9OtbVtvlljQZjAKBggqhkjOPQQDAjB/MQsw 3 | CQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZy 4 | YW5jaXNjbzEfMB0GA1UEChMWdGVzdC5zdmMuY2x1c3Rlci5sb2NhbDEiMCAGA1UE 5 | AxMZY2EudGVzdC5zdmMuY2x1c3Rlci5sb2NhbDAeFw0xODExMTYxNjM2MDBaFw0y 6 | ODExMTMxNjM2MDBaMGgxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlh 7 | MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMSwwKgYDVQQDEyNvcmQwLWhsZi1vcmQu 8 | dGVzdC5zdmMuY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IA 9 | BN4uIYLd/s1VaJkkm5EGG06uXiZc9VfBhLJCWoFq2WPFkrSNGZI9D+p3yYEFKRho 10 | EQTmooxtYleUzG2b6b37062jTTBLMA4GA1UdDwEB/wQEAwIHgDAMBgNVHRMBAf8E 11 | AjAAMCsGA1UdIwQkMCKAIJdM9gxid6EMqIBZmn743NLt43id5yvlmmFo45rvCXB1 12 | MAoGCCqGSM49BAMCA0gAMEUCIQCcbdX/kv+Hhh/P0vqxzZam6Di7cZC47zCHv1QU 13 | +NgOOQIgWHR03dYXStfggla4PVhS7ha+rGOBS8uQ9bVhDyum0eY= 14 | -----END CERTIFICATE----- 15 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/orderers/ord0-hlf-ord.test.svc.cluster.local/msp/tlscacerts/tlsca.test.svc.cluster.local-cert.pem: 
-------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICYjCCAgmgAwIBAgIQEHO2jBPpk2xriW1grJyzpjAKBggqhkjOPQQDAjCBgjEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxJTAjBgNV 5 | BAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAw 6 | WhcNMjgxMTEzMTYzNjAwWjCBgjELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm 7 | b3JuaWExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3Zj 8 | LmNsdXN0ZXIubG9jYWwxJTAjBgNVBAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIu 9 | bG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAATekiayGb4Bimsg1hI9fjou 10 | lhaeZ3Jkt1hClxArH3DTjomruMDnfwwK7cNhYaZlFhTboJY87y5yFK7VL+jcn//6 11 | o18wXTAOBgNVHQ8BAf8EBAMCAaYwDwYDVR0lBAgwBgYEVR0lADAPBgNVHRMBAf8E 12 | BTADAQH/MCkGA1UdDgQiBCBuBA7XjQxDLuhmRkLWeCi6Gl8ijiV9C046vAkgwtSp 13 | 4DAKBggqhkjOPQQDAgNHADBEAiAzA9LiXfg9JpV2caN/DT4RAK/n4i4BtDAFQz1p 14 | s8UqwgIgBC2an3rAkHtBPcfUW2jTe8ndpuVU5kCkNChzcOHFNi8= 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/orderers/ord0-hlf-ord.test.svc.cluster.local/tls/ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICYjCCAgmgAwIBAgIQEHO2jBPpk2xriW1grJyzpjAKBggqhkjOPQQDAjCBgjEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxJTAjBgNV 5 | BAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAw 6 | WhcNMjgxMTEzMTYzNjAwWjCBgjELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm 7 | b3JuaWExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3Zj 8 | LmNsdXN0ZXIubG9jYWwxJTAjBgNVBAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIu 9 | bG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAATekiayGb4Bimsg1hI9fjou 10 | lhaeZ3Jkt1hClxArH3DTjomruMDnfwwK7cNhYaZlFhTboJY87y5yFK7VL+jcn//6 11 | o18wXTAOBgNVHQ8BAf8EBAMCAaYwDwYDVR0lBAgwBgYEVR0lADAPBgNVHRMBAf8E 12 | BTADAQH/MCkGA1UdDgQiBCBuBA7XjQxDLuhmRkLWeCi6Gl8ijiV9C046vAkgwtSp 13 | 4DAKBggqhkjOPQQDAgNHADBEAiAzA9LiXfg9JpV2caN/DT4RAK/n4i4BtDAFQz1p 14 | s8UqwgIgBC2an3rAkHtBPcfUW2jTe8ndpuVU5kCkNChzcOHFNi8= 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/orderers/ord0-hlf-ord.test.svc.cluster.local/tls/server.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIClTCCAjygAwIBAgIRANcsTbrq0z5wi93nNAn44scwCgYIKoZIzj0EAwIwgYIx 3 | CzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1TYW4g 4 | RnJhbmNpc2NvMR8wHQYDVQQKExZ0ZXN0LnN2Yy5jbHVzdGVyLmxvY2FsMSUwIwYD 5 | VQQDExx0bHNjYS50ZXN0LnN2Yy5jbHVzdGVyLmxvY2FsMB4XDTE4MTExNjE2MzYw 6 | MFoXDTI4MTExMzE2MzYwMFowaDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm 7 | b3JuaWExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xLDAqBgNVBAMTI29yZDAtaGxm 8 | LW9yZC50ZXN0LnN2Yy5jbHVzdGVyLmxvY2FsMFkwEwYHKoZIzj0CAQYIKoZIzj0D 9 | AQcDQgAEUMMzwz2Wt07I4yn1WfywHuEGhoLTdyusMKwYO+7PHhkrSgI3iP5itYti 10 | ko8Ef3QBHyA/wE0OdMZLCJYj0hyW+qOBqzCBqDAOBgNVHQ8BAf8EBAMCBaAwHQYD 11 | VR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwKwYDVR0j 12 | BCQwIoAgbgQO140MQy7oZkZC1ngouhpfIo4lfQtOOrwJIMLUqeAwPAYDVR0RBDUw 13 | M4Ijb3JkMC1obGYtb3JkLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWyCDG9yZDAtaGxm 14 | LW9yZDAKBggqhkjOPQQDAgNHADBEAiAKh+JWdN2h+RTESTKZ7cLA3xpLHRmN9VA5 15 | 
snT9QKoPVAIgS7XR9jYlIi8dGlnwPxAip4m25NmAAkGnjkk3h+YwzYk= 16 | -----END CERTIFICATE----- 17 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/orderers/ord0-hlf-ord.test.svc.cluster.local/tls/server.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgRJ3T9c8OUaYxnB7W 3 | odguFMdDTh6Cv4Dka/OkEe6iPRqhRANCAARQwzPDPZa3TsjjKfVZ/LAe4QaGgtN3 4 | K6wwrBg77s8eGStKAjeI/mK1i2KSjwR/dAEfID/ATQ50xksIliPSHJb6 5 | -----END PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/tlsca/6e040ed78d0c432ee8664642d67828ba1a5f228e257d0b4e3abc0920c2d4a9e0_sk: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgtu1C8rJ6Llodu+38 3 | LoEvuUTJFZU1nGu2MZQW/gSBGBehRANCAATekiayGb4Bimsg1hI9fjoulhaeZ3Jk 4 | t1hClxArH3DTjomruMDnfwwK7cNhYaZlFhTboJY87y5yFK7VL+jcn//6 5 | -----END PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/tlsca/tlsca.test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICYjCCAgmgAwIBAgIQEHO2jBPpk2xriW1grJyzpjAKBggqhkjOPQQDAjCBgjEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxJTAjBgNV 5 | BAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAw 6 | WhcNMjgxMTEzMTYzNjAwWjCBgjELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm 7 | b3JuaWExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3Zj 8 | LmNsdXN0ZXIubG9jYWwxJTAjBgNVBAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIu 9 | bG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAATekiayGb4Bimsg1hI9fjou 10 | lhaeZ3Jkt1hClxArH3DTjomruMDnfwwK7cNhYaZlFhTboJY87y5yFK7VL+jcn//6 11 | o18wXTAOBgNVHQ8BAf8EBAMCAaYwDwYDVR0lBAgwBgYEVR0lADAPBgNVHRMBAf8E 12 | BTADAQH/MCkGA1UdDgQiBCBuBA7XjQxDLuhmRkLWeCi6Gl8ijiV9C046vAkgwtSp 13 | 4DAKBggqhkjOPQQDAgNHADBEAiAzA9LiXfg9JpV2caN/DT4RAK/n4i4BtDAFQz1p 14 | s8UqwgIgBC2an3rAkHtBPcfUW2jTe8ndpuVU5kCkNChzcOHFNi8= 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/users/Admin@test.svc.cluster.local/msp/admincerts/Admin@test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICLDCCAdKgAwIBAgIRAK1vx1TifWv50bGNHoqKeq4wCgYIKoZIzj0EAwIwfzEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxIjAgBgNV 5 | BAMTGWNhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAwWhcN 6 | MjgxMTEzMTYzNjAwWjBhMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5p 7 | YTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzElMCMGA1UEAwwcQWRtaW5AdGVzdC5z 8 | dmMuY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABO1mivkH 9 | RD4pFuWrJJnxG9FVAC9vGXWnq7RehPVLNYQMFZfC0o7zfrh24BPl5QorqhnfjPWS 10 | npN9gi+76goBJWKjTTBLMA4GA1UdDwEB/wQEAwIHgDAMBgNVHRMBAf8EAjAAMCsG 11 | A1UdIwQkMCKAIJdM9gxid6EMqIBZmn743NLt43id5yvlmmFo45rvCXB1MAoGCCqG 12 | SM49BAMCA0gAMEUCIQCmKozEPPRC4UXCNWQDXL6bz164btrY/kqSZGRpYOHD+AIg 
13 | MHwiwlFArGH+JHDCwUzpUjMhqUlBuy5aIRMt3pSJ/ZM= 14 | -----END CERTIFICATE----- 15 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/users/Admin@test.svc.cluster.local/msp/cacerts/ca.test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICXDCCAgKgAwIBAgIRAJtr236LoQiamWimE10C6/UwCgYIKoZIzj0EAwIwfzEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxIjAgBgNV 5 | BAMTGWNhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAwWhcN 6 | MjgxMTEzMTYzNjAwWjB/MQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5p 7 | YTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEfMB0GA1UEChMWdGVzdC5zdmMuY2x1 8 | c3Rlci5sb2NhbDEiMCAGA1UEAxMZY2EudGVzdC5zdmMuY2x1c3Rlci5sb2NhbDBZ 9 | MBMGByqGSM49AgEGCCqGSM49AwEHA0IABDa/ibgL2fNYJo6/abGEVSxQ3+Fiz97B 10 | kITnSnwlxNH2giNBGEJnvuJO3LIngfKLRgnuhI79u+fX1u8+ck+kbZejXzBdMA4G 11 | A1UdDwEB/wQEAwIBpjAPBgNVHSUECDAGBgRVHSUAMA8GA1UdEwEB/wQFMAMBAf8w 12 | KQYDVR0OBCIEIJdM9gxid6EMqIBZmn743NLt43id5yvlmmFo45rvCXB1MAoGCCqG 13 | SM49BAMCA0gAMEUCIQDF5R5JN8TyxkXou2rnwQVb2L05lEfqXLBdWfgRJfFYWAIg 14 | YNd2FfDj5h8r0yUm1TsIYm8R4zubIcSUYlXWYIEM7ng= 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/users/Admin@test.svc.cluster.local/msp/keystore/f6711dc9fde313abc6ee14ed6193939662947b3b713317ad04d98c2d7318d6a1_sk: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgESoEkvVLjg/W1c5d 3 | P8fEhOKZSD4Bi8Ki63nd2PjuSF6hRANCAATtZor5B0Q+KRblqySZ8RvRVQAvbxl1 4 | p6u0XoT1SzWEDBWXwtKO8364duAT5eUKK6oZ34z1kp6TfYIvu+oKASVi 5 | -----END PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/users/Admin@test.svc.cluster.local/msp/signcerts/Admin@test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICLDCCAdKgAwIBAgIRAK1vx1TifWv50bGNHoqKeq4wCgYIKoZIzj0EAwIwfzEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxIjAgBgNV 5 | BAMTGWNhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAwWhcN 6 | MjgxMTEzMTYzNjAwWjBhMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5p 7 | YTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzElMCMGA1UEAwwcQWRtaW5AdGVzdC5z 8 | dmMuY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABO1mivkH 9 | RD4pFuWrJJnxG9FVAC9vGXWnq7RehPVLNYQMFZfC0o7zfrh24BPl5QorqhnfjPWS 10 | npN9gi+76goBJWKjTTBLMA4GA1UdDwEB/wQEAwIHgDAMBgNVHRMBAf8EAjAAMCsG 11 | A1UdIwQkMCKAIJdM9gxid6EMqIBZmn743NLt43id5yvlmmFo45rvCXB1MAoGCCqG 12 | SM49BAMCA0gAMEUCIQCmKozEPPRC4UXCNWQDXL6bz164btrY/kqSZGRpYOHD+AIg 13 | MHwiwlFArGH+JHDCwUzpUjMhqUlBuy5aIRMt3pSJ/ZM= 14 | -----END CERTIFICATE----- 15 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/users/Admin@test.svc.cluster.local/msp/tlscacerts/tlsca.test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | 
MIICYjCCAgmgAwIBAgIQEHO2jBPpk2xriW1grJyzpjAKBggqhkjOPQQDAjCBgjEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxJTAjBgNV 5 | BAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAw 6 | WhcNMjgxMTEzMTYzNjAwWjCBgjELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm 7 | b3JuaWExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3Zj 8 | LmNsdXN0ZXIubG9jYWwxJTAjBgNVBAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIu 9 | bG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAATekiayGb4Bimsg1hI9fjou 10 | lhaeZ3Jkt1hClxArH3DTjomruMDnfwwK7cNhYaZlFhTboJY87y5yFK7VL+jcn//6 11 | o18wXTAOBgNVHQ8BAf8EBAMCAaYwDwYDVR0lBAgwBgYEVR0lADAPBgNVHRMBAf8E 12 | BTADAQH/MCkGA1UdDgQiBCBuBA7XjQxDLuhmRkLWeCi6Gl8ijiV9C046vAkgwtSp 13 | 4DAKBggqhkjOPQQDAgNHADBEAiAzA9LiXfg9JpV2caN/DT4RAK/n4i4BtDAFQz1p 14 | s8UqwgIgBC2an3rAkHtBPcfUW2jTe8ndpuVU5kCkNChzcOHFNi8= 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/users/Admin@test.svc.cluster.local/tls/ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICYjCCAgmgAwIBAgIQEHO2jBPpk2xriW1grJyzpjAKBggqhkjOPQQDAjCBgjEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxJTAjBgNV 5 | BAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAw 6 | WhcNMjgxMTEzMTYzNjAwWjCBgjELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm 7 | b3JuaWExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3Zj 8 | LmNsdXN0ZXIubG9jYWwxJTAjBgNVBAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIu 9 | bG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAATekiayGb4Bimsg1hI9fjou 10 | lhaeZ3Jkt1hClxArH3DTjomruMDnfwwK7cNhYaZlFhTboJY87y5yFK7VL+jcn//6 11 | o18wXTAOBgNVHQ8BAf8EBAMCAaYwDwYDVR0lBAgwBgYEVR0lADAPBgNVHRMBAf8E 12 | BTADAQH/MCkGA1UdDgQiBCBuBA7XjQxDLuhmRkLWeCi6Gl8ijiV9C046vAkgwtSp 13 | 4DAKBggqhkjOPQQDAgNHADBEAiAzA9LiXfg9JpV2caN/DT4RAK/n4i4BtDAFQz1p 14 | s8UqwgIgBC2an3rAkHtBPcfUW2jTe8ndpuVU5kCkNChzcOHFNi8= 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/users/Admin@test.svc.cluster.local/tls/client.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICTzCCAfWgAwIBAgIRAOXSmwTXQ/MgBFTM9cgpeBgwCgYIKoZIzj0EAwIwgYIx 3 | CzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1TYW4g 4 | RnJhbmNpc2NvMR8wHQYDVQQKExZ0ZXN0LnN2Yy5jbHVzdGVyLmxvY2FsMSUwIwYD 5 | VQQDExx0bHNjYS50ZXN0LnN2Yy5jbHVzdGVyLmxvY2FsMB4XDTE4MTExNjE2MzYw 6 | MFoXDTI4MTExMzE2MzYwMFowYTELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm 7 | b3JuaWExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xJTAjBgNVBAMMHEFkbWluQHRl 8 | c3Quc3ZjLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAASs 9 | 6JRKp647HfbvxFAVrZrOHIlSxQ/vmXyZGXD9SMO2r6ixzwXUs5448pXx3D4x5zvL 10 | ULHKw6tIKCd8q9/WqtX6o2wwajAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI 11 | KwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwKwYDVR0jBCQwIoAgbgQO 12 | 140MQy7oZkZC1ngouhpfIo4lfQtOOrwJIMLUqeAwCgYIKoZIzj0EAwIDSAAwRQIh 13 | ANUD2zz0MpDD9Dcu1kczYUt+3tf7WZhwkkoU0WnkIah0AiBY15IB6PHmvH5vlfVy 14 | prvLlBuvBNPokgvxqSJVX+DQNw== 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- 
/EXTRA/crypto-config/ordererOrganizations/test.svc.cluster.local/users/Admin@test.svc.cluster.local/tls/client.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQghBdL5zjfkmXRo/dd 3 | A9e/r2oLole2AXuwBiY9fKb+11KhRANCAASs6JRKp647HfbvxFAVrZrOHIlSxQ/v 4 | mXyZGXD9SMO2r6ixzwXUs5448pXx3D4x5zvLULHKw6tIKCd8q9/WqtX6 5 | -----END PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/ca/1b0e12cfa08fd0bbd05e19d67dbd0ebede2f363487e877b97a68a60c67b10bb2_sk: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgtecyWQHo6w2FIFHo 3 | PE+4rT1nNAsb9DMFACl9lJrnrJuhRANCAAQTxQDtheMJyN8zFqS8G4wcA6PsGBUf 4 | a1qo5Vw+/SRS1JjlYpqHq4KF4hipNwbn8ODru8LRKwnZgbEklcQyKjYU 5 | -----END PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/ca/ca.test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICXDCCAgKgAwIBAgIRAN6p+R5Yq31fzE+ZBoxSoFowCgYIKoZIzj0EAwIwfzEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxIjAgBgNV 5 | BAMTGWNhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAwWhcN 6 | MjgxMTEzMTYzNjAwWjB/MQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5p 7 | YTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEfMB0GA1UEChMWdGVzdC5zdmMuY2x1 8 | c3Rlci5sb2NhbDEiMCAGA1UEAxMZY2EudGVzdC5zdmMuY2x1c3Rlci5sb2NhbDBZ 9 | MBMGByqGSM49AgEGCCqGSM49AwEHA0IABBPFAO2F4wnI3zMWpLwbjBwDo+wYFR9r 10 | WqjlXD79JFLUmOVimoergoXiGKk3Bufw4Ou7wtErCdmBsSSVxDIqNhSjXzBdMA4G 11 | A1UdDwEB/wQEAwIBpjAPBgNVHSUECDAGBgRVHSUAMA8GA1UdEwEB/wQFMAMBAf8w 12 | KQYDVR0OBCIEIBsOEs+gj9C70F4Z1n29Dr7eLzY0h+h3uXpopgxnsQuyMAoGCCqG 13 | SM49BAMCA0gAMEUCIQCyD/XQi0NafSRf/NgYQexzLkh2w/PAtiWaE9wSazvRKgIg 14 | UPvJDyI9nQGlXaXhSrA6m/09QJSQCKd3shS1Nu1YXwA= 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/msp/admincerts/Admin@test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICKzCCAdKgAwIBAgIRAORmUlx9x7GR5NlM9z1zP4QwCgYIKoZIzj0EAwIwfzEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxIjAgBgNV 5 | BAMTGWNhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAwWhcN 6 | MjgxMTEzMTYzNjAwWjBhMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5p 7 | YTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzElMCMGA1UEAwwcQWRtaW5AdGVzdC5z 8 | dmMuY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCZsxwcv 9 | /j8tMv38KYh70kI72RzLKttceGNpQy9zuUEhzHsL8U1xwQUv11tTosjf+bc2yzS0 10 | Pg9mHa/+go5mLDqjTTBLMA4GA1UdDwEB/wQEAwIHgDAMBgNVHRMBAf8EAjAAMCsG 11 | A1UdIwQkMCKAIBsOEs+gj9C70F4Z1n29Dr7eLzY0h+h3uXpopgxnsQuyMAoGCCqG 12 | SM49BAMCA0cAMEQCIAwfowKCEW2f33N4vy6HIj5LhqVRbZivbH4pydf1El0+AiBC 13 | UUnlvLofHi2IseM7WQx5UC78Hv7FGrQr1Sut4P+GBQ== 14 | -----END CERTIFICATE----- 15 | -------------------------------------------------------------------------------- 
/EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/msp/cacerts/ca.test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICXDCCAgKgAwIBAgIRAN6p+R5Yq31fzE+ZBoxSoFowCgYIKoZIzj0EAwIwfzEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxIjAgBgNV 5 | BAMTGWNhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAwWhcN 6 | MjgxMTEzMTYzNjAwWjB/MQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5p 7 | YTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEfMB0GA1UEChMWdGVzdC5zdmMuY2x1 8 | c3Rlci5sb2NhbDEiMCAGA1UEAxMZY2EudGVzdC5zdmMuY2x1c3Rlci5sb2NhbDBZ 9 | MBMGByqGSM49AgEGCCqGSM49AwEHA0IABBPFAO2F4wnI3zMWpLwbjBwDo+wYFR9r 10 | WqjlXD79JFLUmOVimoergoXiGKk3Bufw4Ou7wtErCdmBsSSVxDIqNhSjXzBdMA4G 11 | A1UdDwEB/wQEAwIBpjAPBgNVHSUECDAGBgRVHSUAMA8GA1UdEwEB/wQFMAMBAf8w 12 | KQYDVR0OBCIEIBsOEs+gj9C70F4Z1n29Dr7eLzY0h+h3uXpopgxnsQuyMAoGCCqG 13 | SM49BAMCA0gAMEUCIQCyD/XQi0NafSRf/NgYQexzLkh2w/PAtiWaE9wSazvRKgIg 14 | UPvJDyI9nQGlXaXhSrA6m/09QJSQCKd3shS1Nu1YXwA= 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/msp/tlscacerts/tlsca.test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICYzCCAgmgAwIBAgIQfUuYacdcwaa+y0ezjSEjbTAKBggqhkjOPQQDAjCBgjEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxJTAjBgNV 5 | BAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAw 6 | WhcNMjgxMTEzMTYzNjAwWjCBgjELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm 7 | b3JuaWExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3Zj 8 | LmNsdXN0ZXIubG9jYWwxJTAjBgNVBAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIu 9 | bG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAR6ANH7iwyOELPKAd+r4XYG 10 | VJV+atcirsON2z6WNuNEoxXDVQDCuthdBJQfHveIPRuv6CY53rHXz+653NqLtZOM 11 | o18wXTAOBgNVHQ8BAf8EBAMCAaYwDwYDVR0lBAgwBgYEVR0lADAPBgNVHRMBAf8E 12 | BTADAQH/MCkGA1UdDgQiBCCKwLyfeIJSi/emEav6qGMyFB+g3Oxuog4kLhg0Wygv 13 | cjAKBggqhkjOPQQDAgNIADBFAiEAl3Gp5JPsaXcbq6qMOtWSeW80miL0+N+6T0/O 14 | XvSV+mgCIGJ+bsXxtcgF/xSAH5K4BU00jPTQx2MX0WkL69Z1MOkl 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/peers/peer0-hlf-peer.test.svc.cluster.local/msp/admincerts/Admin@test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICKzCCAdKgAwIBAgIRAORmUlx9x7GR5NlM9z1zP4QwCgYIKoZIzj0EAwIwfzEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxIjAgBgNV 5 | BAMTGWNhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAwWhcN 6 | MjgxMTEzMTYzNjAwWjBhMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5p 7 | YTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzElMCMGA1UEAwwcQWRtaW5AdGVzdC5z 8 | dmMuY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCZsxwcv 9 | /j8tMv38KYh70kI72RzLKttceGNpQy9zuUEhzHsL8U1xwQUv11tTosjf+bc2yzS0 10 | Pg9mHa/+go5mLDqjTTBLMA4GA1UdDwEB/wQEAwIHgDAMBgNVHRMBAf8EAjAAMCsG 11 | A1UdIwQkMCKAIBsOEs+gj9C70F4Z1n29Dr7eLzY0h+h3uXpopgxnsQuyMAoGCCqG 12 | SM49BAMCA0cAMEQCIAwfowKCEW2f33N4vy6HIj5LhqVRbZivbH4pydf1El0+AiBC 13 | UUnlvLofHi2IseM7WQx5UC78Hv7FGrQr1Sut4P+GBQ== 14 | 
-----END CERTIFICATE----- 15 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/peers/peer0-hlf-peer.test.svc.cluster.local/msp/cacerts/ca.test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICXDCCAgKgAwIBAgIRAN6p+R5Yq31fzE+ZBoxSoFowCgYIKoZIzj0EAwIwfzEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxIjAgBgNV 5 | BAMTGWNhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAwWhcN 6 | MjgxMTEzMTYzNjAwWjB/MQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5p 7 | YTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEfMB0GA1UEChMWdGVzdC5zdmMuY2x1 8 | c3Rlci5sb2NhbDEiMCAGA1UEAxMZY2EudGVzdC5zdmMuY2x1c3Rlci5sb2NhbDBZ 9 | MBMGByqGSM49AgEGCCqGSM49AwEHA0IABBPFAO2F4wnI3zMWpLwbjBwDo+wYFR9r 10 | WqjlXD79JFLUmOVimoergoXiGKk3Bufw4Ou7wtErCdmBsSSVxDIqNhSjXzBdMA4G 11 | A1UdDwEB/wQEAwIBpjAPBgNVHSUECDAGBgRVHSUAMA8GA1UdEwEB/wQFMAMBAf8w 12 | KQYDVR0OBCIEIBsOEs+gj9C70F4Z1n29Dr7eLzY0h+h3uXpopgxnsQuyMAoGCCqG 13 | SM49BAMCA0gAMEUCIQCyD/XQi0NafSRf/NgYQexzLkh2w/PAtiWaE9wSazvRKgIg 14 | UPvJDyI9nQGlXaXhSrA6m/09QJSQCKd3shS1Nu1YXwA= 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/peers/peer0-hlf-peer.test.svc.cluster.local/msp/keystore/22de23eeb2b0259b6b739a6954408780beb9e614858c879c75df2e6d04325bf4_sk: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgYxXOQsbAWgGe3jTk 3 | mCXvrW/nQgdDTx6w8qR8RTccREWhRANCAAQfU3F6LblDCfBfAbQX/Znk9SUXjG0i 4 | LyWVNIULoj+PzcC/5vRD18NcGkVn2gGZq9VnF4vEjv6Lv4yp87/GcUmW 5 | -----END PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/peers/peer0-hlf-peer.test.svc.cluster.local/msp/signcerts/peer0-hlf-peer.test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICNTCCAdugAwIBAgIRAK5bmw+kGL1WnGMKedQz3LEwCgYIKoZIzj0EAwIwfzEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxIjAgBgNV 5 | BAMTGWNhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAwWhcN 6 | MjgxMTEzMTYzNjAwWjBqMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5p 7 | YTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEuMCwGA1UEAxMlcGVlcjAtaGxmLXBl 8 | ZXIudGVzdC5zdmMuY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEH 9 | A0IABB9TcXotuUMJ8F8BtBf9meT1JReMbSIvJZU0hQuiP4/NwL/m9EPXw1waRWfa 10 | AZmr1WcXi8SO/ou/jKnzv8ZxSZajTTBLMA4GA1UdDwEB/wQEAwIHgDAMBgNVHRMB 11 | Af8EAjAAMCsGA1UdIwQkMCKAIBsOEs+gj9C70F4Z1n29Dr7eLzY0h+h3uXpopgxn 12 | sQuyMAoGCCqGSM49BAMCA0gAMEUCIQCjLN4bBUuCtxD8hOg1ZYp7CgCNQn8YHj2K 13 | zocqBCWI1wIgOnOrhlNq2LZwT1YA0vRwP3ljibsTEzJaCyVfY4ra/Zk= 14 | -----END CERTIFICATE----- 15 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/peers/peer0-hlf-peer.test.svc.cluster.local/msp/tlscacerts/tlsca.test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | 
MIICYzCCAgmgAwIBAgIQfUuYacdcwaa+y0ezjSEjbTAKBggqhkjOPQQDAjCBgjEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxJTAjBgNV 5 | BAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAw 6 | WhcNMjgxMTEzMTYzNjAwWjCBgjELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm 7 | b3JuaWExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3Zj 8 | LmNsdXN0ZXIubG9jYWwxJTAjBgNVBAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIu 9 | bG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAR6ANH7iwyOELPKAd+r4XYG 10 | VJV+atcirsON2z6WNuNEoxXDVQDCuthdBJQfHveIPRuv6CY53rHXz+653NqLtZOM 11 | o18wXTAOBgNVHQ8BAf8EBAMCAaYwDwYDVR0lBAgwBgYEVR0lADAPBgNVHRMBAf8E 12 | BTADAQH/MCkGA1UdDgQiBCCKwLyfeIJSi/emEav6qGMyFB+g3Oxuog4kLhg0Wygv 13 | cjAKBggqhkjOPQQDAgNIADBFAiEAl3Gp5JPsaXcbq6qMOtWSeW80miL0+N+6T0/O 14 | XvSV+mgCIGJ+bsXxtcgF/xSAH5K4BU00jPTQx2MX0WkL69Z1MOkl 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/peers/peer0-hlf-peer.test.svc.cluster.local/tls/ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICYzCCAgmgAwIBAgIQfUuYacdcwaa+y0ezjSEjbTAKBggqhkjOPQQDAjCBgjEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxJTAjBgNV 5 | BAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAw 6 | WhcNMjgxMTEzMTYzNjAwWjCBgjELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm 7 | b3JuaWExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3Zj 8 | LmNsdXN0ZXIubG9jYWwxJTAjBgNVBAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIu 9 | bG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAR6ANH7iwyOELPKAd+r4XYG 10 | VJV+atcirsON2z6WNuNEoxXDVQDCuthdBJQfHveIPRuv6CY53rHXz+653NqLtZOM 11 | o18wXTAOBgNVHQ8BAf8EBAMCAaYwDwYDVR0lBAgwBgYEVR0lADAPBgNVHRMBAf8E 12 | BTADAQH/MCkGA1UdDgQiBCCKwLyfeIJSi/emEav6qGMyFB+g3Oxuog4kLhg0Wygv 13 | cjAKBggqhkjOPQQDAgNIADBFAiEAl3Gp5JPsaXcbq6qMOtWSeW80miL0+N+6T0/O 14 | XvSV+mgCIGJ+bsXxtcgF/xSAH5K4BU00jPTQx2MX0WkL69Z1MOkl 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/peers/peer0-hlf-peer.test.svc.cluster.local/tls/server.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICnDCCAkKgAwIBAgIRAPgGRDkmA2zJJLgjS90bMbkwCgYIKoZIzj0EAwIwgYIx 3 | CzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1TYW4g 4 | RnJhbmNpc2NvMR8wHQYDVQQKExZ0ZXN0LnN2Yy5jbHVzdGVyLmxvY2FsMSUwIwYD 5 | VQQDExx0bHNjYS50ZXN0LnN2Yy5jbHVzdGVyLmxvY2FsMB4XDTE4MTExNjE2MzYw 6 | MFoXDTI4MTExMzE2MzYwMFowajELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm 7 | b3JuaWExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xLjAsBgNVBAMTJXBlZXIwLWhs 8 | Zi1wZWVyLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjO 9 | PQMBBwNCAARm5xxv+8Xh28MTC9T8OCzhprK8JxxT2ve4/uaCZpj2nbKOuqMSuSMC 10 | RSCTrfHamNXWJoFokmam4+oV9yvwhpHPo4GvMIGsMA4GA1UdDwEB/wQEAwIFoDAd 11 | BgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADArBgNV 12 | HSMEJDAigCCKwLyfeIJSi/emEav6qGMyFB+g3Oxuog4kLhg0WygvcjBABgNVHREE 13 | OTA3giVwZWVyMC1obGYtcGVlci50ZXN0LnN2Yy5jbHVzdGVyLmxvY2Fsgg5wZWVy 14 | MC1obGYtcGVlcjAKBggqhkjOPQQDAgNIADBFAiEAjL2R0yAem68k+4LVI2SB6GLI 15 | 1jnLBGmqADdVMA4vMcgCIFEPNug5hTmdlXERaog2j3wo3csN5OK3pG7QV6hQA+Bb 16 | -----END CERTIFICATE----- 17 | 
-------------------------------------------------------------------------------- /EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/peers/peer0-hlf-peer.test.svc.cluster.local/tls/server.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgwImhkW2WJ6GHDPBx 3 | TIJkIBGHzZf0cAyPDapcwiO1g3OhRANCAARm5xxv+8Xh28MTC9T8OCzhprK8JxxT 4 | 2ve4/uaCZpj2nbKOuqMSuSMCRSCTrfHamNXWJoFokmam4+oV9yvwhpHP 5 | -----END PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/tlsca/8ac0bc9f7882528bf7a611abfaa86332141fa0dcec6ea20e242e18345b282f72_sk: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgJwbFysUmoNSmvgK7 3 | DShRMAVg5mx/W5wnNEZfuUGSDRihRANCAAR6ANH7iwyOELPKAd+r4XYGVJV+atci 4 | rsON2z6WNuNEoxXDVQDCuthdBJQfHveIPRuv6CY53rHXz+653NqLtZOM 5 | -----END PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/tlsca/tlsca.test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICYzCCAgmgAwIBAgIQfUuYacdcwaa+y0ezjSEjbTAKBggqhkjOPQQDAjCBgjEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxJTAjBgNV 5 | BAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAw 6 | WhcNMjgxMTEzMTYzNjAwWjCBgjELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm 7 | b3JuaWExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3Zj 8 | LmNsdXN0ZXIubG9jYWwxJTAjBgNVBAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIu 9 | bG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAR6ANH7iwyOELPKAd+r4XYG 10 | VJV+atcirsON2z6WNuNEoxXDVQDCuthdBJQfHveIPRuv6CY53rHXz+653NqLtZOM 11 | o18wXTAOBgNVHQ8BAf8EBAMCAaYwDwYDVR0lBAgwBgYEVR0lADAPBgNVHRMBAf8E 12 | BTADAQH/MCkGA1UdDgQiBCCKwLyfeIJSi/emEav6qGMyFB+g3Oxuog4kLhg0Wygv 13 | cjAKBggqhkjOPQQDAgNIADBFAiEAl3Gp5JPsaXcbq6qMOtWSeW80miL0+N+6T0/O 14 | XvSV+mgCIGJ+bsXxtcgF/xSAH5K4BU00jPTQx2MX0WkL69Z1MOkl 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/users/Admin@test.svc.cluster.local/msp/admincerts/Admin@test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICKzCCAdKgAwIBAgIRAORmUlx9x7GR5NlM9z1zP4QwCgYIKoZIzj0EAwIwfzEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxIjAgBgNV 5 | BAMTGWNhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAwWhcN 6 | MjgxMTEzMTYzNjAwWjBhMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5p 7 | YTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzElMCMGA1UEAwwcQWRtaW5AdGVzdC5z 8 | dmMuY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCZsxwcv 9 | /j8tMv38KYh70kI72RzLKttceGNpQy9zuUEhzHsL8U1xwQUv11tTosjf+bc2yzS0 10 | Pg9mHa/+go5mLDqjTTBLMA4GA1UdDwEB/wQEAwIHgDAMBgNVHRMBAf8EAjAAMCsG 11 | A1UdIwQkMCKAIBsOEs+gj9C70F4Z1n29Dr7eLzY0h+h3uXpopgxnsQuyMAoGCCqG 12 | SM49BAMCA0cAMEQCIAwfowKCEW2f33N4vy6HIj5LhqVRbZivbH4pydf1El0+AiBC 13 | UUnlvLofHi2IseM7WQx5UC78Hv7FGrQr1Sut4P+GBQ== 14 | -----END CERTIFICATE----- 15 | 
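Each signing certificate in this tree is issued by the corresponding organization CA, and the chain can be checked offline with standard OpenSSL tooling. A quick sketch, with paths relative to EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local (shown for illustration only):

    # Print subject, issuer and validity window of the admin certificate.
    openssl x509 -noout -subject -issuer -dates \
        -in users/Admin@test.svc.cluster.local/msp/signcerts/Admin@test.svc.cluster.local-cert.pem
    # Confirm that it chains to the organization CA certificate.
    openssl verify -CAfile msp/cacerts/ca.test.svc.cluster.local-cert.pem \
        users/Admin@test.svc.cluster.local/msp/signcerts/Admin@test.svc.cluster.local-cert.pem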
-------------------------------------------------------------------------------- /EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/users/Admin@test.svc.cluster.local/msp/cacerts/ca.test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICXDCCAgKgAwIBAgIRAN6p+R5Yq31fzE+ZBoxSoFowCgYIKoZIzj0EAwIwfzEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxIjAgBgNV 5 | BAMTGWNhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAwWhcN 6 | MjgxMTEzMTYzNjAwWjB/MQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5p 7 | YTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEfMB0GA1UEChMWdGVzdC5zdmMuY2x1 8 | c3Rlci5sb2NhbDEiMCAGA1UEAxMZY2EudGVzdC5zdmMuY2x1c3Rlci5sb2NhbDBZ 9 | MBMGByqGSM49AgEGCCqGSM49AwEHA0IABBPFAO2F4wnI3zMWpLwbjBwDo+wYFR9r 10 | WqjlXD79JFLUmOVimoergoXiGKk3Bufw4Ou7wtErCdmBsSSVxDIqNhSjXzBdMA4G 11 | A1UdDwEB/wQEAwIBpjAPBgNVHSUECDAGBgRVHSUAMA8GA1UdEwEB/wQFMAMBAf8w 12 | KQYDVR0OBCIEIBsOEs+gj9C70F4Z1n29Dr7eLzY0h+h3uXpopgxnsQuyMAoGCCqG 13 | SM49BAMCA0gAMEUCIQCyD/XQi0NafSRf/NgYQexzLkh2w/PAtiWaE9wSazvRKgIg 14 | UPvJDyI9nQGlXaXhSrA6m/09QJSQCKd3shS1Nu1YXwA= 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/users/Admin@test.svc.cluster.local/msp/keystore/478bb28c12190a41a8eb361c6815cf91a0d0ec291f0977f562661a67b1c250dd_sk: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgn3Rvk8JAivQEgxEV 3 | 1uweQa3KtYKx2iY9lP7F+r1/yZOhRANCAAQmbMcHL/4/LTL9/CmIe9JCO9kcyyrb 4 | XHhjaUMvc7lBIcx7C/FNccEFL9dbU6LI3/m3Nss0tD4PZh2v/oKOZiw6 5 | -----END PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/users/Admin@test.svc.cluster.local/msp/signcerts/Admin@test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICKzCCAdKgAwIBAgIRAORmUlx9x7GR5NlM9z1zP4QwCgYIKoZIzj0EAwIwfzEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxIjAgBgNV 5 | BAMTGWNhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAwWhcN 6 | MjgxMTEzMTYzNjAwWjBhMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5p 7 | YTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzElMCMGA1UEAwwcQWRtaW5AdGVzdC5z 8 | dmMuY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCZsxwcv 9 | /j8tMv38KYh70kI72RzLKttceGNpQy9zuUEhzHsL8U1xwQUv11tTosjf+bc2yzS0 10 | Pg9mHa/+go5mLDqjTTBLMA4GA1UdDwEB/wQEAwIHgDAMBgNVHRMBAf8EAjAAMCsG 11 | A1UdIwQkMCKAIBsOEs+gj9C70F4Z1n29Dr7eLzY0h+h3uXpopgxnsQuyMAoGCCqG 12 | SM49BAMCA0cAMEQCIAwfowKCEW2f33N4vy6HIj5LhqVRbZivbH4pydf1El0+AiBC 13 | UUnlvLofHi2IseM7WQx5UC78Hv7FGrQr1Sut4P+GBQ== 14 | -----END CERTIFICATE----- 15 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/users/Admin@test.svc.cluster.local/msp/tlscacerts/tlsca.test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICYzCCAgmgAwIBAgIQfUuYacdcwaa+y0ezjSEjbTAKBggqhkjOPQQDAjCBgjEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | 
cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxJTAjBgNV 5 | BAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAw 6 | WhcNMjgxMTEzMTYzNjAwWjCBgjELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm 7 | b3JuaWExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3Zj 8 | LmNsdXN0ZXIubG9jYWwxJTAjBgNVBAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIu 9 | bG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAR6ANH7iwyOELPKAd+r4XYG 10 | VJV+atcirsON2z6WNuNEoxXDVQDCuthdBJQfHveIPRuv6CY53rHXz+653NqLtZOM 11 | o18wXTAOBgNVHQ8BAf8EBAMCAaYwDwYDVR0lBAgwBgYEVR0lADAPBgNVHRMBAf8E 12 | BTADAQH/MCkGA1UdDgQiBCCKwLyfeIJSi/emEav6qGMyFB+g3Oxuog4kLhg0Wygv 13 | cjAKBggqhkjOPQQDAgNIADBFAiEAl3Gp5JPsaXcbq6qMOtWSeW80miL0+N+6T0/O 14 | XvSV+mgCIGJ+bsXxtcgF/xSAH5K4BU00jPTQx2MX0WkL69Z1MOkl 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/users/Admin@test.svc.cluster.local/tls/ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICYzCCAgmgAwIBAgIQfUuYacdcwaa+y0ezjSEjbTAKBggqhkjOPQQDAjCBgjEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxJTAjBgNV 5 | BAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAw 6 | WhcNMjgxMTEzMTYzNjAwWjCBgjELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm 7 | b3JuaWExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3Zj 8 | LmNsdXN0ZXIubG9jYWwxJTAjBgNVBAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIu 9 | bG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAR6ANH7iwyOELPKAd+r4XYG 10 | VJV+atcirsON2z6WNuNEoxXDVQDCuthdBJQfHveIPRuv6CY53rHXz+653NqLtZOM 11 | o18wXTAOBgNVHQ8BAf8EBAMCAaYwDwYDVR0lBAgwBgYEVR0lADAPBgNVHRMBAf8E 12 | BTADAQH/MCkGA1UdDgQiBCCKwLyfeIJSi/emEav6qGMyFB+g3Oxuog4kLhg0Wygv 13 | cjAKBggqhkjOPQQDAgNIADBFAiEAl3Gp5JPsaXcbq6qMOtWSeW80miL0+N+6T0/O 14 | XvSV+mgCIGJ+bsXxtcgF/xSAH5K4BU00jPTQx2MX0WkL69Z1MOkl 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/users/Admin@test.svc.cluster.local/tls/client.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICTjCCAfSgAwIBAgIQYFcTz6uk7Ch2Zk+ZTmNPtDAKBggqhkjOPQQDAjCBgjEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxJTAjBgNV 5 | BAMTHHRsc2NhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAw 6 | WhcNMjgxMTEzMTYzNjAwWjBhMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZv 7 | cm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzElMCMGA1UEAwwcQWRtaW5AdGVz 8 | dC5zdmMuY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABKEH 9 | WaZVbHgcGGJ71cpsqTu//XwLeNuIRf2Pn9C2D3hCfH/6v5wLyjkNZMq8pgBfhdPL 10 | uDYmOoRJ/NrDE4doKhyjbDBqMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggr 11 | BgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADArBgNVHSMEJDAigCCKwLyf 12 | eIJSi/emEav6qGMyFB+g3Oxuog4kLhg0WygvcjAKBggqhkjOPQQDAgNIADBFAiEA 13 | lkJajZTmf40UTVSwgnCqDZS6nprAPYDH/KNlg4TJe2ACIFnuFCnC7TGA7vzgjt+i 14 | SovlU4MsDhdJ1Xb7nrOWJ1wJ 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /EXTRA/crypto-config/peerOrganizations/test.svc.cluster.local/users/Admin@test.svc.cluster.local/tls/client.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | 
MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQg1ZgiioR/wyHfE0ZG 3 | /5rRTW4u2Sn/yoHmYaDOGzCD79+hRANCAAShB1mmVWx4HBhie9XKbKk7v/18C3jb 4 | iEX9j5/Qtg94Qnx/+r+cC8o5DWTKvKYAX4XTy7g2JjqESfzawxOHaCoc 5 | -----END PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /EXTRA/mychannel.tx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidtechnology/at-charts/c785f1dc0cd0e53ff2dee23f3f5c3c81823f50d1/EXTRA/mychannel.tx -------------------------------------------------------------------------------- /chart_museum.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CHART_LIST=(hl-composer hlf-ca hlf-couchdb hlf-ord hlf-peer) 4 | 5 | for CHART in ${CHART_LIST[*]} 6 | do 7 | CHART_VERSION=$(grep '^version:' ./${CHART}/Chart.yaml | awk '{print $2}') # Anchor on '^version:' so that appVersion does not also match 8 | echo "Chart version is $CHART_VERSION" 9 | CHART_MISSING=$(curl https://${CHARTMUSEUM_URL}/api/charts/${CHART}/${CHART_VERSION} --user ${CHARTMUSEUM_USER}:${CHARTMUSEUM_PASS} | grep error) # ChartMuseum returns an error payload when this chart version is absent 10 | if [[ -z "$CHART_MISSING" ]] 11 | then 12 | echo "Chart already saved to Chart Museum" 13 | else 14 | helm package ./${CHART} 15 | curl --data-binary "@$CHART-$CHART_VERSION.tgz" https://${CHARTMUSEUM_URL}/api/charts --user ${CHARTMUSEUM_USER}:${CHARTMUSEUM_PASS} 16 | fi 17 | done 18 | -------------------------------------------------------------------------------- /hl-composer/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | 23 | # OWNERS file for Helm repository 24 | OWNERS 25 | -------------------------------------------------------------------------------- /hl-composer/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | description: Hyperledger Composer REST Server chart 3 | name: hl-composer 4 | version: 1.1.0 5 | appVersion: 0.20.10 6 | keywords: 7 | - blockchain 8 | - hyperledger 9 | - fabric 10 | - composer 11 | home: https://hyperledger.github.io/composer 12 | sources: 13 | - https://github.com/hyperledger/composer 14 | maintainers: 15 | - name: alexvicegrab 16 | email: sasha@aid.technology 17 | - name: nicolapaoli 18 | email: nicola@aid.technology 19 | icon: https://www.hyperledger.org/wp-content/uploads/2018/04/composer-logo.png 20 | -------------------------------------------------------------------------------- /hl-composer/OWNERS: -------------------------------------------------------------------------------- 1 | approvers: 2 | - alexvicegrab 3 | - nicolapaoli 4 | reviewers: 5 | - alexvicegrab 6 | - nicolapaoli 7 | -------------------------------------------------------------------------------- /hl-composer/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. Get the application URL by running these commands: 2 | {{- if .Values.rest.ingress.enabled }} 3 | {{- range .Values.rest.ingress.hosts }} 4 | http{{ if $.Values.rest.ingress.tls }}s{{ end }}://{{ .
}}{{ $.Values.rest.ingress.path }} 5 | {{- end }} 6 | {{- else if contains "NodePort" .Values.rest.service.type }} 7 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "hl-composer.fullname" . }}) 8 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 9 | echo http://$NODE_IP:$NODE_PORT 10 | {{- else if contains "LoadBalancer" .Values.rest.service.type }} 11 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 12 | You can watch the status by running 'kubectl get svc -w {{ include "hl-composer.fullname" . }}' 13 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "hl-composer.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') 14 | echo http://$SERVICE_IP:{{ .Values.rest.service.port }} 15 | {{- else if contains "ClusterIP" .Values.rest.service.type }} 16 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "hl-composer.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 17 | echo "Visit http://127.0.0.1:8080 to use your application" 18 | kubectl port-forward $POD_NAME 8080:3000 19 | {{- end }} 20 | 21 | 2. Obtain the COMPOSER_APIKEY to connect to the REST server: 22 | export COMPOSER_APIKEY=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "hl-composer.fullname" . }}-rest -o jsonpath="{.data.COMPOSER_APIKEY}" | base64 --decode; echo) 23 | -------------------------------------------------------------------------------- /hl-composer/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "hl-composer.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "hl-composer.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "hl-composer.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | 34 | {{- /* 35 | Credit: @technosophos 36 | https://github.com/technosophos/common-chart/ 37 | labels.standard prints the standard Helm labels. 38 | The standard labels are frequently used in metadata. 39 | */ -}} 40 | {{- define "labels.standard" -}} 41 | app: {{ include "hl-composer.name" . }} 42 | heritage: {{ .Release.Service | quote }} 43 | release: {{ .Release.Name | quote }} 44 | chart: {{ include "hl-composer.chart" .
}} 45 | {{- end -}} 46 | -------------------------------------------------------------------------------- /hl-composer/templates/hl-composer-cli-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1beta2 3 | kind: Deployment 4 | metadata: 5 | name: {{ include "hl-composer.fullname" . }}-cli 6 | labels: 7 | name: {{ include "hl-composer.fullname" . }}-cli 8 | {{ include "labels.standard" . | indent 4 }} 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: {{ include "hl-composer.name" . }} 14 | release: {{ .Release.Name }} 15 | template: 16 | metadata: 17 | name: {{ include "hl-composer.fullname" . }}-cli 18 | labels: 19 | name: {{ include "hl-composer.fullname" . }}-cli 20 | {{ include "labels.standard" . | indent 8 }} 21 | spec: 22 | volumes: 23 | - name: persistent-volume 24 | {{- if .Values.persistence.enabled }} 25 | persistentVolumeClaim: 26 | claimName: {{ .Values.persistence.existingClaim | default (include "hl-composer.fullname" .) }} 27 | {{- else }} 28 | emptyDir: {} 29 | {{- end }} 30 | {{- if .Values.cli.secrets.blockchainNetwork }} 31 | - name: blockchain-network 32 | secret: 33 | secretName: {{ .Values.cli.secrets.blockchainNetwork }} 34 | {{- end }} 35 | {{- if .Values.cli.secrets.adminCert }} 36 | - name: admin-cert 37 | secret: 38 | secretName: {{ .Values.cli.secrets.adminCert }} 39 | {{- end }} 40 | {{- if .Values.cli.secrets.adminKey }} 41 | - name: admin-key 42 | secret: 43 | secretName: {{ .Values.cli.secrets.adminKey }} 44 | {{- end }} 45 | {{- if .Values.cli.secrets.hlcConnection }} 46 | - name: hlc-connection 47 | configMap: 48 | name: {{ .Values.cli.secrets.hlcConnection }} 49 | {{- end }} 50 | containers: 51 | - name: cli 52 | image: "{{ .Values.cli.image.repository }}:{{ .Values.cli.image.tag }}" 53 | imagePullPolicy: {{ .Values.cli.image.pullPolicy }} 54 | # TODO: Add liveness and readiness probes 55 | # Run infinitely 56 | command: 57 | - sh 58 | - -c 59 | - | 60 | tail -f /dev/null 61 | volumeMounts: 62 | - mountPath: /home/composer/.composer 63 | name: persistent-volume 64 | {{- if .Values.cli.secrets.blockchainNetwork }} 65 | - mountPath: /hl_config/blockchain_network 66 | name: blockchain-network 67 | {{- end }} 68 | {{- if .Values.cli.secrets.adminCert }} 69 | - mountPath: /hl_config/admin/signcerts 70 | name: admin-cert 71 | {{- end }} 72 | {{- if .Values.cli.secrets.adminKey }} 73 | - mountPath: /hl_config/admin/keystore 74 | name: admin-key 75 | {{- end }} 76 | {{- if .Values.cli.secrets.hlcConnection }} 77 | - mountPath: /hl_config/hlc-connection 78 | name: hlc-connection 79 | {{- end }} 80 | resources: 81 | {{ toYaml .Values.cli.resources | indent 12 }} 82 | {{- with .Values.cli.nodeSelector }} 83 | nodeSelector: 84 | {{ toYaml . | indent 8 }} 85 | {{- end }} 86 | {{- with .Values.cli.affinity }} 87 | affinity: 88 | {{ toYaml . | indent 8 }} 89 | {{- end }} 90 | {{- with .Values.cli.tolerations }} 91 | tolerations: 92 | {{ toYaml . | indent 8 }} 93 | {{- end }} 94 | -------------------------------------------------------------------------------- /hl-composer/templates/hl-composer-pg-deployment.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.pg.enabled -}} 2 | apiVersion: apps/v1beta2 3 | kind: Deployment 4 | metadata: 5 | name: {{ include "hl-composer.fullname" . }}-pg 6 | labels: 7 | name: {{ include "hl-composer.fullname" . }}-pg 8 | {{ include "labels.standard" . 
| indent 4 }} 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: {{ include "hl-composer.name" . }} 14 | release: {{ .Release.Name }} 15 | template: 16 | metadata: 17 | name: {{ include "hl-composer.fullname" . }}-pg 18 | labels: 19 | name: {{ include "hl-composer.fullname" . }}-pg 20 | {{ include "labels.standard" . | indent 8 }} 21 | spec: 22 | volumes: 23 | - name: persistent-volume 24 | {{- if .Values.persistence.enabled }} 25 | persistentVolumeClaim: 26 | claimName: {{ .Values.persistence.existingClaim | default (include "hl-composer.fullname" .) }} 27 | {{- else }} 28 | emptyDir: {} 29 | {{- end }} 30 | containers: 31 | - name: composer-playground 32 | image: "{{ .Values.pg.image.repository }}:{{ .Values.pg.image.tag }}" 33 | imagePullPolicy: {{ .Values.pg.image.pullPolicy }} 34 | # TODO: Add liveness and readiness probes 35 | volumeMounts: 36 | - mountPath: /home/composer/.composer 37 | name: persistent-volume 38 | resources: 39 | {{ toYaml .Values.pg.resources | indent 12 }} 40 | {{- with .Values.pg.nodeSelector }} 41 | nodeSelector: 42 | {{ toYaml . | indent 8 }} 43 | {{- end }} 44 | {{- with .Values.pg.affinity }} 45 | affinity: 46 | {{ toYaml . | indent 8 }} 47 | {{- end }} 48 | {{- with .Values.pg.tolerations }} 49 | tolerations: 50 | {{ toYaml . | indent 8 }} 51 | {{- end }} 52 | {{- end }} 53 | -------------------------------------------------------------------------------- /hl-composer/templates/hl-composer-pg-ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.pg.enabled .Values.pg.ingress.enabled -}} 2 | {{- $fullName := include "hl-composer.fullname" . -}} 3 | {{- $ingressPath := .Values.pg.ingress.path -}} 4 | apiVersion: extensions/v1beta1 5 | kind: Ingress 6 | metadata: 7 | name: {{ $fullName }}-pg 8 | labels: 9 | {{ include "labels.standard" . | indent 4 }} 10 | {{- with .Values.pg.ingress.annotations }} 11 | annotations: 12 | {{ toYaml . | indent 4 }} 13 | {{- end }} 14 | spec: 15 | {{- if .Values.pg.ingress.tls }} 16 | tls: 17 | {{- range .Values.pg.ingress.tls }} 18 | - hosts: 19 | {{- range .hosts }} 20 | - {{ . }} 21 | {{- end }} 22 | secretName: {{ .secretName }} 23 | {{- end }} 24 | {{- end }} 25 | rules: 26 | {{- range .Values.pg.ingress.hosts }} 27 | - host: {{ . }} 28 | http: 29 | paths: 30 | - path: {{ $ingressPath }} 31 | backend: 32 | serviceName: {{ $fullName }}-pg 33 | servicePort: http 34 | {{- end }} 35 | {{- end }} 36 | -------------------------------------------------------------------------------- /hl-composer/templates/hl-composer-pg-service.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.pg.enabled -}} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ include "hl-composer.fullname" . }}-pg 6 | labels: 7 | {{ include "labels.standard" . | indent 4 }} 8 | spec: 9 | type: {{ .Values.pg.service.type }} 10 | ports: 11 | - port: {{ .Values.pg.service.port }} 12 | targetPort: 8080 13 | protocol: TCP 14 | name: http 15 | selector: 16 | name: {{ include "hl-composer.fullname" . }}-pg 17 | app: {{ include "hl-composer.name" . 
}} 18 | release: {{ .Release.Name }} 19 | {{- end}} -------------------------------------------------------------------------------- /hl-composer/templates/hl-composer-pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: {{ include "hl-composer.fullname" . }} 6 | labels: 7 | {{ include "labels.standard" . | indent 4 }} 8 | {{- if .Values.persistence.annotations }} 9 | annotations: 10 | {{ toYaml .Values.persistence.annotations | indent 4 }} 11 | {{- end }} 12 | spec: 13 | accessModes: 14 | - {{ .Values.persistence.accessMode | quote }} 15 | resources: 16 | requests: 17 | storage: {{ .Values.persistence.size | quote }} 18 | {{- if .Values.persistence.storageClass }} 19 | {{- if (eq "-" .Values.persistence.storageClass) }} 20 | storageClassName: "" 21 | {{- else }} 22 | storageClassName: "{{ .Values.persistence.storageClass }}" 23 | {{- end }} 24 | {{- end }} 25 | {{- end }} 26 | -------------------------------------------------------------------------------- /hl-composer/templates/hl-composer-rest-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1beta2 3 | kind: Deployment 4 | metadata: 5 | name: {{ include "hl-composer.fullname" . }}-rest 6 | labels: 7 | name: {{ include "hl-composer.fullname" . }}-rest 8 | {{ include "labels.standard" . | indent 4 }} 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: {{ include "hl-composer.name" . }} 14 | release: {{ .Release.Name }} 15 | template: 16 | metadata: 17 | name: {{ include "hl-composer.fullname" . }}-rest 18 | labels: 19 | name: {{ include "hl-composer.fullname" . }}-rest 20 | {{ include "labels.standard" . | indent 8 }} 21 | spec: 22 | volumes: 23 | - name: persistent-volume 24 | {{- if .Values.persistence.enabled }} 25 | persistentVolumeClaim: 26 | claimName: {{ .Values.persistence.existingClaim | default (include "hl-composer.fullname" .) }} 27 | {{- else }} 28 | emptyDir: {} 29 | {{- end }} 30 | containers: 31 | - name: rest-server 32 | image: "{{ .Values.rest.image.repository }}:{{ .Values.rest.image.tag }}" 33 | imagePullPolicy: {{ .Values.rest.image.pullPolicy }} 34 | # TODO: Add liveness and readiness probes 35 | envFrom: 36 | - secretRef: 37 | name: {{ include "hl-composer.fullname" . }}-rest 38 | env: 39 | - name: COMPOSER_CARD 40 | value: {{ .Values.rest.config.composerRestServerCard }} 41 | - name: COMPOSER_NAMESPACES 42 | value: never 43 | volumeMounts: 44 | - name: persistent-volume 45 | mountPath: /home/composer/.composer 46 | resources: 47 | {{ toYaml .Values.rest.resources | indent 12 }} 48 | {{- with .Values.rest.nodeSelector }} 49 | nodeSelector: 50 | {{ toYaml . | indent 8 }} 51 | {{- end }} 52 | {{- with .Values.rest.affinity }} 53 | affinity: 54 | {{ toYaml . | indent 8 }} 55 | {{- end }} 56 | {{- with .Values.rest.tolerations }} 57 | tolerations: 58 | {{ toYaml . | indent 8 }} 59 | {{- end }} 60 | -------------------------------------------------------------------------------- /hl-composer/templates/hl-composer-rest-ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rest.ingress.enabled -}} 2 | {{- $fullName := include "hl-composer.fullname" . 
-}} 3 | {{- $ingressPath := .Values.rest.ingress.path -}} 4 | apiVersion: extensions/v1beta1 5 | kind: Ingress 6 | metadata: 7 | name: {{ $fullName }}-rest 8 | labels: 9 | {{ include "labels.standard" . | indent 4 }} 10 | {{- with .Values.rest.ingress.annotations }} 11 | annotations: 12 | {{ toYaml . | indent 4 }} 13 | {{- end }} 14 | spec: 15 | {{- if .Values.rest.ingress.tls }} 16 | tls: 17 | {{- range .Values.rest.ingress.tls }} 18 | - hosts: 19 | {{- range .hosts }} 20 | - {{ . }} 21 | {{- end }} 22 | secretName: {{ .secretName }} 23 | {{- end }} 24 | {{- end }} 25 | rules: 26 | {{- range .Values.rest.ingress.hosts }} 27 | - host: {{ . }} 28 | http: 29 | paths: 30 | - path: {{ $ingressPath }} 31 | backend: 32 | serviceName: {{ $fullName }}-rest 33 | servicePort: http 34 | {{- end }} 35 | {{- end }} 36 | -------------------------------------------------------------------------------- /hl-composer/templates/hl-composer-rest-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: {{ include "hl-composer.fullname" . }}-rest 5 | labels: 6 | {{ include "labels.standard" . | indent 4 }} 7 | type: Opaque 8 | data: 9 | {{ if .Values.rest.config.apiKey }} 10 | COMPOSER_APIKEY: {{ .Values.rest.config.apiKey | b64enc | quote }} 11 | {{ else }} 12 | COMPOSER_APIKEY: {{ randAlphaNum 24 | b64enc | quote }} 13 | {{ end }} 14 | -------------------------------------------------------------------------------- /hl-composer/templates/hl-composer-rest-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ include "hl-composer.fullname" $ }}-rest 6 | labels: 7 | {{ include "labels.standard" . | indent 4 }} 8 | spec: 9 | type: {{ .Values.rest.service.type }} 10 | ports: 11 | - port: {{ .Values.rest.service.port }} 12 | targetPort: 3000 13 | protocol: TCP 14 | name: http 15 | selector: 16 | name: {{ include "hl-composer.fullname" . }}-rest 17 | app: {{ include "hl-composer.name" . }} 18 | release: {{ .Release.Name }} 19 | -------------------------------------------------------------------------------- /hl-composer/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for hl-composer. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | persistence: 6 | # By default we must set this to false to enable the chart to pass tests 7 | # However, in a proper deployment, you should set this to "true", so that the different components can share data 8 | enabled: false 9 | annotations: {} 10 | size: 1Gi 11 | ## If defined, storageClassName: 12 | ## If set to "-", storageClassName: "", which disables dynamic provisioning 13 | ## If undefined (the default) or set to null, no storageClassName spec is 14 | ## set, choosing the default provisioner. (gp2 on AWS, standard on 15 | ## GKE, AWS & OpenStack) 16 | ## 17 | storageClass: "" 18 | accessMode: ReadWriteMany 19 | 20 | cli: 21 | image: 22 | repository: alexvicegrab/composer-cli 23 | tag: 0.20.10 24 | pullPolicy: IfNotPresent 25 | 26 | secrets: {} 27 | # This should contain the packaged .bna network file.
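# As a minimal illustration (the local file names here are hypothetical), secrets like the
# ones referenced in the examples below could be pre-created with:
#   kubectl create secret generic bc--bna --from-file=network.bna
#   kubectl create secret generic hl--peer-admincert --from-file=cert.pem
#   kubectl create secret generic hl--peer-adminkey --from-file=key.pem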
28 | # blockchainNetwork: bc--bna 29 | # We need a secret for the Organisation certificate and key 30 | # adminCert: hl--peer-admincert 31 | # adminKey: hl--peer-adminkey 32 | # Composer Connection JSON 33 | # hlcConnection: hl--connection 34 | 35 | resources: {} 36 | # We usually recommend not to specify default resources and to leave this as a conscious 37 | # choice for the user. This also increases chances charts run on environments with little 38 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 39 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 40 | # limits: 41 | # cpu: 100m 42 | # memory: 128Mi 43 | # requests: 44 | # cpu: 100m 45 | # memory: 128Mi 46 | 47 | nodeSelector: {} 48 | 49 | tolerations: [] 50 | 51 | affinity: {} 52 | 53 | rest: 54 | image: 55 | repository: alexvicegrab/composer-rest-server 56 | tag: 0.20.10 57 | pullPolicy: IfNotPresent 58 | 59 | service: 60 | # Cluster IP or LoadBalancer 61 | type: ClusterIP 62 | port: 3000 63 | 64 | # Ingress for Composer REST 65 | ingress: 66 | enabled: false 67 | annotations: {} 68 | # kubernetes.io/ingress.class: nginx 69 | # certmanager.k8s.io/cluster-issuer: "letsencrypt-staging" 70 | path: / 71 | hosts: [] 72 | # - hl-composer-rest.local 73 | tls: [] 74 | # - secretName: hl-composer-rest-tls 75 | # hosts: 76 | # - hl-composer-rest.local 77 | 78 | config: 79 | # Composer REST server API key 80 | # apiKey: 81 | # Card for network connection 82 | composerRestServerCard: admin@test-network 83 | 84 | resources: {} 85 | # We usually recommend not to specify default resources and to leave this as a conscious 86 | # choice for the user. This also increases chances charts run on environments with little 87 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 88 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 89 | # limits: 90 | # cpu: 100m 91 | # memory: 128Mi 92 | # requests: 93 | # cpu: 100m 94 | # memory: 128Mi 95 | 96 | nodeSelector: {} 97 | 98 | tolerations: [] 99 | 100 | affinity: {} 101 | 102 | pg: 103 | enabled: true 104 | 105 | image: 106 | repository: alexvicegrab/composer-playground 107 | tag: 0.20.10 108 | pullPolicy: IfNotPresent 109 | 110 | service: 111 | # Cluster IP or LoadBalancer 112 | type: ClusterIP 113 | port: 8080 114 | 115 | # Ingress for Composer PlayGround 116 | ingress: 117 | enabled: false 118 | annotations: {} 119 | # kubernetes.io/ingress.class: nginx 120 | # certmanager.k8s.io/cluster-issuer: "letsencrypt-staging" 121 | path: / 122 | hosts: [] 123 | # - hl-composer-pg.local 124 | tls: [] 125 | # - secretName: hl-composer-pg-tls 126 | # hosts: 127 | # - hl-composer-pg.local 128 | 129 | resources: {} 130 | # We usually recommend not to specify default resources and to leave this as a conscious 131 | # choice for the user. This also increases chances charts run on environments with little 132 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 133 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
134 | # limits: 135 | # cpu: 100m 136 | # memory: 128Mi 137 | # requests: 138 | # cpu: 100m 139 | # memory: 128Mi 140 | 141 | nodeSelector: {} 142 | 143 | tolerations: [] 144 | 145 | affinity: {} 146 | -------------------------------------------------------------------------------- /hlf-ca/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | 23 | # OWNERS file for Helm repository 24 | OWNERS 25 | 26 | # Test files 27 | tests 28 | -------------------------------------------------------------------------------- /hlf-ca/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | description: Hyperledger Fabric Certificate Authority chart (these charts are created by AID:Tech and are currently not directly associated with the Hyperledger project) 3 | name: hlf-ca 4 | version: 1.1.7 5 | appVersion: 1.4.1 6 | keywords: 7 | - blockchain 8 | - hyperledger 9 | - fabric 10 | home: http://hyperledger-fabric-ca.readthedocs.io 11 | sources: 12 | - https://github.com/hyperledger/fabric-ca 13 | maintainers: 14 | - name: alexvicegrab 15 | email: sasha@aid.technology 16 | - name: nicolapaoli 17 | email: nicola@aid.technology 18 | icon: https://www.hyperledger.org/wp-content/uploads/2018/04/fabric-logo.png 19 | -------------------------------------------------------------------------------- /hlf-ca/FABRIC_UPGRADE.md: -------------------------------------------------------------------------------- 1 | # Upgrade Fabric from version 1.3.0 to 1.4.1 2 | 3 | 1. Update image tag from 1.3.0 to 1.4.1 in *values.yaml* 4 | 5 | 2. Update version from 1.1.6 to 1.1.7 in *Chart.yaml* 6 | 7 | 3. Update appVersion from 1.3.0 to 1.4.1 in *Chart.yaml* 8 | 9 | 4. 
Variables to add 10 | 11 | * FABRIC_CA_SERVER_CA_CERTFILE: /etc/hyperledger/fabric-ca-server/ca-cert.pem 12 | 13 | * FABRIC_CA_SERVER_CA_KEYFILE: /etc/hyperledger/fabric-ca-server/ca-key.pem 14 | -------------------------------------------------------------------------------- /hlf-ca/OWNERS: -------------------------------------------------------------------------------- 1 | approvers: 2 | - alexvicegrab 3 | - nicolapaoli 4 | reviewers: 5 | - alexvicegrab 6 | - nicolapaoli 7 | -------------------------------------------------------------------------------- /hlf-ca/requirements.lock: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: postgresql 3 | repository: https://kubernetes-charts.storage.googleapis.com/ 4 | version: 2.6.1 5 | - name: mysql 6 | repository: https://kubernetes-charts.storage.googleapis.com/ 7 | version: 0.10.2 8 | digest: sha256:1b5b212945abc2abc2317189f0d048b1c90f093db8ed5482db2c35a9d463259f 9 | generated: 2018-11-16T17:00:11.413931+02:00 10 | -------------------------------------------------------------------------------- /hlf-ca/requirements.yaml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: postgresql 3 | version: x.x.x 4 | repository: https://kubernetes-charts.storage.googleapis.com/ 5 | condition: postgresql.enabled 6 | tags: 7 | - postgres-database 8 | 9 | - name: mysql 10 | version: x.x.x 11 | repository: https://kubernetes-charts.storage.googleapis.com/ 12 | condition: mysql.enabled 13 | tags: 14 | - mysql-database -------------------------------------------------------------------------------- /hlf-ca/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Run the following commands to... 2 | 1. Get the name of the pod running the Fabric CA Server: 3 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "hlf-ca.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 4 | 5 | 2. Get the application URL: 6 | {{- if .Values.ingress.enabled }} 7 | {{- range .Values.ingress.hosts }} 8 | http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }} 9 | {{- end }} 10 | {{- else if contains "NodePort" .Values.service.type }} 11 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "hlf-ca.fullname" . }}) 12 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 13 | echo http://$NODE_IP:$NODE_PORT 14 | {{- else if contains "LoadBalancer" .Values.service.type }} 15 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 16 | You can watch the status by running 'kubectl get svc -w {{ include "hlf-ca.fullname" . }}' 17 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "hlf-ca.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') 18 | echo http://$SERVICE_IP:{{ .Values.service.port }} 19 | {{- else if contains "ClusterIP" .Values.service.type }} 20 | echo "Visit http://127.0.0.1:8080 to use your application" 21 | kubectl port-forward $POD_NAME 8080:7054 22 | {{- end }} 23 | 24 | 3. Display local (admin "client" enrollment) certificate, if it has been created: 25 | kubectl exec $POD_NAME -- cat /var/hyperledger/fabric-ca/msp/signcerts/cert.pem 26 | 27 | 4.
Enroll the bootstrap admin identity: 28 | kubectl exec $POD_NAME -- bash -c 'fabric-ca-client enroll -d -u http://$CA_ADMIN:$CA_PASSWORD@$SERVICE_DNS:{{ .Values.service.port }}' 29 | 30 | 5. Update the chart without resetting a password: 31 | export CA_ADMIN=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "hlf-ca.fullname" . }}--ca -o jsonpath="{.data.CA_ADMIN}" | base64 --decode; echo) 32 | export CA_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "hlf-ca.fullname" . }}--ca -o jsonpath="{.data.CA_PASSWORD}" | base64 --decode; echo) 33 | helm upgrade {{ .Release.Name }} stable/hlf-ca --namespace {{ .Release.Namespace }} -f my-values.yaml --set adminUsername=$CA_ADMIN,adminPassword=$CA_PASSWORD 34 | -------------------------------------------------------------------------------- /hlf-ca/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "hlf-ca.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "hlf-ca.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "hlf-ca.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | 34 | {{- /* 35 | Credit: @technosophos 36 | https://github.com/technosophos/common-chart/ 37 | labels.standard prints the standard Helm labels. 38 | The standard labels are frequently used in metadata. 39 | */ -}} 40 | {{- define "labels.standard" -}} 41 | app: {{ include "hlf-ca.name" . }} 42 | heritage: {{ .Release.Service | quote }} 43 | release: {{ .Release.Name | quote }} 44 | chart: {{ include "hlf-ca.chart" . }} 45 | {{- end -}} 46 | 47 | {{/* 48 | Generate postgres chart name 49 | */}} 50 | {{- define "postgresql.fullname" -}} 51 | {{- printf "%s-%s" .Release.Name "postgresql" | trunc 63 | trimSuffix "-" -}} 52 | {{- end -}} 53 | 54 | {{/* 55 | Generate mysql chart name 56 | */}} 57 | {{- define "mysql.fullname" -}} 58 | {{- printf "%s-%s" .Release.Name "mysql" | trunc 63 | trimSuffix "-" -}} 59 | {{- end -}} 60 | -------------------------------------------------------------------------------- /hlf-ca/templates/configmap--ca.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ include "hlf-ca.fullname" . }}--ca 5 | labels: 6 | {{ include "labels.standard" . 
| indent 4 }} 7 | data: 8 | GODEBUG: "netdns=go" 9 | FABRIC_CA_HOME: /var/hyperledger/fabric-ca 10 | FABRIC_CA_SERVER_CA_NAME: {{ .Values.caName | quote }} 11 | FABRIC_CA_SERVER_CA_CERTFILE: /etc/hyperledger/fabric-ca-server/ca-cert.pem 12 | FABRIC_CA_SERVER_CA_KEYFILE: /etc/hyperledger/fabric-ca-server/ca-key.pem 13 | SERVICE_DNS: 0.0.0.0 # Point to itself 14 | -------------------------------------------------------------------------------- /hlf-ca/templates/configmap--db.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ include "hlf-ca.fullname" . }}--db 5 | labels: 6 | {{ include "labels.standard" . | indent 4 }} 7 | data: 8 | DB_SSL: {{ .Values.db.ssl | quote }} 9 | {{- if .Values.postgresql.enabled }} 10 | # PostgreSQL Database 11 | DB_TYPE: "postgres" 12 | DB_HOST: {{ template "postgresql.fullname" . }} 13 | DB_PORT: {{ .Values.postgresql.service.port | quote }} 14 | DB_USERNAME: {{ .Values.postgresql.postgresqlUsername | quote }} 15 | DB_DATABASE: {{ .Values.postgresql.postgresqlDatabase | quote }} 16 | {{- end }} 17 | {{- if .Values.mysql.enabled }} 18 | # MySQL Database 19 | DB_TYPE: "mysql" 20 | DB_HOST: {{ template "mysql.fullname" . }} 21 | DB_PORT: "3306" 22 | DB_USERNAME: {{ .Values.mysql.mysqlUser | quote }} 23 | DB_DATABASE: {{ .Values.mysql.mysqlDatabase | quote }} 24 | {{- end }} 25 | {{- if .Values.externalDatabase.type }} 26 | # External Database 27 | DB_TYPE: {{ .Values.externalDatabase.type | quote }} 28 | DB_HOST: {{ .Values.externalDatabase.host | quote }} 29 | {{- if eq .Values.externalDatabase.type "mysql" }} 30 | DB_PORT: {{ .Values.externalDatabase.port | default "3306" | quote }} 31 | {{- end }} 32 | {{- if eq .Values.externalDatabase.type "postgres" }} 33 | DB_PORT: {{ .Values.externalDatabase.port | default "5432" | quote }} 34 | {{- end }} 35 | DB_USERNAME: {{ .Values.externalDatabase.username | quote }} 36 | DB_DATABASE: {{ .Values.externalDatabase.database | quote }} 37 | {{- end }} -------------------------------------------------------------------------------- /hlf-ca/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | {{- $fullName := include "hlf-ca.fullname" . -}} 3 | {{- $ingressPath := .Values.ingress.path -}} 4 | apiVersion: extensions/v1beta1 5 | kind: Ingress 6 | metadata: 7 | name: {{ $fullName }} 8 | labels: 9 | {{ include "labels.standard" . | indent 4 }} 10 | {{- with .Values.ingress.annotations }} 11 | annotations: 12 | {{ toYaml . | indent 4 }} 13 | {{- end }} 14 | spec: 15 | {{- if .Values.ingress.tls }} 16 | tls: 17 | {{- range .Values.ingress.tls }} 18 | - hosts: 19 | {{- range .hosts }} 20 | - {{ . }} 21 | {{- end }} 22 | secretName: {{ .secretName }} 23 | {{- end }} 24 | {{- end }} 25 | rules: 26 | {{- range .Values.ingress.hosts }} 27 | - host: {{ . }} 28 | http: 29 | paths: 30 | - path: {{ $ingressPath }} 31 | backend: 32 | serviceName: {{ $fullName }} 33 | servicePort: http 34 | {{- end }} 35 | {{- end }} 36 | -------------------------------------------------------------------------------- /hlf-ca/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: {{ include "hlf-ca.fullname" . }} 6 | labels: 7 | {{ include "labels.standard" . 
| indent 4 }} 8 | {{- if .Values.persistence.annotations }} 9 | annotations: 10 | {{ toYaml .Values.persistence.annotations | indent 4 }} 11 | {{- end }} 12 | spec: 13 | accessModes: 14 | - {{ .Values.persistence.accessMode | quote }} 15 | resources: 16 | requests: 17 | storage: {{ .Values.persistence.size | quote }} 18 | {{- if .Values.persistence.storageClass }} 19 | {{- if (eq "-" .Values.persistence.storageClass) }} 20 | storageClassName: "" 21 | {{- else }} 22 | storageClassName: "{{ .Values.persistence.storageClass }}" 23 | {{- end }} 24 | {{- end }} 25 | {{- end }} 26 | -------------------------------------------------------------------------------- /hlf-ca/templates/secret--ca.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: {{ include "hlf-ca.fullname" . }}--ca 5 | labels: 6 | {{ include "labels.standard" . | indent 4 }} 7 | type: Opaque 8 | data: 9 | CA_ADMIN: {{ .Values.adminUsername | b64enc | quote }} 10 | {{ if .Values.adminPassword }} 11 | CA_PASSWORD: {{ .Values.adminPassword | b64enc | quote }} 12 | {{ else }} 13 | CA_PASSWORD: {{ randAlphaNum 24 | b64enc | quote }} 14 | {{ end }} 15 | -------------------------------------------------------------------------------- /hlf-ca/templates/secret--db.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.externalDatabase.type }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ include "hlf-ca.fullname" . }}--db 6 | labels: 7 | {{ include "labels.standard" . | indent 4 }} 8 | type: Opaque 9 | data: 10 | db-password: {{ .Values.externalDatabase.password | b64enc | quote }} 11 | {{- end }} -------------------------------------------------------------------------------- /hlf-ca/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "hlf-ca.fullname" . }} 5 | labels: 6 | {{ include "labels.standard" . | indent 4 }} 7 | spec: 8 | type: {{ .Values.service.type | quote }} 9 | ports: 10 | - port: {{ .Values.service.port }} 11 | targetPort: 7054 12 | protocol: TCP 13 | name: http 14 | selector: 15 | app: {{ include "hlf-ca.name" . }} 16 | release: {{ .Release.Name }} 17 | -------------------------------------------------------------------------------- /hlf-ca/tests/README.md: -------------------------------------------------------------------------------- 1 | # Chart testing 2 | 3 | > Eventually this will be replaced with an integration test, likely running with `pytest` 4 | 5 | Commands should be run from the root folder of the repository. 6 | 7 | ## Root CA 8 | 9 | ### Pre-install 10 | 11 | Due to the presence of dependencies, please run inside the chart dir: 12 | 13 | helm dependency update 14 | 15 | ### Install 16 | 17 | Install the Root CA 18 | 19 | helm install ./hlf-ca -n rca --namespace test -f ./hlf-ca/tests/values/root.yaml 20 | 21 | export RCA_POD=$(kubectl get pods -n test -l "app=hlf-ca,release=rca" -o jsonpath="{.items[0].metadata.name}") 22 | 23 | Check that the server is running 24 | 25 | kubectl logs -n test $RCA_POD | grep "Listening on" 26 | 27 | ### Bootstrap ID 28 | 29 | Check that we don't yet have a certificate for the bootstrap identity, then create it.
30 | 31 | kubectl exec -n test $RCA_POD -- cat /var/hyperledger/fabric-ca/msp/signcerts/cert.pem 32 | 33 | kubectl exec -n test $RCA_POD -- bash -c 'fabric-ca-client enroll -d -u http://$CA_ADMIN:$CA_PASSWORD@$SERVICE_DNS:7054' 34 | 35 | ## Intermediate CA 36 | 37 | ### Install 38 | 39 | helm install ./hlf-ca -n ica --namespace test -f ./hlf-ca/tests/values/intermediate.yaml 40 | 41 | export ICA_POD=$(kubectl get pods -n test -l "app=hlf-ca,release=ica" -o jsonpath="{.items[0].metadata.name}") 42 | 43 | Check that the server is running 44 | 45 | kubectl logs -n test $ICA_POD | grep "Listening on" 46 | 47 | ### Bootstrap ID 48 | 49 | Check that we don't yet have a certificate for the bootstrap identity, then create it. 50 | 51 | kubectl exec -n test $ICA_POD -- cat /var/hyperledger/fabric-ca/msp/signcerts/cert.pem 52 | 53 | kubectl exec -n test $ICA_POD -- bash -c 'fabric-ca-client enroll -d -u http://$CA_ADMIN:$CA_PASSWORD@$SERVICE_DNS:7054' 54 | 55 | ### Register and enroll an identity 56 | 57 | Check that the organisation admin identity does not already exist 58 | 59 | kubectl exec -n test $ICA_POD -- fabric-ca-client identity list --id org-admin 60 | 61 | Register the Organisation Admin if the previous command shows that the identity is missing 62 | 63 | kubectl exec -n test $ICA_POD -- fabric-ca-client register --id.name org-admin --id.secret OrgAdm1nPW --id.attrs 'admin=true:ecert' 64 | 65 | Enroll the Organisation Admin identity (typically we would use a more secure password than `OrgAdm1nPW`, etc.) 66 | 67 | kubectl exec -n test $ICA_POD -- bash -c 'fabric-ca-client enroll -u http://org-admin:OrgAdm1nPW@$SERVICE_DNS:7054 -M ./TestMSP' 68 | 69 | Check that the new identity is present in the Intermediate CA 70 | 71 | kubectl exec -n test $ICA_POD -- fabric-ca-client identity list --id org-admin 72 | 73 | ### Cleanup 74 | 75 | Delete the charts we installed 76 | 77 | helm delete --purge ica rca 78 | 79 | Currently, the Persistent Volume Claim for the PostgreSQL database does not get deleted automatically, so remove it manually. 80 | 81 | kubectl -n test delete pvc data-rca-postgresql-0 82 | -------------------------------------------------------------------------------- /hlf-ca/tests/values/intermediate.yaml: -------------------------------------------------------------------------------- 1 | ## Name of CA, stored in FABRIC_CA_SERVER_CA_NAME (and the configMap "--config", if deploying a Root CA) 2 | caName: ica 3 | 4 | mysql: 5 | enabled: true 6 | 7 | ## Settings used in configMap "--config" 8 | config: 9 | ## CSR pathlength in the CA server configuration file 10 | csr: 11 | ca: 12 | pathlength: 0 13 | ## "intermediate" is only specified for Intermediate CAs 14 | intermediate: 15 | parent: 16 | chart: rca-hlf-ca 17 | url: rca-hlf-ca 18 | port: 7054 19 | -------------------------------------------------------------------------------- /hlf-ca/tests/values/root.yaml: -------------------------------------------------------------------------------- 1 | ## Name of CA, stored in FABRIC_CA_SERVER_CA_NAME (and the configMap "--config", if deploying a Root CA) 2 | caName: rca 3 | 4 | postgresql: 5 | ## Whether to deploy a postgres server to satisfy the Fabric CA database requirements. 6 | # To use an external database, set this to false and configure the externalDatabase parameters, setting the type to 'postgres' 7 | enabled: true 8 | -------------------------------------------------------------------------------- /hlf-couchdb/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | 23 | # OWNERS file for Helm repository 24 | OWNERS 25 | -------------------------------------------------------------------------------- /hlf-couchdb/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | description: CouchDB instance for Hyperledger Fabric (these charts are created by AID:Tech and are currently not directly associated with the Hyperledger project) 3 | name: hlf-couchdb 4 | version: 1.0.7 5 | appVersion: 0.4.15 6 | keywords: 7 | - blockchain 8 | - hyperledger 9 | - fabric 10 | - couchdb 11 | home: http://hyperledger-fabric.readthedocs.io 12 | sources: 13 | - https://github.com/hyperledger/fabric 14 | maintainers: 15 | - name: alexvicegrab 16 | email: sasha@aid.technology 17 | - name: nicolapaoli 18 | email: nicola@aid.technology 19 | icon: https://www.hyperledger.org/wp-content/uploads/2018/04/fabric-logo.png 20 | -------------------------------------------------------------------------------- /hlf-couchdb/FABRIC_UPGRADE.md: -------------------------------------------------------------------------------- 1 | # Upgrade Fabric from version 1.3.0 to 1.4.1 2 | 3 | 1. Update image tag from 0.4.10 to 0.4.15 in *values.yaml* 4 | 5 | 2. Update version from 1.0.6 to 1.0.7 in *Chart.yaml* 6 | 7 | 3. Update appVersion from 0.4.10 to 0.4.15 in *Chart.yaml* 8 | 9 | 4. Variable to update 10 | 11 | * COUCHDB_USERNAME > COUCHDB_USER 12 | -------------------------------------------------------------------------------- /hlf-couchdb/OWNERS: -------------------------------------------------------------------------------- 1 | approvers: 2 | - alexvicegrab 3 | - nicolapaoli 4 | reviewers: 5 | - alexvicegrab 6 | - nicolapaoli 7 | -------------------------------------------------------------------------------- /hlf-couchdb/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. Get the application URL by running these commands: 2 | {{- if .Values.ingress.enabled }} 3 | {{- range .Values.ingress.hosts }} 4 | http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }} 5 | {{- end }} 6 | {{- else if contains "NodePort" .Values.service.type }} 7 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "hlf-couchdb.fullname" . }}) 8 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 9 | echo http://$NODE_IP:$NODE_PORT 10 | {{- else if contains "LoadBalancer" .Values.service.type }} 11 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 12 | You can watch the status by running 'kubectl get svc -w {{ include "hlf-couchdb.fullname" . }}' 13 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "hlf-couchdb.fullname" .
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') 14 | echo http://$SERVICE_IP:{{ .Values.service.port }} 15 | {{- else if contains "ClusterIP" .Values.service.type }} 16 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "hlf-couchdb.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 17 | echo "Visit http://127.0.0.1:8080 to use your application" 18 | kubectl port-forward $POD_NAME 8080:80 19 | {{- end }} 20 | 21 | 2. Get the CouchDB username and password by running this command: 22 | export COUCHDB_USER=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "hlf-couchdb.fullname" . }} -o jsonpath="{.data.COUCHDB_USER}" | base64 --decode; echo) 23 | export COUCHDB_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "hlf-couchdb.fullname" . }} -o jsonpath="{.data.COUCHDB_PASSWORD}" | base64 --decode; echo) 24 | 25 | 3. Update the chart without resetting the password (requires running step 2): 26 | helm upgrade {{ .Release.Name }} stable/hlf-couchdb --namespace {{ .Release.Namespace }} -f my-values.yaml --set couchdbUsername=$COUCHDB_USER,couchdbPassword=$COUCHDB_PASSWORD -------------------------------------------------------------------------------- /hlf-couchdb/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "hlf-couchdb.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "hlf-couchdb.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "hlf-couchdb.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | 34 | {{- /* 35 | Credit: @technosophos 36 | https://github.com/technosophos/common-chart/ 37 | labels.standard prints the standard Helm labels. 38 | The standard labels are frequently used in metadata. 39 | */ -}} 40 | {{- define "labels.standard" -}} 41 | app: {{ include "hlf-couchdb.name" . }} 42 | heritage: {{ .Release.Service | quote }} 43 | release: {{ .Release.Name | quote }} 44 | chart: {{ include "hlf-couchdb.chart" . }} 45 | {{- end -}} 46 | -------------------------------------------------------------------------------- /hlf-couchdb/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ include "hlf-couchdb.fullname" . }} 5 | labels: 6 | {{ include "labels.standard" . 
| indent 4 }} 7 | spec: 8 | replicas: {{ .Values.replicaCount }} 9 | selector: 10 | matchLabels: 11 | app: {{ include "hlf-couchdb.name" . }} 12 | release: {{ .Release.Name }} 13 | # Ensure we allow our pod to be unavailable, so we can upgrade 14 | strategy: 15 | rollingUpdate: 16 | maxUnavailable: 1 17 | template: 18 | metadata: 19 | labels: 20 | {{ include "labels.standard" . | indent 8 }} 21 | spec: 22 | volumes: 23 | - name: data 24 | {{- if .Values.persistence.enabled }} 25 | persistentVolumeClaim: 26 | claimName: {{ .Values.persistence.existingClaim | default (include "hlf-couchdb.fullname" .) }} 27 | {{- else }} 28 | emptyDir: {} 29 | {{- end }} 30 | containers: 31 | - name: {{ .Chart.Name }} 32 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 33 | imagePullPolicy: {{ .Values.image.pullPolicy }} 34 | envFrom: 35 | - secretRef: 36 | name: {{ include "hlf-couchdb.fullname" . }} 37 | volumeMounts: 38 | - mountPath: /opt/couchdb/data 39 | name: data 40 | ports: 41 | - name: couchdb 42 | containerPort: 5984 43 | protocol: TCP 44 | livenessProbe: 45 | tcpSocket: 46 | port: 5984 47 | initialDelaySeconds: 60 48 | timeoutSeconds: 5 49 | failureThreshold: 6 50 | readinessProbe: 51 | tcpSocket: 52 | port: 5984 53 | initialDelaySeconds: 5 54 | timeoutSeconds: 3 55 | periodSeconds: 5 56 | resources: 57 | {{ toYaml .Values.resources | indent 12 }} 58 | {{- with .Values.nodeSelector }} 59 | nodeSelector: 60 | {{ toYaml . | indent 8 }} 61 | {{- end }} 62 | {{- with .Values.affinity }} 63 | affinity: 64 | {{ toYaml . | indent 8 }} 65 | {{- end }} 66 | {{- with .Values.tolerations }} 67 | tolerations: 68 | {{ toYaml . | indent 8 }} 69 | {{- end }} 70 | -------------------------------------------------------------------------------- /hlf-couchdb/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | {{- $fullName := include "hlf-couchdb.fullname" . -}} 3 | {{- $ingressPath := .Values.ingress.path -}} 4 | apiVersion: extensions/v1beta1 5 | kind: Ingress 6 | metadata: 7 | name: {{ $fullName }} 8 | labels: 9 | {{ include "labels.standard" . | indent 4 }} 10 | {{- with .Values.ingress.annotations }} 11 | annotations: 12 | {{ toYaml . | indent 4 }} 13 | {{- end }} 14 | spec: 15 | {{- if .Values.ingress.tls }} 16 | tls: 17 | {{- range .Values.ingress.tls }} 18 | - hosts: 19 | {{- range .hosts }} 20 | - {{ . }} 21 | {{- end }} 22 | secretName: {{ .secretName }} 23 | {{- end }} 24 | {{- end }} 25 | rules: 26 | {{- range .Values.ingress.hosts }} 27 | - host: {{ . }} 28 | http: 29 | paths: 30 | - path: {{ $ingressPath }} 31 | backend: 32 | serviceName: {{ $fullName }} 33 | servicePort: couchdb 34 | {{- end }} 35 | {{- end }} 36 | -------------------------------------------------------------------------------- /hlf-couchdb/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: {{ include "hlf-couchdb.fullname" . }} 6 | labels: 7 | {{ include "labels.standard" . 
| indent 4 }} 8 | {{- if .Values.persistence.annotations }} 9 | annotations: 10 | {{ toYaml .Values.persistence.annotations | indent 4 }} 11 | {{- end }} 12 | spec: 13 | accessModes: 14 | - {{ .Values.persistence.accessMode | quote }} 15 | resources: 16 | requests: 17 | storage: {{ .Values.persistence.size | quote }} 18 | {{- if .Values.persistence.storageClass }} 19 | {{- if (eq "-" .Values.persistence.storageClass) }} 20 | storageClassName: "" 21 | {{- else }} 22 | storageClassName: "{{ .Values.persistence.storageClass }}" 23 | {{- end }} 24 | {{- end }} 25 | {{- end }} 26 | -------------------------------------------------------------------------------- /hlf-couchdb/templates/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: {{ include "hlf-couchdb.fullname" . }} 5 | labels: 6 | {{ include "labels.standard" . | indent 4 }} 7 | type: Opaque 8 | data: 9 | COUCHDB_USER: {{ .Values.couchdbUsername | b64enc | quote }} 10 | {{ if .Values.couchdbPassword }} 11 | COUCHDB_PASSWORD: {{ .Values.couchdbPassword | b64enc | quote }} 12 | {{ else }} 13 | COUCHDB_PASSWORD: {{ randAlphaNum 24 | b64enc | quote }} 14 | {{ end }} 15 | -------------------------------------------------------------------------------- /hlf-couchdb/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "hlf-couchdb.fullname" . }} 5 | labels: 6 | {{ include "labels.standard" . | indent 4 }} 7 | spec: 8 | type: {{ .Values.service.type }} 9 | ports: 10 | - port: {{ .Values.service.port }} 11 | targetPort: couchdb 12 | protocol: TCP 13 | name: couchdb 14 | selector: 15 | app: {{ include "hlf-couchdb.name" . }} 16 | release: {{ .Release.Name }} 17 | -------------------------------------------------------------------------------- /hlf-couchdb/values.yaml: -------------------------------------------------------------------------------- 1 | ## Default values for hlf-couchdb. 2 | ## This is a YAML-formatted file. 3 | ## Declare variables to be passed into your templates. 4 | 5 | replicaCount: 1 6 | 7 | image: 8 | repository: hyperledger/fabric-couchdb 9 | tag: 0.4.15 10 | pullPolicy: IfNotPresent 11 | 12 | service: 13 | type: ClusterIP 14 | port: 5984 15 | 16 | ingress: 17 | enabled: false 18 | annotations: {} 19 | # kubernetes.io/ingress.class: nginx 20 | # kubernetes.io/tls-acme: "true" 21 | path: / 22 | hosts: 23 | - chart-example.local 24 | tls: [] 25 | # - secretName: chart-example-tls 26 | # hosts: 27 | # - chart-example.local 28 | 29 | persistence: 30 | enabled: true 31 | annotations: {} 32 | ## If defined, storageClassName: 33 | ## If set to "-", storageClassName: "", which disables dynamic provisioning 34 | ## If undefined (the default) or set to null, no storageClassName spec is 35 | ## set, choosing the default provisioner. (gp2 on AWS, standard on 36 | ## GKE, AWS & OpenStack) 37 | ## 38 | storageClass: "" 39 | accessMode: ReadWriteOnce 40 | size: 1Gi 41 | # existingClaim: "" 42 | 43 | ################################## 44 | ## Further configuration options # 45 | ################################## 46 | ## Database username 47 | couchdbUsername: "couchdb" 48 | ## Database password (default: random 24 character string) 49 | # couchdbPassword: 50 | 51 | resources: {} 52 | ## We usually recommend not to specify default resources and to leave this as a conscious 53 | ## choice for the user. 
This also increases chances charts run on environments with little 54 | ## resources, such as Minikube. If you do want to specify resources, uncomment the following 55 | ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 56 | ## limits: 57 | # cpu: 100m 58 | # memory: 128Mi 59 | # requests: 60 | # cpu: 100m 61 | # memory: 128Mi 62 | 63 | nodeSelector: {} 64 | 65 | tolerations: [] 66 | 67 | affinity: 68 | ## Suggested antiAffinity, as each CouchDB instance should be on a separate Node for resilience 69 | # podAntiAffinity: 70 | # requiredDuringSchedulingIgnoredDuringExecution: 71 | # - topologyKey: "kubernetes.io/hostname" 72 | # labelSelector: 73 | # matchLabels: 74 | # app: hlf-couchdb 75 | -------------------------------------------------------------------------------- /hlf-ord/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | 23 | # OWNERS file for Helm repository 24 | OWNERS 25 | -------------------------------------------------------------------------------- /hlf-ord/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | description: Hyperledger Fabric Orderer chart (these charts are created by AID:Tech and are currently not directly associated with the Hyperledger project) 3 | name: hlf-ord 4 | version: 1.2.6 5 | appVersion: 1.4.1 6 | keywords: 7 | - blockchain 8 | - hyperledger 9 | - fabric 10 | home: http://hyperledger-fabric.readthedocs.io 11 | sources: 12 | - https://github.com/hyperledger/fabric 13 | maintainers: 14 | - name: alexvicegrab 15 | email: sasha@aid.technology 16 | - name: nicolapaoli 17 | email: nicola@aid.technology 18 | icon: https://www.hyperledger.org/wp-content/uploads/2018/04/fabric-logo.png 19 | -------------------------------------------------------------------------------- /hlf-ord/FABRIC_UPGRADE.md: -------------------------------------------------------------------------------- 1 | # Upgrade Fabric from version 1.3.0 to 1.4.1 2 | 3 | 1. Update image tag from 1.3.0 to 1.4.1 in values.yaml 4 | 5 | 2. Variables to update 6 | 7 | * ORDERER_GENERAL_LOGLEVEL > FABRIC_LOGGING_SPEC 8 | 9 | 3. Variables to add 10 | 11 | * ORDERER_KAFKA_TOPIC_REPLICATIONFACTOR=1 12 | 13 | * ORDERER_KAFKA_VERBOSE=true 14 | -------------------------------------------------------------------------------- /hlf-ord/OWNERS: -------------------------------------------------------------------------------- 1 | approvers: 2 | - alexvicegrab 3 | - nicolapaoli 4 | reviewers: 5 | - alexvicegrab 6 | - nicolapaoli 7 | -------------------------------------------------------------------------------- /hlf-ord/UPGRADE_1-1-x.md: -------------------------------------------------------------------------------- 1 | # Upgrading from version 1.1.x and up. 
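Before starting, it is worth confirming which chart version the release is currently running; a quick check (assuming Helm v2, as used by the `helm upgrade` step at the end of this guide) is: ``` helm ls ord1 ```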
2 | 3 | ## Secret creation 4 | 5 | Specify which release we want to work with: 6 | 7 | ``` 8 | export RELEASE='ord1' 9 | export NAMESPACE='default' 10 | export POD_NAME=$(kubectl -n ${NAMESPACE} get pods -l "app=hlf-ord,release=${RELEASE}" -o jsonpath="{.items[0].metadata.name}") 11 | ``` 12 | 13 | ### Cred secret 14 | 15 | Get the relevant credentials: 16 | 17 | ``` 18 | export CA_USERNAME=$(kubectl -n ${NAMESPACE} get secret ${RELEASE}-hlf-ord -o jsonpath="{.data.CA_USERNAME}" | base64 --decode; echo) 19 | export CA_PASSWORD=$(kubectl -n ${NAMESPACE} get secret ${RELEASE}-hlf-ord -o jsonpath="{.data.CA_PASSWORD}" | base64 --decode; echo) 20 | ``` 21 | 22 | Save the credentials in a secret: 23 | 24 | ``` 25 | kubectl -n ${NAMESPACE} create secret generic hlf--${RELEASE}-cred --from-literal=CA_USERNAME=$CA_USERNAME --from-literal=CA_PASSWORD=$CA_PASSWORD 26 | ``` 27 | 28 | ### Cert secret 29 | 30 | Get the content of the certificate file and save it as a secret: 31 | 32 | ``` 33 | export CONTENT=$(kubectl -n ${NAMESPACE} exec ${POD_NAME} -- cat /var/hyperledger/msp/signcerts/cert.pem) 34 | kubectl -n ${NAMESPACE} create secret generic hlf--${RELEASE}-idcert --from-literal=cert.pem=$CONTENT 35 | ``` 36 | 37 | ### Key secret 38 | 39 | Get the content of the key file and save it as a secret: 40 | 41 | ``` 42 | export CONTENT=$(kubectl -n ${NAMESPACE} exec ${POD_NAME} -- bash -c 'cat /var/hyperledger/msp/keystore/*_sk') 43 | kubectl -n ${NAMESPACE} create secret generic hlf--${RELEASE}-idkey --from-literal=key.pem=$CONTENT 44 | ``` 45 | 46 | ### CA cert secret 47 | 48 | Get the content of the CA certificate file and save it as a secret: 49 | 50 | ``` 51 | export CONTENT=$(kubectl -n ${NAMESPACE} exec ${POD_NAME} -- bash -c 'cat /var/hyperledger/msp/cacerts/*.pem') 52 | kubectl -n ${NAMESPACE} create secret generic hlf--${RELEASE}-cacert --from-literal=cacert.pem=$CONTENT 53 | ``` 54 | 55 | ### Intermediate CA cert secret 56 | 57 | Get the content of the intermediate CA certificate file and save it as a secret (if you have used an intermediate CA): 58 | 59 | ``` 60 | export CONTENT=$(kubectl -n ${NAMESPACE} exec ${POD_NAME} -- bash -c 'cat /var/hyperledger/msp/intermediatecerts/*.pem') 61 | kubectl -n ${NAMESPACE} create secret generic hlf--${RELEASE}-caintcert --from-literal=intermediatecacert.pem=$CONTENT 62 | ``` 63 | 64 | ## Move old MSP material out 65 | 66 | We can move the crypto material we created earlier to another directory in our Persistent Volume, so we can roll back if needed. 67 | 68 | ``` 69 | kubectl -n ${NAMESPACE} exec ${POD_NAME} -- mv /var/hyperledger/msp /var/hyperledger/msp_old 70 | ``` 71 | 72 | ## Upgrade the chart 73 | 74 | You will need to update the chart to the latest version by editing the relevant values files: 75 | 76 | ``` 77 | secrets: 78 | ord: 79 | cred: hlf--ord1-cred 80 | cert: hlf--ord1-idcert 81 | key: hlf--ord1-idkey 82 | caCert: hlf--ord1-cacert 83 | intCaCert: hlf--ord1-caintcert # If applicable 84 | ``` 85 | 86 | And running: 87 | 88 | ``` 89 | helm upgrade ${RELEASE} ./hlf-ord 90 | ``` 91 | -------------------------------------------------------------------------------- /hlf-ord/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Run the following commands to... 2 | 1. Get the name of the pod running the Fabric Orderer: 3 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "hlf-ord.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 4 | 5 | 2. 
Get the application URL by running these commands: 6 | {{- if contains "NodePort" .Values.service.type }} 7 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "hlf-ord.fullname" . }}) 8 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 9 | echo http://$NODE_IP:$NODE_PORT 10 | {{- else if contains "LoadBalancer" .Values.service.type }} 11 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 12 | You can watch the status of it by running 'kubectl get svc -w {{ include "hlf-ord.fullname" . }}' 13 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "hlf-ord.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') 14 | echo http://$SERVICE_IP:{{ .Values.service.port }} 15 | {{- else if contains "ClusterIP" .Values.service.type }} 16 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "hlf-ord.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 17 | echo "Visit http://127.0.0.1:8080 to use your application" 18 | kubectl port-forward $POD_NAME 8080:7050 19 | {{- end }} 20 | 21 | 3. Obtain CA_USERNAME and CA_PASSWORD to register identity with CA: 22 | export CA_USERNAME=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "hlf-ord.fullname" . }} -o jsonpath="{.data.CA_USERNAME}" | base64 --decode; echo) 23 | export CA_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "hlf-ord.fullname" . }} -o jsonpath="{.data.CA_PASSWORD}" | base64 --decode; echo) 24 | 25 | 4. Update the chart without resetting a password (requires running step 3): 26 | helm upgrade {{ .Release.Name }} stable/hlf-ord --namespace {{ .Release.Namespace }} -f my-values.yaml --set caUsername=$CA_USERNAME,caPassword=$CA_PASSWORD 27 | -------------------------------------------------------------------------------- /hlf-ord/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "hlf-ord.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "hlf-ord.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "hlf-ord.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | 34 | {{- /* 35 | Credit: @technosophos 36 | https://github.com/technosophos/common-chart/ 37 | labels.standard prints the standard Helm labels. 38 | The standard labels are frequently used in metadata. 
39 | */ -}} 40 | {{- define "labels.standard" -}} 41 | app: {{ include "hlf-ord.name" . }} 42 | heritage: {{ .Release.Service | quote }} 43 | release: {{ .Release.Name | quote }} 44 | chart: {{ include "hlf-ord.chart" . }} 45 | {{- end -}} 46 | -------------------------------------------------------------------------------- /hlf-ord/templates/configmap--ord.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ include "hlf-ord.fullname" . }}--ord 5 | labels: 6 | {{ include "labels.standard" . | indent 4 }} 7 | data: 8 | ## Location where fabric-ca-client configuration is saved 9 | FABRIC_CA_CLIENT_HOME: /var/hyperledger/fabric-ca-client 10 | ## Orderer defaults 11 | ORDERER_CFG_PATH: /var/hyperledger/config 12 | ORDERER_GENERAL_LEDGERTYPE: file 13 | ORDERER_FILELEDGER_LOCATION: /var/hyperledger/ledger 14 | ORDERER_GENERAL_BATCHTIMEOUT: 1s 15 | ORDERER_GENERAL_BATCHSIZE_MAXMESSAGECOUNT: "10" 16 | ORDERER_GENERAL_MAXWINDOWSIZE: "1000" 17 | ORDERER_GENERAL_ORDERERTYPE: {{ .Values.ord.type | quote }} 18 | ORDERER_GENERAL_LISTENADDRESS: 0.0.0.0 19 | ORDERER_GENERAL_LISTENPORT: "7050" 20 | FABRIC_LOGGING_SPEC: {{ .Values.ord.logging | quote }} 21 | ORDERER_KAFKA_TOPIC_REPLICATIONFACTOR: {{ .Values.ord.replicationFactor | quote }} 22 | ORDERER_KAFKA_VERBOSE: {{ .Values.ord.verbose | quote }} 23 | ORDERER_GENERAL_LOCALMSPDIR: /var/hyperledger/msp 24 | ORDERER_GENERAL_LOCALMSPID: {{ .Values.ord.mspID | quote }} 25 | ORDERER_GENERAL_GENESISMETHOD: file 26 | ORDERER_GENERAL_GENESISFILE: /hl_config/genesis/genesis.block 27 | ORDERER_GENERAL_GENESISPROFILE: initial 28 | ORDERER_GENERAL_TLS_ENABLED: {{ .Values.ord.tls.server.enabled | quote }} 29 | ORDERER_GENERAL_TLS_CERTIFICATE: "/var/hyperledger/tls/server/pair/tls.crt" 30 | ORDERER_GENERAL_TLS_PRIVATEKEY: "/var/hyperledger/tls/server/pair/tls.key" 31 | ORDERER_GENERAL_TLS_ROOTCAS: "/var/hyperledger/tls/server/cert/cacert.pem" 32 | ORDERER_GENERAL_TLS_CLIENTAUTHREQUIRED: {{ .Values.ord.tls.client.enabled | quote }} 33 | # This is fixed prior to starting the orderer 34 | ORDERER_GENERAL_TLS_CLIENTROOTCAS: "/var/hyperledger/tls/client/cert/*" 35 | GODEBUG: "netdns=go" 36 | ADMIN_MSP_PATH: /var/hyperledger/admin_msp 37 | -------------------------------------------------------------------------------- /hlf-ord/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | {{- $fullName := include "hlf-ord.fullname" . -}} 3 | {{- $ingressPath := .Values.ingress.path -}} 4 | apiVersion: extensions/v1beta1 5 | kind: Ingress 6 | metadata: 7 | name: {{ $fullName }} 8 | labels: 9 | {{ include "labels.standard" . | indent 4 }} 10 | {{- with .Values.ingress.annotations }} 11 | annotations: 12 | {{ toYaml . | indent 4 }} 13 | {{- end }} 14 | spec: 15 | {{- if .Values.ingress.tls }} 16 | tls: 17 | {{- range .Values.ingress.tls }} 18 | - hosts: 19 | {{- range .hosts }} 20 | - {{ . }} 21 | {{- end }} 22 | secretName: {{ .secretName }} 23 | {{- end }} 24 | {{- end }} 25 | rules: 26 | {{- range .Values.ingress.hosts }} 27 | - host: {{ . 
}} 28 | http: 29 | paths: 30 | - path: {{ $ingressPath }} 31 | backend: 32 | serviceName: {{ $fullName }} 33 | servicePort: grpc 34 | {{- end }} 35 | {{- end }} 36 | -------------------------------------------------------------------------------- /hlf-ord/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: {{ include "hlf-ord.fullname" . }} 6 | labels: 7 | {{ include "labels.standard" . | indent 4 }} 8 | {{- if .Values.persistence.annotations }} 9 | annotations: 10 | {{ toYaml .Values.persistence.annotations | indent 4 }} 11 | {{- end }} 12 | spec: 13 | accessModes: 14 | - {{ .Values.persistence.accessMode | quote }} 15 | resources: 16 | requests: 17 | storage: {{ .Values.persistence.size | quote }} 18 | {{- if .Values.persistence.storageClass }} 19 | {{- if (eq "-" .Values.persistence.storageClass) }} 20 | storageClassName: "" 21 | {{- else }} 22 | storageClassName: "{{ .Values.persistence.storageClass }}" 23 | {{- end }} 24 | {{- end }} 25 | {{- end }} 26 | -------------------------------------------------------------------------------- /hlf-ord/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "hlf-ord.fullname" . }} 5 | labels: 6 | {{ include "labels.standard" . | indent 4 }} 7 | spec: 8 | type: {{ .Values.service.type }} 9 | ports: 10 | - port: {{ .Values.service.port }} 11 | targetPort: 7050 12 | protocol: TCP 13 | name: grpc 14 | selector: 15 | app: {{ include "hlf-ord.name" . }} 16 | release: {{ .Release.Name }} 17 | -------------------------------------------------------------------------------- /hlf-ord/tests/README.md: -------------------------------------------------------------------------------- 1 | # Chart testing 2 | 3 | > Eventually this will be replaced with an integration test, likely running with `pytest` 4 | 5 | Commands should be run from the root folder of the repository. 6 | 7 | ## Orderer 8 | 9 | ### Set up cryptographic material 10 | 11 | #### Orderer Org admin 12 | 13 | ORG_CERT=$(ls ./hlf-ord/tests/fixtures/crypto/admin/*.pem) 14 | 15 | kubectl create secret generic -n test hlf--ord-admincert --from-file=cert.pem=$ORG_CERT 16 | 17 | CA_CERT=$(ls ./hlf-ord/tests/fixtures/crypto/ca/*.pem) 18 | 19 | kubectl create secret generic -n test hlf--ord-cacert --from-file=cacert.pem=$CA_CERT 20 | 21 | #### Orderer node 22 | 23 | NODE_CERT=$(ls ./hlf-ord/tests/fixtures/crypto/orderer/*.pem) 24 | 25 | kubectl create secret generic -n test hlf--ord0-idcert --from-file=cert.pem=$NODE_CERT 26 | 27 | NODE_KEY=$(ls ./hlf-ord/tests/fixtures/crypto/orderer/*_sk) 28 | 29 | kubectl create secret generic -n test hlf--ord0-idkey --from-file=key.pem=$NODE_KEY 30 | 31 | #### Genesis block 32 | 33 | kubectl create secret generic -n test hlf--genesis --from-file=./hlf-ord/tests/fixtures/crypto/genesis.block 34 | 35 | ### Install 36 | 37 | Install helm chart of orderer. 
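Note that with Helm v2 (assumed here), `-n` sets the Helm release name (`ord0`); the Kubernetes namespace is passed separately via `--namespace`.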
38 | 39 | helm install ./hlf-ord -n ord0 --namespace test -f ./hlf-ord/tests/values/orderer.yaml 40 | 41 | export ORD_POD=$(kubectl get pods --namespace test -l "app=hlf-ord,release=ord0" -o jsonpath="{.items[0].metadata.name}") 42 | 43 | Check that server is running 44 | 45 | kubectl logs -n test $ORD_POD | grep 'completeInitialization' 46 | 47 | ### Cleanup 48 | 49 | Delete charts we installed 50 | 51 | helm delete --purge ord0 52 | 53 | Delete the secrets we created 54 | 55 | kubectl -n test delete secret hlf--ord-admincert hlf--ord-cacert hlf--ord0-idcert hlf--ord0-idkey hlf--genesis 56 | -------------------------------------------------------------------------------- /hlf-ord/tests/fixtures/crypto/admin/Admin@test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICLDCCAdKgAwIBAgIRAK1vx1TifWv50bGNHoqKeq4wCgYIKoZIzj0EAwIwfzEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxIjAgBgNV 5 | BAMTGWNhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAwWhcN 6 | MjgxMTEzMTYzNjAwWjBhMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5p 7 | YTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzElMCMGA1UEAwwcQWRtaW5AdGVzdC5z 8 | dmMuY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABO1mivkH 9 | RD4pFuWrJJnxG9FVAC9vGXWnq7RehPVLNYQMFZfC0o7zfrh24BPl5QorqhnfjPWS 10 | npN9gi+76goBJWKjTTBLMA4GA1UdDwEB/wQEAwIHgDAMBgNVHRMBAf8EAjAAMCsG 11 | A1UdIwQkMCKAIJdM9gxid6EMqIBZmn743NLt43id5yvlmmFo45rvCXB1MAoGCCqG 12 | SM49BAMCA0gAMEUCIQCmKozEPPRC4UXCNWQDXL6bz164btrY/kqSZGRpYOHD+AIg 13 | MHwiwlFArGH+JHDCwUzpUjMhqUlBuy5aIRMt3pSJ/ZM= 14 | -----END CERTIFICATE----- 15 | -------------------------------------------------------------------------------- /hlf-ord/tests/fixtures/crypto/ca/ca.test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICXDCCAgKgAwIBAgIRAJtr236LoQiamWimE10C6/UwCgYIKoZIzj0EAwIwfzEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxIjAgBgNV 5 | BAMTGWNhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAwWhcN 6 | MjgxMTEzMTYzNjAwWjB/MQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5p 7 | YTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEfMB0GA1UEChMWdGVzdC5zdmMuY2x1 8 | c3Rlci5sb2NhbDEiMCAGA1UEAxMZY2EudGVzdC5zdmMuY2x1c3Rlci5sb2NhbDBZ 9 | MBMGByqGSM49AgEGCCqGSM49AwEHA0IABDa/ibgL2fNYJo6/abGEVSxQ3+Fiz97B 10 | kITnSnwlxNH2giNBGEJnvuJO3LIngfKLRgnuhI79u+fX1u8+ck+kbZejXzBdMA4G 11 | A1UdDwEB/wQEAwIBpjAPBgNVHSUECDAGBgRVHSUAMA8GA1UdEwEB/wQFMAMBAf8w 12 | KQYDVR0OBCIEIJdM9gxid6EMqIBZmn743NLt43id5yvlmmFo45rvCXB1MAoGCCqG 13 | SM49BAMCA0gAMEUCIQDF5R5JN8TyxkXou2rnwQVb2L05lEfqXLBdWfgRJfFYWAIg 14 | YNd2FfDj5h8r0yUm1TsIYm8R4zubIcSUYlXWYIEM7ng= 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /hlf-ord/tests/fixtures/crypto/genesis.block: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidtechnology/at-charts/c785f1dc0cd0e53ff2dee23f3f5c3c81823f50d1/hlf-ord/tests/fixtures/crypto/genesis.block -------------------------------------------------------------------------------- /hlf-ord/tests/fixtures/crypto/orderer/4ca789b7ba945262b58f873358ef32947fe450d949084a887995a7aa89a95ee1_sk: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 
| MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgdvsu8Mj4jO5+taw7 3 | nJq09sPBi+SaoLe2KhEdOSsOExKhRANCAATeLiGC3f7NVWiZJJuRBhtOrl4mXPVX 4 | wYSyQlqBatljxZK0jRmSPQ/qd8mBBSkYaBEE5qKMbWJXlMxtm+m9+9Ot 5 | -----END PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /hlf-ord/tests/fixtures/crypto/orderer/ord0-hlf-ord.test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICMjCCAdigAwIBAgIQeSG4ubdKA9OtbVtvlljQZjAKBggqhkjOPQQDAjB/MQsw 3 | CQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZy 4 | YW5jaXNjbzEfMB0GA1UEChMWdGVzdC5zdmMuY2x1c3Rlci5sb2NhbDEiMCAGA1UE 5 | AxMZY2EudGVzdC5zdmMuY2x1c3Rlci5sb2NhbDAeFw0xODExMTYxNjM2MDBaFw0y 6 | ODExMTMxNjM2MDBaMGgxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlh 7 | MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMSwwKgYDVQQDEyNvcmQwLWhsZi1vcmQu 8 | dGVzdC5zdmMuY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IA 9 | BN4uIYLd/s1VaJkkm5EGG06uXiZc9VfBhLJCWoFq2WPFkrSNGZI9D+p3yYEFKRho 10 | EQTmooxtYleUzG2b6b37062jTTBLMA4GA1UdDwEB/wQEAwIHgDAMBgNVHRMBAf8E 11 | AjAAMCsGA1UdIwQkMCKAIJdM9gxid6EMqIBZmn743NLt43id5yvlmmFo45rvCXB1 12 | MAoGCCqGSM49BAMCA0gAMEUCIQCcbdX/kv+Hhh/P0vqxzZam6Di7cZC47zCHv1QU 13 | +NgOOQIgWHR03dYXStfggla4PVhS7ha+rGOBS8uQ9bVhDyum0eY= 14 | -----END CERTIFICATE----- 15 | -------------------------------------------------------------------------------- /hlf-ord/tests/values/orderer.yaml: -------------------------------------------------------------------------------- 1 | ord: 2 | type: solo 3 | mspID: OrdererMSP 4 | 5 | secrets: 6 | ord: 7 | cert: hlf--ord0-idcert 8 | key: hlf--ord0-idkey 9 | caCert: hlf--ord-cacert 10 | genesis: hlf--genesis 11 | adminCert: hlf--ord-admincert 12 | -------------------------------------------------------------------------------- /hlf-ord/values.yaml: -------------------------------------------------------------------------------- 1 | ## Default values for hlf-ord. 2 | ## This is a YAML-formatted file. 3 | ## Declare variables to be passed into your templates. 4 | 5 | image: 6 | repository: hyperledger/fabric-orderer 7 | tag: 1.4.1 8 | pullPolicy: IfNotPresent 9 | 10 | service: 11 | # Cluster IP or LoadBalancer 12 | type: ClusterIP 13 | port: 7050 14 | 15 | ingress: 16 | enabled: false 17 | annotations: {} 18 | # kubernetes.io/ingress.class: nginx 19 | # nginx.ingress.kubernetes.io/ssl-redirect: "true" 20 | # nginx.ingress.kubernetes.io/backend-protocol: "GRPC" 21 | # certmanager.k8s.io/cluster-issuer: "letsencrypt-staging" 22 | path: / 23 | hosts: 24 | - hlf-ord.local 25 | tls: [] 26 | # - secretName: hlf-ord-tls 27 | # hosts: 28 | # - hlf-ord.local 29 | 30 | persistence: 31 | enabled: true 32 | annotations: {} 33 | ## If defined, storageClassName: 34 | ## If set to "-", storageClassName: "", which disables dynamic provisioning 35 | ## If undefined (the default) or set to null, no storageClassName spec is 36 | ## set, choosing the default provisioner. 
(gp2 on AWS, standard on 37 | ## GKE, AWS & OpenStack) 38 | ## 39 | storageClass: "" 40 | accessMode: ReadWriteOnce 41 | size: 1Gi 42 | # existingClaim: "" 43 | 44 | ################################## 45 | ## Orderer configuration options # 46 | ################################## 47 | ord: 48 | ## Type of Orderer, `solo` or `kafka` 49 | type: solo 50 | ## MSP ID of the Orderer 51 | mspID: OrdererMSP 52 | ## Orderer logging level 53 | logging: debug 54 | ## Orderer Kafka Replication Factor 55 | replicationFactor: "1" 56 | ## Kafka Verbose 57 | verbose: "true" 58 | # TLS 59 | tls: 60 | server: 61 | enabled: "false" 62 | client: 63 | enabled: "false" 64 | 65 | 66 | secrets: 67 | ## These secrets should contain the Orderer crypto materials and credentials 68 | ord: {} 69 | ## Credentials, saved under keys 'CA_USERNAME' and 'CA_PASSWORD' 70 | # cred: hlf--ord1-cred 71 | ## Certificate, saved under key 'cert.pem' 72 | # cert: hlf--ord1-idcert 73 | ## Key, saved under 'key.pem' 74 | # key: hlf--ord1-idkey 75 | ## CA Cert, saved under 'cacert.pem' 76 | # caCert: hlf--ord1-cacert 77 | ## Intermediate CA Cert (optional), saved under 'intermediatecacert.pem' 78 | # intCaCert: hlf--ord1-caintcert 79 | ## TLS secret, saved under keys 'tls.crt' and 'tls.key' (to conform with K8S nomenclature) 80 | # tls: hlf--ord1-tls 81 | ## TLS root CA certificate saved under key 'cert.pem' 82 | # tlsRootCert: hlf--ord-tlsrootcert 83 | ## TLS client root CA certificates saved under any names (as there may be multiple) 84 | # tlsClientRootCerts: hlf--peer-tlsrootcert 85 | ## This should contain "genesis" block derived from a configtx.yaml 86 | ## configtxgen -profile OrdererGenesis -outputBlock genesis.block 87 | # genesis: hlf--genesis 88 | ## This should contain the Certificate of the Orderer Organisation admin 89 | ## This is necessary to successfully run the orderer 90 | # adminCert: hlf--ord-admincert 91 | 92 | resources: {} 93 | ## We usually recommend not to specify default resources and to leave this as a conscious 94 | ## choice for the user. This also increases chances charts run on environments with little 95 | ## resources, such as Minikube. If you do want to specify resources, uncomment the following 96 | ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 97 | # limits: 98 | # cpu: 100m 99 | # memory: 128Mi 100 | # requests: 101 | # cpu: 100m 102 | # memory: 128Mi 103 | 104 | nodeSelector: {} 105 | 106 | tolerations: [] 107 | 108 | affinity: {} 109 | ## Suggested antiAffinity, as each Orderer should be on a separate Node for resilience 110 | # podAntiAffinity: 111 | # requiredDuringSchedulingIgnoredDuringExecution: 112 | # - topologyKey: "kubernetes.io/hostname" 113 | # labelSelector: 114 | # matchLabels: 115 | # app: hlf-ord 116 | -------------------------------------------------------------------------------- /hlf-peer/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | 23 | # OWNERS file for Helm repository 24 | OWNERS 25 | -------------------------------------------------------------------------------- /hlf-peer/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | description: Hyperledger Fabric Peer chart (these charts are created by AID:Tech and are currently not directly associated with the Hyperledger project) 3 | name: hlf-peer 4 | version: 1.2.7 5 | appVersion: 1.4.1 6 | 7 | keywords: 8 | - blockchain 9 | - hyperledger 10 | - fabric 11 | home: http://hyperledger-fabric.readthedocs.io 12 | sources: 13 | - https://github.com/hyperledger/fabric 14 | maintainers: 15 | - name: alexvicegrab 16 | email: sasha@aid.technology 17 | - name: nicolapaoli 18 | email: nicola@aid.technology 19 | icon: https://www.hyperledger.org/wp-content/uploads/2018/04/fabric-logo.png 20 | -------------------------------------------------------------------------------- /hlf-peer/FABRIC_UPGRADE.md: -------------------------------------------------------------------------------- 1 | # Upgrade Fabric from version 1.3.0 to 1.4.1 2 | 3 | 1. Update image tag from 1.3.0 to 1.4.1 in values.yaml 4 | 5 | 2. Update version from 1.2.6 to 1.2.7 in Chart.yaml 6 | 7 | 3. Update appVersion from 1.3.0 to 1.4.1 in Chart.yaml 8 | 9 | 4. Variable to update 10 | 11 | * CORE_LOGGING_LEVEL > FABRIC_LOGGING_SPEC 12 | 13 | 5. Variables to add 14 | 15 | * CORE_PEER_CHAINCODEADDRESS 16 | 17 | * CORE_PEER_CHAINCODELISTENADDRESS 18 | -------------------------------------------------------------------------------- /hlf-peer/OWNERS: -------------------------------------------------------------------------------- 1 | approvers: 2 | - alexvicegrab 3 | - nicolapaoli 4 | reviewers: 5 | - alexvicegrab 6 | - nicolapaoli 7 | -------------------------------------------------------------------------------- /hlf-peer/UPGRADE_1-1-x.md: -------------------------------------------------------------------------------- 1 | # Upgrading from version 1.1.x and up. 
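Before starting, it is worth double-checking that the peer pod can be located with the labels used throughout this guide (a quick sanity check; adjust the namespace and release name to match your deployment): ``` kubectl -n default get pods -l "app=hlf-peer,release=peer1" ```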
2 | 3 | ## Secret creation 4 | 5 | Specify which release we want to work with: 6 | 7 | ``` 8 | export RELEASE='peer1' 9 | export NAMESPACE='default' 10 | export POD_NAME=$(kubectl -n ${NAMESPACE} get pods -l "app=hlf-peer,release=${RELEASE}" -o jsonpath="{.items[0].metadata.name}") 11 | ``` 12 | 13 | ### Cred secret 14 | 15 | Get the relevant credentials: 16 | 17 | ``` 18 | export CA_USERNAME=$(kubectl -n ${NAMESPACE} get secret ${RELEASE}-hlf-peer -o jsonpath="{.data.CA_USERNAME}" | base64 --decode; echo) 19 | export CA_PASSWORD=$(kubectl -n ${NAMESPACE} get secret ${RELEASE}-hlf-peer -o jsonpath="{.data.CA_PASSWORD}" | base64 --decode; echo) 20 | ``` 21 | 22 | Save the credentials in a secret: 23 | 24 | ``` 25 | kubectl -n ${NAMESPACE} create secret generic hlf--${RELEASE}-cred --from-literal=CA_USERNAME=$CA_USERNAME --from-literal=CA_PASSWORD=$CA_PASSWORD 26 | ``` 27 | 28 | ### Cert secret 29 | 30 | Get the content of the certificate file and save it as a secret: 31 | 32 | ``` 33 | export CONTENT=$(kubectl -n ${NAMESPACE} exec ${POD_NAME} -- cat /var/hyperledger/msp/signcerts/cert.pem) 34 | kubectl -n ${NAMESPACE} create secret generic hlf--${RELEASE}-idcert --from-literal=cert.pem=$CONTENT 35 | ``` 36 | 37 | ### Key secret 38 | 39 | Get the content of the key file and save it as a secret: 40 | 41 | ``` 42 | export CONTENT=$(kubectl -n ${NAMESPACE} exec ${POD_NAME} -- bash -c 'cat /var/hyperledger/msp/keystore/*_sk') 43 | kubectl -n ${NAMESPACE} create secret generic hlf--${RELEASE}-idkey --from-literal=key.pem=$CONTENT 44 | ``` 45 | 46 | ### CA cert secret 47 | 48 | Get the content of the CA certificate file and save it as a secret: 49 | 50 | ``` 51 | export CONTENT=$(kubectl -n ${NAMESPACE} exec ${POD_NAME} -- bash -c 'cat /var/hyperledger/msp/cacerts/*.pem') 52 | kubectl -n ${NAMESPACE} create secret generic hlf--${RELEASE}-cacert --from-literal=cacert.pem=$CONTENT 53 | ``` 54 | 55 | ### Intermediate CA cert secret 56 | 57 | Get the content of the intermediate CA certificate file and save it as a secret (if you have used an intermediate CA): 58 | 59 | ``` 60 | export CONTENT=$(kubectl -n ${NAMESPACE} exec ${POD_NAME} -- bash -c 'cat /var/hyperledger/msp/intermediatecerts/*.pem') 61 | kubectl -n ${NAMESPACE} create secret generic hlf--${RELEASE}-caintcert --from-literal=intermediatecacert.pem=$CONTENT 62 | ``` 63 | 64 | ## Move old MSP material out 65 | 66 | We can move the crypto material we created earlier to another directory in our Persistent Volume, so we can roll back if needed. 67 | 68 | ``` 69 | kubectl -n ${NAMESPACE} exec ${POD_NAME} -- mv /var/hyperledger/msp /var/hyperledger/msp_old 70 | ``` 71 | 72 | ## Upgrade the chart 73 | 74 | You will need to update the chart to the latest version by editing the relevant values files: 75 | 76 | ``` 77 | secrets: 78 | peer: 79 | cred: hlf--peer1-cred 80 | cert: hlf--peer1-idcert 81 | key: hlf--peer1-idkey 82 | caCert: hlf--peer1-cacert 83 | intCaCert: hlf--peer1-caintcert # If applicable 84 | ``` 85 | 86 | And running: 87 | 88 | ``` 89 | helm upgrade ${RELEASE} ./hlf-peer 90 | ``` 91 | -------------------------------------------------------------------------------- /hlf-peer/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Run the following commands to... 2 | 1. Get the name of the pod running the Fabric Peer: 3 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "hlf-peer.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 4 | 5 | 2. 
Get the application URL by running these commands: 6 | {{- if contains "NodePort" .Values.service.type }} 7 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "hlf-peer.fullname" . }}) 8 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 9 | echo http://$NODE_IP:$NODE_PORT 10 | {{- else if contains "LoadBalancer" .Values.service.type }} 11 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 12 | You can watch the status of it by running 'kubectl get svc -w {{ include "hlf-peer.fullname" . }}' 13 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "hlf-peer.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') 14 | echo http://$SERVICE_IP:{{ .Values.service.port }} 15 | {{- else if contains "ClusterIP" .Values.service.type }} 16 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "hlf-peer.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 17 | echo "Visit http://127.0.0.1:8080 to use your application" 18 | kubectl port-forward $POD_NAME 8080:7051 19 | {{- end }} 20 | 21 | 3. Obtain CA_USERNAME and CA_PASSWORD to register identity with CA: 22 | export CA_USERNAME=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "hlf-peer.fullname" . }} -o jsonpath="{.data.CA_USERNAME}" | base64 --decode; echo) 23 | export CA_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "hlf-peer.fullname" . }} -o jsonpath="{.data.CA_PASSWORD}" | base64 --decode; echo) 24 | 25 | 4. Update the chart without resetting a password (requires running step 3): 26 | helm upgrade {{ .Release.Name }} stable/hlf-peer --namespace {{ .Release.Namespace }} -f my-values.yaml --set caUsername=$CA_USERNAME,caPassword=$CA_PASSWORD 27 | -------------------------------------------------------------------------------- /hlf-peer/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "hlf-peer.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "hlf-peer.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "hlf-peer.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | 34 | {{- /* 35 | Credit: @technosophos 36 | https://github.com/technosophos/common-chart/ 37 | labels.standard prints the standard Helm labels. 
38 | The standard labels are frequently used in metadata. 39 | */ -}} 40 | {{- define "labels.standard" -}} 41 | app: {{ include "hlf-peer.name" . }} 42 | heritage: {{ .Release.Service | quote }} 43 | release: {{ .Release.Name | quote }} 44 | chart: {{ include "hlf-peer.chart" . }} 45 | {{- end -}} 46 | -------------------------------------------------------------------------------- /hlf-peer/templates/configmap--peer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ include "hlf-peer.fullname" . }}--peer 5 | labels: 6 | {{ include "labels.standard" . | indent 4 }} 7 | data: 8 | CORE_PEER_ADDRESSAUTODETECT: "true" 9 | CORE_PEER_ID: {{ .Release.Name }} 10 | CORE_PEER_NETWORKID: nid1 11 | # If we have an ingress, we set hostname to it 12 | {{- if .Values.ingress.enabled }} 13 | CORE_PEER_ADDRESS: {{ index .Values.ingress.hosts 0 }}:443 14 | {{- else }} 15 | # Otherwise we use CORE_PEER_ADDRESSAUTODETECT to auto-detect its address 16 | {{- end }} 17 | CORE_PEER_LISTENADDRESS: 0.0.0.0:7051 18 | CORE_PEER_EVENTS_ADDRESS: 0.0.0.0:7053 19 | CORE_PEER_COMMITTER_ENABLED: "true" 20 | CORE_PEER_PROFILE_ENABLED: "true" 21 | CORE_PEER_DISCOVERY_PERIOD: 60s 22 | CORE_PEER_DISCOVERY_TOUCHPERIOD: 60s 23 | CORE_PEER_LOCALMSPID: {{ .Values.peer.mspID | quote }} 24 | CORE_PEER_MSPCONFIGPATH: /var/hyperledger/msp 25 | CORE_PEER_CHAINCODEADDRESS: {{ index .Values.ingress.hosts 0 }}:7052 26 | CORE_PEER_CHAINCODELISTENADDRESS: 0.0.0.0:7052 27 | ########### 28 | # Logging # 29 | ########### 30 | FABRIC_LOGGING_SPEC: {{ .Values.peer.logging | quote }} 31 | CORE_LOGGING_PEER: debug 32 | CORE_LOGGING_CAUTHDSL: debug 33 | CORE_LOGGING_GOSSIP: debug 34 | CORE_LOGGING_LEDGER: debug 35 | CORE_LOGGING_MSP: info 36 | CORE_LOGGING_POLICIES: debug 37 | CORE_LOGGING_GRPC: debug 38 | ########## 39 | # Gossip # 40 | ########## 41 | CORE_PEER_GOSSIP_BOOTSTRAP: {{ .Values.peer.gossip.bootstrap | quote }} 42 | CORE_PEER_GOSSIP_ENDPOINT: {{ .Values.peer.gossip.endpoint | quote }} 43 | CORE_PEER_GOSSIP_EXTERNALENDPOINT: {{ .Values.peer.gossip.externalEndpoint | quote }} 44 | CORE_PEER_GOSSIP_ORGLEADER: {{ .Values.peer.gossip.orgLeader | quote }} 45 | CORE_PEER_GOSSIP_USELEADERELECTION: {{ .Values.peer.gossip.useLeaderElection | quote }} 46 | ########## 47 | # TLS # 48 | ########## 49 | CORE_PEER_TLS_ENABLED: {{ .Values.peer.tls.server.enabled | quote }} 50 | CORE_PEER_TLS_CERT_FILE: "/var/hyperledger/tls/server/pair/tls.crt" 51 | CORE_PEER_TLS_KEY_FILE: "/var/hyperledger/tls/server/pair/tls.key" 52 | CORE_PEER_TLS_ROOTCERT_FILE: "/var/hyperledger/tls/server/cert/cacert.pem" 53 | CORE_PEER_TLS_CLIENTAUTHREQUIRED: {{ .Values.peer.tls.client.enabled | quote }} 54 | # This is fixed prior to starting the peer 55 | CORE_PEER_TLS_CLIENTROOTCAS_FILES: "/var/hyperledger/tls/client/cert/*" 56 | CORE_PEER_TLS_CLIENTCERT_FILE: "/var/hyperledger/tls/client/pair/tls.crt" 57 | CORE_PEER_TLS_CLIENTKEY_FILE: "/var/hyperledger/tls/client/pair/tls.key" 58 | CORE_VM_ENDPOINT: unix:///host/var/run/docker.sock 59 | CORE_LEDGER_STATE_STATEDATABASE: {{ .Values.peer.databaseType | quote }} 60 | # Containers in the same pod share the host 61 | {{- if eq .Values.peer.databaseType "CouchDB" }} 62 | CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS: {{ .Values.peer.couchdbInstance }}-hlf-couchdb:5984 63 | {{- end }} 64 | PEER_CFG_PATH: /var/hyperledger/config 65 | FABRIC_CFG_PATH: /var/hyperledger/fabric_cfg 66 | GODEBUG: "netdns=go" 67 | ADMIN_MSP_PATH: 
/var/hyperledger/admin_msp 68 | ORD_TLS_PATH: /var/hyperledger/tls/ord/cert 69 | -------------------------------------------------------------------------------- /hlf-peer/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | {{- $fullName := include "hlf-peer.fullname" . -}} 3 | {{- $ingressPath := .Values.ingress.path -}} 4 | apiVersion: extensions/v1beta1 5 | kind: Ingress 6 | metadata: 7 | name: {{ $fullName }} 8 | labels: 9 | {{ include "labels.standard" . | indent 4 }} 10 | {{- with .Values.ingress.annotations }} 11 | annotations: 12 | {{ toYaml . | indent 4 }} 13 | {{- end }} 14 | spec: 15 | {{- if .Values.ingress.tls }} 16 | tls: 17 | {{- range .Values.ingress.tls }} 18 | - hosts: 19 | {{- range .hosts }} 20 | - {{ . }} 21 | {{- end }} 22 | secretName: {{ .secretName }} 23 | {{- end }} 24 | {{- end }} 25 | rules: 26 | {{- range .Values.ingress.hosts }} 27 | - host: {{ . }} 28 | http: 29 | paths: 30 | - path: {{ $ingressPath }} 31 | backend: 32 | serviceName: {{ $fullName }} 33 | servicePort: request 34 | {{- end }} 35 | {{- end }} 36 | -------------------------------------------------------------------------------- /hlf-peer/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: {{ include "hlf-peer.fullname" . }} 6 | labels: 7 | {{ include "labels.standard" . | indent 4 }} 8 | {{- if .Values.persistence.annotations }} 9 | annotations: 10 | {{ toYaml .Values.persistence.annotations | indent 4 }} 11 | {{- end }} 12 | spec: 13 | accessModes: 14 | - {{ .Values.persistence.accessMode | quote }} 15 | resources: 16 | requests: 17 | storage: {{ .Values.persistence.size | quote }} 18 | {{- if .Values.persistence.storageClass }} 19 | {{- if (eq "-" .Values.persistence.storageClass) }} 20 | storageClassName: "" 21 | {{- else }} 22 | storageClassName: "{{ .Values.persistence.storageClass }}" 23 | {{- end }} 24 | {{- end }} 25 | {{- end }} 26 | -------------------------------------------------------------------------------- /hlf-peer/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "hlf-peer.fullname" . }} 5 | labels: 6 | {{ include "labels.standard" . | indent 4 }} 7 | spec: 8 | type: {{ .Values.service.type }} 9 | ports: 10 | - port: {{ .Values.service.portRequest }} 11 | targetPort: 7051 12 | protocol: TCP 13 | name: request 14 | - port: {{ .Values.service.portEvent }} 15 | targetPort: 7053 16 | protocol: TCP 17 | name: event 18 | selector: 19 | app: {{ include "hlf-peer.name" . }} 20 | release: {{ .Release.Name }} 21 | -------------------------------------------------------------------------------- /hlf-peer/tests/README.md: -------------------------------------------------------------------------------- 1 | # Chart testing 2 | 3 | > Eventually this will be replaced with an integration test, likely running with `pytest` 4 | 5 | Commands should be run from the root folder of the repository. 
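They also assume the `test` namespace already exists; if it does not, create it first with `kubectl create ns test`.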
6 | 7 | ## Peer 8 | 9 | ### Set up cryptographic material 10 | 11 | #### Peer Org admin 12 | 13 | ORG_CERT=$(ls ./hlf-peer/tests/fixtures/crypto/admin/*.pem) 14 | 15 | kubectl create secret generic -n test hlf--peer-admincert --from-file=cert.pem=$ORG_CERT 16 | 17 | ORG_KEY=$(ls ./hlf-peer/tests/fixtures/crypto/admin/*_sk) 18 | 19 | kubectl create secret generic -n test hlf--peer-adminkey --from-file=key.pem=$ORG_KEY 20 | 21 | CA_CERT=$(ls ./hlf-peer/tests/fixtures/crypto/ca/*.pem) 22 | 23 | kubectl create secret generic -n test hlf--peer-cacert --from-file=cacert.pem=$CA_CERT 24 | 25 | #### Peer node 26 | 27 | NODE_CERT=$(ls ./hlf-peer/tests/fixtures/crypto/peer/*.pem) 28 | 29 | kubectl create secret generic -n test hlf--peer0-idcert --from-file=cert.pem=$NODE_CERT 30 | 31 | NODE_KEY=$(ls ./hlf-peer/tests/fixtures/crypto/peer/*_sk) 32 | 33 | kubectl create secret generic -n test hlf--peer0-idkey --from-file=key.pem=$NODE_KEY 34 | 35 | #### Genesis block 36 | 37 | kubectl create secret generic -n test hlf--channel --from-file=./hlf-peer/tests/fixtures/crypto/mychannel.tx 38 | 39 | ### Install 40 | 41 | Install helm chart of peer. 42 | 43 | helm install ./hlf-peer -n peer0 --namespace test -f ./hlf-peer/tests/values/peer.yaml 44 | 45 | export PEER_POD=$(kubectl get pods --namespace test -l "app=hlf-peer,release=peer0" -o jsonpath="{.items[0].metadata.name}") 46 | 47 | Check that server is running 48 | 49 | kubectl logs -n test $PEER_POD | grep 'Starting peer' 50 | 51 | ### Cleanup 52 | 53 | Delete charts we installed 54 | 55 | helm delete --purge peer0 56 | 57 | Delete the secrets we created 58 | 59 | kubectl -n test delete secret hlf--peer-admincert hlf--peer-adminkey hlf--peer-cacert hlf--peer0-idcert hlf--peer0-idkey hlf--channel 60 | -------------------------------------------------------------------------------- /hlf-peer/tests/fixtures/crypto/admin/478bb28c12190a41a8eb361c6815cf91a0d0ec291f0977f562661a67b1c250dd_sk: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgn3Rvk8JAivQEgxEV 3 | 1uweQa3KtYKx2iY9lP7F+r1/yZOhRANCAAQmbMcHL/4/LTL9/CmIe9JCO9kcyyrb 4 | XHhjaUMvc7lBIcx7C/FNccEFL9dbU6LI3/m3Nss0tD4PZh2v/oKOZiw6 5 | -----END PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /hlf-peer/tests/fixtures/crypto/admin/Admin@test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICKzCCAdKgAwIBAgIRAORmUlx9x7GR5NlM9z1zP4QwCgYIKoZIzj0EAwIwfzEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxIjAgBgNV 5 | BAMTGWNhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAwWhcN 6 | MjgxMTEzMTYzNjAwWjBhMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5p 7 | YTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzElMCMGA1UEAwwcQWRtaW5AdGVzdC5z 8 | dmMuY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCZsxwcv 9 | /j8tMv38KYh70kI72RzLKttceGNpQy9zuUEhzHsL8U1xwQUv11tTosjf+bc2yzS0 10 | Pg9mHa/+go5mLDqjTTBLMA4GA1UdDwEB/wQEAwIHgDAMBgNVHRMBAf8EAjAAMCsG 11 | A1UdIwQkMCKAIBsOEs+gj9C70F4Z1n29Dr7eLzY0h+h3uXpopgxnsQuyMAoGCCqG 12 | SM49BAMCA0cAMEQCIAwfowKCEW2f33N4vy6HIj5LhqVRbZivbH4pydf1El0+AiBC 13 | UUnlvLofHi2IseM7WQx5UC78Hv7FGrQr1Sut4P+GBQ== 14 | -----END CERTIFICATE----- 15 | -------------------------------------------------------------------------------- 
/hlf-peer/tests/fixtures/crypto/ca/ca.test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICXDCCAgKgAwIBAgIRAN6p+R5Yq31fzE+ZBoxSoFowCgYIKoZIzj0EAwIwfzEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxIjAgBgNV 5 | BAMTGWNhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAwWhcN 6 | MjgxMTEzMTYzNjAwWjB/MQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5p 7 | YTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEfMB0GA1UEChMWdGVzdC5zdmMuY2x1 8 | c3Rlci5sb2NhbDEiMCAGA1UEAxMZY2EudGVzdC5zdmMuY2x1c3Rlci5sb2NhbDBZ 9 | MBMGByqGSM49AgEGCCqGSM49AwEHA0IABBPFAO2F4wnI3zMWpLwbjBwDo+wYFR9r 10 | WqjlXD79JFLUmOVimoergoXiGKk3Bufw4Ou7wtErCdmBsSSVxDIqNhSjXzBdMA4G 11 | A1UdDwEB/wQEAwIBpjAPBgNVHSUECDAGBgRVHSUAMA8GA1UdEwEB/wQFMAMBAf8w 12 | KQYDVR0OBCIEIBsOEs+gj9C70F4Z1n29Dr7eLzY0h+h3uXpopgxnsQuyMAoGCCqG 13 | SM49BAMCA0gAMEUCIQCyD/XQi0NafSRf/NgYQexzLkh2w/PAtiWaE9wSazvRKgIg 14 | UPvJDyI9nQGlXaXhSrA6m/09QJSQCKd3shS1Nu1YXwA= 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /hlf-peer/tests/fixtures/crypto/mychannel.tx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidtechnology/at-charts/c785f1dc0cd0e53ff2dee23f3f5c3c81823f50d1/hlf-peer/tests/fixtures/crypto/mychannel.tx -------------------------------------------------------------------------------- /hlf-peer/tests/fixtures/crypto/peer/22de23eeb2b0259b6b739a6954408780beb9e614858c879c75df2e6d04325bf4_sk: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgYxXOQsbAWgGe3jTk 3 | mCXvrW/nQgdDTx6w8qR8RTccREWhRANCAAQfU3F6LblDCfBfAbQX/Znk9SUXjG0i 4 | LyWVNIULoj+PzcC/5vRD18NcGkVn2gGZq9VnF4vEjv6Lv4yp87/GcUmW 5 | -----END PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /hlf-peer/tests/fixtures/crypto/peer/peer0-hlf-peer.test.svc.cluster.local-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICNTCCAdugAwIBAgIRAK5bmw+kGL1WnGMKedQz3LEwCgYIKoZIzj0EAwIwfzEL 3 | MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG 4 | cmFuY2lzY28xHzAdBgNVBAoTFnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwxIjAgBgNV 5 | BAMTGWNhLnRlc3Quc3ZjLmNsdXN0ZXIubG9jYWwwHhcNMTgxMTE2MTYzNjAwWhcN 6 | MjgxMTEzMTYzNjAwWjBqMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5p 7 | YTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEuMCwGA1UEAxMlcGVlcjAtaGxmLXBl 8 | ZXIudGVzdC5zdmMuY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEH 9 | A0IABB9TcXotuUMJ8F8BtBf9meT1JReMbSIvJZU0hQuiP4/NwL/m9EPXw1waRWfa 10 | AZmr1WcXi8SO/ou/jKnzv8ZxSZajTTBLMA4GA1UdDwEB/wQEAwIHgDAMBgNVHRMB 11 | Af8EAjAAMCsGA1UdIwQkMCKAIBsOEs+gj9C70F4Z1n29Dr7eLzY0h+h3uXpopgxn 12 | sQuyMAoGCCqGSM49BAMCA0gAMEUCIQCjLN4bBUuCtxD8hOg1ZYp7CgCNQn8YHj2K 13 | zocqBCWI1wIgOnOrhlNq2LZwT1YA0vRwP3ljibsTEzJaCyVfY4ra/Zk= 14 | -----END CERTIFICATE----- 15 | -------------------------------------------------------------------------------- /hlf-peer/tests/values/peer.yaml: -------------------------------------------------------------------------------- 1 | peer: 2 | mspID: PeerMSP 3 | 4 | secrets: 5 | peer: 6 | cert: hlf--peer0-idcert 7 | key: hlf--peer0-idkey 8 | caCert: hlf--peer-cacert 9 | channel: hlf--channel 10 | adminCert: hlf--peer-admincert 11 | adminKey: 
hlf--peer-adminkey 12 | -------------------------------------------------------------------------------- /hlf-peer/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for hlf-peer. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | image: 6 | repository: hyperledger/fabric-peer 7 | tag: 1.4.1 8 | pullPolicy: IfNotPresent 9 | 10 | service: 11 | # Cluster IP or LoadBalancer 12 | type: ClusterIP 13 | portRequest: 7051 14 | portEvent: 7053 15 | 16 | ingress: 17 | enabled: false 18 | annotations: {} 19 | # kubernetes.io/ingress.class: nginx 20 | # nginx.ingress.kubernetes.io/ssl-redirect: "true" 21 | # nginx.ingress.kubernetes.io/backend-protocol: "GRPC" 22 | # certmanager.k8s.io/cluster-issuer: "letsencrypt-staging" 23 | path: / 24 | hosts: 25 | - hlf-peer.local 26 | tls: [] 27 | # - secretName: hlf-peer-tls 28 | # hosts: 29 | # - hlf-peer.local 30 | 31 | persistence: 32 | enabled: true 33 | annotations: {} 34 | ## If defined, storageClassName: 35 | ## If set to "-", storageClassName: "", which disables dynamic provisioning 36 | ## If undefined (the default) or set to null, no storageClassName spec is 37 | ## set, choosing the default provisioner. (gp2 on AWS, standard on 38 | ## GKE, AWS & OpenStack) 39 | ## 40 | storageClass: "" 41 | accessMode: ReadWriteOnce 42 | size: 1Gi 43 | # existingClaim: "" 44 | 45 | ################################## 46 | ## Peer configuration options # 47 | ################################## 48 | peer: 49 | # Type of database ("goleveldb" or "CouchDB"): 50 | databaseType: goleveldb 51 | # If CouchDB is used, which chart holds it 52 | couchdbInstance: cdb-peer1 53 | ## MSP ID of the Peer 54 | mspID: Org1MSP 55 | ## Peer logging level 56 | logging: debug 57 | gossip: 58 | bootstrap: "" 59 | endpoint: "" 60 | externalEndpoint: "" 61 | orgLeader: "false" 62 | useLeaderElection: "true" 63 | 64 | tls: 65 | server: 66 | enabled: "false" 67 | client: 68 | enabled: "false" 69 | 70 | # Secrets references, empty by default, fill in with your secrets (particularly adminCert) or add Peer Admin certificate manually after launching chart. 
71 | secrets: 72 | ## These secrets should contain the Peer crypto materials and credentials 73 | peer: {} 74 | ## Credentials, saved under keys 'CA_USERNAME' and 'CA_PASSWORD' 75 | # cred: hlf--peer1-cred 76 | ## Certificate, saved under key 'cert.pem' 77 | # cert: hlf--peer1-idcert 78 | ## Key, saved under 'key.pem' 79 | # key: hlf--peer1-idkey 80 | ## CA Cert, saved under 'cacert.pem' 81 | # caCert: hlf--peer1-cacert 82 | ## Intermediate CA Cert (optional), saved under 'intermediatecacert.pem' 83 | # intCaCert: hlf--peer1-caintcert 84 | ## TLS secret, saved under keys 'tls.crt' and 'tls.key' (to conform with K8S nomenclature) 85 | # tls: hlf--peer1-tls 86 | ## TLS root CA certificate saved under key 'cert.pem' 87 | # tlsRootCert: hlf--peer-tlsrootcert 88 | ## TLS client key pair, saved under keys 'tls.crt' and 'tls.key' 89 | # tlsClient: hlf--peer1-tls 90 | ## TLS client root CA certificates saved under any names (as there may be multiple) 91 | # tlsClientRootCerts: hlf--ord-tlsrootcert 92 | ## This should contain "channel" transaction derived from a configtx.yaml 93 | ## configtxgen -profile ComposerChannel -channelID composerchannel -outputCreateChannelTx composerchannel.tx 94 | # channels: 95 | # - foochannel 96 | # - barchannel 97 | ## This should contain the Certificate of the Peer Organisation admin 98 | ## This is necessary to successfully run the peer 99 | # adminCert: hlf--peer-admincert 100 | ## This should contain the Private Key of the Peer Organisation admin 101 | ## This is necessary to successfully join a channel 102 | # adminKey: hlf--peer-adminkey 103 | ## This should include the Orderer TLS 'cacert.pem' 104 | # ordTlsRootCert: hlf--ord-tlsrootcert 105 | 106 | resources: {} 107 | ## We usually recommend not to specify default resources and to leave this as a conscious 108 | ## choice for the user. This also increases chances charts run on environments with little 109 | ## resources, such as Minikube. If you do want to specify resources, uncomment the following 110 | ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
111 | # limits: 112 | # cpu: 100m 113 | # memory: 128Mi 114 | # requests: 115 | # cpu: 100m 116 | # memory: 128Mi 117 | 118 | nodeSelector: {} 119 | 120 | tolerations: [] 121 | 122 | affinity: {} 123 | ## Suggested antiAffinity, as each Peer should be on a separate Node for resilience 124 | # podAntiAffinity: 125 | # requiredDuringSchedulingIgnoredDuringExecution: 126 | # - topologyKey: "kubernetes.io/hostname" 127 | # labelSelector: 128 | # matchLabels: 129 | # app: hlf-peer 130 | -------------------------------------------------------------------------------- /integration/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aidtechnology/at-charts/c785f1dc0cd0e53ff2dee23f3f5c3c81823f50d1/integration/__init__.py -------------------------------------------------------------------------------- /integration/test_qa.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | 4 | from nephos.fabric.settings import load_config, check_cluster 5 | from nephos.helpers.k8s import ns_create 6 | from nephos.helpers.misc import execute 7 | from nephos.runners import runner_fabric 8 | 9 | CURRENT_PATH = os.path.abspath(os.path.split(__file__)[0]) 10 | 11 | 12 | class TestIntegrationQa: 13 | # We will check cluster and flatly refuse to do integration testing unless on 'minikube' 14 | CONTEXT = "minikube" 15 | CONFIG = os.path.join(CURRENT_PATH, "..", "networks", "qa", "nephos_config.yaml") 16 | TLS_PATH = os.path.join(CURRENT_PATH, "..", "networks", "ca-nephos-local") 17 | logging.basicConfig( 18 | level=logging.DEBUG, 19 | format='%(asctime)s %(module)-10s %(levelname)-8s %(message)s' 20 | ) 21 | 22 | def test_integration_qa(self): 23 | # Get options 24 | opts = load_config(self.CONFIG) 25 | 26 | # Save TLS of each CA in its relevant secret 27 | ns_create("cas") 28 | 29 | # TODO: Eventually we should enable getting path for multiple CAs programatically 30 | execute( 31 | ( 32 | "kubectl -n cas create secret tls ca--tls " 33 | + f"--cert={self.TLS_PATH}.crt " 34 | + f"--key={self.TLS_PATH}.key" 35 | ) 36 | ) 37 | 38 | # TODO: There should be a more elegant way of obtaining all the releases 39 | releases = ( 40 | [key for key in opts["cas"].keys()] 41 | + [key + "-pg" for key in opts["cas"].keys()] 42 | + list(opts["msps"]["AlphaMSP"]["orderers"]["nodes"].keys()) 43 | + [ 44 | ("cdb-" + key) 45 | for key in opts["msps"]["BetaMSP"]["peers"]["nodes"].keys() 46 | ] 47 | + [key for key in opts["msps"]["BetaMSP"]["peers"]["nodes"].keys()] 48 | ) 49 | 50 | # Run Fabric script 51 | check_cluster( 52 | self.CONTEXT 53 | ) # Dangerous operation, recheck we have not shifted context 54 | runner_fabric(opts) 55 | 56 | # Delete all deployments from Helm 57 | check_cluster( 58 | self.CONTEXT 59 | ) # Dangerous operation, recheck we have not shifted context 60 | execute(f"helm delete --purge {' '.join(releases)}") 61 | 62 | # Delete the namespaces 63 | check_cluster( 64 | self.CONTEXT 65 | ) # Dangerous operation, recheck we have not shifted context 66 | execute("kubectl delete ns cas alpha beta") 67 | -------------------------------------------------------------------------------- /networks/ca-nephos-local.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFNDCCAxwCCQCYygRMy/3XDzANBgkqhkiG9w0BAQsFADBcMQswCQYDVQQGEwJJ 3 | RTEPMA0GA1UECAwGRHVibGluMQ8wDQYDVQQHDAZEdWJsaW4xETAPBgNVBAoMCEFJ 4 | 
RDpUZWNoMRgwFgYDVQQDDA9jYS5uZXBob3MubG9jYWwwHhcNMTkwMjI2MDQ1MTM5 5 | WhcNMjkwMjIzMDQ1MTM5WjBcMQswCQYDVQQGEwJJRTEPMA0GA1UECAwGRHVibGlu 6 | MQ8wDQYDVQQHDAZEdWJsaW4xETAPBgNVBAoMCEFJRDpUZWNoMRgwFgYDVQQDDA9j 7 | YS5uZXBob3MubG9jYWwwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDL 8 | M4gPH5Y8gW8FTSi+nGm0614vd9xlDwTTff116vbUt2XiP17uycAr3ix70nrWsmhJ 9 | ysZrPWk31tJcLN5iqaavYn2iUGYxR+y3comaBdHxKZ8IaC+gz7dNLQco8Hs1oqTI 10 | 9RzXPd9tkH8BL7uML7Wlau6mLF06nLAwATdbiLlONkH5T1tFV1hby+7Jhn3AyQ0c 11 | XB5ki8p7F7fMRFkYuVwMzrlqdfOHhkd3eRd+LL6zkPdNqbvXgXMO8JaruCR4drEA 12 | BYRUR05cGCHEH+OA6g2RNSn+6YprJ1kqDXYoznp+wvEXe5HbjzonY+zkpgtkaBot 13 | 0n6dSNKvyoQu8Et0i0PSHojI97+s90zM1H1QeXlL223khzAF8uHzuwAkPKirHbWe 14 | ZZIVHV6FKrHsLEb8dxerqug2zQHipNOY/tNQ4idtLN+2383i1q6CynNMb736DB4K 15 | pkNB5pIf/k9dneUpsQRwV1k3SIalzVdaKb5vFLB9a9o73Yj7tu05Bftv19wycjWP 16 | qw3ymJjxrQ3Q1nIGxn9YaR68U/pnkJF3L7qIl+0DmYFFDV7WUn5k7B6hw1mtsVup 17 | Q8ew/DNod5yzyTrFLmp+3TVGYP4eHWpHGloa81cKfqP6cdAGjDO/qU7v1+d8UJtd 18 | qPNKKemK8BMWaCw6i81UCaGam6chWrPJrrhHR3pEnwIDAQABMA0GCSqGSIb3DQEB 19 | CwUAA4ICAQBd26I4OIQ/dIzW4zkZh01dBmjwGnbRKw4XsUKxqsEZLYfryWPQ66K/ 20 | XjGmrLOxo2Cyn2g9Ls5BF/KJWst6vQTLDlSw8xLWdUwfL2IGiBylhZL4Ejnr0Dxn 21 | gI9SkvclhGwcNqRh+/IPoYo47MeuBDx8AR+UepG+TF1beRkgYNjDt3t3Vc2JRXvk 22 | b7F4rKyy06SxWDZbnTH5zzejOwiN/kUEz8/k4UO4mHgJnTHl1OeoHElSPXLr+CQI 23 | L+AYjI578A35o+PLAVT6Z8EcZpHIt8S5V2b/xfBEkiGy8xsLMBMZ8FUGyGfkoOt3 24 | p6qHcVk94Kejxuj+ZUoXNyHxTar0Q1qo7hFwH/FBA0VYCug6INPyb5StU11AfulK 25 | 4lvEHCjRqpnZGXSVhmcJwsYdvw1x8dqKUkjRV5i8Au5Nfmrj1W/WdhcDTCQTc7wP 26 | B9v9GaGL6jBG88xq326UwYw3cq6g6bwFCwIAxj98XmyNx65gqzpTmNXQlPvfXJXn 27 | Vn6J3ALwFXeCPC8KiSyUT2wOmSrgKFbyQlft9B0oDl7HkM5EDVsHnqQX9zaeOoHt 28 | kPd9C3D6kl42YecxLxktvQBVGg3wrEEVj+p84+VxIqOFJg8M6GCwkEMkTbt/65Vc 29 | m73xxCb+tq6yDq88TSH0NuKLAVNaLx599E2R+wIrM3pMh/NftdRpAA== 30 | -----END CERTIFICATE----- 31 | -------------------------------------------------------------------------------- /networks/ca-nephos-local.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDLM4gPH5Y8gW8F 3 | TSi+nGm0614vd9xlDwTTff116vbUt2XiP17uycAr3ix70nrWsmhJysZrPWk31tJc 4 | LN5iqaavYn2iUGYxR+y3comaBdHxKZ8IaC+gz7dNLQco8Hs1oqTI9RzXPd9tkH8B 5 | L7uML7Wlau6mLF06nLAwATdbiLlONkH5T1tFV1hby+7Jhn3AyQ0cXB5ki8p7F7fM 6 | RFkYuVwMzrlqdfOHhkd3eRd+LL6zkPdNqbvXgXMO8JaruCR4drEABYRUR05cGCHE 7 | H+OA6g2RNSn+6YprJ1kqDXYoznp+wvEXe5HbjzonY+zkpgtkaBot0n6dSNKvyoQu 8 | 8Et0i0PSHojI97+s90zM1H1QeXlL223khzAF8uHzuwAkPKirHbWeZZIVHV6FKrHs 9 | LEb8dxerqug2zQHipNOY/tNQ4idtLN+2383i1q6CynNMb736DB4KpkNB5pIf/k9d 10 | neUpsQRwV1k3SIalzVdaKb5vFLB9a9o73Yj7tu05Bftv19wycjWPqw3ymJjxrQ3Q 11 | 1nIGxn9YaR68U/pnkJF3L7qIl+0DmYFFDV7WUn5k7B6hw1mtsVupQ8ew/DNod5yz 12 | yTrFLmp+3TVGYP4eHWpHGloa81cKfqP6cdAGjDO/qU7v1+d8UJtdqPNKKemK8BMW 13 | aCw6i81UCaGam6chWrPJrrhHR3pEnwIDAQABAoICAQC76MqQmbg3+cXuFB1yr3g9 14 | W088/E2uksX2wlLqgb+ClJq+L7YdgiqZJpriYpFAC7nPS5pTv3WWsqzTmpaIjLRd 15 | Cep+eHUYa5fnSR8gdPNhXgj4IEizIIM09kfhvwlhXyedwrXKAiikzHP2Z+q6x7/l 16 | 2c4Zk3sfYZi4u9cyaWxbfGdkItOXex91vE/0ws91wTbDnBhrCK7VR+irGCWAbKFI 17 | Ir9qSOtMz4s1W6CzCLPNoGhhxG2167q3/RqIcAAh+jjSeUeXHF3FYnePQ6ICK+8h 18 | 8hUE63kagEe/ULWNJOw4XxPOkHIgpQG1PX3rL/XnAzH6X+bAkoBrGg9EzOgqPUMX 19 | 3M6fNIUKF1pu5An7JArp13HWLufDya3L4+pBlyzv3CkfSqNlyoTllJHdq8gDi1PJ 20 | kzrIFqqbNEoZ0NBnUtl2AeUn6B4j+SfZdic8JVneqssPjfyd65+S9qPcniXMtWUS 21 | LRnDJZS5Lz1+kIbcD8UskGASCbinA+FF1MAGMYlJRGd5cuFJMHlnoTElQgPg3jtt 22 | xAqMwPcfpJDqqRm2/Cp1RKzaYXK08f4qzWnrX6YctZGCszz7YURG0o7uHJ9ttGDU 23 | 
gF0zyUSSR+vbQcSB+qSnBrxYbKN2VDQs3hMmsBYK9jmhl+xig7chxDFDMVXSr2Kq 24 | UlQr2VqYa3wZkOeHA1dqgQKCAQEA7+4yvCFMtDfLUmY5Ppkxvd9kB8jRf4HlVbF3 25 | R6aHmsNBO3QN9I/bhMvlTs3xKeEV6NRKd5DwOG3klX6WgyCeb8d1wVmobuls5oNp 26 | 7dnX6Wmb17a/BMUB6TuFeu90xvD+LnfLgEJFrNK9K6By4BNxhKGg1XvSc6iTAeIg 27 | P/x/dMsP+mACj/KIHn9BwLhjS6xiZe5HXBONLIMWAsomYXCKhlcH+QRqxwoufxRs 28 | ZGadAbNLhk8mWpPaMf5KhnMF1xetZEJkg8enD+VprQDkVvD51HGcYzQuN4/DQdqR 29 | W+HPOowDZGV9yucNYRbrLGpNmt945yu9PWvDvT2hRL6OMGtVwQKCAQEA2M+U/7Uk 30 | JcgbCEF6gzQ4gH6IRE7+aQUBAJ4RtTW+e+JYJbeVe0Ifg765mqUkYOVgqVPVAsCE 31 | Mlg0QwjayAz3hUTJD6k+fjV6IK0hZ9UwQPvvvgTz4imUe3LdHeBgL+NejQIdQ4Wa 32 | yXG+wW2V2oI9NnU05gChJNqGKpkCVjijTHzPjrTSlJDV17tAf+AkOqV+OWQqJ1Ny 33 | fhufANRi2NHaMbea/fgu8XcBbaHmbL5FbNG5EvjAklvhqPuUKIEV8rdrV4vI4HFS 34 | K5tDs7bPuXqQ4GLFOMC5vtpPvE/ZRvxf34mPX+4iirR9q9TD3RQ9cDysmiw9BSCd 35 | lp3JtLuVP9XyXwKCAQEAmGzoWHKDS17C0ddVeLpUSn74CqX+yhWYv6cd0ywtrBlt 36 | gLrK/trBqAzs4fKdynBPvfnYUTuIESFnfb3NGwMHBJZf7fTqt4OeBpfoHQ4vR/8q 37 | 3ZEgq4QJIcYJl782ngiOQrEiXJo9r3y2Z1fogwh3Lr6L8fRXD9lBg4nH2T6tLmKv 38 | BO4n0pFO/qjz+d5qXrHmVUFanllhb6zbt+v7FB4uMwhpbT7sIwUJ4vvznJlAF8tz 39 | KKFyCUsy7F3TtLSwOiS1qCMxvjKBpOynBRyy87Tl0pxWsQN+dRflARo9DHVZdKVs 40 | pkR/9P5yloK75xylrsVYBuzXrYSf9sKHiKfSp1oZQQKCAQBahrIL4rJpJBhFZjGl 41 | PtSosixreSA48WHtxBJYu0wBpgxwEytS/9TwyV3t+C4sVxn5GYvDTmbkLngD1Vqk 42 | VOW0u3fIFj6FrAaGRkPAev9e4tLDYIYEVKAuu1Mf+mOvGxyLe+qEGvD6U4f2c68y 43 | QVcVufKQZoriPGvwtVqIbbWhZiuzxkR2y9K99O2Fdy0pX5hIlk4ThYA0r4vAaO2s 44 | 4lJcl/EbQdk38RyjjgoMNZX/TcIc7chaVArw57qA/wUPBw7GyxLgw/AvTPEDxL0P 45 | EOIVDqiSfcYQrk7ErgJm5VGSoHlA8MlzaNFgQcJWOPE+P9dBuF7yGSAzl0Z1NC2p 46 | Lce5AoIBABbw3qHElQSCwW/3gZ4iDfKx0yFa+M03e4HzOpUXhK1PQcZ7PBFH8m84 47 | GlwAhiFB0JQZGSznnYvpTZkhJ8P1Zlf6XXW7XCnwU7ayvWdiU5iUvbVaa1FTcPkH 48 | t48CLW97lHgojQ7kpGGIkItCivShNvkT66L5V0Ybpz8wdbonml1b5/VbYPaY/IrZ 49 | x3SlwEwyAEBq4dxZV6yOttDlCsIavBe2m3GyuWyHApzssUf9ZHl4HNQi4bhqXNEW 50 | H0ciDhhAxoQjnr+EIwv9jRcAdxwm+irlLF2mGPYvgbw0nQ3r2IXrNRs45jefWq2j 51 | USkaxbJJb6NjUjD+lbgfeFLW5gMeTvM= 52 | -----END PRIVATE KEY----- 53 | -------------------------------------------------------------------------------- /networks/ca-nephos-local.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFNDCCAxwCCQCYygRMy/3XDzANBgkqhkiG9w0BAQsFADBcMQswCQYDVQQGEwJJ 3 | RTEPMA0GA1UECAwGRHVibGluMQ8wDQYDVQQHDAZEdWJsaW4xETAPBgNVBAoMCEFJ 4 | RDpUZWNoMRgwFgYDVQQDDA9jYS5uZXBob3MubG9jYWwwHhcNMTkwMjI2MDQ1MTM5 5 | WhcNMjkwMjIzMDQ1MTM5WjBcMQswCQYDVQQGEwJJRTEPMA0GA1UECAwGRHVibGlu 6 | MQ8wDQYDVQQHDAZEdWJsaW4xETAPBgNVBAoMCEFJRDpUZWNoMRgwFgYDVQQDDA9j 7 | YS5uZXBob3MubG9jYWwwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDL 8 | M4gPH5Y8gW8FTSi+nGm0614vd9xlDwTTff116vbUt2XiP17uycAr3ix70nrWsmhJ 9 | ysZrPWk31tJcLN5iqaavYn2iUGYxR+y3comaBdHxKZ8IaC+gz7dNLQco8Hs1oqTI 10 | 9RzXPd9tkH8BL7uML7Wlau6mLF06nLAwATdbiLlONkH5T1tFV1hby+7Jhn3AyQ0c 11 | XB5ki8p7F7fMRFkYuVwMzrlqdfOHhkd3eRd+LL6zkPdNqbvXgXMO8JaruCR4drEA 12 | BYRUR05cGCHEH+OA6g2RNSn+6YprJ1kqDXYoznp+wvEXe5HbjzonY+zkpgtkaBot 13 | 0n6dSNKvyoQu8Et0i0PSHojI97+s90zM1H1QeXlL223khzAF8uHzuwAkPKirHbWe 14 | ZZIVHV6FKrHsLEb8dxerqug2zQHipNOY/tNQ4idtLN+2383i1q6CynNMb736DB4K 15 | pkNB5pIf/k9dneUpsQRwV1k3SIalzVdaKb5vFLB9a9o73Yj7tu05Bftv19wycjWP 16 | qw3ymJjxrQ3Q1nIGxn9YaR68U/pnkJF3L7qIl+0DmYFFDV7WUn5k7B6hw1mtsVup 17 | Q8ew/DNod5yzyTrFLmp+3TVGYP4eHWpHGloa81cKfqP6cdAGjDO/qU7v1+d8UJtd 18 | qPNKKemK8BMWaCw6i81UCaGam6chWrPJrrhHR3pEnwIDAQABMA0GCSqGSIb3DQEB 19 | CwUAA4ICAQBd26I4OIQ/dIzW4zkZh01dBmjwGnbRKw4XsUKxqsEZLYfryWPQ66K/ 20 | 
XjGmrLOxo2Cyn2g9Ls5BF/KJWst6vQTLDlSw8xLWdUwfL2IGiBylhZL4Ejnr0Dxn 21 | gI9SkvclhGwcNqRh+/IPoYo47MeuBDx8AR+UepG+TF1beRkgYNjDt3t3Vc2JRXvk 22 | b7F4rKyy06SxWDZbnTH5zzejOwiN/kUEz8/k4UO4mHgJnTHl1OeoHElSPXLr+CQI 23 | L+AYjI578A35o+PLAVT6Z8EcZpHIt8S5V2b/xfBEkiGy8xsLMBMZ8FUGyGfkoOt3 24 | p6qHcVk94Kejxuj+ZUoXNyHxTar0Q1qo7hFwH/FBA0VYCug6INPyb5StU11AfulK 25 | 4lvEHCjRqpnZGXSVhmcJwsYdvw1x8dqKUkjRV5i8Au5Nfmrj1W/WdhcDTCQTc7wP 26 | B9v9GaGL6jBG88xq326UwYw3cq6g6bwFCwIAxj98XmyNx65gqzpTmNXQlPvfXJXn 27 | Vn6J3ALwFXeCPC8KiSyUT2wOmSrgKFbyQlft9B0oDl7HkM5EDVsHnqQX9zaeOoHt 28 | kPd9C3D6kl42YecxLxktvQBVGg3wrEEVj+p84+VxIqOFJg8M6GCwkEMkTbt/65Vc 29 | m73xxCb+tq6yDq88TSH0NuKLAVNaLx599E2R+wIrM3pMh/NftdRpAA== 30 | -----END CERTIFICATE----- 31 | -------------------------------------------------------------------------------- /networks/qa/helm_values/AlphaMSP/hlf-ord/ord1.yaml: -------------------------------------------------------------------------------- 1 | 2 | persistence: 3 | accessMode: ReadWriteOnce 4 | size: 1Gi 5 | 6 | ord: 7 | type: Solo 8 | mspID: AlphaMSP 9 | 10 | secrets: 11 | ord: 12 | cert: hlf--ord1-idcert 13 | key: hlf--ord1-idkey 14 | caCert: hlf--alphaadmin-cacert 15 | genesis: hlf--genesis 16 | adminCert: hlf--alphaadmin-idcert 17 | 18 | affinity: 19 | podAntiAffinity: 20 | preferredDuringSchedulingIgnoredDuringExecution: 21 | - weight: 95 22 | podAffinityTerm: 23 | topologyKey: "kubernetes.io/hostname" 24 | labelSelector: 25 | matchLabels: 26 | app: hlf-ord 27 | -------------------------------------------------------------------------------- /networks/qa/helm_values/BetaMSP/hlf-couchdb/cdb-beta-peer1.yaml: -------------------------------------------------------------------------------- 1 | image: 2 | tag: 0.4.10 3 | 4 | persistence: 5 | size: 1Gi 6 | 7 | affinity: 8 | podAntiAffinity: 9 | preferredDuringSchedulingIgnoredDuringExecution: 10 | - weight: 95 11 | podAffinityTerm: 12 | topologyKey: "kubernetes.io/hostname" 13 | labelSelector: 14 | matchLabels: 15 | app: hlf-couchdb 16 | -------------------------------------------------------------------------------- /networks/qa/helm_values/BetaMSP/hlf-peer/beta-peer1.yaml: -------------------------------------------------------------------------------- 1 | 2 | persistence: 3 | accessMode: ReadWriteOnce 4 | size: 1Gi 5 | 6 | peer: 7 | databaseType: CouchDB 8 | couchdbInstance: cdb-beta-peer1 9 | mspID: BetaMSP 10 | 11 | secrets: 12 | peer: 13 | cert: hlf--beta-peer1-idcert 14 | key: hlf--beta-peer1-idkey 15 | caCert: hlf--betaadmin-cacert 16 | channels: 17 | - hlf--foochannel 18 | adminCert: hlf--betaadmin-idcert 19 | adminKey: hlf--betaadmin-idkey 20 | 21 | affinity: 22 | podAntiAffinity: 23 | preferredDuringSchedulingIgnoredDuringExecution: 24 | - weight: 95 25 | podAffinityTerm: 26 | topologyKey: "kubernetes.io/hostname" 27 | labelSelector: 28 | matchLabels: 29 | app: hlf-peer 30 |
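## Illustrative usage (an assumption, not part of the original values file): with the
## Helm v2 client used elsewhere in this repository, an override file like the one
## above would typically be applied at install time, e.g.:
## helm install ./hlf-peer --name beta-peer1 --namespace beta -f ./networks/qa/helm_values/BetaMSP/hlf-peer/beta-peer1.yaml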
-------------------------------------------------------------------------------- /networks/qa/helm_values/hl-composer/hlc.yaml: -------------------------------------------------------------------------------- 1 | persistence: 2 | enabled: true 3 | # Required since multiple containers need to access the same file system 4 | accessMode: ReadWriteMany 5 | size: 1Gi 6 | # e.g. Custom Azure storage class 7 | # Using mountOptions "0750" for dir_mode/file_mode and "1000" for uid and gid 8 | storageClass: "azurefile0permissive" 9 | 10 | cli: 11 | image: 12 | tag: 0.20.0 13 | secrets: 14 | # This should contain the packaged .bna network file. 15 | blockchainNetwork: hlc--bna 16 | adminCert: hlf--peeradmin-idcert 17 | adminKey: hlf--peeradmin-idkey 18 | hlcConnection: hlc--connection 19 | 20 | rest: 21 | image: 22 | tag: 0.20.0 23 | # Ingress for Composer REST 24 | ingress: 25 | enabled: true 26 | annotations: 27 | kubernetes.io/ingress.class: nginx 28 | certmanager.k8s.io/cluster-issuer: "letsencrypt-production" 29 | path: / 30 | hosts: 31 | - hlc-rest.nephos.aidtech-test.xyz 32 | tls: 33 | - secretName: hlc-rest--tls 34 | hosts: 35 | - hlc-rest.nephos.aidtech-test.xyz 36 | 37 | config: 38 | # Composer REST server API key 39 | #apiKey: 40 | # Card for network connection 41 | composerRestServerCard: peeradmin@test-network 42 | 43 | pg: 44 | image: 45 | tag: 0.20.0 46 | # Ingress for Composer Playground 47 | ingress: 48 | enabled: true 49 | annotations: 50 | kubernetes.io/ingress.class: nginx 51 | certmanager.k8s.io/cluster-issuer: "letsencrypt-production" 52 | path: / 53 | hosts: 54 | - hlc-pg.nephos.aidtech-test.xyz 55 | tls: 56 | - secretName: hl-composer-pg--tls 57 | hosts: 58 | - hlc-pg.nephos.aidtech-test.xyz 59 | -------------------------------------------------------------------------------- /networks/qa/helm_values/hlf-ca/ca.yaml: -------------------------------------------------------------------------------- 1 | 2 | 3 | ingress: 4 | enabled: true 5 | annotations: 6 | kubernetes.io/ingress.class: nginx 7 | # TODO: When working locally, we may wish to comment out the cluster-issuer 8 | #certmanager.k8s.io/cluster-issuer: "letsencrypt-production" 9 | path: / 10 | hosts: 11 | # TODO: Change this to your Domain Name if not working locally 12 | - ca.nephos.local 13 | tls: 14 | - secretName: ca--tls 15 | hosts: 16 | # TODO: Change this to your Domain Name if not working locally 17 | - ca.nephos.local 18 | 19 | persistence: 20 | accessMode: ReadWriteOnce 21 | size: 1Gi 22 | 23 | caName: ca 24 | 25 | externalDatabase: 26 | type: postgres 27 | host: ca-pg-postgresql 28 | username: postgres 29 | # password: 30 | database: fabric_ca 31 | port: "5432" 32 | 33 | config: 34 | hlfToolsVersion: 1.3.0 35 | csr: 36 | names: 37 | c: IE 38 | st: Dublin 39 | l: 40 | o: "AID:Tech" 41 | ou: Blockchain 42 | affiliations: 43 | aidtech: [] 44 | 45 | affinity: 46 | podAntiAffinity: 47 | preferredDuringSchedulingIgnoredDuringExecution: 48 | - weight: 95 49 | podAffinityTerm: 50 | topologyKey: "kubernetes.io/hostname" 51 | labelSelector: 52 | matchLabels: 53 | app: hlf-ca 54 | podAffinity: 55 | requiredDuringSchedulingIgnoredDuringExecution: 56 | - labelSelector: 57 | matchLabels: 58 | app: postgresql 59 | release: ca-pg 60 | topologyKey: "kubernetes.io/hostname" 61 | -------------------------------------------------------------------------------- /networks/qa/helm_values/postgres-ca/ca-pg.yaml: -------------------------------------------------------------------------------- 1 | imageTag: "9.6.2" 2 | 3 | # postgresPassword: 4 | postgresDatabase: fabric_ca 5 | 6 | persistence: 7 | enabled: true 8 | size: 1Gi 9 | 10 | affinity: 11 | podAntiAffinity: 12 | requiredDuringSchedulingIgnoredDuringExecution: 13 | - topologyKey: "kubernetes.io/hostname" 14 | labelSelector: 15 | matchLabels: 16 | app: postgresql 17 |
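## Illustrative sketch (assumptions, not part of the original values files): the CA
## chart's externalDatabase above points at this PostgreSQL release, so with Helm v2
## the pair might be installed database-first, e.g.:
## helm install stable/postgresql --name ca-pg --namespace cas -f ./networks/qa/helm_values/postgres-ca/ca-pg.yaml
## helm install ./hlf-ca --name ca --namespace cas -f ./networks/qa/helm_values/hlf-ca/ca.yaml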
-------------------------------------------------------------------------------- /networks/qa/nephos_config.yaml: -------------------------------------------------------------------------------- 1 | core: 2 | # Comment out to make it work on your cluster 3 | cluster: minikube 4 | # AT_Charts directory holding charts 5 | chart_repo: ./ 6 | # Directory where we hold configtx.yaml 7 | dir_config: ./networks/qa/config 8 | # Directory where we hold: 9 | # 1) cryptographic material 10 | # 2) genesis block and 11 | # 3) channel transaction 12 | dir_crypto: ./networks/qa/crypto 13 | # Directory where the Helm Chart values reside 14 | dir_values: ./networks/qa/helm_values 15 | cas: 16 | ca: 17 | namespace: cas 18 | # If testing locally on Minikube, we use our own self-signed certificate's PEM 19 | tls_cert: ./networks/ca-nephos-local.pem 20 | # Alternatively, if testing on a cluster with proper DNS and cert-manager... 21 | #tls_cert: ./nephos/extras/Lets_Encrypt_Authority_X3.pem 22 | ordering: 23 | secret_genesis: hlf--genesis 24 | msps: 25 | AlphaMSP: 26 | ca: ca 27 | name: AlphaMSP 28 | namespace: alpha 29 | org_admin: alphaadmin 30 | # org_passwd: # Set implicitly 31 | orderers: 32 | domain: alpha.svc.cluster.local 33 | nodes: 34 | ord1: {} 35 | BetaMSP: 36 | ca: ca 37 | name: BetaMSP 38 | namespace: beta 39 | org_admin: betaadmin 40 | # org_passwd: # Set implicitly 41 | orderers: {} 42 | peers: 43 | domain: beta.svc.cluster.local 44 | nodes: 45 | beta-peer1: {} 46 | channels: 47 | foochannel: 48 | msps: 49 | - BetaMSP 50 | channel_name: foochannel 51 | channel_profile: "FooChannel" 52 | secret_channel: hlf--foochannel 53 | # This is only used when we implement a Composer network 54 | composer: 55 | name: hlc 56 | secret_bna: hlc--bna 57 | secret_connection: hlc--connection 58 | # You can specify a particular version of a chart for each chart used, or use the latest by default 59 | versions: 60 | postgresql: 61 | hlf-ca: 62 | kafka: 63 | hlf-ord: 64 | hlf-couchdb: 65 | hlf-peer: 66 | hl-composer: 67 | 68 | -------------------------------------------------------------------------------- /package.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CHART_LIST=(hl-composer hlf-ca hlf-couchdb hlf-ord hlf-peer) 4 | 5 | ########## 6 | # Helm # 7 | ########## 8 | 9 | # Initialise helm on client side only 10 | helm init --client-only 11 | 12 | for CHART in ${CHART_LIST[*]} 13 | do 14 | HELM_LINT=$(helm lint ./${CHART} | grep "no failures") 15 | 16 | # Build the package only if Helm lint passes 17 | if [[ -z "$HELM_LINT" ]] 18 | then 19 | echo "Helm linting failed, cannot build Helm package" 20 | exit 126 21 | else 22 | echo "Helm linting succeeded for $CHART" 23 | helm dependency build ./${CHART} 24 | helm package ./${CHART} 25 | fi 26 | done 27 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | adal==1.2.1 2 | asn1crypto==0.24.0 3 | blessings==1.7 4 | cachetools==3.1.0 5 | certifi==2018.11.29 6 | cffi==1.12.1 7 | chardet==3.0.4 8 | Click==7.0 9 | cryptography==2.5 10 | google-auth==1.6.3 11 | idna==2.8 12 | kubernetes==8.0.1 13 | nephos 14 | oauthlib==3.0.1 15 | pyasn1==0.4.5 16 | pyasn1-modules==0.2.4 17 | pycparser==2.19 18 | Pygments==2.3.1 19 | PyJWT==1.7.1 20 | python-dateutil==2.8.0 21 | PyYAML==3.13 22 | requests==2.21.0 23 | requests-oauthlib==1.2.0 24 | rsa==4.1 25 | six==1.12.0 26 | urllib3==1.24.2 27 | websocket-client==0.54.0 28 |
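# Illustrative local setup (an assumption, not part of the original file; presumes
# Python 3.6+ in a virtualenv, with pytest installed separately):
# pip install -r requirements.txt
# python -m pytest integration/test_qa.py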
-------------------------------------------------------------------------------- /sonar-project.properties: -------------------------------------------------------------------------------- 1 | # Must be unique in a given SonarQube instance 2 | sonar.projectKey=aidtechnology_at-charts 3 | # Name and version displayed in the SonarQube UI. 4 | sonar.projectName=AID:Tech Charts 5 | sonar.projectVersion=1.0 6 | 7 | # Path is relative to the sonar-project.properties file. Replace "\" by "/" on Windows. 8 | # Optional if sonar.modules is set. 9 | sonar.sources=. 10 | 11 | # Encoding of the source code. By default, the system encoding is used. 12 | #sonar.sourceEncoding=UTF-8 13 | -------------------------------------------------------------------------------- /stable/prometheus/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /stable/prometheus/Chart.yaml: -------------------------------------------------------------------------------- 1 | name: prometheus 2 | version: 8.9.1 3 | appVersion: 2.8.0 4 | description: Prometheus is a monitoring system and time series database. 5 | home: https://prometheus.io/ 6 | icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png 7 | sources: 8 | - https://github.com/prometheus/alertmanager 9 | - https://github.com/prometheus/prometheus 10 | - https://github.com/prometheus/pushgateway 11 | - https://github.com/prometheus/node_exporter 12 | - https://github.com/kubernetes/kube-state-metrics 13 | maintainers: 14 | - name: mgoodness 15 | email: mgoodness@gmail.com 16 | - name: gianrubio 17 | email: gianrubio@gmail.com 18 | engine: gotpl 19 | tillerVersion: ">=2.8.0" 20 | -------------------------------------------------------------------------------- /stable/prometheus/OWNERS: -------------------------------------------------------------------------------- 1 | approvers: 2 | - mgoodness 3 | - gianrubio 4 | reviewers: 5 | - mgoodness 6 | - gianrubio 7 | -------------------------------------------------------------------------------- /stable/prometheus/requirements.yaml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: prometheus-blackbox-exporter 3 | version: 0.2.0 4 | repository: https://kubernetes-charts.storage.googleapis.com -------------------------------------------------------------------------------- /stable/prometheus/templates/alertmanager-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.alertmanager.enabled (and (empty .Values.alertmanager.configMapOverrideName) (empty .Values.alertmanager.configFromSecret)) -}} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | labels: 6 | {{- include "prometheus.alertmanager.labels" . | nindent 4 }} 7 | name: {{ template "prometheus.alertmanager.fullname" . }} 8 | data: 9 | {{- $root := .
-}} 10 | {{- range $key, $value := .Values.alertmanagerFiles }} 11 | {{ $key }}: | 12 | {{ toYaml $value | default "{}" | indent 4 }} 13 | {{- end -}} 14 | {{- end -}} 15 | -------------------------------------------------------------------------------- /stable/prometheus/templates/alertmanager-ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.alertmanager.enabled .Values.alertmanager.ingress.enabled -}} 2 | {{- $releaseName := .Release.Name -}} 3 | {{- $serviceName := include "prometheus.alertmanager.fullname" . }} 4 | {{- $servicePort := .Values.alertmanager.service.servicePort -}} 5 | apiVersion: extensions/v1beta1 6 | kind: Ingress 7 | metadata: 8 | {{- if .Values.alertmanager.ingress.annotations }} 9 | annotations: 10 | {{ toYaml .Values.alertmanager.ingress.annotations | indent 4 }} 11 | {{- end }} 12 | labels: 13 | {{- include "prometheus.alertmanager.labels" . | nindent 4 }} 14 | {{- range $key, $value := .Values.alertmanager.ingress.extraLabels }} 15 | {{ $key }}: {{ $value }} 16 | {{- end }} 17 | name: {{ template "prometheus.alertmanager.fullname" . }} 18 | spec: 19 | rules: 20 | {{- range .Values.alertmanager.ingress.hosts }} 21 | {{- $url := splitList "/" . }} 22 | - host: {{ first $url }} 23 | http: 24 | paths: 25 | - path: /{{ rest $url | join "/" }} 26 | backend: 27 | serviceName: {{ $serviceName }} 28 | servicePort: {{ $servicePort }} 29 | {{- end -}} 30 | {{- if .Values.alertmanager.ingress.tls }} 31 | tls: 32 | {{ toYaml .Values.alertmanager.ingress.tls | indent 4 }} 33 | {{- end -}} 34 | {{- end -}} 35 | -------------------------------------------------------------------------------- /stable/prometheus/templates/alertmanager-networkpolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.alertmanager.enabled .Values.networkPolicy.enabled -}} 2 | apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} 3 | kind: NetworkPolicy 4 | metadata: 5 | name: {{ template "prometheus.alertmanager.fullname" . }} 6 | labels: 7 | {{- include "prometheus.alertmanager.labels" . | nindent 4 }} 8 | spec: 9 | podSelector: 10 | matchLabels: 11 | {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }} 12 | ingress: 13 | - from: 14 | - podSelector: 15 | matchLabels: 16 | {{- include "prometheus.server.matchLabels" . | nindent 12 }} 17 | - ports: 18 | - port: 9093 19 | {{- end -}} 20 | -------------------------------------------------------------------------------- /stable/prometheus/templates/alertmanager-pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if not .Values.alertmanager.statefulSet.enabled -}} 2 | {{- if and .Values.alertmanager.enabled .Values.alertmanager.persistentVolume.enabled -}} 3 | {{- if not .Values.alertmanager.persistentVolume.existingClaim -}} 4 | apiVersion: v1 5 | kind: PersistentVolumeClaim 6 | metadata: 7 | {{- if .Values.alertmanager.persistentVolume.annotations }} 8 | annotations: 9 | {{ toYaml .Values.alertmanager.persistentVolume.annotations | indent 4 }} 10 | {{- end }} 11 | labels: 12 | {{- include "prometheus.alertmanager.labels" . | nindent 4 }} 13 | name: {{ template "prometheus.alertmanager.fullname" . 
}} 14 | spec: 15 | accessModes: 16 | {{ toYaml .Values.alertmanager.persistentVolume.accessModes | indent 4 }} 17 | {{- if .Values.alertmanager.persistentVolume.storageClass }} 18 | {{- if (eq "-" .Values.alertmanager.persistentVolume.storageClass) }} 19 | storageClassName: "" 20 | {{- else }} 21 | storageClassName: "{{ .Values.alertmanager.persistentVolume.storageClass }}" 22 | {{- end }} 23 | {{- end }} 24 | resources: 25 | requests: 26 | storage: "{{ .Values.alertmanager.persistentVolume.size }}" 27 | {{- end -}} 28 | {{- end -}} 29 | {{- end -}} 30 | -------------------------------------------------------------------------------- /stable/prometheus/templates/alertmanager-service-headless.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.alertmanager.enabled .Values.alertmanager.statefulSet.enabled -}} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | {{- if .Values.alertmanager.statefulSet.headless.annotations }} 6 | annotations: 7 | {{ toYaml .Values.alertmanager.statefulSet.headless.annotations | indent 4 }} 8 | {{- end }} 9 | labels: 10 | {{- include "prometheus.alertmanager.labels" . | nindent 4 }} 11 | {{- if .Values.alertmanager.statefulSet.headless.labels }} 12 | {{ toYaml .Values.alertmanager.statefulSet.headless.labels | indent 4 }} 13 | {{- end }} 14 | name: {{ template "prometheus.alertmanager.fullname" . }}-headless 15 | spec: 16 | clusterIP: None 17 | ports: 18 | - name: http 19 | port: {{ .Values.alertmanager.statefulSet.headless.servicePort }} 20 | protocol: TCP 21 | targetPort: 9093 22 | {{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }} 23 | - name: meshpeer 24 | port: 6783 25 | protocol: TCP 26 | targetPort: 6783 27 | {{- end }} 28 | selector: 29 | {{- include "prometheus.alertmanager.matchLabels" . | nindent 4 }} 30 | {{- end }} 31 | -------------------------------------------------------------------------------- /stable/prometheus/templates/alertmanager-service.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.alertmanager.enabled -}} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | {{- if .Values.alertmanager.service.annotations }} 6 | annotations: 7 | {{ toYaml .Values.alertmanager.service.annotations | indent 4 }} 8 | {{- end }} 9 | labels: 10 | {{- include "prometheus.alertmanager.labels" . | nindent 4 }} 11 | {{- if .Values.alertmanager.service.labels }} 12 | {{ toYaml .Values.alertmanager.service.labels | indent 4 }} 13 | {{- end }} 14 | name: {{ template "prometheus.alertmanager.fullname" . 
}} 15 | spec: 16 | {{- if .Values.alertmanager.service.clusterIP }} 17 | clusterIP: {{ .Values.alertmanager.service.clusterIP }} 18 | {{- end }} 19 | {{- if .Values.alertmanager.service.externalIPs }} 20 | externalIPs: 21 | {{ toYaml .Values.alertmanager.service.externalIPs | indent 4 }} 22 | {{- end }} 23 | {{- if .Values.alertmanager.service.loadBalancerIP }} 24 | loadBalancerIP: {{ .Values.alertmanager.service.loadBalancerIP }} 25 | {{- end }} 26 | {{- if .Values.alertmanager.service.loadBalancerSourceRanges }} 27 | loadBalancerSourceRanges: 28 | {{- range $cidr := .Values.alertmanager.service.loadBalancerSourceRanges }} 29 | - {{ $cidr }} 30 | {{- end }} 31 | {{- end }} 32 | ports: 33 | - name: http 34 | port: {{ .Values.alertmanager.service.servicePort }} 35 | protocol: TCP 36 | targetPort: 9093 37 | {{- if .Values.alertmanager.service.nodePort }} 38 | nodePort: {{ .Values.alertmanager.service.nodePort }} 39 | {{- end }} 40 | {{- if .Values.alertmanager.service.enableMeshPeer }} 41 | - name: meshpeer 42 | port: 6783 43 | protocol: TCP 44 | targetPort: 6783 45 | {{- end }} 46 | selector: 47 | {{- include "prometheus.alertmanager.matchLabels" . | nindent 4 }} 48 | type: "{{ .Values.alertmanager.service.type }}" 49 | {{- end }} 50 | -------------------------------------------------------------------------------- /stable/prometheus/templates/alertmanager-serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.alertmanager.enabled .Values.serviceAccounts.alertmanager.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | {{- include "prometheus.alertmanager.labels" . | nindent 4 }} 7 | name: {{ template "prometheus.serviceAccountName.alertmanager" . }} 8 | {{- end -}} 9 | -------------------------------------------------------------------------------- /stable/prometheus/templates/kube-state-metrics-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeStateMetrics.enabled .Values.rbac.create -}} 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | {{- include "prometheus.kubeStateMetrics.labels" . | nindent 4 }} 7 | name: {{ template "prometheus.kubeStateMetrics.fullname" . 
}} 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - namespaces 13 | - nodes 14 | - persistentvolumeclaims 15 | - pods 16 | - services 17 | - resourcequotas 18 | - replicationcontrollers 19 | - limitranges 20 | - persistentvolumeclaims 21 | - persistentvolumes 22 | - endpoints 23 | - secrets 24 | - configmaps 25 | verbs: 26 | - list 27 | - watch 28 | - apiGroups: 29 | - extensions 30 | resources: 31 | - daemonsets 32 | - deployments 33 | - replicasets 34 | verbs: 35 | - list 36 | - watch 37 | - apiGroups: 38 | - apps 39 | resources: 40 | - statefulsets 41 | verbs: 42 | - get 43 | - list 44 | - watch 45 | - apiGroups: 46 | - batch 47 | resources: 48 | - cronjobs 49 | - jobs 50 | verbs: 51 | - list 52 | - watch 53 | - apiGroups: 54 | - autoscaling 55 | resources: 56 | - horizontalpodautoscalers 57 | verbs: 58 | - list 59 | - watch 60 | - apiGroups: 61 | - policy 62 | resources: 63 | - poddisruptionbudgets 64 | verbs: 65 | - list 66 | - watch 67 | {{- end }} 68 | -------------------------------------------------------------------------------- /stable/prometheus/templates/kube-state-metrics-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeStateMetrics.enabled .Values.rbac.create -}} 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | labels: 6 | {{- include "prometheus.kubeStateMetrics.labels" . | nindent 4 }} 7 | name: {{ template "prometheus.kubeStateMetrics.fullname" . }} 8 | subjects: 9 | - kind: ServiceAccount 10 | name: {{ template "prometheus.serviceAccountName.kubeStateMetrics" . }} 11 | namespace: {{ .Release.Namespace }} 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: {{ template "prometheus.kubeStateMetrics.fullname" . }} 16 | {{- end -}} 17 | -------------------------------------------------------------------------------- /stable/prometheus/templates/kube-state-metrics-deployment.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.kubeStateMetrics.enabled -}} 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | {{- if .Values.kubeStateMetrics.deploymentAnnotations }} 6 | annotations: 7 | {{ toYaml .Values.kubeStateMetrics.deploymentAnnotations | indent 4 }} 8 | {{- end }} 9 | labels: 10 | {{- include "prometheus.kubeStateMetrics.labels" . | nindent 4 }} 11 | name: {{ template "prometheus.kubeStateMetrics.fullname" . }} 12 | spec: 13 | selector: 14 | matchLabels: 15 | {{- include "prometheus.kubeStateMetrics.matchLabels" . | nindent 6 }} 16 | replicas: {{ .Values.kubeStateMetrics.replicaCount }} 17 | template: 18 | metadata: 19 | {{- if .Values.kubeStateMetrics.podAnnotations }} 20 | annotations: 21 | {{ toYaml .Values.kubeStateMetrics.podAnnotations | indent 8 }} 22 | {{- end }} 23 | labels: 24 | {{- include "prometheus.kubeStateMetrics.labels" . | nindent 8 }} 25 | {{- if .Values.kubeStateMetrics.pod.labels }} 26 | {{ toYaml .Values.kubeStateMetrics.pod.labels | indent 8 }} 27 | {{- end }} 28 | spec: 29 | serviceAccountName: {{ template "prometheus.serviceAccountName.kubeStateMetrics" . }} 30 | {{- if .Values.kubeStateMetrics.priorityClassName }} 31 | priorityClassName: "{{ .Values.kubeStateMetrics.priorityClassName }}" 32 | {{- end }} 33 | containers: 34 | - name: {{ template "prometheus.name" . 
}}-{{ .Values.kubeStateMetrics.name }} 35 | image: "{{ .Values.kubeStateMetrics.image.repository }}:{{ .Values.kubeStateMetrics.image.tag }}" 36 | imagePullPolicy: "{{ .Values.kubeStateMetrics.image.pullPolicy }}" 37 | {{- if .Values.kubeStateMetrics.args }} 38 | args: 39 | {{- range $key, $value := .Values.kubeStateMetrics.args }} 40 | - --{{ $key }}={{ $value }} 41 | {{- end }} 42 | {{- end }} 43 | ports: 44 | - name: metrics 45 | containerPort: 8080 46 | resources: 47 | {{ toYaml .Values.kubeStateMetrics.resources | indent 12 }} 48 | {{- if .Values.imagePullSecrets }} 49 | imagePullSecrets: 50 | {{ toYaml .Values.imagePullSecrets | indent 2 }} 51 | {{- end }} 52 | {{- if .Values.kubeStateMetrics.nodeSelector }} 53 | nodeSelector: 54 | {{ toYaml .Values.kubeStateMetrics.nodeSelector | indent 8 }} 55 | {{- end }} 56 | {{- if .Values.kubeStateMetrics.securityContext }} 57 | securityContext: 58 | {{ toYaml .Values.kubeStateMetrics.securityContext | indent 8 }} 59 | {{- end }} 60 | {{- if .Values.kubeStateMetrics.tolerations }} 61 | tolerations: 62 | {{ toYaml .Values.kubeStateMetrics.tolerations | indent 8 }} 63 | {{- end }} 64 | {{- if .Values.kubeStateMetrics.affinity }} 65 | affinity: 66 | {{ toYaml .Values.kubeStateMetrics.affinity | indent 8 }} 67 | {{- end }} 68 | {{- end }} 69 | -------------------------------------------------------------------------------- /stable/prometheus/templates/kube-state-metrics-networkpolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeStateMetrics.enabled .Values.networkPolicy.enabled -}} 2 | apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} 3 | kind: NetworkPolicy 4 | metadata: 5 | name: {{ template "prometheus.kubeStateMetrics.fullname" . }} 6 | labels: 7 | {{- include "prometheus.kubeStateMetrics.labels" . | nindent 4 }} 8 | spec: 9 | podSelector: 10 | matchLabels: 11 | {{- include "prometheus.kubeStateMetrics.matchLabels" . | nindent 6 }} 12 | ingress: 13 | - from: 14 | - podSelector: 15 | matchLabels: 16 | {{- include "prometheus.server.matchLabels" . | nindent 10 }} 17 | - ports: 18 | - port: 8080 19 | {{- end -}} 20 | -------------------------------------------------------------------------------- /stable/prometheus/templates/kube-state-metrics-serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeStateMetrics.enabled .Values.serviceAccounts.kubeStateMetrics.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | {{- include "prometheus.kubeStateMetrics.labels" . | nindent 4 }} 7 | name: {{ template "prometheus.serviceAccountName.kubeStateMetrics" . }} 8 | {{- end -}} 9 | -------------------------------------------------------------------------------- /stable/prometheus/templates/kube-state-metrics-svc.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.kubeStateMetrics.enabled -}} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | {{- if .Values.kubeStateMetrics.service.annotations }} 6 | annotations: 7 | {{ toYaml .Values.kubeStateMetrics.service.annotations | indent 4 }} 8 | {{- end }} 9 | labels: 10 | {{- include "prometheus.kubeStateMetrics.labels" .
| nindent 4 }} 11 | {{- if .Values.kubeStateMetrics.service.labels }} 12 | {{ toYaml .Values.kubeStateMetrics.service.labels | indent 4 }} 13 | {{- end }} 14 | name: {{ template "prometheus.kubeStateMetrics.fullname" . }} 15 | spec: 16 | {{- if .Values.kubeStateMetrics.service.clusterIP }} 17 | clusterIP: {{ .Values.kubeStateMetrics.service.clusterIP }} 18 | {{- end }} 19 | {{- if .Values.kubeStateMetrics.service.externalIPs }} 20 | externalIPs: 21 | {{ toYaml .Values.kubeStateMetrics.service.externalIPs | indent 4 }} 22 | {{- end }} 23 | {{- if .Values.kubeStateMetrics.service.loadBalancerIP }} 24 | loadBalancerIP: {{ .Values.kubeStateMetrics.service.loadBalancerIP }} 25 | {{- end }} 26 | {{- if .Values.kubeStateMetrics.service.loadBalancerSourceRanges }} 27 | loadBalancerSourceRanges: 28 | {{- range $cidr := .Values.kubeStateMetrics.service.loadBalancerSourceRanges }} 29 | - {{ $cidr }} 30 | {{- end }} 31 | {{- end }} 32 | ports: 33 | - name: http 34 | port: {{ .Values.kubeStateMetrics.service.servicePort }} 35 | protocol: TCP 36 | targetPort: 8080 37 | selector: 38 | {{- include "prometheus.kubeStateMetrics.matchLabels" . | nindent 4 }} 39 | type: "{{ .Values.kubeStateMetrics.service.type }}" 40 | {{- end }} 41 | -------------------------------------------------------------------------------- /stable/prometheus/templates/node-exporter-daemonset.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.nodeExporter.enabled -}} 2 | apiVersion: extensions/v1beta1 3 | kind: DaemonSet 4 | metadata: 5 | {{- if .Values.nodeExporter.deploymentAnnotations }} 6 | annotations: 7 | {{ toYaml .Values.nodeExporter.deploymentAnnotations | indent 4 }} 8 | {{- end }} 9 | labels: 10 | {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} 11 | name: {{ template "prometheus.nodeExporter.fullname" . }} 12 | spec: 13 | selector: 14 | matchLabels: 15 | {{- include "prometheus.nodeExporter.matchLabels" . | nindent 6 }} 16 | {{- if .Values.nodeExporter.updateStrategy }} 17 | updateStrategy: 18 | {{ toYaml .Values.nodeExporter.updateStrategy | indent 4 }} 19 | {{- end }} 20 | template: 21 | metadata: 22 | {{- if .Values.nodeExporter.podAnnotations }} 23 | annotations: 24 | {{ toYaml .Values.nodeExporter.podAnnotations | indent 8 }} 25 | {{- end }} 26 | labels: 27 | {{- include "prometheus.nodeExporter.labels" . | nindent 8 }} 28 | {{- if .Values.nodeExporter.pod.labels }} 29 | {{ toYaml .Values.nodeExporter.pod.labels | indent 8 }} 30 | {{- end }} 31 | spec: 32 | serviceAccountName: {{ template "prometheus.serviceAccountName.nodeExporter" . }} 33 | {{- if .Values.nodeExporter.priorityClassName }} 34 | priorityClassName: "{{ .Values.nodeExporter.priorityClassName }}" 35 | {{- end }} 36 | containers: 37 | - name: {{ template "prometheus.name" . 
}}-{{ .Values.nodeExporter.name }} 38 | image: "{{ .Values.nodeExporter.image.repository }}:{{ .Values.nodeExporter.image.tag }}" 39 | imagePullPolicy: "{{ .Values.nodeExporter.image.pullPolicy }}" 40 | args: 41 | - --path.procfs=/host/proc 42 | - --path.sysfs=/host/sys 43 | {{- range $key, $value := .Values.nodeExporter.extraArgs }} 44 | {{- if $value }} 45 | - --{{ $key }}={{ $value }} 46 | {{- else }} 47 | - --{{ $key }} 48 | {{- end }} 49 | {{- end }} 50 | ports: 51 | - name: metrics 52 | containerPort: 9100 53 | hostPort: {{ .Values.nodeExporter.service.hostPort }} 54 | resources: 55 | {{ toYaml .Values.nodeExporter.resources | indent 12 }} 56 | volumeMounts: 57 | - name: proc 58 | mountPath: /host/proc 59 | readOnly: true 60 | - name: sys 61 | mountPath: /host/sys 62 | readOnly: true 63 | {{- range .Values.nodeExporter.extraHostPathMounts }} 64 | - name: {{ .name }} 65 | mountPath: {{ .mountPath }} 66 | readOnly: {{ .readOnly }} 67 | {{- end }} 68 | {{- range .Values.nodeExporter.extraConfigmapMounts }} 69 | - name: {{ .name }} 70 | mountPath: {{ .mountPath }} 71 | readOnly: {{ .readOnly }} 72 | {{- end }} 73 | {{- if .Values.imagePullSecrets }} 74 | imagePullSecrets: 75 | {{ toYaml .Values.imagePullSecrets | indent 2 }} 76 | {{- end }} 77 | {{- if .Values.nodeExporter.hostNetwork }} 78 | hostNetwork: true 79 | {{- end }} 80 | {{- if .Values.nodeExporter.hostPID }} 81 | hostPID: true 82 | {{- end }} 83 | {{- if .Values.nodeExporter.tolerations }} 84 | tolerations: 85 | {{ toYaml .Values.nodeExporter.tolerations | indent 8 }} 86 | {{- end }} 87 | {{- if .Values.nodeExporter.nodeSelector }} 88 | nodeSelector: 89 | {{ toYaml .Values.nodeExporter.nodeSelector | indent 8 }} 90 | {{- end }} 91 | {{- if .Values.nodeExporter.securityContext }} 92 | securityContext: 93 | {{ toYaml .Values.nodeExporter.securityContext | indent 8 }} 94 | {{- end }} 95 | volumes: 96 | - name: proc 97 | hostPath: 98 | path: /proc 99 | - name: sys 100 | hostPath: 101 | path: /sys 102 | {{- range .Values.nodeExporter.extraHostPathMounts }} 103 | - name: {{ .name }} 104 | hostPath: 105 | path: {{ .hostPath }} 106 | {{- end }} 107 | {{- range .Values.nodeExporter.extraConfigmapMounts }} 108 | - name: {{ .name }} 109 | configMap: 110 | name: {{ .configMap }} 111 | {{- end }} 112 | 113 | {{- end -}} 114 | -------------------------------------------------------------------------------- /stable/prometheus/templates/node-exporter-podsecuritypolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.nodeExporter.enabled .Values.rbac.create }} 2 | {{- if .Values.nodeExporter.podSecurityPolicy.enabled }} 3 | apiVersion: extensions/v1beta1 4 | kind: PodSecurityPolicy 5 | metadata: 6 | name: {{ template "prometheus.nodeExporter.fullname" . }} 7 | labels: 8 | {{- include "prometheus.nodeExporter.labels" . 
| nindent 4 }} 9 | annotations: 10 | {{- if .Values.nodeExporter.podSecurityPolicy.annotations }} 11 | {{ toYaml .Values.nodeExporter.podSecurityPolicy.annotations | indent 4 }} 12 | {{- end }} 13 | spec: 14 | privileged: false 15 | allowPrivilegeEscalation: false 16 | requiredDropCapabilities: 17 | - ALL 18 | volumes: 19 | - 'configMap' 20 | - 'hostPath' 21 | - 'secret' 22 | allowedHostPaths: 23 | - pathPrefix: /proc 24 | readOnly: true 25 | - pathPrefix: /sys 26 | readOnly: true 27 | {{- range .Values.nodeExporter.extraHostPathMounts }} 28 | - pathPrefix: {{ .hostPath }} 29 | readOnly: {{ .readOnly }} 30 | {{- end }} 31 | hostNetwork: {{ .Values.nodeExporter.hostNetwork }} 32 | hostPID: {{ .Values.nodeExporter.hostPID }} 33 | hostIPC: false 34 | runAsUser: 35 | rule: 'RunAsAny' 36 | seLinux: 37 | rule: 'RunAsAny' 38 | supplementalGroups: 39 | rule: 'MustRunAs' 40 | ranges: 41 | # Forbid adding the root group. 42 | - min: 1 43 | max: 65535 44 | fsGroup: 45 | rule: 'MustRunAs' 46 | ranges: 47 | # Forbid adding the root group. 48 | - min: 1 49 | max: 65535 50 | readOnlyRootFilesystem: false 51 | hostPorts: 52 | - min: 1 53 | max: 65535 54 | {{- end }} 55 | {{- end }} 56 | -------------------------------------------------------------------------------- /stable/prometheus/templates/node-exporter-role.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.nodeExporter.enabled .Values.rbac.create }} 2 | {{- if .Values.nodeExporter.podSecurityPolicy.enabled }} 3 | apiVersion: rbac.authorization.k8s.io/v1beta1 4 | kind: Role 5 | metadata: 6 | name: {{ template "prometheus.nodeExporter.fullname" . }} 7 | labels: 8 | {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} 9 | namespace: {{ .Release.Namespace }} 10 | rules: 11 | - apiGroups: ['extensions'] 12 | resources: ['podsecuritypolicies'] 13 | verbs: ['use'] 14 | resourceNames: 15 | - {{ template "prometheus.nodeExporter.fullname" . }} 16 | {{- end }} 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /stable/prometheus/templates/node-exporter-rolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.nodeExporter.enabled .Values.rbac.create }} 2 | {{- if .Values.nodeExporter.podSecurityPolicy.enabled }} 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: RoleBinding 5 | metadata: 6 | name: {{ template "prometheus.nodeExporter.fullname" . }} 7 | labels: 8 | {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} 9 | namespace: {{ .Release.Namespace }} 10 | roleRef: 11 | kind: Role 12 | name: {{ template "prometheus.nodeExporter.fullname" . }} 13 | apiGroup: rbac.authorization.k8s.io 14 | subjects: 15 | - kind: ServiceAccount 16 | name: {{ template "prometheus.serviceAccountName.nodeExporter" . }} 17 | namespace: {{ .Release.Namespace }} 18 | {{- end }} 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /stable/prometheus/templates/node-exporter-service.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.nodeExporter.enabled -}} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | {{- if .Values.nodeExporter.service.annotations }} 6 | annotations: 7 | {{ toYaml .Values.nodeExporter.service.annotations | indent 4 }} 8 | {{- end }} 9 | labels: 10 | {{- include "prometheus.nodeExporter.labels" .
| nindent 4 }} 11 | {{- if .Values.nodeExporter.service.labels }} 12 | {{ toYaml .Values.nodeExporter.service.labels | indent 4 }} 13 | {{- end }} 14 | name: {{ template "prometheus.nodeExporter.fullname" . }} 15 | spec: 16 | {{- if .Values.nodeExporter.service.clusterIP }} 17 | clusterIP: {{ .Values.nodeExporter.service.clusterIP }} 18 | {{- end }} 19 | {{- if .Values.nodeExporter.service.externalIPs }} 20 | externalIPs: 21 | {{ toYaml .Values.nodeExporter.service.externalIPs | indent 4 }} 22 | {{- end }} 23 | {{- if .Values.nodeExporter.service.loadBalancerIP }} 24 | loadBalancerIP: {{ .Values.nodeExporter.service.loadBalancerIP }} 25 | {{- end }} 26 | {{- if .Values.nodeExporter.service.loadBalancerSourceRanges }} 27 | loadBalancerSourceRanges: 28 | {{- range $cidr := .Values.nodeExporter.service.loadBalancerSourceRanges }} 29 | - {{ $cidr }} 30 | {{- end }} 31 | {{- end }} 32 | ports: 33 | - name: metrics 34 | port: {{ .Values.nodeExporter.service.servicePort }} 35 | protocol: TCP 36 | targetPort: 9100 37 | selector: 38 | {{- include "prometheus.nodeExporter.matchLabels" . | nindent 4 }} 39 | type: "{{ .Values.nodeExporter.service.type }}" 40 | {{- end -}} 41 | -------------------------------------------------------------------------------- /stable/prometheus/templates/node-exporter-serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.nodeExporter.enabled .Values.serviceAccounts.nodeExporter.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} 7 | name: {{ template "prometheus.serviceAccountName.nodeExporter" . }} 8 | {{- end -}} 9 | -------------------------------------------------------------------------------- /stable/prometheus/templates/pushgateway-deployment.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.pushgateway.enabled -}} 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | {{- include "prometheus.pushgateway.labels" . | nindent 4 }} 7 | name: {{ template "prometheus.pushgateway.fullname" . }} 8 | spec: 9 | selector: 10 | matchLabels: 11 | {{- include "prometheus.pushgateway.matchLabels" . | nindent 6 }} 12 | replicas: {{ .Values.pushgateway.replicaCount }} 13 | template: 14 | metadata: 15 | {{- if .Values.pushgateway.podAnnotations }} 16 | annotations: 17 | {{ toYaml .Values.pushgateway.podAnnotations | indent 8 }} 18 | {{- end }} 19 | labels: 20 | {{- include "prometheus.pushgateway.labels" . | nindent 8 }} 21 | spec: 22 | serviceAccountName: {{ template "prometheus.serviceAccountName.pushgateway" . }} 23 | {{- if .Values.pushgateway.priorityClassName }} 24 | priorityClassName: "{{ .Values.pushgateway.priorityClassName }}" 25 | {{- end }} 26 | containers: 27 | - name: {{ template "prometheus.name" . 
}}-{{ .Values.pushgateway.name }} 28 | image: "{{ .Values.pushgateway.image.repository }}:{{ .Values.pushgateway.image.tag }}" 29 | imagePullPolicy: "{{ .Values.pushgateway.image.pullPolicy }}" 30 | args: 31 | {{- range $key, $value := .Values.pushgateway.extraArgs }} 32 | - --{{ $key }}={{ $value }} 33 | {{- end }} 34 | ports: 35 | - containerPort: 9091 36 | readinessProbe: 37 | httpGet: 38 | {{- if (index .Values "pushgateway" "extraArgs" "web.route-prefix") }} 39 | path: /{{ index .Values "pushgateway" "extraArgs" "web.route-prefix" }}/#/status 40 | {{- else }} 41 | path: /#/status 42 | {{- end }} 43 | port: 9091 44 | initialDelaySeconds: 10 45 | timeoutSeconds: 10 46 | resources: 47 | {{ toYaml .Values.pushgateway.resources | indent 12 }} 48 | {{- if .Values.pushgateway.persistentVolume.enabled }} 49 | volumeMounts: 50 | - name: storage-volume 51 | mountPath: "{{ .Values.pushgateway.persistentVolume.mountPath }}" 52 | subPath: "{{ .Values.pushgateway.persistentVolume.subPath }}" 53 | {{- end }} 54 | {{- if .Values.imagePullSecrets }} 55 | imagePullSecrets: 56 | {{ toYaml .Values.imagePullSecrets | indent 2 }} 57 | {{- end }} 58 | {{- if .Values.pushgateway.nodeSelector }} 59 | nodeSelector: 60 | {{ toYaml .Values.pushgateway.nodeSelector | indent 8 }} 61 | {{- end }} 62 | {{- if .Values.pushgateway.securityContext }} 63 | securityContext: 64 | {{ toYaml .Values.pushgateway.securityContext | indent 8 }} 65 | {{- end }} 66 | {{- if .Values.pushgateway.tolerations }} 67 | tolerations: 68 | {{ toYaml .Values.pushgateway.tolerations | indent 8 }} 69 | {{- end }} 70 | {{- if .Values.pushgateway.affinity }} 71 | affinity: 72 | {{ toYaml .Values.pushgateway.affinity | indent 8 }} 73 | {{- end }} 74 | {{- if .Values.pushgateway.persistentVolume.enabled }} 75 | volumes: 76 | - name: storage-volume 77 | persistentVolumeClaim: 78 | claimName: {{ if .Values.pushgateway.persistentVolume.existingClaim }}{{ .Values.pushgateway.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.pushgateway.fullname" . }}{{- end }} 79 | {{- end -}} 80 | {{- end }} 81 | -------------------------------------------------------------------------------- /stable/prometheus/templates/pushgateway-ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.pushgateway.enabled .Values.pushgateway.ingress.enabled -}} 2 | {{- $releaseName := .Release.Name -}} 3 | {{- $serviceName := include "prometheus.pushgateway.fullname" . }} 4 | {{- $servicePort := .Values.pushgateway.service.servicePort -}} 5 | apiVersion: extensions/v1beta1 6 | kind: Ingress 7 | metadata: 8 | {{- if .Values.pushgateway.ingress.annotations }} 9 | annotations: 10 | {{ toYaml .Values.pushgateway.ingress.annotations | indent 4}} 11 | {{- end }} 12 | labels: 13 | {{- include "prometheus.pushgateway.labels" . | nindent 4 }} 14 | name: {{ template "prometheus.pushgateway.fullname" . }} 15 | spec: 16 | rules: 17 | {{- range .Values.pushgateway.ingress.hosts }} 18 | {{- $url := splitList "/" . 
}} 19 | - host: {{ first $url }} 20 | http: 21 | paths: 22 | - path: /{{ rest $url | join "/" }} 23 | backend: 24 | serviceName: {{ $serviceName }} 25 | servicePort: {{ $servicePort }} 26 | {{- end -}} 27 | {{- if .Values.pushgateway.ingress.tls }} 28 | tls: 29 | {{ toYaml .Values.pushgateway.ingress.tls | indent 4 }} 30 | {{- end -}} 31 | {{- end -}} 32 | -------------------------------------------------------------------------------- /stable/prometheus/templates/pushgateway-pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.pushgateway.persistentVolume.enabled -}} 2 | {{- if not .Values.pushgateway.persistentVolume.existingClaim -}} 3 | apiVersion: v1 4 | kind: PersistentVolumeClaim 5 | metadata: 6 | {{- if .Values.pushgateway.persistentVolume.annotations }} 7 | annotations: 8 | {{ toYaml .Values.pushgateway.persistentVolume.annotations | indent 4 }} 9 | {{- end }} 10 | labels: 11 | {{- include "prometheus.pushgateway.labels" . | nindent 4 }} 12 | name: {{ template "prometheus.pushgateway.fullname" . }} 13 | spec: 14 | accessModes: 15 | {{ toYaml .Values.pushgateway.persistentVolume.accessModes | indent 4 }} 16 | {{- if .Values.pushgateway.persistentVolume.storageClass }} 17 | {{- if (eq "-" .Values.pushgateway.persistentVolume.storageClass) }} 18 | storageClassName: "" 19 | {{- else }} 20 | storageClassName: "{{ .Values.pushgateway.persistentVolume.storageClass }}" 21 | {{- end }} 22 | {{- end }} 23 | resources: 24 | requests: 25 | storage: "{{ .Values.pushgateway.persistentVolume.size }}" 26 | {{- end -}} 27 | {{- end -}} 28 | -------------------------------------------------------------------------------- /stable/prometheus/templates/pushgateway-service.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.pushgateway.enabled -}} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | {{- if .Values.pushgateway.service.annotations }} 6 | annotations: 7 | {{ toYaml .Values.pushgateway.service.annotations | indent 4}} 8 | {{- end }} 9 | labels: 10 | {{- include "prometheus.pushgateway.labels" . | nindent 4 }} 11 | {{- if .Values.pushgateway.service.labels }} 12 | {{ toYaml .Values.pushgateway.service.labels | indent 4}} 13 | {{- end }} 14 | name: {{ template "prometheus.pushgateway.fullname" . }} 15 | spec: 16 | {{- if .Values.pushgateway.service.clusterIP }} 17 | clusterIP: {{ .Values.pushgateway.service.clusterIP }} 18 | {{- end }} 19 | {{- if .Values.pushgateway.service.externalIPs }} 20 | externalIPs: 21 | {{ toYaml .Values.pushgateway.service.externalIPs | indent 4 }} 22 | {{- end }} 23 | {{- if .Values.pushgateway.service.loadBalancerIP }} 24 | loadBalancerIP: {{ .Values.pushgateway.service.loadBalancerIP }} 25 | {{- end }} 26 | {{- if .Values.pushgateway.service.loadBalancerSourceRanges }} 27 | loadBalancerSourceRanges: 28 | {{- range $cidr := .Values.pushgateway.service.loadBalancerSourceRanges }} 29 | - {{ $cidr }} 30 | {{- end }} 31 | {{- end }} 32 | ports: 33 | - name: http 34 | port: {{ .Values.pushgateway.service.servicePort }} 35 | protocol: TCP 36 | targetPort: 9091 37 | selector: 38 | {{- include "prometheus.pushgateway.matchLabels" . 
| nindent 4 }} 39 | type: "{{ .Values.pushgateway.service.type }}" 40 | {{- end }} 41 | -------------------------------------------------------------------------------- /stable/prometheus/templates/pushgateway-serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.pushgateway.enabled .Values.serviceAccounts.pushgateway.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | {{- include "prometheus.pushgateway.labels" . | nindent 4 }} 7 | name: {{ template "prometheus.serviceAccountName.pushgateway" . }} 8 | {{- end -}} 9 | -------------------------------------------------------------------------------- /stable/prometheus/templates/server-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | {{- include "prometheus.server.labels" . | nindent 4 }} 7 | name: {{ template "prometheus.server.fullname" . }} 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - nodes 13 | - nodes/proxy 14 | - services 15 | - endpoints 16 | - pods 17 | - ingresses 18 | - configmaps 19 | verbs: 20 | - get 21 | - list 22 | - watch 23 | - apiGroups: 24 | - "extensions" 25 | resources: 26 | - ingresses/status 27 | - ingresses 28 | verbs: 29 | - get 30 | - list 31 | - watch 32 | - nonResourceURLs: 33 | - "/metrics" 34 | verbs: 35 | - get 36 | {{- end }} 37 | -------------------------------------------------------------------------------- /stable/prometheus/templates/server-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | labels: 6 | {{- include "prometheus.server.labels" . | nindent 4 }} 7 | name: {{ template "prometheus.server.fullname" . }} 8 | subjects: 9 | - kind: ServiceAccount 10 | name: {{ template "prometheus.serviceAccountName.server" . }} 11 | namespace: {{ .Release.Namespace }} 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: {{ template "prometheus.server.fullname" . }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /stable/prometheus/templates/server-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- if (empty .Values.server.configMapOverrideName) -}} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | labels: 6 | {{- include "prometheus.server.labels" . | nindent 4 }} 7 | name: {{ template "prometheus.server.fullname" . }} 8 | data: 9 | {{- $root := . 
--------------------------------------------------------------------------------
/stable/prometheus/templates/server-ingress.yaml:
--------------------------------------------------------------------------------
{{- if .Values.server.ingress.enabled -}}
{{- $releaseName := .Release.Name -}}
{{- $serviceName := include "prometheus.server.fullname" . }}
{{- $servicePort := .Values.server.service.servicePort -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
{{- if .Values.server.ingress.annotations }}
  annotations:
{{ toYaml .Values.server.ingress.annotations | indent 4 }}
{{- end }}
  labels:
    {{- include "prometheus.server.labels" . | nindent 4 }}
{{- range $key, $value := .Values.server.ingress.extraLabels }}
    {{ $key }}: {{ $value }}
{{- end }}
  name: {{ template "prometheus.server.fullname" . }}
spec:
  rules:
  {{- range .Values.server.ingress.hosts }}
    {{- $url := splitList "/" . }}
    - host: {{ first $url }}
      http:
        paths:
          - path: /{{ rest $url | join "/" }}
            backend:
              serviceName: {{ $serviceName }}
              servicePort: {{ $servicePort }}
  {{- end -}}
{{- if .Values.server.ingress.tls }}
  tls:
{{ toYaml .Values.server.ingress.tls | indent 4 }}
{{- end -}}
{{- end -}}
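# Illustrative values excerpt for the Ingress template above. Each hosts entry
# is split on "/" so an optional path may follow the hostname; the domains and
# secret name are placeholders:
#
#   server:
#     ingress:
#       enabled: true
#       hosts:
#         - prometheus.example.com             # host only, path becomes "/"
#         - prometheus.example.com/prometheus  # host plus path "/prometheus"
#       tls:
#         - secretName: prometheus-server-tls
#           hosts:
#             - prometheus.example.com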
--------------------------------------------------------------------------------
/stable/prometheus/templates/server-networkpolicy.yaml:
--------------------------------------------------------------------------------
{{- if .Values.networkPolicy.enabled }}
apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }}
kind: NetworkPolicy
metadata:
  name: {{ template "prometheus.server.fullname" . }}
  labels:
    {{- include "prometheus.server.labels" . | nindent 4 }}
spec:
  podSelector:
    matchLabels:
      {{- include "prometheus.server.matchLabels" . | nindent 6 }}
  ingress:
    - ports:
        - port: 9090
{{- end }}
--------------------------------------------------------------------------------
/stable/prometheus/templates/server-pvc.yaml:
--------------------------------------------------------------------------------
{{- if not .Values.server.statefulSet.enabled -}}
{{- if .Values.server.persistentVolume.enabled -}}
{{- if not .Values.server.persistentVolume.existingClaim -}}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
{{- if .Values.server.persistentVolume.annotations }}
  annotations:
{{ toYaml .Values.server.persistentVolume.annotations | indent 4 }}
{{- end }}
  labels:
    {{- include "prometheus.server.labels" . | nindent 4 }}
  name: {{ template "prometheus.server.fullname" . }}
spec:
  accessModes:
{{ toYaml .Values.server.persistentVolume.accessModes | indent 4 }}
{{- if .Values.server.persistentVolume.storageClass }}
{{- if (eq "-" .Values.server.persistentVolume.storageClass) }}
  storageClassName: ""
{{- else }}
  storageClassName: "{{ .Values.server.persistentVolume.storageClass }}"
{{- end }}
{{- end }}
  resources:
    requests:
      storage: "{{ .Values.server.persistentVolume.size }}"
{{- end -}}
{{- end -}}
{{- end -}}
--------------------------------------------------------------------------------
/stable/prometheus/templates/server-service-headless.yaml:
--------------------------------------------------------------------------------
{{- if .Values.server.statefulSet.enabled -}}
apiVersion: v1
kind: Service
metadata:
{{- if .Values.server.statefulSet.headless.annotations }}
  annotations:
{{ toYaml .Values.server.statefulSet.headless.annotations | indent 4 }}
{{- end }}
  labels:
    {{- include "prometheus.server.labels" . | nindent 4 }}
{{- if .Values.server.statefulSet.headless.labels }}
{{ toYaml .Values.server.statefulSet.headless.labels | indent 4 }}
{{- end }}
  name: {{ template "prometheus.server.fullname" . }}-headless
spec:
  clusterIP: None
  ports:
    - name: http
      port: {{ .Values.server.statefulSet.headless.servicePort }}
      protocol: TCP
      targetPort: 9090
  selector:
    {{- include "prometheus.server.matchLabels" . | nindent 4 }}
{{- end -}}
--------------------------------------------------------------------------------
/stable/prometheus/templates/server-service.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
{{- if .Values.server.service.annotations }}
  annotations:
{{ toYaml .Values.server.service.annotations | indent 4 }}
{{- end }}
  labels:
    {{- include "prometheus.server.labels" . | nindent 4 }}
{{- if .Values.server.service.labels }}
{{ toYaml .Values.server.service.labels | indent 4 }}
{{- end }}
  name: {{ template "prometheus.server.fullname" . }}
spec:
{{- if .Values.server.service.clusterIP }}
  clusterIP: {{ .Values.server.service.clusterIP }}
{{- end }}
{{- if .Values.server.service.externalIPs }}
  externalIPs:
{{ toYaml .Values.server.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.server.service.loadBalancerIP }}
  loadBalancerIP: {{ .Values.server.service.loadBalancerIP }}
{{- end }}
{{- if .Values.server.service.loadBalancerSourceRanges }}
  loadBalancerSourceRanges:
  {{- range $cidr := .Values.server.service.loadBalancerSourceRanges }}
    - {{ $cidr }}
  {{- end }}
{{- end }}
  ports:
    - name: http
      port: {{ .Values.server.service.servicePort }}
      protocol: TCP
      targetPort: 9090
    {{- if .Values.server.service.nodePort }}
      nodePort: {{ .Values.server.service.nodePort }}
    {{- end }}
  selector:
    {{- include "prometheus.server.matchLabels" . | nindent 4 }}
  type: "{{ .Values.server.service.type }}"
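# Illustrative values excerpt for the server Service above. nodePort is only
# rendered when set and only takes effect with type NodePort; the numbers are
# placeholders:
#
#   server:
#     service:
#       type: NodePort
#       servicePort: 80     # port exposed by the Service
#       nodePort: 30090     # must fall within the cluster's node-port range (default 30000-32767)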
--------------------------------------------------------------------------------
/stable/prometheus/templates/server-serviceaccount.yaml:
--------------------------------------------------------------------------------
{{- if .Values.serviceAccounts.server.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    {{- include "prometheus.server.labels" . | nindent 4 }}
  name: {{ template "prometheus.serviceAccountName.server" . }}
{{- end }}
--------------------------------------------------------------------------------
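# A quick local render to sanity-check the templates above. This assumes a
# Helm 2-era client (the chart targets extensions/v1beta1 and rbac v1beta1);
# the release name is a placeholder:
#
#   helm template --name my-prom stable/prometheus > rendered.yaml
#   kubectl apply --dry-run -f rendered.yaml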