├── OWNERS
├── systemd
├── kubelet.service.d
│ └── wants-crc-custom.conf
├── ovs-configuration.service.d
│ └── mute-console.conf
├── crc-custom.target
├── crc-disable-tap.sh
├── crc-routes-controller.sh
├── ocp-clusterid.sh
├── crc-conditionally-disable-tap.sh
├── ocp-clusterid.service
├── crc-wait-apiserver-up.service
├── crc-check-tap.service
├── crc-wait-apiserver-up.sh
├── crc-wait-node-ready.service
├── crc-routes-controller.service
├── ocp-cluster-ca.service
├── ocp-mco-sshkey.service
├── crc-dnsmasq.service
├── crc-pullsecret.service
├── ocp-custom-domain.service
├── crc-needs-tap.sh
├── ocp-wait-apiservices-available.service
├── crc-cluster-status.service
├── ocp-userpasswords.service
├── crc-wait-node-ready.sh
├── dnsmasq.sh.template
├── ocp-mco-sshkey.sh
├── crc-pullsecret.sh
├── crc-self-sufficient-env.sh
├── crc-cluster-status.sh
├── crc-test-vsock.py
├── crc-systemd-common.sh
├── crc-user-mode-networking.sh
├── ocp-userpasswords.sh
├── ocp-wait-apiservices-available.sh
├── ocp-custom-domain.sh
├── ocp-cluster-ca.sh
└── crc-aws-fetch-secrets.sh
├── kubevirt-hostpath-provisioner-csi
├── namespace.yaml
├── csi-sc.yaml
├── csi-driver-hostpath-provisioner.yaml
├── kubevirt-hostpath-security-constraints-csi.yaml
├── csi-driver
│ ├── kustomization.yaml
│ └── csi-kubevirt-hostpath-provisioner.yaml
└── external-provisioner-rbac.yaml
├── node-sizing-enabled.env
├── pull-secret.yaml
├── repos
└── mirror-microshift.repo
├── cluster-network-03-config.yaml
├── crio-wipe.service
├── images
│   └── openshift-ci
│       ├── google-cloud-sdk.repo
│       ├── mock-nss.sh
│       └── Dockerfile
├── qemuga-vsock.te
├── security-notice.yaml
├── registry_pvc.yaml
├── 99-openshift-machineconfig-master-console.yaml
├── .gitignore
├── 99_master-chronyd-mask.yaml
├── oauth_cr.yaml
├── qemu-guest-agent.service
├── shellcheck.sh
├── 99_master-node-sizing-enabled-env.yaml.in
├── routes-controller.yaml.in
├── install-config.yaml
├── 99-openshift-machineconfig-master-dummy-networks.yaml
├── ci_microshift.sh
├── host-libvirt-net.xml.template
├── cvo-overrides.yaml
├── cvo-overrides-after-first-run.yaml
├── image-mode
│   └── microshift
│       ├── config
│       │   ├── Containerfile.bootc-rhel9
│       │   └── config.toml.template
│       └── build.sh
├── test-metadata-generation.sh
├── cluster-kube-controller-manager-operator.patch
├── pki
└── 2022-RH-IT-Root-CA.crt
├── docs
└── self-sufficient-bundle.md
├── microshift.sh
├── README.md
├── crc-bundle-info.json.sample
├── gen-bundle-image.sh
├── ci.sh
├── kubelet-bootstrap-cred-manager-ds.yaml
├── tools.sh
├── cluster-kube-apiserver-operator.patch
├── createdisk.sh
├── LICENSE
├── snc-library.sh
├── createdisk-library.sh
└── snc.sh
/OWNERS:
--------------------------------------------------------------------------------
1 | approvers:
2 | - cfergeau
3 | - praveenkumar
4 | - anjannath
5 | - guillaumerose
6 | - gbraad
7 |
--------------------------------------------------------------------------------
/systemd/kubelet.service.d/wants-crc-custom.conf:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Wants=crc-custom.target
3 | Before=crc-custom.target
4 |
--------------------------------------------------------------------------------
/kubevirt-hostpath-provisioner-csi/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: hostpath-provisioner
5 |
--------------------------------------------------------------------------------
/node-sizing-enabled.env:
--------------------------------------------------------------------------------
1 | NODE_SIZING_ENABLED=false
2 | SYSTEM_RESERVED_MEMORY=350Mi
3 | SYSTEM_RESERVED_CPU=200m
4 | SYSTEM_RESERVED_ES=350Mi
5 |
--------------------------------------------------------------------------------
/systemd/ovs-configuration.service.d/mute-console.conf:
--------------------------------------------------------------------------------
1 | [Service]
2 | StandardOutput=append:/var/log/ovs-configure.log
3 | StandardError=append:/var/log/ovs-configure.log
4 |
--------------------------------------------------------------------------------
/pull-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | .dockerconfigjson: e30K
4 | kind: Secret
5 | metadata:
6 | name: pull-secret
7 | namespace: openshift-config
8 | type: kubernetes.io/dockerconfigjson
9 |
--------------------------------------------------------------------------------
/systemd/crc-custom.target:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=CRC custom target
3 | Requires=crc-wait-apiserver-up.service
4 | Requires=crc-cluster-status.service
5 | After=crc-wait-apiserver-up.service crc-cluster-status.service
6 |
--------------------------------------------------------------------------------
/repos/mirror-microshift.repo:
--------------------------------------------------------------------------------
1 | [mirror-microshift]
2 | name=microshift repo for mirror
3 | baseurl=https://mirror.openshift.com/pub/openshift-v4/$basearch/microshift/ocp-dev-preview/latest-4.20/el9/os/
4 | enabled=1
5 | gpgcheck=0
6 |
--------------------------------------------------------------------------------
/cluster-network-03-config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: operator.openshift.io/v1
2 | kind: Network
3 | metadata:
4 | name: cluster
5 | spec:
6 | defaultNetwork:
7 | type: OpenShiftSDN
8 | openshiftSDNConfig:
9 | mtu: 1400
10 |
--------------------------------------------------------------------------------
/crio-wipe.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Dummy crio-wipe service
3 | Before=crio.service
4 | RequiresMountsFor=/var/lib/containers
5 |
6 | [Service]
7 | ExecStart=/bin/true
8 | Type=oneshot
9 |
10 | [Install]
11 | WantedBy=multi-user.target
12 |
13 |
--------------------------------------------------------------------------------
/images/openshift-ci/google-cloud-sdk.repo:
--------------------------------------------------------------------------------
1 | [google-cloud-sdk]
2 | name=Google Cloud SDK
3 | baseurl=https://packages.cloud.google.com/yum/repos/cloud-sdk-el9-x86_64
4 | enabled=1
5 | gpgcheck=1
6 | repo_gpgcheck=0
7 | gpgkey=https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
8 |
--------------------------------------------------------------------------------
/qemuga-vsock.te:
--------------------------------------------------------------------------------
1 | module qemuga-vsock 1.0;
2 |
3 | require {
4 | type virt_qemu_ga_t;
5 | class vsock_socket { bind create getattr listen accept read write };
6 | }
7 |
8 | #============= virt_qemu_ga_t ==============
9 | allow virt_qemu_ga_t self:vsock_socket { bind create getattr listen accept read write };
10 |
--------------------------------------------------------------------------------
/security-notice.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: console.openshift.io/v1
2 | kind: ConsoleNotification
3 | metadata:
4 | name: security-notice
5 | spec:
6 | text: OpenShift Local cluster is for development and testing purposes. DON'T use it for production.
7 | location: BannerTop
8 | color: '#fff'
9 | backgroundColor: darkred
10 |
--------------------------------------------------------------------------------
/registry_pvc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: crc-image-registry-storage
5 | namespace: openshift-image-registry
6 | spec:
7 | accessModes:
8 | - ReadWriteMany
9 | resources:
10 | requests:
11 | storage: 20Gi
12 | storageClassName: crc-csi-hostpath-provisioner
13 |
--------------------------------------------------------------------------------
/99-openshift-machineconfig-master-console.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: machineconfiguration.openshift.io/v1
2 | kind: MachineConfig
3 | metadata:
4 | labels:
5 | machineconfiguration.openshift.io/role: master
6 | name: 99-openshift-machineconfig-master-console
7 | spec:
8 | kernelArguments:
9 | - console=hvc0
10 | - console=ttyS0
11 |
12 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | id_ecdsa_crc
2 | id_ecdsa_crc.pub
3 | crc-tmp-install-data
4 | 99_master-node-sizing-enabled-env.yaml
5 | openshift-baremetal-install
6 | pull-secret
7 | oc
8 | yq
9 | openshift-clients/
10 | podman-remote/
11 | .sw[a-p]
12 | crc-cluster-kube-apiserver-operator
13 | crc-cluster-kube-controller-manager-operator
14 | systemd/crc-dnsmasq.sh
15 |
--------------------------------------------------------------------------------
/systemd/crc-disable-tap.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Tear down the tap0 user-mode networking configuration.
set -eEuo pipefail
set -x

echo "Disabling the tap0 network configuration ..."

# Remove the NetworkManager connection profile, then stop and disable
# the gvisor-tap-vsock unit; a missing/unknown unit is not an error.
rm -f /etc/NetworkManager/system-connections/tap0.nmconnection
systemctl disable --now gv-user-network@tap0.service || true

exit 0
15 |
--------------------------------------------------------------------------------
/99_master-chronyd-mask.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: machineconfiguration.openshift.io/v1
2 | kind: MachineConfig
3 | metadata:
4 | labels:
5 | machineconfiguration.openshift.io/role: master
6 | name: chronyd-mask
7 | spec:
8 | config:
9 | ignition:
10 | version: 3.2.0
11 | systemd:
12 | units:
13 | - name: chronyd.service
14 | mask: true
15 |
--------------------------------------------------------------------------------
/oauth_cr.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: config.openshift.io/v1
2 | kind: OAuth
3 | metadata:
4 | name: cluster
5 | spec:
6 | tokenConfig:
7 | # token max age set to 365 days
8 | accessTokenMaxAgeSeconds: 31536000
9 | identityProviders:
10 | - name: developer
11 | mappingMethod: claim
12 | type: HTPasswd
13 | htpasswd:
14 | fileData:
15 | name: htpass-secret
16 |
--------------------------------------------------------------------------------
/kubevirt-hostpath-provisioner-csi/csi-sc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: StorageClass
3 | metadata:
4 | name: crc-csi-hostpath-provisioner
5 | annotations:
6 | storageclass.kubernetes.io/is-default-class: "true"
7 | provisioner: kubevirt.io.hostpath-provisioner
8 | parameters:
9 | storagePool: local
10 | volumeBindingMode: WaitForFirstConsumer
11 | reclaimPolicy: Retain
12 |
--------------------------------------------------------------------------------
/systemd/crc-routes-controller.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Apply the CRC routes-controller manifest once the API server is able
# to serve pod and deployment resources.
set -eEuo pipefail
set -x

readonly ROUTE_CONTROLLER=/opt/crc/routes-controller.yaml

# Provides wait_for_resource_or_die.
source /usr/local/bin/crc-systemd-common.sh

wait_for_resource_or_die pods
wait_for_resource_or_die deployments

oc apply -f "${ROUTE_CONTROLLER}"

echo "All done."

exit 0
21 |
--------------------------------------------------------------------------------
/qemu-guest-agent.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=QEMU Guest Agent
3 | IgnoreOnIsolate=True
4 | ConditionVirtualization=apple
5 |
6 | [Service]
7 | UMask=0077
8 | EnvironmentFile=/etc/sysconfig/qemu-ga
9 | ExecStart=/usr/bin/qemu-ga \
10 | --method=vsock-listen \
11 | --path=3:1234 \
12 | $FILTER_RPC_ARGS \
13 | -F${FSFREEZE_HOOK_PATHNAME}
14 | Restart=always
15 | RestartSec=0
16 |
17 | [Install]
18 | WantedBy=default.target
19 |
--------------------------------------------------------------------------------
/systemd/ocp-clusterid.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Give this cluster a freshly generated random cluster ID by patching
# the ClusterVersion resource.
set -eEuo pipefail

# Provides wait_for_resource_or_die.
source /usr/local/bin/crc-systemd-common.sh

wait_for_resource_or_die clusterversion

new_id="$(uuidgen)"

# Build the merge patch with jq and stream it straight into oc.
jq -n --arg id "${new_id}" '{spec: {clusterID: $id}}' \
    | oc patch clusterversion version --type merge --patch-file=/dev/stdin

echo "All done"

exit 0
20 |
--------------------------------------------------------------------------------
/systemd/crc-conditionally-disable-tap.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Disable the tap0 configuration, but only when crc-needs-tap.sh
# reports that the TAP interface is not required.
set -eEuo pipefail
set -x

if ! /usr/local/bin/crc-needs-tap.sh; then
    echo "TAP device not required, running disable script..."
    exec /usr/local/bin/crc-disable-tap.sh
fi

echo "TAP device is required, doing nothing."
exit 0
18 |
--------------------------------------------------------------------------------
/systemd/ocp-clusterid.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=CRC Unit setting random cluster ID
3 | After=crc-wait-apiserver-up.service
4 | StartLimitIntervalSec=450
5 | StartLimitBurst=10
6 |
7 | [Service]
8 | Type=oneshot
9 | Restart=on-failure
10 | RestartSec=40
11 | Environment=KUBECONFIG=/opt/kubeconfig
12 | ExecCondition=/usr/local/bin/crc-self-sufficient-env.sh
13 | ExecStart=/usr/local/bin/ocp-clusterid.sh
14 |
15 | [Install]
16 | WantedBy=crc-custom.target
17 |
--------------------------------------------------------------------------------
/systemd/crc-wait-apiserver-up.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=CRC Unit waiting till k8s API server is up
3 | Requires=kubelet.service
4 | After=kubelet.service
5 | Before=ocp-delete-mco-leases.service
6 |
7 | [Service]
8 | Type=oneshot
9 | Restart=on-failure
10 | Environment=KUBECONFIG=/opt/kubeconfig
11 | ExecCondition=/usr/local/bin/crc-self-sufficient-env.sh
12 | ExecStart=/usr/local/bin/crc-wait-apiserver-up.sh
13 |
14 | [Install]
15 | WantedBy=crc-custom.target
16 |
--------------------------------------------------------------------------------
/systemd/crc-check-tap.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Ensure that tap0 network configuration is disabled when not necessary
3 | Before=NetworkManager.service
4 | Before=gv-user-network@tap0.service
5 | After=local-fs.target
6 | RequiresMountsFor=/etc/NetworkManager/system-connections
7 |
8 | [Service]
9 | Type=oneshot
10 | ExecStart=/usr/local/bin/crc-conditionally-disable-tap.sh
11 |
12 | [Install]
13 | WantedBy=NetworkManager.service
14 | WantedBy=gv-user-network@tap0.service
15 |
--------------------------------------------------------------------------------
/systemd/crc-wait-apiserver-up.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Block until the kube-apiserver responds, using availability of the
# node resource as the readiness probe.
set -eEuo pipefail

# Provides wait_for_resource_or_die.
source /usr/local/bin/crc-systemd-common.sh

# Reset bash's built-in timer so we can report how long the wait took.
SECONDS=0

echo "Waiting for the node resource to be available ..."
# wait_for_resource_or_die <resource> <retry count> <wait seconds>
wait_for_resource_or_die node 60 5

echo "node resource available, APIServer is ready after $SECONDS seconds."

echo "All done"

exit 0
21 |
--------------------------------------------------------------------------------
/shellcheck.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Run shellcheck over every *.sh in the current directory, downloading a
# prebuilt binary when no working shellcheck is available on the PATH.
#
# Fixes:
#  - `scversion` was only assigned inside the download branch, so the
#    later `SHELLCHECK="./shellcheck-${scversion}/shellcheck"` failed
#    with an unbound-variable error (set -u) when the download was
#    skipped; it is now assigned unconditionally.
#  - The existence test checked a literal file named `SHELLCHECK`
#    instead of the previously extracted binary.
set -exuo pipefail

SHELLCHECK=${SHELLCHECK:-shellcheck}
scversion="stable"

if ! "${SHELLCHECK}" -V; then
    arch=$(uname -m)
    # Fetch and unpack the prebuilt binary unless an earlier run
    # already extracted it next to this script.
    if [[ ! -e "./shellcheck-${scversion}/shellcheck" ]]; then
        curl -L "https://github.com/koalaman/shellcheck/releases/download/${scversion?}/shellcheck-${scversion?}.linux.${arch}.tar.xz" | tar -xJv
    fi
    SHELLCHECK="./shellcheck-${scversion}/shellcheck"
fi

${SHELLCHECK} -S error *.sh
16 |
--------------------------------------------------------------------------------
/kubevirt-hostpath-provisioner-csi/csi-driver-hostpath-provisioner.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: CSIDriver
3 | metadata:
4 | name: kubevirt.io.hostpath-provisioner
5 | spec:
6 | attachRequired: false
7 | storageCapacity: false
8 | fsGroupPolicy: File
9 | # Supports persistent volumes.
10 | volumeLifecycleModes:
11 | - Persistent
12 | # To determine at runtime which mode a volume uses, pod info and its
13 | # "csi.storage.k8s.io/ephemeral" entry are needed.
14 | podInfoOnMount: true
15 |
--------------------------------------------------------------------------------
/images/openshift-ci/mock-nss.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Build throw-away passwd/group entries for nss_wrapper so a process
# running under an arbitrary UID/GID still resolves to a named user,
# then exec the given command with the wrapper preloaded.
# User/group values may be overridden via NSS_USERNAME/NSS_UID/
# NSS_GROUPNAME/NSS_GID; the output paths NSS_WRAPPER_PASSWD and
# NSS_WRAPPER_GROUP must be set by the caller.

# mock passwd and group files
(
    # Subshell-local: discard stderr from the id lookups and file
    # writes without affecting the wrapped command below.
    exec 2>/dev/null
    username="${NSS_USERNAME:-$(id -un)}"
    uid="${NSS_UID:-$(id -u)}"

    groupname="${NSS_GROUPNAME:-$(id -gn)}"
    gid="${NSS_GID:-$(id -g)}"

    echo "${username}:x:${uid}:${uid}:gecos:${HOME}:/bin/bash" > "${NSS_WRAPPER_PASSWD}"
    echo "${groupname}:x:${gid}:" > "${NSS_WRAPPER_GROUP}"
)

# wrap command
# Preload nss_wrapper and replace this process with the given command.
export LD_PRELOAD=/usr/lib64/libnss_wrapper.so
exec "$@"
19 |
--------------------------------------------------------------------------------
/99_master-node-sizing-enabled-env.yaml.in:
--------------------------------------------------------------------------------
1 | apiVersion: machineconfiguration.openshift.io/v1
2 | kind: MachineConfig
3 | metadata:
4 | labels:
5 | machineconfiguration.openshift.io/role: master
6 | name: 99-node-sizing-for-crc
7 | spec:
8 | config:
9 | ignition:
10 | version: 3.2.0
11 | storage:
12 | files:
13 | - contents:
14 | source: data:text/plain;charset=utf-8;base64,${DYNAMIC_DATA}
15 | overwrite: true
16 | mode: 0420
17 | path: /etc/node-sizing-enabled.env
18 |
--------------------------------------------------------------------------------
/systemd/crc-wait-node-ready.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=CRC Unit waiting till k8s node is ready
3 | Requires=kubelet.service
4 | After=kubelet.service
5 | After=crc-wait-apiserver-up.service
6 | StartLimitIntervalSec=450
7 | StartLimitBurst=10
8 |
9 | [Service]
10 | Type=oneshot
11 | Restart=on-failure
12 | RestartSec=10
13 | Environment=KUBECONFIG=/opt/kubeconfig
14 | ExecCondition=/usr/local/bin/crc-self-sufficient-env.sh
15 | ExecStart=/usr/local/bin/crc-wait-node-ready.sh
16 |
17 | [Install]
18 | WantedBy=crc-custom.target
19 |
--------------------------------------------------------------------------------
/systemd/crc-routes-controller.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=CRC Unit starting routes controller
3 | After=crc-wait-apiserver-up.service
4 | StartLimitIntervalSec=450
5 | StartLimitBurst=10
6 |
7 | [Service]
8 | Type=oneshot
9 | Restart=on-failure
10 | RestartSec=40
11 | Environment=KUBECONFIG=/opt/kubeconfig
12 | ExecCondition=/usr/local/bin/crc-user-mode-networking.sh
13 | ExecCondition=/usr/local/bin/crc-self-sufficient-env.sh
14 | ExecStart=/usr/local/bin/crc-routes-controller.sh
15 |
16 | [Install]
17 | WantedBy=crc-custom.target
18 |
--------------------------------------------------------------------------------
/routes-controller.yaml.in:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | app: routes-controller
6 | name: routes-controller
7 | namespace: openshift-ingress
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: routes-controller
13 | template:
14 | metadata:
15 | labels:
16 | app: routes-controller
17 | spec:
18 | serviceAccountName: router
19 | containers:
20 | - image: quay.io/crcont/routes-controller:${TAG}
21 | name: routes-controller
22 | imagePullPolicy: IfNotPresent
23 |
24 |
--------------------------------------------------------------------------------
/systemd/ocp-cluster-ca.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=CRC Unit setting custom cluster ca
3 | After=crc-wait-apiserver-up.service
4 | After=ocp-wait-apiservices-available.service
5 | StartLimitIntervalSec=450
6 | StartLimitBurst=10
7 | ConditionPathExists=!/opt/crc/%n.done
8 |
9 | [Service]
10 | Type=oneshot
11 | Restart=on-failure
12 | RestartSec=40
13 | Environment=KUBECONFIG=/opt/kubeconfig
14 | ExecCondition=/usr/local/bin/crc-self-sufficient-env.sh
15 | ExecStart=/usr/local/bin/ocp-cluster-ca.sh
16 | ExecStartPost=-touch /opt/crc/%n.done
17 |
18 | [Install]
19 | WantedBy=crc-custom.target
20 |
--------------------------------------------------------------------------------
/systemd/ocp-mco-sshkey.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=CRC Unit patching the MachineConfig to add new ssh key
3 | After=crc-wait-apiserver-up.service
4 | After=cloud-final.service
5 | StartLimitIntervalSec=450
6 | StartLimitBurst=10
7 |
8 | [Service]
9 | Type=oneshot
10 | Restart=on-failure
11 | RestartSec=40
12 | Environment=KUBECONFIG=/opt/kubeconfig
13 | ExecCondition=/usr/local/bin/crc-self-sufficient-env.sh
14 | ExecStartPre=/usr/bin/test -f /opt/crc/id_rsa.pub
15 | ExecStart=/usr/local/bin/ocp-mco-sshkey.sh /opt/crc/id_rsa.pub
16 | RemainAfterExit=true
17 |
18 | [Install]
19 | WantedBy=crc-custom.target
20 |
--------------------------------------------------------------------------------
/systemd/crc-dnsmasq.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=CRC Unit for configuring dnsmasq
3 | Wants=ovs-configuration.service
4 | After=ovs-configuration.service
5 | Before=kubelet-dependencies.target
6 | StartLimitIntervalSec=30
7 |
8 | [Service]
9 | Type=oneshot
10 | Restart=on-failure
11 | ExecStartPre=/bin/systemctl start ovs-configuration.service
12 | ExecCondition=/usr/local/bin/crc-self-sufficient-env.sh
13 | ExecStart=/usr/local/bin/crc-dnsmasq.sh
14 | ExecStartPost=/usr/bin/systemctl restart NetworkManager.service
15 | ExecStartPost=/usr/bin/systemctl restart dnsmasq.service
16 |
17 | [Install]
18 | WantedBy=kubelet-dependencies.target
19 |
--------------------------------------------------------------------------------
/systemd/crc-pullsecret.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=CRC Unit for adding pull secret to cluster
3 | After=crc-wait-apiserver-up.service
4 | After=cloud-final.service
5 | StartLimitIntervalSec=450
6 | StartLimitBurst=40
7 | ConditionPathExists=!/opt/crc/%n.done
8 |
9 | [Service]
10 | Type=oneshot
11 | Restart=on-failure
12 | RestartSec=10
13 | Environment=KUBECONFIG=/opt/kubeconfig
14 | ExecCondition=/usr/local/bin/crc-self-sufficient-env.sh
15 | ExecStartPre=/usr/bin/test -f /opt/crc/pull-secret
16 | ExecStart=/usr/local/bin/crc-pullsecret.sh /opt/crc/pull-secret
17 | ExecStartPost=-touch /opt/crc/%n.done
18 |
19 | [Install]
20 | WantedBy=crc-custom.target
21 |
--------------------------------------------------------------------------------
/kubevirt-hostpath-provisioner-csi/kubevirt-hostpath-security-constraints-csi.yaml:
--------------------------------------------------------------------------------
1 | kind: SecurityContextConstraints
2 | apiVersion: security.openshift.io/v1
3 | metadata:
4 | name: hostpath-provisioner
5 | allowPrivilegedContainer: true
6 | requiredDropCapabilities:
7 | - KILL
8 | - MKNOD
9 | - SETUID
10 | - SETGID
11 | runAsUser:
12 | type: RunAsAny
13 | seLinuxContext:
14 | type: RunAsAny
15 | fsGroup:
16 | type: RunAsAny
17 | supplementalGroups:
18 | type: RunAsAny
19 | allowHostDirVolumePlugin: true
20 | readOnlyRootFilesystem: false
21 | allowHostNetwork: true
22 | users:
23 | - system:serviceaccount:hostpath-provisioner:csi-hostpath-provisioner-sa
24 |
--------------------------------------------------------------------------------
/systemd/ocp-custom-domain.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=CRC Unit setting nip.io domain for cluster
3 | After=crc-wait-apiserver-up.service
4 | After=ocp-wait-apiservices-available.service
5 | StartLimitIntervalSec=450
6 | StartLimitBurst=10
7 | ConditionPathExists=!/opt/crc/%n.done
8 |
9 | [Service]
10 | Type=oneshot
11 | Restart=on-failure
12 | RestartSec=40
13 | Environment=KUBECONFIG=/opt/kubeconfig
14 | ExecCondition=/usr/local/bin/crc-self-sufficient-env.sh
15 | ExecStartPre=/usr/bin/test -f /opt/crc/eip
16 | ExecStart=/usr/local/bin/ocp-custom-domain.sh /opt/crc/eip
17 | ExecStartPost=-touch /opt/crc/%n.done
18 |
19 | [Install]
20 | WantedBy=crc-custom.target
21 |
--------------------------------------------------------------------------------
/systemd/crc-needs-tap.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Report whether this CRC guest needs the tap0 interface:
# exits 0 when tap0 is needed, 77 when it is not
# (callers such as crc-conditionally-disable-tap.sh branch on this).
set -eEuo pipefail
set -x

# Optional environment overrides shipped with the bundle.
source /etc/sysconfig/crc-env || echo "WARNING: crc-env not found"

EXIT_NEED_TAP=0
EXIT_DONT_NEED_TAP=77
EXIT_ERROR=1

virt="$(systemd-detect-virt || true)"

if [ "${virt}" = "apple" ]; then
    echo "Running with vfkit ($virt) virtualization. Don't need tap0."
    exit "$EXIT_DONT_NEED_TAP"
elif [ "${virt}" = "none" ]; then
    echo "Bare metal detected. Don't need tap0."
    exit "$EXIT_DONT_NEED_TAP"
else
    echo "Running with '$virt' virtualization. Need tap0."
    exit "$EXIT_NEED_TAP"
fi
31 |
--------------------------------------------------------------------------------
/systemd/ocp-wait-apiservices-available.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Wait for all Kubernetes APIServices to be Available
3 |
4 | # This service needs network to talk to the k8s API server
5 | Wants=network-online.target
6 | After=network-online.target
7 | After=crc-wait-apiserver-up.service
8 | After=crc-wait-node-ready.service
9 | Requires=crc-wait-node-ready.service
10 | StartLimitIntervalSec=450
11 | StartLimitBurst=10
12 |
13 | [Service]
14 | Type=oneshot
15 | Restart=on-failure
16 | RestartSec=20
17 | ExecCondition=/usr/local/bin/crc-self-sufficient-env.sh
18 | ExecStart=/usr/local/bin/ocp-wait-apiservices-available.sh
19 |
20 | Environment=KUBECONFIG=/opt/kubeconfig
21 |
22 | [Install]
23 | WantedBy=crc-custom.target
24 |
--------------------------------------------------------------------------------
/systemd/crc-cluster-status.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=CRC Unit checking if cluster is ready
3 | After=crc-wait-apiserver-up.service crc-pullsecret.service
4 | After=ocp-mco-sshkey.service ocp-cluster-ca.service
5 | After=ocp-custom-domain.service ocp-userpasswords.service
6 | After=ocp-clusterid.service
7 | After=ocp-wait-apiservices-available.service
8 | After=crc-wait-node-ready.service
9 | StartLimitIntervalSec=450
10 | StartLimitBurst=10
11 |
12 | [Service]
13 | Type=oneshot
14 | Restart=on-failure
15 | RestartSec=40
16 | Environment=KUBECONFIG=/opt/kubeconfig
17 | ExecCondition=/usr/local/bin/crc-self-sufficient-env.sh
18 | ExecStart=/usr/local/bin/crc-cluster-status.sh
19 | RemainAfterExit=true
20 |
21 | [Install]
22 | WantedBy=crc-custom.target
23 |
--------------------------------------------------------------------------------
/kubevirt-hostpath-provisioner-csi/csi-driver/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - csi-kubevirt-hostpath-provisioner.yaml
3 | images:
4 | - name: quay.io/kubevirt/hostpath-csi-driver
5 | newName: registry.redhat.io/container-native-virtualization/hostpath-csi-driver-rhel9
6 | newTag: v4.20
7 | - name: registry.k8s.io/sig-storage/csi-node-driver-registrar
8 | newName: registry.redhat.io/openshift4/ose-csi-node-driver-registrar
9 | newTag: latest
10 | - name: registry.k8s.io/sig-storage/livenessprobe
11 | newName: registry.redhat.io/openshift4/ose-csi-livenessprobe
12 | newTag: latest
13 | - name: registry.k8s.io/sig-storage/csi-provisioner
14 | newName: registry.redhat.io/openshift4/ose-csi-external-provisioner
15 | newTag: latest
16 |
--------------------------------------------------------------------------------
/systemd/ocp-userpasswords.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=CRC Unit setting the developer and kubeadmin user password
3 | Before=ocp-cluster-ca.service
4 | After=crc-wait-apiserver-up.service
5 | After=cloud-final.service
6 | StartLimitIntervalSec=450
7 | StartLimitBurst=10
8 | ConditionPathExists=!/opt/crc/%n.done
9 |
10 | [Service]
11 | Type=oneshot
12 | Restart=on-failure
13 | RestartSec=40
14 | Environment=KUBECONFIG=/opt/kubeconfig
15 | ExecCondition=/usr/local/bin/crc-self-sufficient-env.sh
16 | ExecStartPre=/usr/bin/test -f /opt/crc/pass_developer
17 | ExecStartPre=/usr/bin/test -f /opt/crc/pass_kubeadmin
18 | ExecStart=/usr/local/bin/ocp-userpasswords.sh /opt/crc/pass_kubeadmin /opt/crc/pass_developer
19 | ExecStartPost=-touch /opt/crc/%n.done
20 |
21 | [Install]
22 | WantedBy=crc-custom.target
23 |
--------------------------------------------------------------------------------
/images/openshift-ci/Dockerfile:
--------------------------------------------------------------------------------
# This Dockerfile is used by openshift CI
# It builds an image containing snc code and nss-wrapper for remote deployments, as well as the google cloud-sdk for nested GCE environments.
# Stage 1: stage the repository contents so they can be copied
# selectively into the final image.
FROM scratch AS builder
WORKDIR /code-ready/snc
COPY . .

FROM registry.access.redhat.com/ubi9/ubi
COPY --from=builder /code-ready/snc /opt/snc
COPY --from=builder /code-ready/snc/images/openshift-ci/mock-nss.sh /bin/mock-nss.sh
COPY --from=builder /code-ready/snc/images/openshift-ci/google-cloud-sdk.repo /etc/yum.repos.d/google-cloud-sdk.repo

# nss_wrapper backs mock-nss.sh (copied above); gettext presumably
# provides envsubst for the snc templates — TODO confirm against snc.sh.
RUN yum install --setopt=tsflags=nodocs -y \
    gettext \
    google-cloud-cli \
    nss_wrapper \
    openssh-clients && \
    yum clean all && rm -rf /var/cache/yum/*
# Run as non-root UID 1000 with a writable HOME at /output.
RUN mkdir /output && chown 1000:1000 /output
USER 1000:1000
ENV PATH /bin
ENV HOME /output
WORKDIR /output
23 |
--------------------------------------------------------------------------------
/install-config.yaml:
--------------------------------------------------------------------------------
1 | # This file was generated using openshift-install create install-config
2 | # and then user specific information was removed as snc.sh will readd it
3 | apiVersion: v1
4 | baseDomain: testing
5 | compute:
6 | - architecture:
7 | name: worker
8 | replicas: 0
9 | controlPlane:
10 | architecture:
11 | name: master
12 | replicas: 1
13 | metadata:
14 | name: crc
15 | networking:
16 | clusterNetwork:
17 | - cidr: 10.217.0.0/22
18 | hostPrefix: 23
19 | machineNetwork:
20 | - cidr: 192.168.126.0/24
21 | serviceNetwork:
22 | - 10.217.4.0/23
23 | platform:
24 | none: {}
25 | bootstrapInPlace:
26 | installationDisk: /dev/vda
27 | capabilities:
28 | baselineCapabilitySet: None
29 | additionalEnabledCapabilities:
30 | - openshift-samples
31 | - marketplace
32 | - Console
33 | - MachineAPI
34 | - ImageRegistry
35 | - DeploymentConfig
36 | - Build
37 | - OperatorLifecycleManager
38 | - Ingress
39 |
--------------------------------------------------------------------------------
/99-openshift-machineconfig-master-dummy-networks.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: machineconfiguration.openshift.io/v1
2 | kind: MachineConfig
3 | metadata:
4 | labels:
5 | machineconfiguration.openshift.io/role: master
6 | name: 99-openshift-machineconfig-master-dummy-networks
7 | spec:
8 | config:
9 | ignition:
10 | version: 3.2.0
11 | systemd:
12 | units:
13 | - contents: |
14 | [Unit]
15 | Description=Create dummy network
16 | After=NetworkManager.service
17 |
18 | [Service]
19 | Type=oneshot
20 | RemainAfterExit=yes
21 | ExecStart=/bin/nmcli conn add type dummy ifname eth10 autoconnect yes save yes con-name internalEtcd ip4 192.168.126.11/24
22 |
23 | [Install]
24 | WantedBy=multi-user.target
25 | enabled: true
26 | name: dummy-network.service
27 | networkd: {}
28 | passwd: {}
29 | storage: {}
30 | osImageURL: ""
31 |
32 |
--------------------------------------------------------------------------------
/ci_microshift.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# CI driver for the MicroShift preset: build the bundle with snc, then
# install crc and verify the bundle actually starts.

set -exuo pipefail

sudo yum install -y make golang

./shellcheck.sh
./microshift.sh

# Set the zstd compression level to 10 to have faster
# compression while keeping a reasonable bundle size.
export CRC_ZSTD_EXTRA_FLAGS="-10"
./createdisk.sh crc-tmp-install-data

# Delete the crc domain created by snc so it can be re-created
# by the crc test
sudo virsh undefine crc --nvram

git clone https://github.com/crc-org/crc.git
pushd crc
podman run --rm -v "${PWD}":/data:Z registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.24-openshift-4.20 /bin/bash -c "cd /data && make cross"
sudo mv out/linux-amd64/crc /usr/local/bin/
popd

crc config set bundle crc_microshift_libvirt_*.crcbundle
crc config set preset microshift
crc setup

# BUGFIX: with `set -e` active, a failing `crc start` used to abort the
# script before `rc=$?` ran, so /tmp/test-return was never written and the
# recorded code was always 0.  Capture the status with `|| rc=$?` instead.
rc=0
crc start -p "${HOME}"/pull-secret --log-level debug || rc=$?

echo "${rc}" > /tmp/test-return
echo "### Done! (${rc})"
34 |
--------------------------------------------------------------------------------
/host-libvirt-net.xml.template:
--------------------------------------------------------------------------------
1 |
2 | NETWORK_NAME
3 | a29bce40-ce15-43c8-9142-fd0a3cc37f9a
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 | api.CLUSTER_NAME.BASE_DOMAIN
12 | api-int.CLUSTER_NAME.BASE_DOMAIN
13 | console-openshift-console.apps-CLUSTER_NAME.BASE_DOMAIN
14 | oauth-openshift.apps-CLUSTER_NAME.BASE_DOMAIN
15 | canary-openshift-ingress-canary.apps-CLUSTER_NAME.BASE_DOMAIN
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/systemd/crc-wait-node-ready.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Poll the cluster until the CRC node reports "Ready", or give up after
# MAX_RETRY attempts spaced WAIT_SEC seconds apart.

set -o pipefail
set -o errexit
set -o nounset
set -o errtrace

source /usr/local/bin/crc-systemd-common.sh

SECONDS=0
MAX_RETRY=150
WAIT_SEC=2
NODE_NAME=node/crc

for (( retry=1; retry<=MAX_RETRY; retry++ )); do
    # Second column of `oc get node` output is the STATUS field;
    # a transient oc failure falls back to an empty status via `|| true`.
    node_status=$(oc get "$NODE_NAME" --no-headers | awk '{print $2}' || true)
    node_status=${node_status:-""}

    if [[ $node_status == "Ready" ]]; then
        echo "CRC node is ready after $SECONDS seconds."
        exit 0
    fi

    echo "CRC node is not ready. Status: $node_status"

    # Out of attempts: report the timeout and fail.
    if (( retry == MAX_RETRY )); then
        echo "ERROR: Timed out waiting for the CRC node to be ready after $MAX_RETRY attempts x $WAIT_SEC seconds." >&2
        exit 1
    fi

    echo "Waiting $WAIT_SEC seconds for crc node to be ready ... (Attempt ${retry}/${MAX_RETRY})"
    sleep "$WAIT_SEC"
done

# cannot be reached

exit 1
41 |
--------------------------------------------------------------------------------
/cvo-overrides.yaml:
--------------------------------------------------------------------------------
1 | spec:
2 | overrides:
3 | - kind: Deployment
4 | group: apps
5 | name: cluster-monitoring-operator
6 | namespace: openshift-monitoring
7 | unmanaged: true
8 | - kind: ClusterOperator
9 | group: config.openshift.io
10 | name: monitoring
11 | namespace: ""
12 | unmanaged: true
13 | - kind: Deployment
14 | group: apps
15 | name: cloud-credential-operator
16 | namespace: openshift-cloud-credential-operator
17 | unmanaged: true
18 | - kind: ClusterOperator
19 | group: config.openshift.io
20 | name: cloud-credential
21 | namespace: ""
22 | unmanaged: true
23 | - kind: Deployment
24 | group: apps
25 | name: cluster-autoscaler-operator
26 | namespace: openshift-machine-api
27 | unmanaged: true
28 | - kind: ClusterOperator
29 | group: config.openshift.io
30 | name: cluster-autoscaler
31 | namespace: ""
32 | unmanaged: true
33 | - kind: Deployment
34 | group: apps
35 | name: cluster-cloud-controller-manager-operator
36 | namespace: openshift-cloud-controller-manager-operator
37 | unmanaged: true
38 | - kind: ClusterOperator
39 | group: config.openshift.io
40 | name: cloud-controller-manager
41 | namespace: ""
42 | unmanaged: true
43 |
44 |
--------------------------------------------------------------------------------
/cvo-overrides-after-first-run.yaml:
--------------------------------------------------------------------------------
1 | - op: add
2 | path: /spec/overrides
3 | value:
4 | - kind: Deployment
5 | group: apps
6 | name: cluster-monitoring-operator
7 | namespace: openshift-monitoring
8 | unmanaged: true
9 | - kind: ClusterOperator
10 | group: config.openshift.io
11 | name: monitoring
12 | namespace: ""
13 | unmanaged: true
14 | - kind: Deployment
15 | group: apps
16 | name: cloud-credential-operator
17 | namespace: openshift-cloud-credential-operator
18 | unmanaged: true
19 | - kind: ClusterOperator
20 | group: config.openshift.io
21 | name: cloud-credential
22 | namespace: ""
23 | unmanaged: true
24 | - kind: Deployment
25 | group: apps
26 | name: cluster-autoscaler-operator
27 | namespace: openshift-machine-api
28 | unmanaged: true
29 | - kind: ClusterOperator
30 | group: config.openshift.io
31 | name: cluster-autoscaler
32 | namespace: ""
33 | unmanaged: true
34 | - kind: Deployment
35 | group: apps
36 | name: cluster-cloud-controller-manager-operator
37 | namespace: openshift-cloud-controller-manager-operator
38 | unmanaged: true
39 | - kind: ClusterOperator
40 | group: config.openshift.io
41 | name: cloud-controller-manager
42 | namespace: ""
43 | unmanaged: true
44 |
45 |
--------------------------------------------------------------------------------
/systemd/dnsmasq.sh.template:
--------------------------------------------------------------------------------
#!/bin/bash
# Configure and start dnsmasq inside the CRC VM so that the cluster DNS
# names (api, api-int, the apps domain, and the node hostname) resolve to
# the VM's own IP address.
# NOTE: this file is a template; APPS_DOMAIN is substituted by envsubst
# when createdisk-library.sh::copy_systemd_units instantiates it.

set -o pipefail
set -o errexit
set -o nounset
set -o errtrace
set -x

# Best effort: crc-env provides CRC_NETWORK_MODE_USER; a missing file is tolerated.
source /etc/sysconfig/crc-env || echo "WARNING: crc-env not found"


# In user-mode networking the host side handles DNS, so dnsmasq is skipped.
if (( ${CRC_NETWORK_MODE_USER:-0} == 1 )); then
    echo -n "network-mode 'user' detected: skipping dnsmasq configuration"
    exit 0
fi

# The value of APPS_DOMAIN is set by the
# createdisk-library.sh::copy_systemd_units script during the template
# instantiation. So in the end system, the test below should be a
# tautology (i.e., always true if correctly set up)

# disable this to properly reach the error block (cannot use the
# colon-dash default-value expansion here because of the envsubst
# instantiating the template)
set +o nounset
if [[ -z "${APPS_DOMAIN}" ]]; then
    echo "ERROR: APPS_DOMAIN must be defined to use this script"
    exit 1
fi
set -o nounset

hostName=$(hostname)
hostIp=$(hostname --all-ip-addresses | awk '{print $1}')

# Write the dnsmasq configuration: serve the crc.testing domain locally and
# answer the cluster names with this host's primary IP.
cat << EOF > /etc/dnsmasq.d/crc-dnsmasq.conf
listen-address=$hostIp
expand-hosts
log-queries
local=/crc.testing/
domain=crc.testing
address=/${APPS_DOMAIN}/$hostIp
address=/api.crc.testing/$hostIp
address=/api-int.crc.testing/$hostIp
address=/$hostName.crc.testing/$hostIp
EOF

/bin/systemctl enable --now dnsmasq.service
# Point NetworkManager at the local dnsmasq first, with 1.1.1.1 as fallback.
/bin/nmcli conn modify --temporary ovs-if-br-ex ipv4.dns $hostIp,1.1.1.1
48 |
--------------------------------------------------------------------------------
/systemd/ocp-mco-sshkey.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Inject the CRC public SSH key into the 99-master-ssh MachineConfig so the
# machine config operator authorizes the 'core' user with that key.

set -o pipefail
set -o errexit
set -o nounset
set -o errtrace
set -x

source /usr/local/bin/crc-systemd-common.sh

CRC_PUB_KEY_PATH="${1:-}"

# Guard clause: the key path argument is mandatory.
[[ -n "$CRC_PUB_KEY_PATH" ]] || {
    echo "ERROR: expected to receive the path to the pub key file as first argument."
    exit 1
}

# enforced by systemd
[[ -r "$CRC_PUB_KEY_PATH" ]] || {
    echo "ERROR: CRC pubkey file does not exist ($CRC_PUB_KEY_PATH)"
    exit 1
}

wait_for_resource_or_die machineconfig/99-master-ssh

echo "Updating the public key resource for machine config operator"

# Use --rawfile to read the key file directly into a jq variable named 'pub_key'.
# The key's content is never exposed as a command-line argument.
# We use jq's rtrimstr function to remove any trailing newlines from the file.

jq -n --rawfile pub_key "$CRC_PUB_KEY_PATH" '
{
  "spec": {
    "config": {
      "passwd": {
        "users": [
          {
            "name": "core",
            "sshAuthorizedKeys": [
              # Trim trailing newlines and carriage returns from the slurped file content
              $pub_key | rtrimstr("\n") | rtrimstr("\r")
            ]
          }
        ]
      }
    }
  }
}' | oc patch machineconfig 99-master-ssh --type merge --patch-file=/dev/stdin

echo "All done"

exit 0
54 |
--------------------------------------------------------------------------------
/systemd/crc-pullsecret.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Ensure the cluster-wide pull secret (secret/pull-secret in the
# openshift-config namespace) contains an 'auths' entry; when it does not,
# install the pull secret from the file passed as first argument.

set -o pipefail
set -o errexit
set -o nounset
set -o errtrace
set -x

source /usr/local/bin/crc-systemd-common.sh

PULL_SECRETS_FILE="${1:-}"

wait_for_resource_or_die secret

# The pull secret data is piped through stdin and not exposed in command arguments,
# so `set -x` is safe to keep

# check if the .auths field is there
if oc get secret pull-secret \
    -n openshift-config \
    -o jsonpath="{['data']['\.dockerconfigjson']}" \
    | base64 -d \
    | jq -e 'has("auths")' >/dev/null 2>&1
then
    echo "Cluster already has some pull secrets, nothing to do."
    exit 0
fi

echo "Cluster doesn't have the pull secrets. Setting them from $PULL_SECRETS_FILE ..."

# Explicit message when the argument was never passed (consistent with the
# other crc systemd scripts). Checked only here, after the early-exit above,
# because the file is not needed when the cluster is already configured.
if [[ -z "$PULL_SECRETS_FILE" ]]; then
    echo "ERROR: expected to receive the path to the pull secrets file as first argument." 1>&2
    exit 1
fi

# enforced by systemd
if [[ ! -r "$PULL_SECRETS_FILE" ]];
then
    echo "ERROR: $PULL_SECRETS_FILE is missing or unreadable" 1>&2
    exit 1
fi

if ! jq -e 'has("auths")' < "$PULL_SECRETS_FILE" >/dev/null;
then
    echo "ERROR: pull-secrets file doesn't have the required '.auths' field"
    exit 1
fi

# Create the JSON patch in memory and pipe it to the oc command
base64 -w0 < "$PULL_SECRETS_FILE" | \
    jq -R '{"data": {".dockerconfigjson": .}}' | \
    oc patch secret pull-secret \
    -n openshift-config \
    --type merge \
    --patch-file=/dev/stdin

echo "All done"

exit 0
55 |
--------------------------------------------------------------------------------
/systemd/crc-self-sufficient-env.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Decide whether this VM should run as a "self-sufficient" bundle, i.e. no
# crc helper process is reachable on the host side of vsock.
# Exit 0 -> self-sufficient; exit 1 -> not self-sufficient; other -> error.

set -o pipefail
# BUGFIX: errexit was never enabled even though the vsock-test section below
# explicitly toggles it (`set +o errexit` ... `set -o errexit`) and the
# sibling crc systemd scripts all enable it; turn it on from the start.
set -o errexit
set -o nounset
set -o errtrace

# Best effort: a missing crc-env file is tolerated.
source /etc/sysconfig/crc-env || echo "WARNING: crc-env not found"

# Explicit override: CRC_SELF_SUFFICIENT=1 forces self-sufficient mode,
# 0 forces it off; any other value is rejected.
if [[ "${CRC_SELF_SUFFICIENT:-}" ]]; then
    echo "Found CRC_SELF_SUFFICIENT=$CRC_SELF_SUFFICIENT"

    if [[ ! "${CRC_SELF_SUFFICIENT}" =~ ^[01]$ ]]; then
        echo "ERROR: CRC_SELF_SUFFICIENT should be 0 or 1 ..." >&2
        exit 1
    fi

    if [[ "$CRC_SELF_SUFFICIENT" == 1 ]]; then
        exit 0
    else
        exit 1
    fi
fi

TEST_TIMEOUT=120
VSOCK_COMM_PORT=1024

set +o errexit
# set -o errexit disabled to capture the test return code
timeout "$TEST_TIMEOUT" python3 /usr/local/bin/crc-test-vsock.py "$VSOCK_COMM_PORT"
returncode=$?
set -o errexit

# Map the vsock-test return code onto this script's contract.
case "$returncode" in
    19) # ENODEV
        echo "vsock device doesn't exist, not running self-sufficient bundle" >&2
        exit 1
        ;;
    124) # timeout(1) reports 124 when the command was killed
        echo "ERROR: vsock/${VSOCK_COMM_PORT} test timed out after $TEST_TIMEOUT seconds :/" >&2
        exit 124
        ;;
    1)
        echo "vsock/${VSOCK_COMM_PORT} not working, running with a self-sufficient bundle" >&2
        exit 0
        ;;
    0)
        echo "vsock/${VSOCK_COMM_PORT} works, not running with a self-sufficient bundle" >&2
        exit 1
        ;;
    *)
        echo "ERROR: unexpected return code from the vsock test: $returncode" >&2
        exit "$returncode"
esac

# cannot be reached
56 |
--------------------------------------------------------------------------------
/image-mode/microshift/config/Containerfile.bootc-rhel9:
--------------------------------------------------------------------------------
FROM registry.redhat.io/rhel9/rhel-bootc:9.4

ARG MICROSHIFT_VER=4.18
# BUGFIX: UNRELEASED_MIRROR_REPO was referenced below but never declared.
# Without this ARG, `--build-arg UNRELEASED_MIRROR_REPO=...` is silently
# ignored and the unreleased-mirror branch can never be taken.
ARG UNRELEASED_MIRROR_REPO=
RUN if [ -z "${UNRELEASED_MIRROR_REPO}" ]; then \
    dnf config-manager --set-enabled "rhocp-${MICROSHIFT_VER}-for-rhel-9-$(uname -m)-rpms" \
    --set-enabled "fast-datapath-for-rhel-9-$(uname -m)-rpms"; \
    else \
    # This is required to update the gpgcheck for repoID
    # Add the specified OpenShift v4 dependencies repository to get packages like crio, runc, openvswitch ..etc.
    # to which microshift package depend on for the current architecture and MICROSHIFT_VER version (e.g., 4.18).
    repoID=$(echo "${UNRELEASED_MIRROR_REPO#*://}" | tr '/:' '_'); \
    dnf config-manager --add-repo "${UNRELEASED_MIRROR_REPO}" \
    --add-repo "https://mirror.openshift.com/pub/openshift-v4/$(uname -m)/dependencies/rpms/${MICROSHIFT_VER}-el9-beta" \
    --set-enabled "fast-datapath-for-rhel-9-$(uname -m)-rpms"; \
    dnf config-manager --save --setopt="${repoID}".gpgcheck=0 --setopt=*-el9-beta.gpgcheck=0; \
    fi
RUN dnf install -y firewalld microshift microshift-release-info cloud-utils-growpart qemu-guest-agent dnsmasq && \
    dnf clean all && rm -fr /etc/yum.repos.d/*

# https://github.com/containers/bootc/discussions/1036
# /Users is created to make sure share directory works on
# mac because on linux it is /home and for windows it is /mnt
# and both are symlink to `var` already
RUN rm -fr /opt && ln -sf var/opt /opt && mkdir /var/opt
RUN ln -sf var/Users /Users && mkdir /var/Users
RUN rm -fr /usr/local && ln -sf ../var/usrlocal /usr/local && mkdir /var/usrlocal
27 |
--------------------------------------------------------------------------------
/test-metadata-generation.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Smoke-test the bundle-metadata generation helpers of createdisk-library.sh
# by faking the minimal directory layout they expect, then running the
# copy/tarball/bundle steps against a tiny qcow2 image.

set -exuo pipefail

source tools.sh
source createdisk-library.sh
source snc-library.sh

MIRROR=${MIRROR:-https://mirror.openshift.com/pub/openshift-v4/$ARCH/clients/ocp}
# NOTE: uses '-' (not ':-'): an explicitly empty OPENSHIFT_VERSION stays empty.
OPENSHIFT_RELEASE_VERSION="${OPENSHIFT_VERSION-4.7.0}"

SNC_PRODUCT_NAME=${SNC_PRODUCT_NAME:-crc}
BASE_DOMAIN=${CRC_BASE_DOMAIN:-testing}
VM_PREFIX=${SNC_PRODUCT_NAME}-abcde
VM_IP=192.168.126.11

CRC_ZSTD_EXTRA_FLAGS="--fast"

# Prepare fake directory structure matching create_disk expectations
# (quote the paths so a renamed baseDir with spaces cannot break mkdir/cd).
baseDir="test-metadata-generation"
mkdir "$baseDir"
cd "$baseDir"
srcDir=src
destDir=dest

mkdir -p "$srcDir"
mkdir -p "$srcDir/auth"
touch "$srcDir"/auth/kubeconfig
touch id_ecdsa_crc


# Build a minimal crc-bundle-info.json containing the fields createdisk reads.
echo {} | ${JQ} '.version = "1.2"' \
    | ${JQ} '.type = "snc"' \
    | ${JQ} ".buildInfo.buildTime = \"$(date -u --iso-8601=seconds)\"" \
    | ${JQ} ".buildInfo.openshiftInstallerVersion = \"0.0.0\"" \
    | ${JQ} ".buildInfo.sncVersion = \"xxx\"" \
    | ${JQ} ".clusterInfo.openshiftVersion = \"${OPENSHIFT_RELEASE_VERSION}\"" \
    | ${JQ} ".clusterInfo.clusterName = \"${SNC_PRODUCT_NAME}\"" \
    | ${JQ} ".clusterInfo.baseDomain = \"${BASE_DOMAIN}\"" \
    | ${JQ} ".clusterInfo.appsDomain = \"apps-${SNC_PRODUCT_NAME}.${BASE_DOMAIN}\"" > "${srcDir}/crc-bundle-info.json"

download_oc

mkdir -p "$destDir/linux"
${QEMU_IMG} create -f qcow2 "$destDir/linux/${SNC_PRODUCT_NAME}.qcow2" 64M
copy_additional_files "$srcDir" "$destDir/linux"
create_tarball "$destDir/linux"

generate_hyperv_bundle "$destDir/linux" "$destDir/windows"
generate_hyperkit_bundle "$destDir/linux" "$destDir/macos" "$srcDir" "0.0.0" "init=/init/sh"
51 |
--------------------------------------------------------------------------------
/systemd/crc-cluster-status.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Wait for the cluster to stabilize, log in as kubeadmin to refresh the
# kubeconfig, then drop the /tmp/.crc-cluster-ready marker that `crc`
# polls to know the cluster is usable.

set -o pipefail
set -o errexit
set -o nounset
set -o errtrace
set -x

MAXIMUM_LOGIN_RETRY=10
RETRY_DELAY=5

if [ ! -f /opt/crc/pass_kubeadmin ]; then
    echo "kubeadmin password file not found"
    exit 1
fi

# Remove a stale marker from a previous run before re-checking.
rm -rf /tmp/.crc-cluster-ready

# SECONDS is bash's built-in timer; reset it to measure stabilization time.
SECONDS=0
if ! oc adm wait-for-stable-cluster --minimum-stable-period=1m --timeout=10m; then
    exit 1
fi

echo "Cluster took $SECONDS seconds to stabilize."

echo "Logging into OpenShift with kubeadmin user to update the KUBECONFIG"

# Attempt one kubeadmin login; returns 0 on success, non-zero otherwise.
# Safe under errexit because it is only ever called in an `if` condition.
try_login() {
    ( # use a `(set +x)` subshell to avoid leaking the password
    set +x
    set +e # don't abort on error in this subshell
    oc login --insecure-skip-tls-verify=true \
        --kubeconfig=/tmp/kubeconfig \
        -u kubeadmin \
        -p "$(cat /opt/crc/pass_kubeadmin)" \
        https://api.crc.testing:6443 > /dev/null 2>&1
    )
    # Capture the subshell's exit status (i.e. the oc login result).
    local success="$?"
    if [[ "$success" == 0 ]]; then
        echo "Login succeeded"
    else
        echo "Login did not complete ..."
    fi

    return "$success"
}

# Retry the login up to MAXIMUM_LOGIN_RETRY times, RETRY_DELAY seconds apart.
for ((counter=1; counter<=MAXIMUM_LOGIN_RETRY; counter++)); do
    echo "Login attempt $counter/$MAXIMUM_LOGIN_RETRY…"
    if try_login; then
        break
    fi
    if (( counter == MAXIMUM_LOGIN_RETRY )); then
        echo "Unable to login to the cluster after $counter attempts; authentication failed."
        exit 1
    fi
    sleep "$RETRY_DELAY"
done

# need to set a marker to let `crc` know the cluster is ready
touch /tmp/.crc-cluster-ready

echo "All done after $SECONDS seconds "

exit 0
66 |
--------------------------------------------------------------------------------
/systemd/crc-test-vsock.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import socket
4 | import sys
5 | import time
6 | import fcntl, struct
7 | import os
8 | import errno
9 | import pathlib
10 |
11 | VSOCK_DEV = pathlib.Path("/dev/vsock")
12 | HOST_CID = 2 # VMADDR_CID_HOST
13 |
def main():
    """Probe whether a host-side process listens on the given vsock port.

    Return/exit codes:
      errno.EINVAL -- bad command line (raised as SystemExit)
      errno.ENODEV -- /dev/vsock never appeared
      1            -- no listener on the port (or unexpected connect error)
      0            -- a listener answered
    """
    if len(sys.argv) != 2:
        print("ERROR: expected a vsock port number as first argument.")
        raise SystemExit(errno.EINVAL)

    port = int(sys.argv[1])

    # Wait up to ~5 seconds for the vsock device node to show up.
    tries = 5
    while not VSOCK_DEV.exists():
        tries -= 1

        if not tries:
            print(f"ERROR: {VSOCK_DEV} didn't appear ...")
            return errno.ENODEV
        print(f"Waiting for {VSOCK_DEV} to appear ... ({tries} tries left)")
        time.sleep(1)

    print(f"Looking up the CID in {VSOCK_DEV}...")
    with open(VSOCK_DEV, 'rb') as f:
        # Use an explicit 4-byte buffer for the ioctl so the result can
        # always be unpacked as an unsigned 32-bit integer (a too-short
        # string buffer would make struct.unpack('I', ...) fail).
        buf = struct.pack('I', 0)
        r = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, buf)
        cid = struct.unpack('I', r)[0]
    print(f'Our vsock CID is {cid}.')

    s = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)

    try:
        s.connect((HOST_CID, port))
    except OSError as e:
        # These errno values mean "nobody is listening there" -> report 1.
        if e.errno in (errno.ENODEV, errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT, errno.ECONNRESET):
            print(f"No remote host on vsock://{HOST_CID}:{port} ({e.strerror})")
        else:
            print(f"Unexpected error connecting vsock://{HOST_CID}:{port}: {e}")
        s.close()
        return 1

    # A short payload proves the connection is actually usable.
    msg = b"hello"
    s.sendall(msg)
    s.sendall(b"\n")

    s.close()
    print(f"A remote host is listening on vsock://{HOST_CID}:{port}")

    return 0
60 |
61 |
62 | if __name__ == "__main__":
63 | raise SystemExit(main())
64 |
--------------------------------------------------------------------------------
/systemd/crc-systemd-common.sh:
--------------------------------------------------------------------------------
# Shared helper sourced by the crc systemd scripts: poll `oc get` until the
# given resource exists, or terminate the calling script on timeout.
# $1 is the resource to check
# $2 is an optional maximum retry count; default 20
# $3 is an optional per-attempt wait in seconds; default 5
function wait_for_resource_or_die() {
    local resource=${1:-}
    local max_retry=${2:-20}
    local wait_sec=${3:-5}

    local xtrace_was_disabled=0
    # Check if xtrace is currently DISABLED. If so, set a flag.
    [[ $- == *x* ]] || xtrace_was_disabled=1
    set +x # disable xtrace to reduce the verbosity of this function

    if [[ -z "$resource" ]]; then
        echo "ERROR: expected a K8s resource as first parameter ..."
        echo "ERROR: wait_for_resource_or_die RESOURCE [max_retry=20] [wait_sec=5]"
        exit 1 # this is wait_for_resource_or_die, so die ...
    fi

    local start_time
    start_time=$(date +%s)

    # Loop from 1 up to max_retry
    for (( retry=1; retry<=max_retry; retry++ )); do
        # Try the command. If it succeeds, exit the loop.
        # NOTE(review): $resource is unquoted, so a multi-word argument would
        # be split into several oc arguments -- confirm intent before quoting.
        if oc get $resource > /dev/null 2>&1; then
            local end_time
            end_time=$(date +%s)

            local duration=$((end_time - start_time))
            echo "Resource '$resource' found after $retry tries ($duration seconds)."

            # Restore xtrace only if the caller had it enabled on entry.
            if (( ! xtrace_was_disabled )); then
                set -x # reenable xtrace
            fi

            return 0
        fi

        # If it's the last attempt, log a failure message before exiting
        if (( retry == max_retry )); then
            echo "Error: Timed out waiting for resource '$resource' after ${max_retry} attempts x ${wait_sec} seconds." >&2
            exit 1 # this is wait_for_resource_or_die, so die ...
        fi

        # Wait before the next attempt
        echo "Attempt ${retry}/${max_retry} didn't succeed."
        echo "Waiting $wait_sec seconds for '$resource'."
        sleep "$wait_sec"
    done

    # unreachable
}
53 |
--------------------------------------------------------------------------------
/systemd/crc-user-mode-networking.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Report whether the VM runs with user-mode networking.  With target 'user'
# (the default) exit 0 means user mode; with target 'system' exit 0 means
# system mode.  77 signals a configuration error.

set -o pipefail
set -o errexit
set -o nounset
set -o errtrace

source /etc/sysconfig/crc-env || echo "WARNING: crc-env not found"

EXIT_ERROR=77

target="${1:-}"
# Pick the exit-code mapping according to the requested target mode.
case "$target" in
    user|"")
        # searching for user mode, return 0 if user
        EXIT_USER_MODE=0
        EXIT_NOT_USER_MODE=1
        ;;
    system)
        # searching for system mode, return 0 if system
        EXIT_NOT_USER_MODE=0
        EXIT_USER_MODE=1
        ;;
    *)
        echo "ERROR: invalid target '$target'. Should be 'user' (default) or 'system'. Got '$target'." >&2
        exit "$EXIT_ERROR"
        ;;
esac


# Self-sufficient bundles never use user-mode networking.
if /usr/local/bin/crc-self-sufficient-env.sh; then
    echo "Running a self-sufficient bundle. Not user-mode networking."
    if [[ "${CRC_NETWORK_MODE_USER:-}" ]]; then
        echo "WARNING: Ignoring CRC_NETWORK_MODE_USER='$CRC_NETWORK_MODE_USER' in the self-sufficient bundle."
    fi

    exit "$EXIT_NOT_USER_MODE"
fi

# no value --> error
if [[ -z "${CRC_NETWORK_MODE_USER:-}" ]]; then
    echo "ERROR: CRC_NETWORK_MODE_USER not set. Assuming user networking." >&2
    exit "$EXIT_USER_MODE"
fi

# Map the configured value: 0 -> system mode, 1 -> user mode,
# anything else -> configuration error.
case "$CRC_NETWORK_MODE_USER" in
    0)
        echo "network-mode 'system' detected"
        exit "$EXIT_NOT_USER_MODE"
        ;;
    1)
        echo "network-mode 'user' detected"
        exit "$EXIT_USER_MODE"
        ;;
    *)
        echo "ERROR: unknown network mode: CRC_NETWORK_MODE_USER=${CRC_NETWORK_MODE_USER} (expected 0 or 1)" >&2
        exit "$EXIT_ERROR"
        ;;
esac

# anything else --> error (can't be reached)
echo "ERROR: unknown network mode: CRC_NETWORK_MODE_USER=$CRC_NETWORK_MODE_USER." >&2
echo "Assuming user networking." >&2
echo "SHOULD NOT BE REACHED." >&2

exit "$EXIT_ERROR"
66 |
--------------------------------------------------------------------------------
/cluster-kube-controller-manager-operator.patch:
--------------------------------------------------------------------------------
1 | diff --git a/bindata/assets/config/defaultconfig.yaml b/bindata/assets/config/defaultconfig.yaml
2 | index d22e9f9e..a9076801 100644
3 | --- a/bindata/assets/config/defaultconfig.yaml
4 | +++ b/bindata/assets/config/defaultconfig.yaml
5 | @@ -27,7 +27,7 @@ extendedArguments:
6 | - "-bootstrapsigner"
7 | - "-tokencleaner"
8 | cluster-signing-duration:
9 | - - "720h"
10 | + - "8760h"
11 | secure-port:
12 | - "10257"
13 | cert-dir:
14 | diff --git a/pkg/operator/certrotationcontroller/certrotationcontroller.go b/pkg/operator/certrotationcontroller/certrotationcontroller.go
15 | index 0d328e24..01941a28 100644
16 | --- a/pkg/operator/certrotationcontroller/certrotationcontroller.go
17 | +++ b/pkg/operator/certrotationcontroller/certrotationcontroller.go
18 | @@ -85,8 +85,8 @@ func newCertRotationController(
19 | Namespace: operatorclient.OperatorNamespace,
20 | // this is not a typo, this is the signer of the signer
21 | Name: "csr-signer-signer",
22 | - Validity: 60 * rotationDay,
23 | - Refresh: 30 * rotationDay,
24 | + Validity: 2 * 365 * rotationDay,
25 | + Refresh: 365 * rotationDay,
26 | RefreshOnlyWhenExpired: refreshOnlyWhenExpired,
27 | Informer: kubeInformersForNamespaces.InformersFor(operatorclient.OperatorNamespace).Core().V1().Secrets(),
28 | Lister: kubeInformersForNamespaces.InformersFor(operatorclient.OperatorNamespace).Core().V1().Secrets().Lister(),
29 | @@ -104,8 +104,8 @@ func newCertRotationController(
30 | certrotation.RotatedSelfSignedCertKeySecret{
31 | Namespace: operatorclient.OperatorNamespace,
32 | Name: "csr-signer",
33 | - Validity: 30 * rotationDay,
34 | - Refresh: 15 * rotationDay,
35 | + Validity: 2 * 365 * rotationDay,
36 | + Refresh: 365 * rotationDay,
37 | RefreshOnlyWhenExpired: refreshOnlyWhenExpired,
38 | CertCreator: &certrotation.SignerRotation{
39 | SignerName: "kube-csr-signer",
40 |
41 |
--------------------------------------------------------------------------------
/systemd/ocp-userpasswords.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Create/refresh the htpass-secret consumed by the OpenShift OAuth htpasswd
# identity provider, using the kubeadmin and developer passwords read from
# the two files given as arguments.

set -o pipefail
set -o errexit
set -o nounset
set -o errtrace
set -x

source /usr/local/bin/crc-systemd-common.sh

CRC_PASS_KUBEADMIN_PATH=${1:-}
CRC_PASS_DEVELOPER_PATH=${2:-}

if [[ -z "$CRC_PASS_KUBEADMIN_PATH" || -z "$CRC_PASS_DEVELOPER_PATH" ]]; then
    echo "ERROR: expected to receive the kubeadmin password file as 1st arg and the dev password file as 2nd arg. Got '$CRC_PASS_KUBEADMIN_PATH' and '$CRC_PASS_DEVELOPER_PATH'"
    exit 1
fi

CRC_HTPASSWD_IMAGE=registry.access.redhat.com/ubi10/httpd-24

# Produce an htpasswd entry for a username/password pair.  The httpd image
# supplies the htpasswd binary so it is not needed on the host.
function gen_htpasswd() {
    if [[ -z "${1:-}" || -z "${2:-}" ]]; then
        echo "gen_htpasswd needs two arguments: username password" >&2
        return 1
    fi

    # --log-driver=none avoids that the journal captures the stdout
    # logs of podman and leaks the passwords in the journal ...
    podman run --log-driver=none --rm "$CRC_HTPASSWD_IMAGE" htpasswd -nb "$1" "$2"
}

# enforced by systemd
[[ -r "$CRC_PASS_DEVELOPER_PATH" ]] || {
    echo "ERROR: CRC developer password does not exist ($CRC_PASS_DEVELOPER_PATH)"
    exit 1
}

# enforced by systemd
[[ -r "$CRC_PASS_KUBEADMIN_PATH" ]] || {
    echo "ERROR: CRC kubeadmin password does not exist ($CRC_PASS_KUBEADMIN_PATH)"
    exit 1
}

echo "Pulling $CRC_HTPASSWD_IMAGE ..."
podman pull --quiet "$CRC_HTPASSWD_IMAGE"

wait_for_resource_or_die secret

echo "Generating the kubeadmin and developer passwords ..."
set +x # disable the logging to avoid leaking the passwords

dev_pass=$(gen_htpasswd developer "$(cat "$CRC_PASS_DEVELOPER_PATH")")
adm_pass=$(gen_htpasswd kubeadmin "$(cat "$CRC_PASS_KUBEADMIN_PATH")")

echo "creating the password secret ..."
# use bash "<()" to use a temporary fd file (safer to handle secrets)
oc create secret generic htpass-secret \
    --from-file=htpasswd=<(printf '%s\n%s\n' "$dev_pass" "$adm_pass") \
    -n openshift-config \
    --dry-run=client -oyaml \
    | oc apply -f-

echo "All done"

exit 0
66 |
--------------------------------------------------------------------------------
/pki/2022-RH-IT-Root-CA.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIGcjCCBFqgAwIBAgIFICIEEFwwDQYJKoZIhvcNAQEMBQAwgaMxCzAJBgNVBAYT
3 | AlVTMRcwFQYDVQQIDA5Ob3J0aCBDYXJvbGluYTEQMA4GA1UEBwwHUmFsZWlnaDEW
4 | MBQGA1UECgwNUmVkIEhhdCwgSW5jLjETMBEGA1UECwwKUmVkIEhhdCBJVDEZMBcG
5 | A1UEAwwQSW50ZXJuYWwgUm9vdCBDQTEhMB8GCSqGSIb3DQEJARYSaW5mb3NlY0By
6 | ZWRoYXQuY29tMCAXDTIzMDQwNTE4MzM0NFoYDzIwNTIwNDAyMTgzMzQ0WjCBozEL
7 | MAkGA1UEBhMCVVMxFzAVBgNVBAgMDk5vcnRoIENhcm9saW5hMRAwDgYDVQQHDAdS
8 | YWxlaWdoMRYwFAYDVQQKDA1SZWQgSGF0LCBJbmMuMRMwEQYDVQQLDApSZWQgSGF0
9 | IElUMRkwFwYDVQQDDBBJbnRlcm5hbCBSb290IENBMSEwHwYJKoZIhvcNAQkBFhJp
10 | bmZvc2VjQHJlZGhhdC5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
11 | AQCxuloEVglzWXZ9FFFUOSVdpRIB2jW5YBpwgMem2fPZeWIIvrVQ6PL9XNenDOXu
12 | BHbShD/PApxi/ujSZyOIjLsNh7WDO+0NqpkfTyB9wUYAhx3GTIGY75RSoyZy1yKb
13 | ZDTKv+rSfui9IlstAMz6L3OQLZES9zAYK8ICiDUwTeNZ7quA6qf0Kam2LyuBc/bl
14 | BI7WFLOGGWY135P1OUXJgnJUsMhnYMTgvZQyJ2P7eLQpiR8TOr5ZI6CYapiyG64L
15 | nkr/rsALjSxoUo09Yai1CVO66VFJ/XgMNt3mzQtLDMPXiKUuwsBsgvo4QvLjkXYI
16 | ii+/YQyQaypsKctG8mefKkTT1kRDKj4LNdTRRgd5tco+b4+O/4upt8mIsx1+tbdM
17 | LNGEz3Jqd0sj8Fl4Rzus+W+enzXmMfZH86X6bU5tMvueuFd5LV+M9XzliscaEQMK
18 | EQ7CC72ldrOK2K12Gjb7bu8dKq+aSlNuWK+Gz1NvbwYpaCBYp0JoryvHEq5jrCLP
19 | lTkuJQ3HaaAf+4LaBm8no9xK2VbDf6l/7Htb5I5LnAAZi0/5TzH07NhHoIeMSmTE
20 | Ea07i/i5lbhM2qbx6pfLukg24HLCKTdi4Fo6/JqPWH6/3eI55NsoWSmoDdTiLg4v
21 | 1G/rgUVr2N6F36GTYMGqiITvvd4Qm3i9XOTQvsx8RJx4JQIDAQABo4GoMIGlMB0G
22 | A1UdDgQWBBS1+o3lCnihCZXbTSGGlWpZT0nIizAfBgNVHSMEGDAWgBS1+o3lCnih
23 | CZXbTSGGlWpZT0nIizAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAR
24 | BglghkgBhvhCAQEEBAMCAQYwLwYDVR0fBCgwJjAkoCKgIIYeaHR0cDovL29jc3Au
25 | cmVkaGF0LmNvbS9jcmwucGVtMA0GCSqGSIb3DQEBDAUAA4ICAQCDLaGTS0g2HmMS
26 | g0i6Z0RVDC7sSnWFgEk2ZO1WUQj5WkFVS7gWxed/mXCzeL2EV1Pd22YKHM1eU1vo
27 | 6b03cbNRXlRGGFksmQeM9h2sVjbP0hRZxqqfI+UW223N8E+qK3wSa8m6nhOfIJie
28 | DD9s8CdL1VT6l4qq2gR8mVBW7EZ+Ux5u+AMXpN4WPEkcLer2djbfhXoPsJ4r5CcX
29 | vh7W5rCZbo+0oBI5hrTlG4Tjhv1atqLhMmssjn8NbRrnhrbGF7w8NxFts69GkKDB
30 | UIXr1pWZSAuRELlIxmvh5ZSX5YTbFmDuTvmNx8RPPy6OY4W1v1BUKp0HyJTi07s2
31 | 8SN+n9htHPHX9XBZctQmOSFLiqhi15LIqI54tR2tSgwH3Z5moh4sy6MuApXstsu4
32 | qtkII2KZk3SottI8MOS6zqKrU7jPou6ZE0fznNiu23Q3Ksuuj6mBkLVw3bQe68Vm
33 | NUTDac1oVzc8d5NMbx5kVb4Lahq+SATVFC8NK9G/Pk1AiwO8WhKffySsLeO5nMib
34 | 4BOVq0qFoAi8YCFuJOl9FlH1dPW/TnqlTQMQNhXpzGjU3HV3lr/Mk+ghNgIYcLcz
35 | pEBsiGwKOVW4nYKIqPLn/36Ao/kfXeAdJhaAZq1SkTbeqNiwHQm3KNHzNObmjD0f
36 | 56vmq8fwQYIcazjrygWiaOnoep/SMw==
37 | -----END CERTIFICATE-----
38 |
--------------------------------------------------------------------------------
/systemd/ocp-wait-apiservices-available.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Wait until every APIService object in the cluster reports the
# 'Available=True' condition, retrying for up to MAX_RETRY * WAIT_SEC seconds.
# Exits 0 on success, 1 on timeout.

set -o pipefail
set -o errexit
set -o nounset
set -o errtrace

echo "➡️ Waiting for all APIServices to become available..."

SECONDS=0   # bash built-in: counts wall-clock seconds since assignment
MAX_RETRY=60
WAIT_SEC=5

for retry in $(seq 1 "$MAX_RETRY"); do
    # Fetch the apiservices once per iteration; the same snapshot is used both
    # for counting and for the detailed failure listing below, so the two can
    # never disagree (previously the listing re-queried the API server).
    APISERVICE_DATA=$(oc get apiservices -o json 2>/dev/null || true)
    if [[ -z "$APISERVICE_DATA" ]]; then
        UNAVAILABLE_COUNT=999
        echo "⚠️ Couldn't get the list of apiservices ..."
    else
        # Count apiservices that do NOT have an 'Available' condition with
        # status "True" (covers both Available=False and missing conditions).
        UNAVAILABLE_COUNT=$(jq -r '
            [ .items[]
              | select(((.status.conditions // [])
                        | any(.type=="Available" and .status=="True")) | not)
            ] | length
          ' <<<"$APISERVICE_DATA")
        UNAVAILABLE_COUNT=${UNAVAILABLE_COUNT:-0}
    fi

    if [ "$UNAVAILABLE_COUNT" -eq 0 ]; then
        echo "✅ All APIServices are now available after $SECONDS seconds."
        break
    fi

    echo
    echo "⏳ Still waiting for $UNAVAILABLE_COUNT APIService(s) to become available. Retrying in $WAIT_SEC seconds."
    echo "--------------------------------------------------------------------------------"
    echo "Unavailable services and their messages:"

    # Reuse the snapshot fetched above instead of querying the API server a
    # second time. The '-r' flag outputs raw strings instead of JSON-quoted
    # strings.
    if [[ -z "$APISERVICE_DATA" ]] || ! jq -r '
        .items[] |
        . as $item |
        (
          $item.status.conditions[]? |
          select(.type == "Available" and .status == "False")
        ) |
        " - \($item.metadata.name): \(.reason) - \(.message)"
      ' <<<"$APISERVICE_DATA"
    then
        echo "⚠️ Unable to list unavailable APIServices details (will retry)" >&2
    fi

    echo "--------------------------------------------------------------------------------"

    # If it's the last attempt, log a failure message before exiting
    if (( retry == MAX_RETRY )); then
        echo "ERROR: Timed out waiting for the api-services to get ready, after $MAX_RETRY attempts x $WAIT_SEC seconds = $SECONDS seconds." >&2
        exit 1
    fi

    sleep "$WAIT_SEC"
done

echo "🎉 Done."

exit 0
70 |
--------------------------------------------------------------------------------
/docs/self-sufficient-bundle.md:
--------------------------------------------------------------------------------
1 | # Self sufficient bundles
2 |
3 | Since release 4.19.0 of OpenShift Local, the bundles generated by `snc` contain additional systemd services to provision the cluster and remove the need for
4 | an outside entity to provision the cluster, although an outside process needs to create some files on pre-defined locations inside the VM for the systemd
5 | services to do their work.
6 |
7 | ## The following table lists the systemd services and the location of files they need to provision the cluster, users of SNC need to create those files
8 |
9 | | Systemd unit | Runs for (ocp, MicroShift, both) | Input files location | Marker env variables |
10 | | :----------------------------: | :------------------------------: | :----------------------------------: | :------------------: |
11 | | `crc-cluster-status.service` | both | none | none |
12 | | `crc-pullsecret.service` | both | /opt/crc/pull-secret | none |
13 | | `crc-dnsmasq.service` | both | none | none |
14 | | `crc-routes-controller.service`| both | none | none |
15 | | `ocp-cluster-ca.service` | ocp | /opt/crc/custom-ca.crt | none |
16 | | `ocp-clusterid.service` | ocp | none | none |
17 | | `ocp-custom-domain.service` | ocp | none | none |
18 | | `ocp-userpasswords.service` | ocp | /opt/crc/pass_{kubeadmin, developer} | none |
19 |
20 | In addition to the above services we have `ocp-cluster-ca.path`, `crc-pullsecret.path` and `ocp-userpasswords.path` that monitor the filesystem paths
21 | related to their `*.service` counterparts and start the corresponding service when the paths become available.
22 |
23 | > [!NOTE]
24 | > "Marker env variable" is set using an env file (/etc/sysconfig/crc.env), if the required env variable is not set then unit is skipped
25 | > some units are run only when CRC_SELF_SUFFICIENT=1 is set, these are only needed when using self-sufficient functionality.
26 |
27 | The systemd services are heavily based on the [`clustersetup.sh`](https://github.com/crc-org/crc-cloud/blob/main/pkg/bundle/setup/clustersetup.sh) script found in the `crc-cloud` project.
28 |
29 | ## Naming convention for the systemd unit files
30 |
31 | Systemd units that are needed for both 'OpenShift' and 'MicroShift' are named as `crc-*.service`, units that are needed only for 'OpenShift' are named
32 | as `ocp-*.service` and when we add units that are only needed for 'MicroShift' they should be named as `ucp-*.service`
33 |
34 |
--------------------------------------------------------------------------------
/microshift.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Build a MicroShift installer ISO and boot it in a libvirt VM so that a CRC
# "microshift" bundle can be created from the resulting disk image.
# Requires a subscription-manager-registered host, tools.sh and snc-library.sh
# in the working directory, and OPENSHIFT_PULL_SECRET_PATH pointing at a pull
# secret file (used by create_iso below).

set -exuo pipefail

export LC_ALL=C.UTF-8
export LANG=C.UTF-8

source tools.sh
source snc-library.sh

BUNDLE_TYPE="microshift"
INSTALL_DIR=crc-tmp-install-data
SNC_PRODUCT_NAME=${SNC_PRODUCT_NAME:-crc}
SNC_CLUSTER_MEMORY=${SNC_CLUSTER_MEMORY:-2048}   # presumably MiB — TODO confirm against create_vm
SNC_CLUSTER_CPUS=${SNC_CLUSTER_CPUS:-2}
CRC_VM_DISK_SIZE=${CRC_VM_DISK_SIZE:-31}         # presumably GiB — TODO confirm against create_vm
BASE_DOMAIN=${CRC_BASE_DOMAIN:-testing}
# NOTE(review): ARCH is not set in this script; assumed to come from tools.sh
MIRROR=${MIRROR:-https://mirror.openshift.com/pub/openshift-v4/$ARCH/clients/ocp}
MICROSHIFT_VERSION=${MICROSHIFT_VERSION:-4.20}
MIRROR_REPO=${MIRROR_REPO:-https://mirror.openshift.com/pub/openshift-v4/$ARCH/microshift/ocp-dev-preview/latest-${MICROSHIFT_VERSION}/el9/os}

echo "Check if system is registered"
# Check the subscription status and register if necessary
if ! sudo subscription-manager status >& /dev/null ; then
    echo "machine must be registered using subscription-manager"
    exit 1
fi

run_preflight_checks ${BUNDLE_TYPE}
rm -fr ${INSTALL_DIR} && mkdir ${INSTALL_DIR}

# Drop any leftover libvirt resources from a previous run, then recreate them.
destroy_libvirt_resources microshift-installer.iso
create_libvirt_resources

# Generate a new ssh keypair for this cluster
# Create a 521bit ECDSA Key
rm id_ecdsa_crc* || true
ssh-keygen -t ecdsa -b 521 -N "" -f id_ecdsa_crc -C "core"
39 |
# Build a MicroShift bootc installer ISO via image-mode/microshift/build.sh.
# $1: build directory, handed to the build script through BUILDDIR.
# Honors MICROSHIFT_PRERELEASE: when set, the unreleased MIRROR_REPO is used.
function create_iso {
    local buildDir=$1
    local extra_args=""
    if [ -n "${MICROSHIFT_PRERELEASE-}" ]; then
        extra_args="-use-unreleased-mirror-repo ${MIRROR_REPO}"
    fi
    # Quote every expansion so paths/domains with spaces or glob characters
    # cannot be word-split. ${extra_args} stays intentionally unquoted: it
    # must expand to zero or two separate words.
    # shellcheck disable=SC2086
    BUILDDIR="${buildDir}" image-mode/microshift/build.sh -pull_secret_file "${OPENSHIFT_PULL_SECRET_PATH}" \
        -lvm_sysroot_size 15360 \
        -authorized_keys_file "$(realpath id_ecdsa_crc.pub)" \
        -microshift-version "${MICROSHIFT_VERSION}" \
        -hostname "api.${SNC_PRODUCT_NAME}.${BASE_DOMAIN}" \
        -base-domain "${SNC_PRODUCT_NAME}.${BASE_DOMAIN}" \
        ${extra_args}
}
54 |
# Scratch directory for the ISO build artifacts; removed once the ISO is copied.
microshift_pkg_dir=$(mktemp -p /tmp -d tmp-rpmXXX)

create_iso ${microshift_pkg_dir}
# -Z applies the default SELinux context for the destination path.
sudo cp -Z ${microshift_pkg_dir}/bootiso/install.iso /var/lib/libvirt/${SNC_PRODUCT_NAME}/microshift-installer.iso
# Ask the freshly-built container image which exact microshift RPM version it carries.
OPENSHIFT_RELEASE_VERSION=$(sudo podman run --rm -it localhost/microshift:${MICROSHIFT_VERSION} /usr/bin/rpm -q --qf '%{VERSION}' microshift)
# Change 4.x.0~ec0 to 4.x.0-ec0
# https://docs.fedoraproject.org/en-US/packaging-guidelines/Versioning/#_complex_versioning
OPENSHIFT_RELEASE_VERSION=$(echo ${OPENSHIFT_RELEASE_VERSION} | tr '~' '-')
sudo rm -fr ${microshift_pkg_dir}

# Download the oc binary for specific OS environment
OC=./openshift-clients/linux/oc
download_oc

create_json_description ${BUNDLE_TYPE}

# For microshift we create an empty kubeconfig file
# to have it as part of bundle because we don't run microshift
# service as part of bundle creation which creates the kubeconfig
# file.
mkdir -p ${INSTALL_DIR}/auth
touch ${INSTALL_DIR}/auth/kubeconfig

# Start the VM with generated ISO
create_vm microshift-installer.iso
80 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Single node cluster (snc) scripts for OpenShift 4
2 |
3 | **NOTE:** Please select the respective branch to create a bundle for a specific OpenShift release (ie. to create a 4.15.x OpenShift bundle, choose the release-4.15 branch)
4 |
5 | ## How to use?
6 | - Clone this repo `git clone https://github.com/code-ready/snc.git`
7 | - `cd snc`
8 | - `./snc.sh`
9 |
10 | ## How to create disk image?
11 | - Once your `snc.sh` script runs successfully.
12 | - `./createdisk.sh crc-tmp-install-data`
13 |
14 | ## Monitoring
15 |
16 | The installation is a [long process](https://github.com/openshift/installer/blob/master/docs/user/overview.md#cluster-installation-process). It can take up to 45 mins.
17 | You can monitor the progress of the installation with `kubectl`.
18 |
19 | ```
20 | $ export KUBECONFIG=/crc-tmp-install-data/auth/kubeconfig
21 | $ kubectl get pods --all-namespaces
22 | ```
23 |
24 | ## Building SNC for OKD 4
25 | - Before running `./snc.sh`, you need to create a pull secret file, and set a couple of environment variables to override the default behavior.
26 | - Select the OKD 4 release that you want to build from: [https://origin-release.apps.ci.l2s4.p1.openshiftapps.com](https://origin-release.apps.ci.l2s4.p1.openshiftapps.com)
27 | - For example, to build release: 4.5.0-0.okd-2020-08-12-020541
28 |
29 | ```bash
30 | # Create a pull secret file
31 |
32 | cat << EOF > /tmp/pull_secret.json
33 | {"auths":{"fake":{"auth": "Zm9vOmJhcgo="}}}
34 | EOF
35 |
36 | # Set environment for OKD build
37 | export OKD_VERSION=4.5.0-0.okd-2020-08-12-020541
38 | export OPENSHIFT_PULL_SECRET_PATH="/tmp/pull_secret.json"
39 |
40 | # Optionally cache the iso somewhere
41 | export ISO_CACHE_DIR=$HOME/.local/share/libvirt/images
42 |
43 | # Build the Single Node cluster
44 | ./snc.sh
45 | ```
46 |
47 | - When the build is complete, create the disk image as described below.
48 |
49 | ```
50 | export BUNDLED_PULL_SECRET_PATH="/tmp/pull_secret.json"
51 | ./createdisk.sh crc-tmp-install-data
52 | ```
53 |
54 | ## Creating container image for bundles
55 |
56 | After running snc.sh/createdisk.sh, the generated bundles can be uploaded to a container registry using this command:
57 |
58 | ```
59 | ./gen-bundle-image.sh
60 | ```
61 |
62 | Note: a GPG key is needed to sign the bundles before they are wrapped in a container image.
63 |
64 | ## Troubleshooting
65 |
66 | OpenShift installer will create 1 VM. It is sometimes useful to ssh inside the VM.
67 | Add the following lines in your `~/.ssh/config` file. You can then do `ssh master`.
68 |
69 | ```
70 | Host master
71 | Hostname 192.168.126.11
72 | User core
73 | IdentityFile /id_ecdsa_crc
74 | StrictHostKeyChecking no
75 | UserKnownHostsFile /dev/null
76 | ```
77 |
78 | ## Environment Variables
79 |
80 | The following environment variables can be used to change the default values of bundle generation.
81 |
82 | SNC_GENERATE_MACOS_BUNDLE : if set to 0, bundle generation for MacOS is disabled, any other value will enable it.
83 | SNC_GENERATE_WINDOWS_BUNDLE : if set to 0, bundle generation for Windows is disabled, any other value will enable it.
84 | SNC_GENERATE_LINUX_BUNDLE : if set to 0, bundle generation for Linux is disabled, any other value will enable it.
85 |
86 | Please note the SNC project is “as-is” on this GitHub repository. At this time, it is not an officially supported Red Hat solution.
87 |
--------------------------------------------------------------------------------
/image-mode/microshift/config/config.toml.template:
--------------------------------------------------------------------------------
1 | [customizations.installer.kickstart]
2 | contents = """
3 | lang en_US.UTF-8
4 | keyboard us
5 | timezone UTC
6 | text
7 | reboot
8 |
9 | # Configure network to use DHCP and activate on boot
10 | network --bootproto=dhcp --device=link --activate --onboot=on
11 |
12 | # Partition disk with a 1MB BIOS boot, 200M EFI, 800M boot XFS partition and
13 | # an LVM volume containing a 10GB+ system root. The remainder of the volume
14 | # will be used by the CSI driver for storing data
15 | #
16 | # For example, a 20GB disk would be partitioned in the following way:
17 | #
18 | # NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
19 | # sda 8:0 0 20G 0 disk
20 | # ├─sda1 8:1 0 1M 0 part
21 | # ├─sda2 8:2 0 200M 0 part /boot/efi
22 | # ├─sda3 8:3 0 800M 0 part /boot
23 | # └─sda4 8:4 0 19G 0 part
24 | # └─rhel-root 253:0 0 10G 0 lvm /sysroot
25 | #
26 | zerombr
27 | clearpart --all --disklabel gpt
28 | part biosboot --fstype=biosboot --size=1
29 | part /boot/efi --fstype=efi --size=200
30 | part /boot --fstype=xfs --asprimary --size=800
31 | # Uncomment this line to add a SWAP partition of the recommended size
32 | #part swap --fstype=swap --recommended
33 | part pv.01 --grow
34 | volgroup rhel pv.01
35 | logvol / --vgname=rhel --fstype=xfs --size=REPLACE_LVM_SYSROOT_SIZE --name=root
36 |
37 | # Lock root user account
38 | rootpw --lock
39 |
40 |
41 | %post --log=/var/log/anaconda/post-install.log --erroronfail
42 |
43 | # The pull secret is mandatory for MicroShift builds on top of OpenShift, but not OKD
44 | # The /etc/crio/crio.conf.d/microshift.conf references the /etc/crio/openshift-pull-secret file
45 | cat > /etc/crio/openshift-pull-secret < /etc/microshift/config.d/00-microshift-dns.yaml < /etc/sudoers.d/microshift
59 |
60 | # Add authorized ssh keys
61 | mkdir -m 700 /home/core/.ssh
62 | cat > /home/core/.ssh/authorized_keys < /etc/hostname
70 | chmod 644 /etc/hostname
71 |
72 | # Support to boot for UEFI and legacy mode
73 | grub2-install --target=i386-pc /dev/vda
74 |
75 | # Make podman rootless available
76 | mkdir -p /home/core/.config/systemd/user/default.target.wants
77 | ln -s /usr/lib/systemd/user/podman.socket /home/core/.config/systemd/user/default.target.wants/podman.socket
78 |
79 | mkdir -p /home/core/.config/containers
80 | tee /home/core/.config/containers/containers.conf <&2
15 | exit 1
16 | fi
17 |
18 | # enforced by systemd
19 | if [[ ! -r "$CRC_EXTERNAL_IP_FILE_PATH" ]]; then
20 | echo "ERROR: CRC external ip file not found ($CRC_EXTERNAL_IP_FILE_PATH)" >&2
21 | exit 1
22 | fi
23 |
24 | EIP=$(tr -d '\r\n' < "$CRC_EXTERNAL_IP_FILE_PATH")
25 |
26 | if [[ -z "$EIP" ]]; then
27 | echo "ERROR: External IP file is empty: $CRC_EXTERNAL_IP_FILE_PATH" >&2
28 | exit 1
29 | fi
30 |
31 | # Basic IPv4 sanity check; adjust if IPv6 is expected
32 | if [[ ! "$EIP" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
33 | echo "ERROR: Invalid IPv4 address read from $CRC_EXTERNAL_IP_FILE_PATH: '$EIP'" >&2
34 | exit 1
35 | fi
36 |
37 | wait_for_resource_or_die secret
38 |
39 | TMP_KEY_FILE=$(mktemp /tmp/nip.key.XXXXX)
40 | TMP_CRT_FILE=$(mktemp /tmp/nip.crt.XXXXX)
41 |
42 | cleanup() {
43 | rm -f "$TMP_KEY_FILE" "$TMP_CRT_FILE"
44 | echo "Temp files cleanup complete."
45 | }
46 |
47 | # Cleanup happens automatically via trap on error or at script end
48 | trap cleanup ERR EXIT
49 |
50 | # create cert and add as secret
51 | openssl req -newkey rsa:2048 -new \
52 | -nodes -x509 -days 3650 \
53 | -keyout "$TMP_KEY_FILE" -out "$TMP_CRT_FILE" \
54 | -subj "/CN=$EIP.nip.io" \
55 | -addext "subjectAltName=DNS:apps.$EIP.nip.io,DNS:*.apps.$EIP.nip.io,DNS:api.$EIP.nip.io"
56 |
57 | oc delete secret nip-secret -n openshift-config --ignore-not-found
58 | oc create secret tls nip-secret \
59 | --cert="$TMP_CRT_FILE" \
60 | --key="$TMP_KEY_FILE" \
61 | -n openshift-config
62 |
63 | # patch ingress
64 | wait_for_resource_or_die ingresses.config.openshift.io
65 | jq -n --arg eip "$EIP" '
66 | {
67 | "spec": {
68 | "appsDomain": "apps.\($eip).nip.io",
69 | "componentRoutes": [
70 | {
71 | "hostname": "console-openshift-console.apps.\($eip).nip.io",
72 | "name": "console",
73 | "namespace": "openshift-console",
74 | "servingCertKeyPairSecret": {
75 | "name": "nip-secret"
76 | }
77 | },
78 | {
79 | "hostname": "oauth-openshift.apps.\($eip).nip.io",
80 | "name": "oauth-openshift",
81 | "namespace": "openshift-authentication",
82 | "servingCertKeyPairSecret": {
83 | "name": "nip-secret"
84 | }
85 | }
86 | ]
87 | }
88 | }' | oc patch ingresses.config.openshift.io cluster --type=merge --patch-file=/dev/stdin
89 |
90 | # patch API server to use new CA secret
91 | wait_for_resource_or_die apiserver.config.openshift.io
92 | jq -n --arg eip "$EIP" '
93 | {
94 | "spec": {
95 | "servingCerts": {
96 | "namedCertificates": [
97 | {
98 | "names": [
99 | "api.\($eip).nip.io"
100 | ],
101 | "servingCertificate": {
102 | "name": "nip-secret"
103 | }
104 | }
105 | ]
106 | }
107 | }
108 | }' | oc patch apiserver cluster --type=merge --patch-file=/dev/stdin
109 |
110 | # patch image registry route
111 | wait_for_resource_or_die route.route.openshift.io
112 | jq -n --arg eip "$EIP" '
113 | {
114 | "spec": {
115 | "host": "default-route-openshift-image-registry.\($eip).nip.io"
116 | }
117 | }' | oc patch route default-route -n openshift-image-registry --type=merge --patch-file=/dev/stdin
118 |
119 | echo "All done"
120 |
121 | exit 0
122 |
--------------------------------------------------------------------------------
/crc-bundle-info.json.sample:
--------------------------------------------------------------------------------
1 | {
2 | # Version of the bundle, used to denote format changes
3 | # Major is only increased changes incompatible with previous versions
4 | # Minor is increased for backwards-compatible changes
5 | #
6 | # Version history:
7 | # - 1.1: addition of 'name'
8 | # - 1.2: addition of 'storage.fileList'
9 | # - 1.3: remove of 'clusterInfo.kubeadminPasswordFile'
10 | # - 1.4: addition of 'arch'
11 | # - 1.5: remove of 'node[0].kernelCmdLine', 'node[0].initramfs', 'node[0].kernel'
12 | "version": "1.5",
13 | # Type of this bundle content
14 | # Currently the only valid type is 'snc' (which stands for 'single-node-cluster')
15 | "type": "snc",
16 | # Name of the bundle
17 | "name": "crc_libvirt_4.6.1",
18 | # Bundle arch (This follows https://gist.github.com/lizkes/975ab2d1b5f9d5fdee5d3fa665bcfde6 with amd64/arm64 being used at the moment)
19 | "arch": "amd64",
20 | "buildInfo": {
21 | # Time this bundle was built
22 | "buildTime": "2019-04-23T14:55:32+00:00",
23 | # Output of 'openshift-install version' from the installer used to generate the bundle
24 | "openshiftInstallerVersion": "./openshift-install v0.16.1\nbuilt from commit e3fceacc975953f56cb09931e6be015a36eb6075",
25 | # Output of 'git describe' or 'git rev-parse' of the 'snc' script
26 | # repository used when generating the bundle
27 | "sncVersion": "git9662"
28 | },
29 | "clusterInfo": {
30 | # Version of OpenShift installed in the virtual machine
31 | "openshiftVersion": "4.1.11",
32 | # Name of the openshift cluster stored in the bundle
33 | "clusterName": "crc",
34 | # Base domain name used for the openshift cluster
35 | "baseDomain": "testing",
36 | # Subdomain where the apps will go
37 | "appsDomain": "apps-crc.testing",
38 | # Name of a file containing an SSH private key which can be used to connect to
39 | # the cluster nodes
40 | "sshPrivateKeyFile": "id_ecdsa_crc",
41 | # Name of the kubeconfig file stored in the bundle
42 | "kubeConfig": "kubeconfig",
43 | # pull secret that can be used to fetch OpenShift container images (optional)
44 | # "openshiftPullSecret": "default-pull-secret"
45 | },
46 | "nodes": [
47 | {
48 | # Type of the node, can be 'master', 'worker' or both
49 | "kind": [
50 | "master",
51 | "worker"
52 | ],
53 | # Hostname of the node
54 | "hostname": "crc-88lpx-master-0",
55 | # Disk image used by the node, the 'storage' object will contain more
56 | # details about its format
57 | "diskImage": "crc.qcow2",
58 | # Internal IP for which etcd certs are valid
59 | "internalIP": "192.168.126.11"
60 | }
61 | ],
62 | "storage": {
63 | # List of virtual machine disk images in the bundle
64 | "diskImages": [
65 | {
66 | # Name of the disk image file
67 | "name": "crc.qcow2",
68 | # Format of the disk image, valid formats are 'qcow2', 'vmdk', 'vhdx'
69 | "format": "qcow2",
70 | "size": "9129426944",
71 | "sha256sum": "49766122a0834d62c1a24fb4e0de30cd7a39b8112083aa5e01fc26f16c15aed3"
72 | }
73 | ],
74 | # Information about the other files present in the bundle
75 | # In version 1.2, this only lists the files which are not mentioned
76 | # anywhere else in this file
77 | "fileList": [
78 | {
79 | # Name of the file
80 | "name": "oc",
81 | # What kind of file this is, valid types are 'oc-executable'
82 | "type": "oc-executable",
83 | "size": "72728632",
84 | "sha256sum": "983f0883a6dffd601afa663d10161bfd8033fd6d45cf587a9cb22e9a681d6047"
85 | }
86 | ]
87 | },
88 | "driverInfo": {
89 | # Name of driver the bundle supports, valid drivers are 'libvirt', 'hyperkit', 'virtualbox', 'hyperv'
90 | "name": "libvirt"
91 | }
92 | }
93 |
--------------------------------------------------------------------------------
/gen-bundle-image.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -exuo pipefail
4 |
5 | GPG_SECRET_KEY_PASSPHRASE_PATH=${GPG_SECRET_KEY_PASSPHRASE:-gpg_key_pass}
6 |
# Compute the bundle file names for a given bundle version and preset and
# publish them through global variables (vfkit_bundle, libvirt_bundle,
# hyperv_bundle, plus the *_arm64 variants for non-okd presets).
# $1: bundle version string (e.g. "4.19.0")
# $2: preset ('openshift', 'okd', 'microshift'); 'openshift' gets no preset
#     infix in the file names.
function set_bundle_variables {
    local version=$1
    local preset=$2

    local bundlePreset=""
    if [ "${preset}" != 'openshift' ]; then
        bundlePreset="_${preset}"
    fi

    # Bug fix: this previously tested the global ${PRESET} while every other
    # check in the function uses the 'preset' parameter — use the parameter
    # consistently so the function works regardless of the caller's globals.
    if [ "${preset}" != 'okd' ]; then
        vfkit_bundle_arm64=crc${bundlePreset}_vfkit_${version}_arm64.crcbundle
        libvirt_bundle_arm64=crc${bundlePreset}_libvirt_${version}_arm64.crcbundle
    fi

    vfkit_bundle=crc${bundlePreset}_vfkit_${version}_amd64.crcbundle
    libvirt_bundle=crc${bundlePreset}_libvirt_${version}_amd64.crcbundle
    hyperv_bundle=crc${bundlePreset}_hyperv_${version}_amd64.crcbundle
}
25 |
26 | function generate_image {
27 | local preset=$1
28 |
29 | if [ ${preset} != "okd" ]; then
30 | cat </dev/null; then
22 | echo "API Server Client CA already rotated..."
23 | exit 0
24 | fi
25 |
26 | echo "API Server Client CA not rotated. Doing it now ..."
27 |
28 | # generate CA
29 | CA_FILE_PATH="/tmp/custom-ca.crt"
30 | CA_KEY_FILE_PATH="/tmp/custom-ca.key"
31 | CLIENT_CA_FILE_PATH="/tmp/client-ca.crt"
32 | CLIENT_CA_KEY_FILE_PATH="/tmp/client-ca.key"
33 | CLIENT_CSR_FILE_PATH="/tmp/client-csr.csr"
34 | CA_SUBJ="/OU=openshift/CN=admin-kubeconfig-signer-custom"
35 | CLIENT_SUBJ="/O=system:masters/CN=system:admin"
36 | VALIDITY=365
37 |
38 | cleanup() {
39 | rm -f "$CA_FILE_PATH" "$CA_KEY_FILE_PATH" \
40 | "$CLIENT_CA_FILE_PATH" "$CLIENT_CA_KEY_FILE_PATH" "$CLIENT_CSR_FILE_PATH"
41 | echo "Temp files cleanup complete."
42 | }
43 |
44 | # keep cleanup bound to EXIT; no need to clear ERR early
45 | trap cleanup ERR EXIT
46 |
47 | # generate the CA private key
48 | openssl genrsa -out "$CA_KEY_FILE_PATH" 4096
49 | # Create the CA certificate
50 | openssl req -x509 -new -nodes -key "$CA_KEY_FILE_PATH" -sha256 -days "$VALIDITY" -out "$CA_FILE_PATH" -subj "$CA_SUBJ"
51 | # create CSR
52 | openssl req -new -newkey rsa:4096 -nodes -keyout "$CLIENT_CA_KEY_FILE_PATH" -out "$CLIENT_CSR_FILE_PATH" -subj "$CLIENT_SUBJ"
53 | # sign the CSR with above CA
54 | openssl x509 -extfile <(printf "extendedKeyUsage = clientAuth") -req -in "$CLIENT_CSR_FILE_PATH" -CA "$CA_FILE_PATH" \
55 | -CAkey "$CA_KEY_FILE_PATH" -CAcreateserial -out "$CLIENT_CA_FILE_PATH" -days "$VALIDITY" -sha256
56 |
57 | oc create configmap client-ca-custom \
58 | -n openshift-config \
59 | --from-file=ca-bundle.crt="$CA_FILE_PATH" \
60 | --dry-run=client -o yaml \
61 | | oc apply -f -
62 |
63 | jq -n '
64 | {
65 | "spec": {
66 | "clientCA": {
67 | "name": "client-ca-custom"
68 | }
69 | }
70 | }' | oc patch apiserver cluster --type=merge --patch-file=/dev/stdin
71 |
72 | cluster_name=$(oc config view -o jsonpath='{.clusters[0].name}')
73 |
74 | if [[ -r "$CRC_EXTERNAL_IP_FILE_PATH" ]]; then
75 | external_ip=$(tr -d '\r\n' < "$CRC_EXTERNAL_IP_FILE_PATH")
76 | apiserver_url=https://api.${external_ip}.nip.io:6443
77 | echo "INFO: CRC external IP file found. Using apiserver_url='$apiserver_url'."
78 | else
79 | apiserver_url=$(oc config view -o jsonpath='{.clusters[0].cluster.server}')
80 | echo "INFO: CRC external IP file does not exist ($CRC_EXTERNAL_IP_FILE_PATH). Using apiserver_url='$apiserver_url'."
81 | fi
82 |
83 | export KUBECONFIG=/opt/crc/kubeconfig
84 | rm -rf "$KUBECONFIG"
85 |
86 | oc config set-credentials system:admin \
87 | --client-certificate="$CLIENT_CA_FILE_PATH" \
88 | --client-key="$CLIENT_CA_KEY_FILE_PATH" \
89 | --embed-certs
90 |
91 | oc config set-context system:admin --cluster="$cluster_name" --namespace=default --user=system:admin
92 | oc config set-cluster "$cluster_name" --server="$apiserver_url" --insecure-skip-tls-verify=true
93 | oc config use-context system:admin
94 |
95 | wait_for_resource_or_die clusteroperators 90 2
96 |
97 | oc create configmap admin-kubeconfig-client-ca \
98 | -n openshift-config \
99 | --from-file=ca-bundle.crt="$CA_FILE_PATH" \
100 | --dry-run=client -oyaml \
101 | | oc apply -f-
102 |
103 | # copy the new kubeconfig to /opt/kubeconfig
104 | rm -f /opt/kubeconfig
105 | cp /opt/crc/kubeconfig /opt/kubeconfig
106 | chmod 0666 /opt/kubeconfig # keep the file readable by everyone in the system, this is safe
107 |
108 | # cleanup will apply here
109 |
110 | echo "All done"
111 |
112 | exit 0
113 |
--------------------------------------------------------------------------------
/systemd/crc-aws-fetch-secrets.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Fetch the CRC provisioning secrets (pull secret, kubeadmin and developer
# passwords) from the AWS SSM Parameter Store and write them into /opt/crc,
# the input-file location documented for the crc-pullsecret /
# ocp-userpasswords systemd units.

set -o pipefail
set -o errexit
set -o nounset
set -o errtrace
set -x

# set -x is safe, the secrets are passed via stdin

AWS_CLI_IMG=docker.io/amazon/aws-cli
MIN_CHAR_COUNT=8 # minimum number of chars for the secret to be
                 # assumed valid

umask 0077 # 0600 file permission for secrets
install -d -m 0700 /opt/crc # ensure that the target directory exists

# The three SSM parameter keys to read, passed as positional arguments.
PULL_SECRETS_KEY=${1:-}
KUBEADM_PASS_KEY=${2:-}
DEVELOPER_PASS_KEY=${3:-}

if [[ -z "$PULL_SECRETS_KEY" || -z "$KUBEADM_PASS_KEY" || -z "$DEVELOPER_PASS_KEY" ]]; then
    echo "ERROR: expected to receive 3 parameters: PULL_SECRETS_KEY KUBEADM_PASS_KEY DEVELOPER_PASS_KEY"
    exit 1
fi

# Retry budget: probe every DELAY seconds for up to TOTAL_PERIOD seconds.
DELAY=5
TOTAL_PERIOD=$(( 3*60 ))
ATTEMPTS=$(( TOTAL_PERIOD / DELAY))
# Run "$@" repeatedly (up to ATTEMPTS times, sleeping DELAY seconds between
# tries) until it succeeds. Returns 0 on the first success, 1 when every
# attempt has failed.
function retry_compact() {
    local attempt
    for attempt in $(seq 1 "$ATTEMPTS"); do
        if "$@"; then
            echo "'$*' succeeded after $attempt attempts "
            return 0
        fi
        echo "'$*' still failing after $attempt/$ATTEMPTS attempts ..."
        sleep "$DELAY"
    done
    echo "'$*' didn't succeed after $attempt attempt ..."
    # Reaching this point means the command never succeeded.
    return 1
}
44 |
# Remove the region scratch file and any half-written *.tmp secret files.
cleanup() {
    rm -f /tmp/aws-region /opt/crc/pull-secret.tmp /opt/crc/pass_kubeadmin.tmp /opt/crc/pass_developer.tmp
    echo "Temp files cleanup complete."
}

# Cleanup happens automatically via trap on error or at script end
trap cleanup ERR EXIT

# Pre-pull the AWS CLI image; SECONDS is the bash built-in seconds counter.
SECONDS=0
podman pull --quiet "$AWS_CLI_IMG"
echo "Took $SECONDS seconds to pull the $AWS_CLI_IMG"
56 |
# Probe the EC2 IMDSv2 endpoint: obtain a session token, then resolve the
# instance's AWS region into /tmp/aws-region. Returns 1 while IMDS is not
# reachable yet, so the caller can poll it via retry_compact.
check_imds_available_and_get_region() {
    TOKEN=$(curl --connect-timeout 1 -X PUT "http://169.254.169.254/latest/api/token" \
                 -H "X-aws-ec2-metadata-token-ttl-seconds: 21600" -Ssf) || {
        echo "Couldn't fetch the token..." >&2
        return 1
    }

    # Then, use the token to get the region
    echo "Fetching the AWS region ..."
    curl -Ssf -H "X-aws-ec2-metadata-token: $TOKEN" \
        http://169.254.169.254/latest/meta-data/placement/region > /tmp/aws-region
    echo >> /tmp/aws-region # add EOL at EOF, for consistency
    echo "AWS region: $(< /tmp/aws-region)"
}
78 |
# Wait for IMDS inside a subshell so that 'set +x' stays scoped to this step.
(
    set +x # disable the xtrace as the token would be leaked
    echo "Waiting for the AWS IMDS service to be available ..."
    SECONDS=0
    retry_compact check_imds_available_and_get_region
    echo "Took $SECONDS for the IMDS service to become available."
)
86 |
# Fetch one secret from the AWS SSM Parameter Store and write it to a file.
# $1: human-readable secret name (used in the log and container names)
# $2: SSM parameter key to read
# $3: destination file, written atomically through "$3.tmp"
# Returns 1 — leaving no destination file behind — when the fetch fails or
# the fetched value is shorter than MIN_CHAR_COUNT characters.
save_secret() {
    local name=$1
    local key=$2
    local dest=$3

    # --log-driver=none avoids that the journal captures the stdout
    # logs of podman and leaks the passwords in the journal ...
    podman run \
        --name "cloud-init-fetch-$name" \
        --env AWS_REGION="$(< /tmp/aws-region)" \
        --log-driver=none \
        --rm \
        "$AWS_CLI_IMG" \
        ssm get-parameter \
        --name "$key" \
        --with-decryption \
        --query "Parameter.Value" \
        --output text \
        > "${dest}.tmp" || {
            rm -f "${dest}.tmp"
            echo "ERROR: failed to get the '$name' secret ... (fetched from $key)"
            return 1
        }

    local char_count
    char_count=$(wc -c < "${dest}.tmp")
    if (( char_count < MIN_CHAR_COUNT )); then
        echo "ERROR: the content of the '$name' secret is too short ... (fetched from $key)"
        rm -f "${dest}.tmp"
        return 1
    fi

    mv "${dest}.tmp" "${dest}" # atomic creation of the file
}
122 |
# Fetch the three secrets; with 'set -o errexit' above, execution will abort
# if 'retry_compact' fails.
retry_compact save_secret "pull-secrets" "$PULL_SECRETS_KEY" /opt/crc/pull-secret
retry_compact save_secret "kubeadmin-pass" "$KUBEADM_PASS_KEY" /opt/crc/pass_kubeadmin
retry_compact save_secret "developer-pass" "$DEVELOPER_PASS_KEY" /opt/crc/pass_developer

exit 0
129 |
--------------------------------------------------------------------------------
/ci.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# CI entry point: build a single-node cluster with snc.sh, extract the
# openshift-tests binary from the release payload, then create the disk bundle.

set -exuo pipefail

sudo yum install -y podman make golang rsync

# Conformance tests that do not work in the single-node / single-replica
# topology; presumably consumed later as a skip list — TODO confirm consumer.
cat > /tmp/ignoretests.txt << EOF
[sig-apps] Daemon set [Serial] should rollback without unnecessary restarts [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]
[sig-cli] Kubectl client Kubectl cluster-info should check if Kubernetes control plane services is included in cluster-info [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]
[sig-scheduling] SchedulerPreemption [Serial] validates basic preemption works [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]
[sig-scheduling] SchedulerPreemption [Serial] validates lower priority pod preemption by critical pod [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]
[k8s.io] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] evicts pods with minTolerationSeconds [Disruptive] [Conformance] [Suite:k8s]
[k8s.io] [sig-node] NoExecuteTaintManager Single Pod [Serial] removing taint cancels eviction [Disruptive] [Conformance] [Suite:k8s]
[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource with pruning [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]
[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate pod and apply defaults after mutation [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]
[sig-api-machinery] Aggregator Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]
[sig-apps] Daemon set [Serial] should rollback without unnecessary restarts [Conformance] [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]
[sig-network] Proxy version v1 A set of valid responses are returned for both pod and service ProxyWithPath [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]
EOF

./shellcheck.sh
./snc.sh

echo "### Extracting openshift-tests binary"
mkdir /tmp/os-test
# Resolve the release payload's 'tests' image, then pull the openshift-tests
# binary out of it and install it system-wide.
export TESTS_IMAGE=$(oc --kubeconfig=crc-tmp-install-data/auth/kubeconfig adm release info -a "${HOME}"/pull-secret --image-for=tests)
oc image extract -a "${HOME}"/pull-secret "${TESTS_IMAGE}" --path=/usr/bin/openshift-tests:/tmp/os-test/.
chmod +x /tmp/os-test/openshift-tests
sudo mv /tmp/os-test/openshift-tests /usr/local/bin/

# Run createdisk script
export CRC_ZSTD_EXTRA_FLAGS="-10 --long"
./createdisk.sh crc-tmp-install-data
34 |
function destroy_cluster () {
    # Best-effort teardown of everything snc.sh created in libvirt: the
    # domain, its volumes, the storage pool and the network. Each command is
    # allowed to fail because a partial install may have created only some
    # of these resources.
    local vm=crc

    for virsh_cmd in \
        "destroy ${vm}" \
        "undefine ${vm} --nvram" \
        "vol-delete --pool ${vm} ${vm}.qcow2" \
        "vol-delete --pool ${vm} rhcos-live.iso" \
        "pool-destroy ${vm}" \
        "pool-undefine ${vm}" \
        "net-destroy ${vm}" \
        "net-undefine ${vm}"; do
        # word splitting of ${virsh_cmd} is intentional here
        # shellcheck disable=SC2086
        sudo virsh ${virsh_cmd} || true
    done
}
47 |
destroy_cluster
# Unset the kubeconfig which is set by snc
unset KUBECONFIG

# Delete the dnsmasq config created by snc
# otherwise snc set the domain entry with 192.168.126.11
# and crc set it in another file 192.168.130.11 so
# better to remove the dnsmasq config after running snc
sudo rm -fr /etc/NetworkManager/dnsmasq.d/*
sudo systemctl reload NetworkManager

# Build the crc binary in a containerized Go toolchain and install it
git clone https://github.com/code-ready/crc.git
pushd crc
podman run --rm -v "${PWD}":/data:Z registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.24-openshift-4.20 /bin/bash -c "cd /data && make cross"
sudo mv out/linux-amd64/crc /usr/local/bin/
popd

crc config set bundle crc_libvirt_*.crcbundle
crc setup
crc start --disk-size 80 -m 24000 -c 10 -p "${HOME}"/pull-secret --log-level debug

mkdir -p crc-tmp-install-data/test-artifacts
export KUBECONFIG="${HOME}"/.crc/machines/crc/kubeconfig
# BUG FIX: errexit has been active since 'set -exuo pipefail' at the top, so a
# failing test pipeline would abort the script here and 'rc=$?' would never
# record the status (nor would /tmp/test-return be written). The trailing
# 'set -e' below shows errexit was meant to be disabled around the test run.
set +e
openshift-tests run kubernetes/conformance --dry-run | grep -F -v -f /tmp/ignoretests.txt | openshift-tests run -o crc-tmp-install-data/test-artifacts/e2e.log --junit-dir crc-tmp-install-data/test-artifacts/junit --disable-monitor alert-summary-serializer,metrics-endpoints-down,metrics-api-availability,monitoring-statefulsets-recreation,pod-network-avalibility,legacy-test-framework-invariants,api-unreachable-from-client-metrics,clusteroperator-collector -f -
rc=$?
echo "${rc}" > /tmp/test-return
set -e
echo "### Done! (${rc})"
76 |
--------------------------------------------------------------------------------
/kubevirt-hostpath-provisioner-csi/external-provisioner-rbac.yaml:
--------------------------------------------------------------------------------
1 | # This YAML file contains all RBAC objects that are necessary to run external
2 | # CSI provisioner.
3 | #
4 | # In production, each CSI driver deployment has to be customized:
5 | # - to avoid conflicts, use non-default namespace and different names
6 | # for non-namespaced entities like the ClusterRole
7 | # - decide whether the deployment replicates the external CSI
8 | # provisioner, in which case leadership election must be enabled;
9 | # this influences the RBAC setup, see below
10 |
11 | apiVersion: v1
12 | kind: ServiceAccount
13 | metadata:
14 | name: csi-provisioner
15 | # replace with non-default namespace name
16 | namespace: hostpath-provisioner
17 |
18 | ---
19 | kind: ClusterRole
20 | apiVersion: rbac.authorization.k8s.io/v1
21 | metadata:
22 | name: crc-hostpath-external-provisioner-runner
23 | rules:
24 | # The following rule should be uncommented for plugins that require secrets
25 | # for provisioning.
26 | # - apiGroups: [""]
27 | # resources: ["secrets"]
28 | # verbs: ["get", "list"]
29 | - apiGroups: [""]
30 | resources: ["persistentvolumes"]
31 | verbs: ["get", "list", "watch", "create", "delete"]
32 | - apiGroups: [""]
33 | resources: ["persistentvolumeclaims"]
34 | verbs: ["get", "list", "watch", "update"]
35 | - apiGroups: ["storage.k8s.io"]
36 | resources: ["storageclasses"]
37 | verbs: ["get", "list", "watch"]
38 | - apiGroups: [""]
39 | resources: ["events"]
40 | verbs: ["list", "watch", "create", "update", "patch"]
41 | - apiGroups: ["snapshot.storage.k8s.io"]
42 | resources: ["volumesnapshots"]
43 | verbs: ["get", "list"]
44 | - apiGroups: ["snapshot.storage.k8s.io"]
45 | resources: ["volumesnapshotcontents"]
46 | verbs: ["get", "list"]
47 | - apiGroups: ["storage.k8s.io"]
48 | resources: ["csinodes"]
49 | verbs: ["get", "list", "watch"]
50 | - apiGroups: [""]
51 | resources: ["nodes"]
52 | verbs: ["get", "list", "watch"]
53 | # Access to volumeattachments is only needed when the CSI driver
54 | # has the PUBLISH_UNPUBLISH_VOLUME controller capability.
55 | # In that case, external-provisioner will watch volumeattachments
56 | # to determine when it is safe to delete a volume.
57 | - apiGroups: ["storage.k8s.io"]
58 | resources: ["volumeattachments"]
59 | verbs: ["get", "list", "watch"]
60 |
61 | ---
62 | kind: ClusterRoleBinding
63 | apiVersion: rbac.authorization.k8s.io/v1
64 | metadata:
65 | name: crc-hostpath-csi-provisioner-role
66 | subjects:
67 | - kind: ServiceAccount
68 | name: csi-provisioner
69 | # replace with non-default namespace name
70 | namespace: hostpath-provisioner
71 | roleRef:
72 | kind: ClusterRole
73 | name: crc-hostpath-external-provisioner-runner
74 | apiGroup: rbac.authorization.k8s.io
75 |
76 | ---
77 | # Provisioner must be able to work with endpoints in current namespace
78 | # if (and only if) leadership election is enabled
79 | kind: Role
80 | apiVersion: rbac.authorization.k8s.io/v1
81 | metadata:
82 | # replace with non-default namespace name
83 | namespace: hostpath-provisioner
84 | name: external-provisioner-cfg
85 | rules:
86 | # Only one of the following rules for endpoints or leases is required based on
87 | # what is set for `--leader-election-type`. Endpoints are deprecated in favor of Leases.
88 | - apiGroups: [""]
89 | resources: ["endpoints"]
90 | verbs: ["get", "watch", "list", "delete", "update", "create"]
91 | - apiGroups: ["coordination.k8s.io"]
92 | resources: ["leases"]
93 | verbs: ["get", "watch", "list", "delete", "update", "create"]
  # Permissions for CSIStorageCapacity are only needed when enabling the
  # publishing of storage capacity information.
96 | - apiGroups: ["storage.k8s.io"]
97 | resources: ["csistoragecapacities"]
98 | verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
99 | # The GET permissions below are needed for walking up the ownership chain
100 | # for CSIStorageCapacity. They are sufficient for deployment via
101 | # StatefulSet (only needs to get Pod) and Deployment (needs to get
102 | # Pod and then ReplicaSet to find the Deployment).
103 | - apiGroups: [""]
104 | resources: ["pods"]
105 | verbs: ["get"]
106 | - apiGroups: ["apps"]
107 | resources: ["replicasets"]
108 | verbs: ["get"]
109 |
110 | ---
111 | kind: RoleBinding
112 | apiVersion: rbac.authorization.k8s.io/v1
113 | metadata:
114 | name: csi-provisioner-role-cfg
115 | # replace with non-default namespace name
116 | namespace: hostpath-provisioner
117 | subjects:
118 | - kind: ServiceAccount
119 | name: csi-provisioner
120 | # replace with non-default namespace name
121 | namespace: hostpath-provisioner
122 | roleRef:
123 | kind: Role
124 | name: external-provisioner-cfg
125 | apiGroup: rbac.authorization.k8s.io
126 |
--------------------------------------------------------------------------------
/kubelet-bootstrap-cred-manager-ds.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | name: kubelet-bootstrap-cred-manager
5 | namespace: openshift-machine-config-operator
6 | labels:
    k8s-app: kubelet-bootstrap-cred-manager
8 | spec:
  # NOTE(review): 'replicas' is not a DaemonSetSpec field (DaemonSets run one
  # pod per matching node); this line is ignored/rejected by the API server
  # and should be removed — verify before deleting.
  replicas: 1
10 | selector:
11 | matchLabels:
12 | k8s-app: kubelet-bootstrap-cred-manager
13 | template:
14 | metadata:
15 | labels:
16 | k8s-app: kubelet-bootstrap-cred-manager
17 | spec:
18 | containers:
19 | - name: kubelet-bootstrap-cred-manager
20 | image: quay.io/openshift/origin-cli:4.20
21 | command: ['/bin/bash', '-ec']
22 | args:
23 | - |
24 | #!/bin/bash
25 |
26 | set -eoux pipefail
27 |
28 | while true; do
29 | unset KUBECONFIG
30 |
31 | echo "----------------------------------------------------------------------"
32 | echo "Gather info..."
33 | echo "----------------------------------------------------------------------"
34 | # context
35 | intapi=$(oc get infrastructures.config.openshift.io cluster -o "jsonpath={.status.apiServerInternalURI}")
36 | context="$(oc --config=/etc/kubernetes/kubeconfig config current-context)"
37 | # cluster
38 | cluster="$(oc --config=/etc/kubernetes/kubeconfig config view -o "jsonpath={.contexts[?(@.name==\"$context\")].context.cluster}")"
39 | server="$(oc --config=/etc/kubernetes/kubeconfig config view -o "jsonpath={.clusters[?(@.name==\"$cluster\")].cluster.server}")"
40 | # token
41 | ca_crt_data="$(oc get secret -n openshift-machine-config-operator node-bootstrapper-token -o "jsonpath={.data.ca\.crt}" | base64 --decode)"
42 | namespace="$(oc get secret -n openshift-machine-config-operator node-bootstrapper-token -o "jsonpath={.data.namespace}" | base64 --decode)"
43 | token="$(oc get secret -n openshift-machine-config-operator node-bootstrapper-token -o "jsonpath={.data.token}" | base64 --decode)"
44 |
45 | echo "----------------------------------------------------------------------"
46 | echo "Generate kubeconfig"
47 | echo "----------------------------------------------------------------------"
48 |
49 | export KUBECONFIG="$(mktemp)"
50 | kubectl config set-credentials "kubelet" --token="$token" >/dev/null
51 | ca_crt="$(mktemp)"; echo "$ca_crt_data" > $ca_crt
52 | kubectl config set-cluster $cluster --server="$intapi" --certificate-authority="$ca_crt" --embed-certs >/dev/null
53 | kubectl config set-context kubelet --cluster="$cluster" --user="kubelet" >/dev/null
54 | kubectl config use-context kubelet >/dev/null
55 |
56 | echo "----------------------------------------------------------------------"
57 | echo "Print kubeconfig"
58 | echo "----------------------------------------------------------------------"
59 | cat "$KUBECONFIG"
60 |
61 | echo "----------------------------------------------------------------------"
62 | echo "Whoami?"
63 | echo "----------------------------------------------------------------------"
64 | oc whoami
65 | whoami
66 |
67 | echo "----------------------------------------------------------------------"
68 | echo "Moving to real kubeconfig"
69 | echo "----------------------------------------------------------------------"
70 | cp /etc/kubernetes/kubeconfig /etc/kubernetes/kubeconfig.prev
71 | chown root:root ${KUBECONFIG}
72 | chmod 0644 ${KUBECONFIG}
73 | mv "${KUBECONFIG}" /etc/kubernetes/kubeconfig
74 |
75 | echo "----------------------------------------------------------------------"
76 | echo "Sleep 60 seconds..."
77 | echo "----------------------------------------------------------------------"
78 | sleep 60
79 | done
80 | securityContext:
81 | privileged: true
82 | runAsUser: 0
83 | volumeMounts:
84 | - mountPath: /etc/kubernetes/
85 | name: kubelet-dir
86 | nodeSelector:
87 | node-role.kubernetes.io/master: ""
88 | priorityClassName: "system-cluster-critical"
89 | restartPolicy: Always
90 | securityContext:
91 | runAsUser: 0
92 | tolerations:
93 | - key: "node-role.kubernetes.io/master"
94 | operator: "Exists"
95 | effect: "NoSchedule"
96 | - key: "node.kubernetes.io/unreachable"
97 | operator: "Exists"
98 | effect: "NoExecute"
99 | tolerationSeconds: 120
100 | - key: "node.kubernetes.io/not-ready"
101 | operator: "Exists"
102 | effect: "NoExecute"
103 | tolerationSeconds: 120
104 | volumes:
105 | - hostPath:
106 | path: /etc/kubernetes/
107 | type: Directory
108 | name: kubelet-dir
109 |
--------------------------------------------------------------------------------
/image-mode/microshift/build.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -eo pipefail

# Build a bootc-based MicroShift container image and a bootable ISO from it.
ROOTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../../" && pwd )"  # repo root (two dirs up from this script)
SCRIPTDIR=${ROOTDIR}/image-mode/microshift
IMGNAME=microshift
MICROSHIFT_VERSION=4.18                 # overridable via -microshift-version
BUILD_ARCH=$(uname -m)
OSVERSION=$(awk -F: '{print $5}' /etc/system-release-cpe)  # 5th CPE field — presumably the OS major version; TODO confirm
LVM_SYSROOT_SIZE_MIN=10240              # minimum sysroot LVM size in MB
LVM_SYSROOT_SIZE=${LVM_SYSROOT_SIZE_MIN}
OCP_PULL_SECRET_FILE=                   # required; set by -pull_secret_file
AUTHORIZED_KEYS_FILE=                   # optional; set by -authorized_keys_file
AUTHORIZED_KEYS=                        # contents of AUTHORIZED_KEYS_FILE, read later
# NOTE(review): the option parser sets USE_UNRELEASED_MIRROR_REPO, not this
# variable — USE_MIRROR_REPO appears unused; verify and remove if so.
USE_MIRROR_REPO=

# shellcheck disable=SC2034
STARTTIME="$(date +%s)"
BUILDDIR=${BUILDDIR:-${ROOTDIR}/_output/image-mode}
20 |
usage() {
    # Print an optional error message followed by the help text, then exit 1.
    # Args: $1 - error message to show before the usage text (may be empty).
    local error_message="$1"

    if [ -n "${error_message}" ]; then
        echo "ERROR: ${error_message}"
        echo
    fi

    echo "Usage: $(basename "$0") <-pull_secret_file path_to_file> [OPTION]..."
    echo ""
    echo " -pull_secret_file path_to_file"
    echo " Path to a file containing the OpenShift pull secret, which can be"
    echo " obtained from https://console.redhat.com/openshift/downloads#tool-pull-secret"
    echo ""
    echo "Optional arguments:"
    echo " -lvm_sysroot_size num_in_MB"
    echo " Size of the system root LVM partition. The remaining"
    echo " disk space will be allocated for data (default: ${LVM_SYSROOT_SIZE})"
    echo " -authorized_keys_file path_to_file"
    echo " Path to an SSH authorized_keys file to allow SSH access"
    echo " into the default 'redhat' account"
    echo " -use-unreleased-mirror-repo "
    echo " Use unreleased mirror repo to get release candidate and engineering preview rpms"
    echo " like (https://mirror.openshift.com/pub/openshift-v4/x86_64/microshift/ocp-dev-preview/latest-4.18/el9/os/)"
    echo " -microshift-version "
    # BUG FIX: the default value was printed with an unbalanced '(' — the
    # closing parenthesis after ${MICROSHIFT_VERSION} was missing.
    echo " Version of microshift for image generation (default: ${MICROSHIFT_VERSION})"
    echo " -hostname "
    echo " Hostname of the machine"
    echo " -base-domain "
    echo " Base domain for microshift cluster"
    exit 1
}
53 |
title() {
    # Emit a blue "# <text>" section header (preceded by a blank line) to
    # stdout using ANSI SGR color codes; \033 is the same ESC byte the
    # original's 'echo -e "\E..."' produced.
    printf '\033[34m\n# %s\033[00m\n' "$1"
}
57 |
# Parse the command line.
# Each option consumes its value with a second 'shift'; an empty value (i.e.
# the flag was the last argument) triggers usage() with a specific message,
# and any unknown argument prints the plain usage text.
while [ $# -gt 0 ] ; do
    case $1 in
        -pull_secret_file)
        shift
        OCP_PULL_SECRET_FILE="$1"
        [ -z "${OCP_PULL_SECRET_FILE}" ] && usage "Pull secret file not specified"
        # -s: reject an existing-but-empty secret file up front
        [ ! -s "${OCP_PULL_SECRET_FILE}" ] && usage "Empty or missing pull secret file"
        shift
        ;;
        -lvm_sysroot_size)
        shift
        LVM_SYSROOT_SIZE="$1"
        [ -z "${LVM_SYSROOT_SIZE}" ] && usage "System root LVM partition size not specified"
        [ "${LVM_SYSROOT_SIZE}" -lt ${LVM_SYSROOT_SIZE_MIN} ] && usage "System root LVM partition size cannot be smaller than ${LVM_SYSROOT_SIZE_MIN}MB"
        shift
        ;;
        -authorized_keys_file)
        shift
        AUTHORIZED_KEYS_FILE="$1"
        [ -z "${AUTHORIZED_KEYS_FILE}" ] && usage "Authorized keys file not specified"
        shift
        ;;
        -use-unreleased-mirror-repo)
        shift
        USE_UNRELEASED_MIRROR_REPO="$1"
        [ -z "${USE_UNRELEASED_MIRROR_REPO}" ] && usage "Mirror repo not specified"
        shift
        ;;
        -microshift-version)
        shift
        MICROSHIFT_VERSION="$1"
        [ -z "${MICROSHIFT_VERSION}" ] && usage "MicroShift version not specified"
        shift
        ;;
        -hostname)
        shift
        # overrides the shell-provided HOSTNAME variable used by the template
        HOSTNAME="$1"
        [ -z "${HOSTNAME}" ] && usage "Hostname not specified"
        shift
        ;;
        -base-domain)
        shift
        BASE_DOMAIN="$1"
        [ -z "${BASE_DOMAIN}" ] && usage "Base domain not specified"
        shift
        ;;
        *)
        usage
        ;;
    esac
done
110 |
# Validate inputs before doing any work
if [ ! -r "${OCP_PULL_SECRET_FILE}" ] ; then
    echo "ERROR: pull_secret_file file does not exist or not readable: ${OCP_PULL_SECRET_FILE}"
    exit 1
fi
if [ -n "${AUTHORIZED_KEYS_FILE}" ]; then
    if [ ! -e "${AUTHORIZED_KEYS_FILE}" ]; then
        echo "ERROR: authorized_keys_file does not exist: ${AUTHORIZED_KEYS_FILE}"
        exit 1
    else
        AUTHORIZED_KEYS=$(cat "${AUTHORIZED_KEYS_FILE}")
    fi
fi

mkdir -p "${BUILDDIR}"

title "Preparing kickstart config"
# Render the config template, compacting pull secret contents via 'jq -c'.
# IMPROVED: one sed process instead of 'cat file | sed | sed | ...' (useless
# cat plus five subprocesses). The pull-secret/host patterns use ';' as the
# s-command delimiter, while the authorized-keys pattern uses '^' because key
# material commonly contains ';'.
sed \
    -e "s;REPLACE_HOSTNAME;${HOSTNAME};g" \
    -e "s;REPLACE_BASE_DOMAIN;${BASE_DOMAIN};g" \
    -e "s;REPLACE_LVM_SYSROOT_SIZE;${LVM_SYSROOT_SIZE};g" \
    -e "s;REPLACE_OCP_PULL_SECRET_CONTENTS;$(jq -c < "${OCP_PULL_SECRET_FILE}");g" \
    -e "s^REPLACE_CORE_AUTHORIZED_KEYS_CONTENTS^${AUTHORIZED_KEYS}^g" \
    "${SCRIPTDIR}/config/config.toml.template" \
    > "${BUILDDIR}"/config.toml

title "Building bootc image for microshift"
# ROBUSTNESS: quote the secret path and tags so paths with spaces don't split
sudo podman build --no-cache --authfile "${OCP_PULL_SECRET_FILE}" -t "${IMGNAME}:${MICROSHIFT_VERSION}" \
    --build-arg MICROSHIFT_VER="${MICROSHIFT_VERSION}" \
    --env UNRELEASED_MIRROR_REPO="${USE_UNRELEASED_MIRROR_REPO}" \
    -f "${SCRIPTDIR}/config/Containerfile.bootc-rhel9"

# As of now we are generating the ISO to have same previous behavior
# TODO: Try to use qcow2 directly for vm creation
title "Creating ISO image"
sudo podman run --authfile "${OCP_PULL_SECRET_FILE}" --rm -it \
    --privileged \
    --security-opt label=type:unconfined_t \
    -v /var/lib/containers/storage:/var/lib/containers/storage \
    -v "${BUILDDIR}"/config.toml:/config.toml \
    -v "${BUILDDIR}":/output \
    registry.redhat.io/rhel9/bootc-image-builder:latest \
    --local \
    --type iso \
    --config /config.toml \
    "localhost/${IMGNAME}:${MICROSHIFT_VERSION}"
156 |
--------------------------------------------------------------------------------
/tools.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Every external tool can be overridden from the environment
# (e.g. JQ=/usr/local/bin/jq ./snc.sh); defaults are the bare command names.
JQ=${JQ:-jq}

QEMU_IMG=${QEMU_IMG:-qemu-img}
VIRT_FILESYSTEMS=${VIRT_FILESYSTEMS:-virt-filesystems}
GUESTFISH=${GUESTFISH:-guestfish}
VIRSH=${VIRSH:-virsh}
VIRT_INSTALL=${VIRT_INSTALL:-virt-install}

XMLLINT=${XMLLINT:-xmllint}

DIG=${DIG:-dig}
UNZIP=${UNZIP:-unzip}
ZSTD=${ZSTD:-zstd}
# zstd flags used when compressing bundles; CI lowers the level (e.g.
# "-10 --long") to trade bundle size for build time
CRC_ZSTD_EXTRA_FLAGS=${CRC_ZSTD_EXTRA_FLAGS:-"--ultra -22"}

HTPASSWD=${HTPASSWD:-htpasswd}
PATCH=${PATCH:-patch}

ARCH=$(uname -m)

# Map the machine architecture to yq's release-asset naming and choose which
# bundle flavors are generated by default on this platform (each is still
# individually overridable from the environment).
case "${ARCH}" in
    x86_64)
        yq_ARCH="amd64"
        SNC_GENERATE_MACOS_BUNDLE=${SNC_GENERATE_MACOS_BUNDLE:-1}
        SNC_GENERATE_WINDOWS_BUNDLE=${SNC_GENERATE_WINDOWS_BUNDLE:-1}
        SNC_GENERATE_LINUX_BUNDLE=${SNC_GENERATE_LINUX_BUNDLE:-1}
        ;;
    aarch64)
        yq_ARCH="arm64"
        SNC_GENERATE_MACOS_BUNDLE=${SNC_GENERATE_MACOS_BUNDLE:-1}
        SNC_GENERATE_WINDOWS_BUNDLE=${SNC_GENERATE_WINDOWS_BUNDLE:-0}
        SNC_GENERATE_LINUX_BUNDLE=${SNC_GENERATE_LINUX_BUNDLE:-1}
        ;;
    *)
        yq_ARCH=${ARCH}
        SNC_GENERATE_MACOS_BUNDLE=${SNC_GENERATE_MACOS_BUNDLE:-0}
        SNC_GENERATE_WINDOWS_BUNDLE=${SNC_GENERATE_WINDOWS_BUNDLE:-0}
        SNC_GENERATE_LINUX_BUNDLE=${SNC_GENERATE_LINUX_BUNDLE:-1}
        ;;
esac

# Download yq/jq for manipulating in place yaml configs
# (skipped when the caller already points YQ at a binary)
if test -z ${YQ-}; then
    echo "Downloading yq binary to manipulate yaml files"
    curl -L https://github.com/mikefarah/yq/releases/download/v4.5.1/yq_linux_${yq_ARCH} -o yq
    chmod +x yq
    YQ=./yq
fi
51 |
# Install each required tool only when it is not already on PATH.
if ! which ${JQ}; then
    sudo yum -y install /usr/bin/jq
fi

# Add virt-filesystems/guestfish/qemu-img
if ! which ${VIRT_FILESYSTEMS}; then
    sudo yum -y install /usr/bin/virt-filesystems
fi

if ! which ${GUESTFISH}; then
    sudo yum -y install /usr/bin/guestfish
fi

if ! which ${VIRSH}; then
    sudo yum -y install /usr/bin/virsh
fi

if ! which ${QEMU_IMG}; then
    sudo yum -y install /usr/bin/qemu-img
fi

if ! which ${VIRT_INSTALL}; then
    sudo yum -y install /usr/bin/virt-install
fi

# The CoreOS image uses an XFS filesystem
# Beware that if you are running on an el7 system, you won't be able
# to resize the crc VM XFS filesystem as it was created on el8
if ! rpm -q libguestfs-xfs; then
    # BUG FIX: '-y' was missing here (unlike every other install in this
    # file), so yum would prompt interactively and hang unattended runs.
    sudo yum -y install libguestfs-xfs
fi

# unzip is only needed when a macOS or Windows bundle will be produced.
# (POSIX '[ a ] || [ b ]' instead of the obsolescent '-o' operator.)
if [ "${SNC_GENERATE_WINDOWS_BUNDLE}" != "0" ] || [ "${SNC_GENERATE_MACOS_BUNDLE}" != "0" ];then
    if ! which ${UNZIP}; then
        sudo yum -y install /usr/bin/unzip
    fi
fi

if ! which ${XMLLINT}; then
    sudo yum -y install /usr/bin/xmllint
fi

if ! which ${DIG}; then
    sudo yum -y install /usr/bin/dig
fi

if ! which ${ZSTD}; then
    sudo yum -y install /usr/bin/zstd
fi

if ! which ${HTPASSWD}; then
    sudo yum -y install /usr/bin/htpasswd
fi

if ! which ${PATCH}; then
    sudo yum -y install /usr/bin/patch
fi
109 |
function retry {
    # Run "$@" until it succeeds, sleeping with exponential backoff between
    # attempts (1s, 2s, 4s, ...). After max_attempts failures the command's
    # last exit status is returned.
    # total wait time = 2 ^ (max_attempts - 1) - 1 seconds
    local max_attempts=14

    local attempt=0
    local last_status
    while ! "$@"; do
        last_status=$?
        attempt=$((attempt + 1))
        if [ "${attempt}" -ge "${max_attempts}" ]; then
            echo "Retry ${attempt}/${max_attempts} exited ${last_status}, no more retries left." 1>&2
            return "${last_status}"
        fi
        echo "Retry ${attempt}/${max_attempts} exited ${last_status}, retrying in $((2 ** (attempt - 1))) seconds..." 1>&2
        sleep $((2 ** (attempt - 1)))
    done
    return 0
}
129 |
function get_vm_prefix {
    # Print "<crc_vm_name>-<random_string>", the prefix the OpenShift
    # installer gave to every libvirt resource; it is recovered by matching
    # the "<name>-<random>-master-0" domain in 'virsh list'.
    # Exits the script when no matching domain exists.
    local crc_vm_name=$1
    # This random_string is created by installer and added to each resource type,
    # in installer side also variable name is kept as `random_string`
    # so to maintain consistency, we are also using random_string here.
    random_string=$(sudo virsh list --all | grep -m1 -oP "(?<=${crc_vm_name}-).*(?=-master-0)")
    # BUG FIX (SC2086): the expansion was unquoted, so an empty match
    # collapsed the test to the one-argument form '[ -z ]', which only
    # happened to evaluate as intended. Quote it so the test is explicit.
    if [ -z "${random_string}" ]; then
        echo "Could not find virtual machine created by snc.sh"
        exit 1
    fi
    echo "${crc_vm_name}-${random_string}"
}
142 |
function shutdown_vm {
    # Ask libvirt to gracefully shut down the domain and block until
    # 'virsh domstate' reports a "shut off"/"shutdown" state.
    # Args: $1 - libvirt domain name.
    local vm_name=$1
    retry sudo virsh shutdown ${vm_name}
    # Wait till the instance has actually shut down
    # (the original comment said "started" — this loop waits for shutdown)
    until sudo virsh domstate ${vm_name} | grep shut; do
        echo " ${vm_name} still running"
        sleep 3
    done
}
152 |
function wait_for_ssh {
    # Poll until an SSH connection as 'core' to the VM succeeds.
    # ${SSH} is expected to be provided by the caller with the right
    # options/identity — TODO confirm where it is set.
    # Args: $1 - VM name (for log messages only), $2 - VM IP address.
    local vm_name=$1
    local vm_ip=$2
    until ${SSH} core@${vm_ip} -- "exit 0" >/dev/null 2>&1; do
        echo " ${vm_name} still booting"
        sleep 2
    done
}
161 |
function start_vm {
    # Start the libvirt domain (retrying transient failures) and block until
    # it is reachable over SSH.
    # Args: $1 - libvirt domain name, $2 - VM IP address.
    local vm_name=$1
    local vm_ip=$2
    retry sudo virsh start ${vm_name}
    # Wait till ssh connection available
    wait_for_ssh ${vm_name} ${vm_ip}
}
169 |
function destroy_libvirt_resources {
    # Best-effort removal of the domain, its volumes, the storage pool and
    # the network named ${SNC_PRODUCT_NAME}; '|| true' keeps going when a
    # resource is already gone (e.g. after a partially failed install).
    # Args: $1 - name of the install ISO volume to delete from the pool.
    local iso=$1

    sudo virsh destroy ${SNC_PRODUCT_NAME} || true
    sudo virsh undefine ${SNC_PRODUCT_NAME} --nvram || true
    sudo virsh vol-delete --pool ${SNC_PRODUCT_NAME} ${SNC_PRODUCT_NAME}.qcow2 || true
    sudo virsh vol-delete --pool ${SNC_PRODUCT_NAME} ${iso} || true
    sudo virsh pool-destroy ${SNC_PRODUCT_NAME} || true
    sudo virsh pool-undefine ${SNC_PRODUCT_NAME} || true
    sudo virsh net-destroy ${SNC_PRODUCT_NAME} || true
    sudo virsh net-undefine ${SNC_PRODUCT_NAME} || true
}
182 |
function create_libvirt_resources {
    # Create the dedicated dir-backed storage pool and the (transient)
    # network for the snc VM. The network XML is rendered from
    # host-libvirt-net.xml.template by substituting the three placeholders.
    sudo virsh pool-define-as ${SNC_PRODUCT_NAME} --type dir --target /var/lib/libvirt/${SNC_PRODUCT_NAME}
    sudo virsh pool-start --build ${SNC_PRODUCT_NAME}
    sudo virsh pool-autostart ${SNC_PRODUCT_NAME}
    sed -e "s|NETWORK_NAME|${SNC_PRODUCT_NAME}|" \
        -e "s|CLUSTER_NAME|${SNC_PRODUCT_NAME}|" \
        -e "s|BASE_DOMAIN|${BASE_DOMAIN}|" \
        host-libvirt-net.xml.template > host-libvirt-net.xml
    # net-create starts a transient network, so the rendered file can go
    sudo virsh net-create host-libvirt-net.xml
    rm -fr host-libvirt-net.xml
}
194 |
function create_vm {
    # Boot the given install ISO in a new guest named ${SNC_PRODUCT_NAME},
    # attached to the pool/network created by create_libvirt_resources.
    # Args: $1 - ISO volume name inside the product storage pool.
    # Relies on caller-provided SNC_PRODUCT_NAME, SNC_CLUSTER_CPUS,
    # SNC_CLUSTER_MEMORY, CRC_VM_DISK_SIZE, BUNDLE_TYPE and ARCH.
    local iso=$1

    bootOption=""
    if [[ ${BUNDLE_TYPE} != "okd" ]]; then
        # non-okd bundles are booted with UEFI firmware; okd gets the
        # virt-install default — presumably BIOS; verify against okd docs
        bootOption="--boot uefi"
    fi

    sudo virt-install \
        --name ${SNC_PRODUCT_NAME} \
        --vcpus ${SNC_CLUSTER_CPUS} \
        --memory ${SNC_CLUSTER_MEMORY} \
        --arch=${ARCH} \
        --disk path=/var/lib/libvirt/${SNC_PRODUCT_NAME}/${SNC_PRODUCT_NAME}.qcow2,size=${CRC_VM_DISK_SIZE} \
        --network network="${SNC_PRODUCT_NAME}",mac=52:54:00:ee:42:e1 \
        --os-variant rhel9-unknown \
        --nographics \
        --cdrom /var/lib/libvirt/${SNC_PRODUCT_NAME}/${iso} \
        --events on_reboot=restart \
        --autoconsole none \
        ${bootOption} \
        --wait
}
218 |
function generate_htpasswd_file {
    # Write an htpasswd file (bcrypt, via -B) with two users:
    #   developer -> literal password "developer"
    #   kubeadmin -> the installer-generated password from auth/kubeadmin-password
    # Args: $1 - installer state dir containing auth/kubeadmin-password,
    #       $2 - path of the htpasswd file to (re)create.
    (
        set +x # use a subshell to avoid leaking the password

        local random_password
        random_password=$(cat "$auth_file_dir/auth/kubeadmin-password")
        "${HTPASSWD}" -c -B -i "${pass_file}" developer <<< "developer" # use -c to create the file
        "${HTPASSWD}" -B -i "${pass_file}" kubeadmin <<< "${random_password}" # append to the existing password file
    )
}
231 |
--------------------------------------------------------------------------------
/kubevirt-hostpath-provisioner-csi/csi-driver/csi-kubevirt-hostpath-provisioner.yaml:
--------------------------------------------------------------------------------
1 | # All of the individual sidecar RBAC roles get bound
2 | # to this account.
3 | kind: ServiceAccount
4 | apiVersion: v1
5 | metadata:
6 | name: csi-hostpath-provisioner-sa
7 | namespace: hostpath-provisioner
8 | ---
9 | apiVersion: rbac.authorization.k8s.io/v1
10 | kind: ClusterRoleBinding
11 | metadata:
12 | name: crc-csi-hostpathplugin-health-monitor-controller-cluster-role
13 | roleRef:
14 | apiGroup: rbac.authorization.k8s.io
15 | kind: ClusterRole
16 | name: crc-hostpath-external-health-monitor-controller-runner
17 | subjects:
18 | - kind: ServiceAccount
19 | name: csi-hostpath-provisioner-sa
20 | namespace: hostpath-provisioner
21 | ---
22 | apiVersion: rbac.authorization.k8s.io/v1
23 | kind: ClusterRoleBinding
24 | metadata:
25 | name: crc-csi-hostpathplugin-provisioner-cluster-role
26 | roleRef:
27 | apiGroup: rbac.authorization.k8s.io
28 | kind: ClusterRole
29 | name: crc-hostpath-external-provisioner-runner
30 | subjects:
31 | - kind: ServiceAccount
32 | name: csi-hostpath-provisioner-sa
33 | namespace: hostpath-provisioner
34 | ---
35 | apiVersion: rbac.authorization.k8s.io/v1
36 | kind: RoleBinding
37 | metadata:
38 | name: csi-hostpathplugin-health-monitor-controller-role
39 | roleRef:
40 | apiGroup: rbac.authorization.k8s.io
41 | kind: Role
42 | name: external-health-monitor-controller-cfg
43 | subjects:
44 | - kind: ServiceAccount
45 | name: csi-hostpath-provisioner-sa
46 | namespace: hostpath-provisioner
47 | ---
48 | apiVersion: rbac.authorization.k8s.io/v1
49 | kind: RoleBinding
50 | metadata:
51 | name: csi-hostpathplugin-provisioner-role
52 | roleRef:
53 | apiGroup: rbac.authorization.k8s.io
54 | kind: Role
55 | name: external-provisioner-cfg
56 | subjects:
57 | - kind: ServiceAccount
58 | name: csi-hostpath-provisioner-sa
59 | namespace: hostpath-provisioner
60 | ---
61 | kind: DaemonSet
62 | apiVersion: apps/v1
63 | metadata:
64 | name: csi-hostpathplugin
65 | spec:
66 | selector:
67 | matchLabels:
68 | app.kubernetes.io/instance: hostpath.csi.kubevirt.io
69 | app.kubernetes.io/part-of: csi-driver-host-path
70 | app.kubernetes.io/name: csi-hostpathplugin
71 | app.kubernetes.io/component: plugin
72 | template:
73 | metadata:
74 | labels:
75 | app.kubernetes.io/instance: hostpath.csi.kubevirt.io
76 | app.kubernetes.io/part-of: csi-driver-host-path
77 | app.kubernetes.io/name: csi-hostpathplugin
78 | app.kubernetes.io/component: plugin
79 | spec:
80 | serviceAccountName: csi-hostpath-provisioner-sa
81 | containers:
82 | - args:
83 | - --drivername=kubevirt.io.hostpath-provisioner
84 | - --v=3
85 | - --datadir=[{"name":"local","path":"/csi-data-dir"}]
86 | - --endpoint=$(CSI_ENDPOINT)
87 | - --nodeid=$(NODE_NAME)
88 | - --version=$(VERSION)
89 | env:
90 | - name: CSI_ENDPOINT
91 | value: unix:///csi/csi.sock
92 | - name: NODE_NAME
93 | valueFrom:
94 | fieldRef:
95 | apiVersion: v1
96 | fieldPath: spec.nodeName
97 | - name: PV_DIR
98 | value: /var/hpvolumes
99 | - name: VERSION
100 | value: latest
101 | image: quay.io/kubevirt/hostpath-csi-driver:latest
102 | imagePullPolicy: IfNotPresent
103 | livenessProbe:
104 | failureThreshold: 5
105 | httpGet:
106 | path: /healthz
107 | port: 9898
108 | scheme: HTTP
109 | initialDelaySeconds: 10
110 | periodSeconds: 2
111 | successThreshold: 1
112 | timeoutSeconds: 3
113 | name: hostpath-provisioner
114 | ports:
115 | - containerPort: 9898
116 | name: healthz
117 | protocol: TCP
118 | resources: {}
119 | securityContext:
120 | privileged: true
121 | terminationMessagePath: /dev/termination-log
122 | terminationMessagePolicy: File
123 | volumeMounts:
124 | - mountPath: /csi-data-dir
125 | name: csi-data-dir
126 | - mountPath: /var/lib/kubelet/plugins
127 | mountPropagation: Bidirectional
128 | name: plugins-dir
129 | - mountPath: /var/lib/kubelet/pods
130 | mountPropagation: Bidirectional
131 | name: mountpoint-dir
132 | - mountPath: /csi
133 | name: socket-dir
134 | - args:
135 | - --v=3
136 | - --csi-address=/csi/csi.sock
137 | - --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock
138 | env:
139 | - name: KUBE_NODE_NAME
140 | valueFrom:
141 | fieldRef:
142 | apiVersion: v1
143 | fieldPath: spec.nodeName
144 | image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.2.0
145 | imagePullPolicy: IfNotPresent
146 | name: node-driver-registrar
147 | resources: {}
148 | securityContext:
149 | privileged: true
150 | terminationMessagePath: /dev/termination-log
151 | terminationMessagePolicy: File
152 | volumeMounts:
153 | - mountPath: /csi
154 | name: socket-dir
155 | - mountPath: /registration
156 | name: registration-dir
157 | - mountPath: /csi-data-dir
158 | name: csi-data-dir
159 | - args:
160 | - --csi-address=/csi/csi.sock
161 | - --health-port=9898
162 | image: registry.k8s.io/sig-storage/livenessprobe:v2.3.0
163 | imagePullPolicy: IfNotPresent
164 | name: liveness-probe
165 | resources: {}
166 | terminationMessagePath: /dev/termination-log
167 | terminationMessagePolicy: File
168 | volumeMounts:
169 | - mountPath: /csi
170 | name: socket-dir
171 | - args:
172 | - --v=5
173 | - --csi-address=/csi/csi.sock
174 | - --feature-gates=Topology=true
175 | - --enable-capacity=true
176 | - --capacity-for-immediate-binding=true
177 | - --extra-create-metadata=true
178 | - --immediate-topology=false
179 | - --strict-topology=true
180 | - --node-deployment=true
181 | env:
182 | - name: NAMESPACE
183 | valueFrom:
184 | fieldRef:
185 | apiVersion: v1
186 | fieldPath: metadata.namespace
187 | - name: POD_NAME
188 | valueFrom:
189 | fieldRef:
190 | apiVersion: v1
191 | fieldPath: metadata.name
192 | - name: NODE_NAME
193 | valueFrom:
194 | fieldRef:
195 | apiVersion: v1
196 | fieldPath: spec.nodeName
197 | image: registry.k8s.io/sig-storage/csi-provisioner:v2.2.1
198 | imagePullPolicy: IfNotPresent
199 | name: csi-provisioner
200 | resources: {}
201 | securityContext:
202 | privileged: true
203 | terminationMessagePath: /dev/termination-log
204 | terminationMessagePolicy: File
205 | volumeMounts:
206 | - mountPath: /csi
207 | name: socket-dir
208 | volumes:
209 | - hostPath:
210 | path: /var/lib/kubelet/plugins/csi-hostpath
211 | type: DirectoryOrCreate
212 | name: socket-dir
213 | - hostPath:
214 | path: /var/lib/kubelet/pods
215 | type: DirectoryOrCreate
216 | name: mountpoint-dir
217 | - hostPath:
218 | path: /var/lib/kubelet/plugins_registry
219 | type: Directory
220 | name: registration-dir
221 | - hostPath:
222 | path: /var/lib/kubelet/plugins
223 | type: Directory
224 | name: plugins-dir
225 | - hostPath:
226 | # 'path' is where PV data is persisted on host.
227 |         # using /tmp is also possible, but the PVs will not be available after plugin container recreation or host reboot
228 | path: /var/lib/csi-hostpath-data/
229 | type: DirectoryOrCreate
230 | name: csi-data-dir
231 |
--------------------------------------------------------------------------------
/cluster-kube-apiserver-operator.patch:
--------------------------------------------------------------------------------
1 | diff --git a/pkg/operator/certrotationcontroller/certrotationcontroller.go b/pkg/operator/certrotationcontroller/certrotationcontroller.go
2 | index 1bf5d3224..a28ce71ed 100644
3 | --- a/pkg/operator/certrotationcontroller/certrotationcontroller.go
4 | +++ b/pkg/operator/certrotationcontroller/certrotationcontroller.go
5 | @@ -129,8 +129,8 @@ func newCertRotationController(
6 | certrotation.RotatedSigningCASecret{
7 | Namespace: operatorclient.OperatorNamespace,
8 | Name: "aggregator-client-signer",
9 | - Validity: 30 * rotationDay,
10 | - Refresh: 15 * rotationDay,
11 | + Validity: 365 * rotationDay,
12 | + Refresh: 180 * rotationDay,
13 | RefreshOnlyWhenExpired: refreshOnlyWhenExpired,
14 | Informer: kubeInformersForNamespaces.InformersFor(operatorclient.OperatorNamespace).Core().V1().Secrets(),
15 | Lister: kubeInformersForNamespaces.InformersFor(operatorclient.OperatorNamespace).Core().V1().Secrets().Lister(),
16 | @@ -148,8 +148,8 @@ func newCertRotationController(
17 | certrotation.RotatedSelfSignedCertKeySecret{
18 | Namespace: operatorclient.TargetNamespace,
19 | Name: "aggregator-client",
20 | - Validity: 30 * rotationDay,
21 | - Refresh: 15 * rotationDay,
22 | + Validity: 365 * rotationDay,
23 | + Refresh: 180 * rotationDay,
24 | RefreshOnlyWhenExpired: refreshOnlyWhenExpired,
25 | CertCreator: &certrotation.ClientRotation{
26 | UserInfo: &user.DefaultInfo{Name: "system:openshift-aggregator"},
27 | @@ -188,8 +188,8 @@ func newCertRotationController(
28 | certrotation.RotatedSelfSignedCertKeySecret{
29 | Namespace: operatorclient.TargetNamespace,
30 | Name: "kubelet-client",
31 | - Validity: 30 * rotationDay,
32 | - Refresh: 15 * rotationDay,
33 | + Validity: 365 * rotationDay,
34 | + Refresh: 180 * rotationDay,
35 | RefreshOnlyWhenExpired: refreshOnlyWhenExpired,
36 | CertCreator: &certrotation.ClientRotation{
37 | UserInfo: &user.DefaultInfo{Name: "system:kube-apiserver", Groups: []string{"kube-master"}},
38 | @@ -228,8 +228,8 @@ func newCertRotationController(
39 | certrotation.RotatedSelfSignedCertKeySecret{
40 | Namespace: operatorclient.TargetNamespace,
41 | Name: "localhost-serving-cert-certkey",
42 | - Validity: 30 * rotationDay,
43 | - Refresh: 15 * rotationDay,
44 | + Validity: 365 * rotationDay,
45 | + Refresh: 180 * rotationDay,
46 | RefreshOnlyWhenExpired: refreshOnlyWhenExpired,
47 | CertCreator: &certrotation.ServingRotation{
48 | Hostnames: func() []string { return []string{"localhost", "127.0.0.1"} },
49 | @@ -268,8 +268,8 @@ func newCertRotationController(
50 | certrotation.RotatedSelfSignedCertKeySecret{
51 | Namespace: operatorclient.TargetNamespace,
52 | Name: "service-network-serving-certkey",
53 | - Validity: 30 * rotationDay,
54 | - Refresh: 15 * rotationDay,
55 | + Validity: 365 * rotationDay,
56 | + Refresh: 180 * rotationDay,
57 | RefreshOnlyWhenExpired: refreshOnlyWhenExpired,
58 | CertCreator: &certrotation.ServingRotation{
59 | Hostnames: ret.serviceNetwork.GetHostnames,
60 | @@ -309,8 +309,8 @@ func newCertRotationController(
61 | certrotation.RotatedSelfSignedCertKeySecret{
62 | Namespace: operatorclient.TargetNamespace,
63 | Name: "external-loadbalancer-serving-certkey",
64 | - Validity: 30 * rotationDay,
65 | - Refresh: 15 * rotationDay,
66 | + Validity: 365 * rotationDay,
67 | + Refresh: 180 * rotationDay,
68 | RefreshOnlyWhenExpired: refreshOnlyWhenExpired,
69 | CertCreator: &certrotation.ServingRotation{
70 | Hostnames: ret.externalLoadBalancer.GetHostnames,
71 | @@ -350,8 +350,8 @@ func newCertRotationController(
72 | certrotation.RotatedSelfSignedCertKeySecret{
73 | Namespace: operatorclient.TargetNamespace,
74 | Name: "internal-loadbalancer-serving-certkey",
75 | - Validity: 30 * rotationDay,
76 | - Refresh: 15 * rotationDay,
77 | + Validity: 365 * rotationDay,
78 | + Refresh: 180 * rotationDay,
79 | RefreshOnlyWhenExpired: refreshOnlyWhenExpired,
80 | CertCreator: &certrotation.ServingRotation{
81 | Hostnames: ret.internalLoadBalancer.GetHostnames,
82 | @@ -410,8 +410,8 @@ func newCertRotationController(
83 | certrotation.RotatedSigningCASecret{
84 | Namespace: operatorclient.OperatorNamespace,
85 | Name: "kube-control-plane-signer",
86 | - Validity: 60 * defaultRotationDay,
87 | - Refresh: 30 * defaultRotationDay,
88 | + Validity: 2 * 365 * defaultRotationDay,
89 | + Refresh: 365 * defaultRotationDay,
90 | RefreshOnlyWhenExpired: refreshOnlyWhenExpired,
91 | Informer: kubeInformersForNamespaces.InformersFor(operatorclient.OperatorNamespace).Core().V1().Secrets(),
92 | Lister: kubeInformersForNamespaces.InformersFor(operatorclient.OperatorNamespace).Core().V1().Secrets().Lister(),
93 | @@ -429,8 +429,8 @@ func newCertRotationController(
94 | certrotation.RotatedSelfSignedCertKeySecret{
95 | Namespace: operatorclient.GlobalMachineSpecifiedConfigNamespace,
96 | Name: "kube-controller-manager-client-cert-key",
97 | - Validity: 30 * rotationDay,
98 | - Refresh: 15 * rotationDay,
99 | + Validity: 365 * rotationDay,
100 | + Refresh: 180 * rotationDay,
101 | RefreshOnlyWhenExpired: refreshOnlyWhenExpired,
102 | CertCreator: &certrotation.ClientRotation{
103 | UserInfo: &user.DefaultInfo{Name: "system:kube-controller-manager"},
104 | @@ -450,8 +450,8 @@ func newCertRotationController(
105 | certrotation.RotatedSigningCASecret{
106 | Namespace: operatorclient.OperatorNamespace,
107 | Name: "kube-control-plane-signer",
108 | - Validity: 60 * defaultRotationDay,
109 | - Refresh: 30 * defaultRotationDay,
110 | + Validity: 2 * 365 * defaultRotationDay,
111 | + Refresh: 365 * defaultRotationDay,
112 | RefreshOnlyWhenExpired: refreshOnlyWhenExpired,
113 | Informer: kubeInformersForNamespaces.InformersFor(operatorclient.OperatorNamespace).Core().V1().Secrets(),
114 | Lister: kubeInformersForNamespaces.InformersFor(operatorclient.OperatorNamespace).Core().V1().Secrets().Lister(),
115 | @@ -469,8 +469,8 @@ func newCertRotationController(
116 | certrotation.RotatedSelfSignedCertKeySecret{
117 | Namespace: operatorclient.GlobalMachineSpecifiedConfigNamespace,
118 | Name: "kube-scheduler-client-cert-key",
119 | - Validity: 30 * rotationDay,
120 | - Refresh: 15 * rotationDay,
121 | + Validity: 365 * rotationDay,
122 | + Refresh: 180 * rotationDay,
123 | RefreshOnlyWhenExpired: refreshOnlyWhenExpired,
124 | CertCreator: &certrotation.ClientRotation{
125 | UserInfo: &user.DefaultInfo{Name: "system:kube-scheduler"},
126 | @@ -490,8 +490,8 @@ func newCertRotationController(
127 | certrotation.RotatedSigningCASecret{
128 | Namespace: operatorclient.OperatorNamespace,
129 | Name: "kube-control-plane-signer",
130 | - Validity: 60 * defaultRotationDay,
131 | - Refresh: 30 * defaultRotationDay,
132 | + Validity: 2 * 365 * defaultRotationDay,
133 | + Refresh: 365 * defaultRotationDay,
134 | RefreshOnlyWhenExpired: refreshOnlyWhenExpired,
135 | Informer: kubeInformersForNamespaces.InformersFor(operatorclient.OperatorNamespace).Core().V1().Secrets(),
136 | Lister: kubeInformersForNamespaces.InformersFor(operatorclient.OperatorNamespace).Core().V1().Secrets().Lister(),
137 | @@ -509,8 +509,8 @@ func newCertRotationController(
138 | certrotation.RotatedSelfSignedCertKeySecret{
139 | Namespace: operatorclient.TargetNamespace,
140 | Name: "control-plane-node-admin-client-cert-key",
141 | - Validity: 30 * rotationDay,
142 | - Refresh: 15 * rotationDay,
143 | + Validity: 365 * rotationDay,
144 | + Refresh: 180 * rotationDay,
145 | RefreshOnlyWhenExpired: refreshOnlyWhenExpired,
146 | CertCreator: &certrotation.ClientRotation{
147 | UserInfo: &user.DefaultInfo{Name: "system:control-plane-node-admin", Groups: []string{"system:masters"}},
148 | @@ -530,8 +530,8 @@ func newCertRotationController(
149 | certrotation.RotatedSigningCASecret{
150 | Namespace: operatorclient.OperatorNamespace,
151 | Name: "kube-control-plane-signer",
152 | - Validity: 60 * defaultRotationDay,
153 | - Refresh: 30 * defaultRotationDay,
154 | + Validity: 2 * 365 * defaultRotationDay,
155 | + Refresh: 365 * defaultRotationDay,
156 | RefreshOnlyWhenExpired: refreshOnlyWhenExpired,
157 | Informer: kubeInformersForNamespaces.InformersFor(operatorclient.OperatorNamespace).Core().V1().Secrets(),
158 | Lister: kubeInformersForNamespaces.InformersFor(operatorclient.OperatorNamespace).Core().V1().Secrets().Lister(),
159 | @@ -549,8 +549,8 @@ func newCertRotationController(
160 | certrotation.RotatedSelfSignedCertKeySecret{
161 | Namespace: operatorclient.TargetNamespace,
162 | Name: "check-endpoints-client-cert-key",
163 | - Validity: 30 * rotationDay,
164 | - Refresh: 15 * rotationDay,
165 | + Validity: 365 * rotationDay,
166 | + Refresh: 180 * rotationDay,
167 | RefreshOnlyWhenExpired: refreshOnlyWhenExpired,
168 | CertCreator: &certrotation.ClientRotation{
169 | UserInfo: &user.DefaultInfo{Name: "system:serviceaccount:openshift-kube-apiserver:check-endpoints"},
170 | @@ -592,9 +592,9 @@ func newCertRotationController(
171 | // This needs to live longer then control plane certs so there is high chance that if a cluster breaks
172 | // because of expired certs these are still valid to use for collecting data using localhost-recovery
173 | // endpoint with long lived serving certs for localhost.
174 | - Validity: 120 * defaultRotationDay,
175 | - // We rotate sooner so certs are always valid for 90 days (30 days more then kube-control-plane-signer)
176 | - Refresh: 30 * defaultRotationDay,
177 | + Validity: 3 * 365 * defaultRotationDay,
178 | +	// We rotate sooner so certs are always valid for 730 days (365 days more than the kube-control-plane-signer refresh interval)
179 | + Refresh: 365 * defaultRotationDay,
180 | RefreshOnlyWhenExpired: refreshOnlyWhenExpired,
181 | CertCreator: &certrotation.ClientRotation{
182 | UserInfo: &user.DefaultInfo{
183 |
184 |
--------------------------------------------------------------------------------
/createdisk.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -exuo pipefail

export LC_ALL=C
export LANG=C

# Shared helpers: ${JQ}, wait_for_ssh, create_qemu_image, bundle generators, ...
source tools.sh
source createdisk-library.sh

SSH="ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i id_ecdsa_crc"
SCP="scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i id_ecdsa_crc"

# Directory produced by the installer step; overridable via first CLI argument.
INSTALL_DIR=${1:-crc-tmp-install-data}

# Read bundle metadata. ${INSTALL_DIR} is quoted so a path containing spaces
# or glob characters does not word-split (ShellCheck SC2086).
OPENSHIFT_VERSION=$(${JQ} -r .clusterInfo.openshiftVersion "${INSTALL_DIR}/crc-bundle-info.json")
BASE_DOMAIN=$(${JQ} -r .clusterInfo.baseDomain "${INSTALL_DIR}/crc-bundle-info.json")
BUNDLE_TYPE=$(${JQ} -r .type "${INSTALL_DIR}/crc-bundle-info.json")
ADDITIONAL_PACKAGES="cloud-init gvisor-tap-vsock-gvforwarder"
PRE_DOWNLOADED_ADDITIONAL_PACKAGES=""

# Select the bundle-name prefix and the guest base OS for this bundle type.
case ${BUNDLE_TYPE} in
    microshift)
        destDirPrefix="crc_${BUNDLE_TYPE}"
        BASE_OS=rhel
        ;;
    okd)
        destDirPrefix="crc_${BUNDLE_TYPE}"
        # Base OS is not changed for scos-okd because `/proc/cmdline` still contains fedora-coreos
        # https://github.com/okd-project/okd-scos/issues/18
        BASE_OS=fedora-coreos
        ;;
    snc)
        destDirPrefix="crc"
        BASE_OS=rhcos
        ;;
    *)
        echo "Unknown bundle type '$BUNDLE_TYPE'"
        exit 1
        ;;
esac
42 |
# SNC_PRODUCT_NAME: If user want to use other than default product name (crc)
# VM_PREFIX: short VM name (set by SNC_PRODUCT_NAME) + random string generated by openshift-installer
SNC_PRODUCT_NAME=${SNC_PRODUCT_NAME:-crc}
VM_NAME=${SNC_PRODUCT_NAME}

# Resolve the guest's IPv4 address from `virsh domifaddr` output.
# NOTE(review): `tail -2 | head -1` assumes the address of interest is on the
# second-to-last output line — confirm for guests with multiple interfaces.
VM_IP=$(sudo virsh domifaddr ${VM_NAME} | tail -2 | head -1 | awk '{print $4}' | cut -d/ -f1)

# Block until the guest accepts SSH connections (helper from createdisk-library.sh).
wait_for_ssh ${VM_NAME} ${VM_IP}

if [ ${BUNDLE_TYPE} != "microshift" ]; then
    # Disable kubelet service
    ${SSH} core@${VM_IP} -- sudo systemctl disable kubelet

    # Stop the kubelet service so it will not reprovision the pods
    ${SSH} core@${VM_IP} -- sudo systemctl stop kubelet
fi

# Enable the system and user level podman.socket service for API V2
${SSH} core@${VM_IP} -- sudo systemctl enable podman.socket
${SSH} core@${VM_IP} -- systemctl --user enable podman.socket
63 |
64 | if [ ${BUNDLE_TYPE} == "microshift" ]; then
65 | # Pull the openshift release images here because microshift bundle creation does
66 | # not run the microshift service, which would normally fetch these images. The
67 | # service is instead started later by crc, so the user gets a fresh cluster while
68 | # the images are still cached in the bundle.
69 | ${SSH} core@${VM_IP} 'sudo bash -x -s' </dev/null 2>&1; then
95 | image_tag=${OPENSHIFT_VERSION}
96 | fi
97 |
98 | # create the tap device interface with specified mac address
99 | # this mac address is used to allocate a specific IP to the VM
100 | # when tap device is in use.
101 | ${SSH} core@${VM_IP} 'sudo bash -x -s' < $INSTALL_DIR/routes-controller.yaml
137 | ${SCP} $INSTALL_DIR/routes-controller.yaml core@${VM_IP}:/home/core/
138 | ${SSH} core@${VM_IP} -- 'sudo mkdir -p /opt/crc && sudo mv /home/core/routes-controller.yaml /opt/crc/'
139 |
140 | if [ ${BUNDLE_TYPE} != "microshift" ]; then
141 | # Add internalIP as node IP for kubelet systemd unit file
142 | # More details at https://bugzilla.redhat.com/show_bug.cgi?id=1872632
143 | ${SSH} core@${VM_IP} 'sudo bash -x -s' < /etc/systemd/system/kubelet.service.d/80-nodeip.conf
145 | echo 'Environment=KUBELET_NODE_IP="${VM_IP}"' >> /etc/systemd/system/kubelet.service.d/80-nodeip.conf
146 | EOF
147 | fi
148 |
149 | if [ "${ARCH}" == "aarch64" ] && [ ${BUNDLE_TYPE} != "okd" ]; then
150 | # Install qemu-user-static-x86 package from fedora-updates repo to run x86 image on M1
151 | # Not supported by RHEL https://access.redhat.com/solutions/5654221 and not included
152 | # in any subscription repo.
153 | cat > /tmp/fedora-updates.repo <<'EOF'
154 | [fedora-updates]
155 | name=Fedora 41 - $basearch - Updates
156 | metalink=https://mirrors.fedoraproject.org/metalink?repo=updates-released-f41&arch=$basearch
157 | enabled=1
158 | type=rpm
159 | repo_gpgcheck=0
160 | gpgcheck=0
161 | EOF
162 | ${SCP} /tmp/fedora-updates.repo core@${VM_IP}:/tmp
163 | ${SSH} core@${VM_IP} -- "sudo mv /tmp/fedora-updates.repo /etc/yum.repos.d"
164 | ${SSH} core@${VM_IP} -- "mkdir -p ~/packages && dnf download --downloadonly --downloaddir ~/packages qemu-user-static-x86 --resolve"
165 | ${SSH} core@${VM_IP} -- "sudo rm -fr /etc/yum.repos.d/fedora-updates.repo"
166 | PRE_DOWNLOADED_ADDITIONAL_PACKAGES+=" qemu-user-static-x86"
167 | fi
168 |
169 | # install 9pfs binary from COPR repo so that it can be used to
170 | # set up 9p file sharing on Windows
171 | if [ "${SNC_GENERATE_WINDOWS_BUNDLE}" != "0" ]; then
172 | ${SSH} core@${VM_IP} -- "sudo dnf -y copr enable mskvarla/9pfs"
173 | ${SSH} core@${VM_IP} -- "mkdir -p ~/packages && dnf download --downloadonly --downloaddir ~/packages 9pfs --resolve"
174 | ${SSH} core@${VM_IP} -- "sudo dnf -y copr disable mskvarla/9pfs"
175 | PRE_DOWNLOADED_ADDITIONAL_PACKAGES+=" 9pfs"
176 | fi
177 |
178 | # Beyond this point, packages added to the ADDITIONAL_PACKAGES and PRE_DOWNLOADED_ADDITIONAL_PACKAGES
179 | # variables won’t be installed in the guest
180 | install_additional_packages ${VM_IP}
181 | copy_systemd_units
182 |
183 | # Create marker file with default value expected by systemd units
184 | # CRC_SELF_SUFFICIENT=0 to ensure bundle works with CRC without a
185 | # cloud-init configuration
186 | ${SSH} core@${VM_IP} 'sudo bash -x -s' < /etc/cloud/cloud.cfg.d/05_disable-network.cfg
216 | network:
217 | config: disabled
218 | EFF
219 | EOF
220 |
221 | # Add file resize cloud-init config
222 | # Taken from https://gitlab.com/fedora/bootc/examples/-/blob/main/cloud-init/10_bootc.cfg
223 | ${SSH} core@${VM_IP} 'sudo bash -x -s' << EOF
224 | cat << EFF > /etc/cloud/cloud.cfg.d/10_disk_resize.cfg
225 | growpart:
226 | mode: auto
227 | devices: ["/sysroot"]
228 |
229 | resize_rootfs: false
230 | EOF
231 |
# Disable cloud-init hostname update
${SSH} core@${VM_IP} -- 'sudo sed -i "s/^preserve_hostname: false$/preserve_hostname: true/" /etc/cloud/cloud.cfg'

# Cleanup cloud-init config
${SSH} core@${VM_IP} -- "sudo cloud-init clean --logs"

# Shutdown the VM
shutdown_vm ${VM_NAME}

# libvirt image generation
get_dest_dir_suffix "${OPENSHIFT_VERSION}"
destDirSuffix="${DEST_DIR_SUFFIX}"

libvirtDestDir="${destDirPrefix}_libvirt_${destDirSuffix}"
# Quote the expansions so `rm` cannot word-split or glob the directory names
# (ShellCheck SC2086).
rm -fr "${libvirtDestDir}" "${libvirtDestDir}.crcbundle"
mkdir "$libvirtDestDir"

create_qemu_image "$libvirtDestDir"
copy_additional_files "$INSTALL_DIR" "$libvirtDestDir" "${VM_NAME}"
if [ "${SNC_GENERATE_LINUX_BUNDLE}" != "0" ]; then
    create_tarball "$libvirtDestDir"
fi

# HyperV image generation
#
# This must be done after the generation of libvirt image as it reuses some of
# the content of $libvirtDestDir
if [ "${SNC_GENERATE_WINDOWS_BUNDLE}" != "0" ]; then
    hypervDestDir="${destDirPrefix}_hyperv_${destDirSuffix}"
    rm -fr "${hypervDestDir}" "${hypervDestDir}.crcbundle"
    generate_hyperv_bundle "$libvirtDestDir" "$hypervDestDir"
fi

# vfkit image generation
# This must be done after the generation of libvirt image as it reuses some of
# the content of $libvirtDestDir
if [ "${SNC_GENERATE_MACOS_BUNDLE}" != "0" ]; then
    vfkitDestDir="${destDirPrefix}_vfkit_${destDirSuffix}"
    rm -fr "${vfkitDestDir}" "${vfkitDestDir}.crcbundle"
    generate_vfkit_bundle "$libvirtDestDir" "$vfkitDestDir"
fi
273 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
203 |
--------------------------------------------------------------------------------
/snc-library.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -exuo pipefail
4 |
function preflight_failure() {
    # Report a failed preflight check. The failure is fatal unless the user
    # opted out by setting SNC_NON_FATAL_PREFLIGHT_CHECKS (to any non-empty value).
    local failure_message="$1"
    echo "${failure_message}"
    [ -n "${SNC_NON_FATAL_PREFLIGHT_CHECKS-}" ] || exit 1
}
12 |
function check_oc_version() {
    # Succeed (exit status 0) when the local oc binary at $OC reports a client
    # release version equal to ${OPENSHIFT_RELEASE_VERSION}.
    local current_oc_version=
    if [ -f "$OC" ]; then
        current_oc_version=$(${OC} version --client -o json | jq -r .releaseClientVersion)
    fi

    # Quote both operands: with an empty version (oc missing or unparsable
    # output) the unquoted form expands to `[ = x ]`, a `[` syntax error,
    # instead of a clean "versions differ" result.
    [ "${current_oc_version}" = "${OPENSHIFT_RELEASE_VERSION}" ]
}
21 |
function download_oc() {
    # Download the oc client binaries for ${OPENSHIFT_RELEASE_VERSION} from
    # ${MIRROR} into openshift-clients/{linux,mac,windows}. The mac and windows
    # clients are fetched only when the corresponding SNC_GENERATE_*_BUNDLE
    # variable is not "0". Skips everything when the local linux oc already
    # matches the requested version.
    local current_oc_version=

    if [ -f "$OC" ]; then
        current_oc_version=$(${OC} version --client -o json | jq -r .releaseClientVersion)
        # Quote both operands: an empty/unparsable version must compare as a
        # mismatch instead of raising a `[` syntax error.
        if [ "${current_oc_version}" = "${OPENSHIFT_RELEASE_VERSION}" ]; then
            echo "No need to download oc, local oc is already version ${OPENSHIFT_RELEASE_VERSION}"
            return
        fi
    fi

    mkdir -p openshift-clients/linux
    curl -L "${MIRROR}/${OPENSHIFT_RELEASE_VERSION}/openshift-client-linux-${yq_ARCH}-rhel8-${OPENSHIFT_RELEASE_VERSION}.tar.gz" | tar -zx -C openshift-clients/linux oc

    if [ "${SNC_GENERATE_MACOS_BUNDLE}" != "0" ]; then
        mkdir -p openshift-clients/mac
        # The arm64 mac client tarball carries the arch in its file name.
        if [ "${yq_ARCH}" == "arm64" ]; then
            curl -L "${MIRROR}/${OPENSHIFT_RELEASE_VERSION}/openshift-client-mac-${yq_ARCH}-${OPENSHIFT_RELEASE_VERSION}.tar.gz" | tar -zx -C openshift-clients/mac oc
        else
            curl -L "${MIRROR}/${OPENSHIFT_RELEASE_VERSION}/openshift-client-mac-${OPENSHIFT_RELEASE_VERSION}.tar.gz" | tar -zx -C openshift-clients/mac oc
        fi
    fi
    if [ "${SNC_GENERATE_WINDOWS_BUNDLE}" != "0" ]; then
        mkdir -p openshift-clients/windows
        curl -L "${MIRROR}/${OPENSHIFT_RELEASE_VERSION}/openshift-client-windows-${OPENSHIFT_RELEASE_VERSION}.zip" > openshift-clients/windows/oc.zip
        ${UNZIP} -o -d openshift-clients/windows/ openshift-clients/windows/oc.zip
    fi
}
50 |
# Validate the host before attempting an install: pull secret presence,
# libvirt connectivity, default network state, KVM availability and (for
# snc/okd bundles) DNS resolution of the cluster API hostname.
# Most failures go through preflight_failure so they can be made non-fatal
# via SNC_NON_FATAL_PREFLIGHT_CHECKS; the pull secret checks always exit.
function run_preflight_checks() {
    local bundle_type=$1
    if [ -z "${OPENSHIFT_PULL_SECRET_PATH-}" ]; then
        echo "OpenShift pull secret file path must be specified through the OPENSHIFT_PULL_SECRET_PATH environment variable"
        exit 1
    elif [ ! -f "${OPENSHIFT_PULL_SECRET_PATH}" ]; then
        echo "Provided OPENSHIFT_PULL_SECRET_PATH (${OPENSHIFT_PULL_SECRET_PATH}) does not exist"
        exit 1
    fi

    echo "Checking libvirt and DNS configuration"

    LIBVIRT_URI=qemu:///system
    # check if we can connect to ${LIBVIRT_URI}
    if ! sudo virsh -c ${LIBVIRT_URI} uri >/dev/null; then
        preflight_failure  "libvirtd is not accessible over ${LIBVIRT_URI}, check if libvirt daemon is running https://libvirt.org/daemons.html"
    fi

    if ! sudo virsh -c ${LIBVIRT_URI} net-info default &> /dev/null; then
        echo "Installing libvirt default network configuration"
        sudo dnf install -y libvirt-daemon-config-network || exit 1
    fi
    echo "default libvirt network is available"

    # Check if default libvirt network is Active, start it if not
    if [[ $(sudo virsh -c ${LIBVIRT_URI} net-info default | awk '{print $2}' | sed '3q;d') == "no" ]]; then
        echo "Default network is not active, starting it"
        sudo virsh -c ${LIBVIRT_URI} net-start default || exit 1
    fi

    # Just warn if architecture is not supported
    case $ARCH in
        x86_64|ppc64le|s390x|aarch64)
            echo "The host arch is ${ARCH}.";;
        *)
            echo "The host arch is ${ARCH}. This is not supported by SNC!";;
    esac

    # check for availability of a hypervisor using kvm
    if ! sudo virsh -c ${LIBVIRT_URI} capabilities | ${XMLLINT} --xpath "/capabilities/guest/arch[@name='${ARCH}']/domain[@type='kvm']" - &>/dev/null; then
        preflight_failure "Your ${ARCH} platform does not provide a hardware-accelerated hypervisor, it's strongly recommended to enable it before running SNC. Check virt-host-validate for more detailed diagnostics"
        return
    fi
    if [ "${bundle_type}" == "snc" ] || [ "${bundle_type}" == "okd" ]; then
        # check that api.${SNC_PRODUCT_NAME}.${BASE_DOMAIN} either can't be resolved, or resolves to 192.168.126.11
        local ping_status
        # `|| true` keeps an unresolvable hostname from aborting the script
        # under `set -o pipefail`. The previous stray `>/dev/null` redirect
        # applied to `true` (not ping) and has been dropped.
        ping_status="$(ping -c1 "api.${SNC_PRODUCT_NAME}.${BASE_DOMAIN}" | head -1 || true)"
        if echo ${ping_status} | grep "PING api.${SNC_PRODUCT_NAME}.${BASE_DOMAIN} (" && ! echo ${ping_status} | grep "192.168.126.11)"; then
            preflight_failure "DNS setup seems wrong, api.${SNC_PRODUCT_NAME}.${BASE_DOMAIN} resolved to an IP which is not 192.168.126.11, please check your NetworkManager configuration and /etc/hosts content"
            return
        fi
    fi
    echo "libvirt and DNS configuration successfully checked"
}
105 |
# Inject the user's pull secret into the given yaml file (in place, via yq).
# $1: path to the install-config style yaml file to update.
# Tracing is temporarily disabled so the secret never appears in CI logs.
function replace_pull_secret() {
    # Hide the output of reading $OPENSHIFT_PULL_SECRET_PATH so that it doesn't
    # get leaked in CI logs
    set +x
    local filename=$1
    # Quote both paths so filenames with spaces cannot word-split; the secret
    # itself travels via the environment (strenv), never the command line.
    pull_secret="$(< "$OPENSHIFT_PULL_SECRET_PATH")" ${YQ} eval --inplace '.pullSecret = strenv(pull_secret) | .pullSecret style="literal"' "$filename"
    set -x
}
114 |
# Write ${INSTALL_DIR}/crc-bundle-info.json describing the bundle being built
# (format version, type, arch, build and cluster info). For snc/okd bundles
# the installer version is added and the apps domain switches to the
# `apps-<name>.<domain>` form.
# $1: bundle type (snc / okd / microshift ...)
function create_json_description {
    local bundle_type=$1
    sncGitHash=$(git describe --abbrev=4 HEAD 2>/dev/null || git rev-parse --short=4 HEAD)
    echo {} | ${JQ} '.version = "1.4"' \
        | ${JQ} ".type = \"${bundle_type}\"" \
        | ${JQ} ".arch = \"${yq_ARCH}\"" \
        | ${JQ} ".buildInfo.buildTime = \"$(date -u --iso-8601=seconds)\"" \
        | ${JQ} ".buildInfo.sncVersion = \"git${sncGitHash}\"" \
        | ${JQ} ".clusterInfo.openshiftVersion = \"${OPENSHIFT_RELEASE_VERSION}\"" \
        | ${JQ} ".clusterInfo.clusterName = \"${SNC_PRODUCT_NAME}\"" \
        | ${JQ} ".clusterInfo.baseDomain = \"${BASE_DOMAIN}\"" \
        | ${JQ} ".clusterInfo.appsDomain = \"apps.${SNC_PRODUCT_NAME}.${BASE_DOMAIN}\"" >${INSTALL_DIR}/crc-bundle-info.json
    if [ "${bundle_type}" == "snc" ] || [ "${bundle_type}" == "okd" ]; then
        openshiftInstallerVersion=$(${OPENSHIFT_INSTALL} version)
        tmp=$(mktemp)
        # Read the file directly instead of a useless `cat |` pipeline; write
        # to a temp file first since jq cannot edit in place.
        ${JQ} ".buildInfo.openshiftInstallerVersion = \"${openshiftInstallerVersion}\"" ${INSTALL_DIR}/crc-bundle-info.json \
            | ${JQ} ".clusterInfo.appsDomain = \"apps-${SNC_PRODUCT_NAME}.${BASE_DOMAIN}\"" \
            > ${tmp} && mv ${tmp} ${INSTALL_DIR}/crc-bundle-info.json
    fi
}
136 |
137 |
# Set up CSI hostpath-provisioner based persistent storage and point the
# internal image registry at a PVC backed by it.
# $1: bundle type ("snc" selects the kustomized, registry.redhat.io images)
function create_pvs() {
    local bundle_type=$1

    # Namespace, external-provisioner RBAC, CSIDriver object and the SCC
    # allowing the provisioner containers to run as root on the host network.
    retry ${OC} apply -f kubevirt-hostpath-provisioner-csi/namespace.yaml
    retry ${OC} apply -f kubevirt-hostpath-provisioner-csi/external-provisioner-rbac.yaml -n hostpath-provisioner
    retry ${OC} apply -f kubevirt-hostpath-provisioner-csi/csi-driver-hostpath-provisioner.yaml -n hostpath-provisioner
    retry ${OC} apply -f kubevirt-hostpath-provisioner-csi/kubevirt-hostpath-security-constraints-csi.yaml

    # Deploy the csi driver components. For OCP ("snc") the kustomization
    # redirects the images to registry.redhat.io.
    case "${bundle_type}" in
        snc)
            retry ${OC} apply -k kubevirt-hostpath-provisioner-csi/csi-driver -n hostpath-provisioner
            ;;
        *)
            retry ${OC} apply -f kubevirt-hostpath-provisioner-csi/csi-driver/csi-kubevirt-hostpath-provisioner.yaml -n hostpath-provisioner
            ;;
    esac

    # StorageClass crc-csi-hostpath-provisioner and the registry PVC using it
    retry ${OC} apply -f kubevirt-hostpath-provisioner-csi/csi-sc.yaml
    retry ${OC} apply -f registry_pvc.yaml

    # Wire the image registry operator to the new claim
    retry ${OC} patch config.imageregistry.operator.openshift.io/cluster --patch='[{"op": "add", "path": "/spec/storage/pvc", "value": {"claim": "crc-image-registry-storage"}}]' --type=json
}
168 |
# This follows https://blog.openshift.com/enabling-openshift-4-clusters-to-stop-and-resume-cluster-vms/
# in order to trigger regeneration of the initial 24h certs the installer created on the cluster.
# Expects the host clock to have been moved one day into the past beforehand;
# it restores the clock, reboots the VM and approves CSRs until the kubelet
# client/server certs pass `openssl x509 -checkend 2160000` (~25 days).
function renew_certificates() {
    shutdown_vm ${SNC_PRODUCT_NAME}

    # Enable the network time sync and set the clock back to present on host
    sudo date -s '1 day'
    sudo timedatectl set-ntp on

    start_vm ${SNC_PRODUCT_NAME} api.${SNC_PRODUCT_NAME}.${BASE_DOMAIN}

    # Up to 60 rounds, 10s apart: while either kubelet cert is still
    # short-lived, approve all pending CSRs and re-check.
    i=0
    while [ $i -lt 60 ]; do
        if ! ${SSH} core@api.${SNC_PRODUCT_NAME}.${BASE_DOMAIN} -- sudo openssl x509 -checkend 2160000 -noout -in /var/lib/kubelet/pki/kubelet-client-current.pem ||
           ! ${SSH} core@api.${SNC_PRODUCT_NAME}.${BASE_DOMAIN} -- sudo openssl x509 -checkend 2160000 -noout -in /var/lib/kubelet/pki/kubelet-server-current.pem; then
            retry ${OC} get csr -ojson > certs.json
            retry ${OC} adm certificate approve -f certs.json
            rm -f certs.json
            echo "Retry loop $i, wait for 10sec before starting next loop"
            sleep 10
        else
            break
        fi
        # POSIX arithmetic expansion instead of the deprecated `$[...]` form
        i=$((i + 1))
    done

    if ! ${SSH} core@api.${SNC_PRODUCT_NAME}.${BASE_DOMAIN} -- sudo openssl x509 -checkend 2160000 -noout -in /var/lib/kubelet/pki/kubelet-client-current.pem; then
        echo "kubelet client certs are not yet rotated to have 30 days validity"
        exit 1
    fi

    if ! ${SSH} core@api.${SNC_PRODUCT_NAME}.${BASE_DOMAIN} -- sudo openssl x509 -checkend 2160000 -noout -in /var/lib/kubelet/pki/kubelet-server-current.pem; then
        echo "kubelet server certs are not yet rotated to have 30 days validity"
        exit 1
    fi
}
206 |
# Deletes an operator and waits until the pod it manages is gone.
# $1: object to delete (may expand to several words, left unquoted on purpose)
# $2: namespace, $3: pod label selector
function delete_operator() {
    local delete_object=$1
    local namespace=$2
    local pod_selector=$3

    # Make sure the API is reachable before resolving the pod name
    retry ${OC} get pods
    pod=$(${OC} get pod -l "${pod_selector}" -o jsonpath="{.items[0].metadata.name}" -n "${namespace}")

    retry ${OC} delete ${delete_object} -n "${namespace}"
    # Wait until the operator pod is deleted before trying to delete the resources it manages;
    # fall back to a forced delete, and tolerate the pod being gone already.
    ${OC} wait --for=delete "pod/${pod}" --timeout=120s -n "${namespace}" || ${OC} delete "pod/${pod}" --grace-period=0 --force -n "${namespace}" || true
}
220 |
# Succeed when every cluster operator reports Available=True.
# The jsonpath prints one status word per operator on a single line; the
# function fails (grep drops the line / sees no input) as soon as a `False`
# appears or the oc call produced nothing.
function all_operators_available() {
    local availability
    availability=$(${OC} get co -ojsonpath='{.items[*].status.conditions[?(@.type=="Available")].status}')
    # printf (no trailing newline) so empty oc output still fails the grep
    printf '%s' "${availability}" | grep -v False
}
226 |
# Succeed when no cluster operator reports Progressing=True.
# Single-line jsonpath output; any `True` in it makes the grep fail.
function no_operators_progressing() {
    local progressing
    progressing=$(${OC} get co -ojsonpath='{.items[*].status.conditions[?(@.type=="Progressing")].status}')
    # printf (no trailing newline) so empty oc output still fails the grep
    printf '%s' "${progressing}" | grep -v True
}
232 |
# Succeed when no cluster operator reports Degraded=True.
# Single-line jsonpath output; any `True` in it makes the grep fail.
function no_operators_degraded() {
    local degraded
    degraded=$(${OC} get co -ojsonpath='{.items[*].status.conditions[?(@.type=="Degraded")].status}')
    # printf (no trailing newline) so empty oc output still fails the grep
    printf '%s' "${degraded}" | grep -v True
}
238 |
# Succeed when every pod outside $1 (ignored namespace) is Running or Completed:
# the negated pipeline passes exactly when no pod line survives the filter.
function all_pods_are_running_completed() {
    local ignoreNamespace=$1
    ! ${OC} get pod --no-headers --all-namespaces --field-selector=metadata.namespace!="${ignoreNamespace}" | grep -v -e Running -e Completed
}
243 |
# Poll the cluster operators until they have been simultaneously Available,
# not Progressing and not Degraded for 3 consecutive checks (30s apart,
# at most 30 checks), then wait for all pods (outside the optional ignored
# namespace, $1, default "none") to be Running or Completed.
function wait_till_cluster_stable() {
    sleep 1m

    local maxAttempts=30
    local requiredConsecutive=3
    local stableCount=0
    local ignoreNamespace=${1:-"none"}

    # Clear out failed pods before starting to poll
    retry ${OC} delete pods --field-selector=status.phase=Failed -A

    local attempt
    for (( attempt = 0; attempt < maxAttempts; attempt++ )); do
        if all_operators_available && no_operators_progressing && no_operators_degraded; then
            echo "All operators are available. Ensuring stability ..."
            stableCount=$((stableCount + 1))
        else
            echo "Some operators are still not available"
            stableCount=0
        fi
        if [ ${stableCount} -eq ${requiredConsecutive} ]; then
            echo "Cluster has stabilized"
            retry ${OC} delete pods --field-selector=status.phase=Failed -A
            break
        fi
        sleep 30s
    done

    # Wait till all the pods are either in running or completed state
    retry all_pods_are_running_completed "${ignoreNamespace}"
}
276 |
277 |
--------------------------------------------------------------------------------
/createdisk-library.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -exuo pipefail
4 |
# Compute the global DEST_DIR_SUFFIX as "<version>_<arch>", appending the CI
# pull-request number ("_<PULL_NUMBER>") when PULL_NUMBER is set and non-empty.
# $1: bundle version; reads the yq_ARCH global.
function get_dest_dir_suffix {
    local version=$1
    local suffix="${version}_${yq_ARCH}"
    if [ -n "${PULL_NUMBER-}" ]; then
        suffix="${suffix}_${PULL_NUMBER}"
    fi
    DEST_DIR_SUFFIX="${suffix}"
}
12 |
# Trim the VM image before bundling: drop extra ostree layers, rpm-ostree
# caches and log files. $1: VM name, $2: VM IP.
function cleanup_vm_image() {
    local vm_name=$1
    local vm_ip=$2

    # Reboot first so the VM runs the latest ostree layer: package changes
    # since the last boot may have produced a new deployment.
    shutdown_vm ${vm_name}
    start_vm ${vm_name} ${vm_ip}

    # Miscellaneous rpm-ostree data, then logs. `journalctl --rotate` and the
    # vacuum are run separately: a combined `--rotate --vacuum-time=1s` does
    # not remove all journal files.
    local cleanup_cmd
    for cleanup_cmd in \
        'sudo rpm-ostree cleanup --rollback --base --repomd' \
        'sudo journalctl --rotate' \
        'sudo journalctl --vacuum-time=1s' \
        'sudo find /var/log/ -iname "*.log" -exec rm -f {} \;'; do
        ${SSH} core@${vm_ip} -- "${cleanup_cmd}"
    done

    # Reboot again after removing the base deployment tree: the kernel
    # commandline changed (ostree=/ostree/boot.1/fedora-coreos/$hash/0
    # alternates between boot.0 and boot.1 when cleanup runs).
    shutdown_vm ${vm_name}
    start_vm ${vm_name} ${vm_ip}
}
40 |
41 | function sparsify {
42 | local baseDir=$1
43 | local srcFile=$2
44 | local destFile=$3
45 |
46 | export LIBGUESTFS_BACKEND=direct
47 | # Check which partition is labeled as `root`
48 | partition=$(${VIRT_FILESYSTEMS} -a $baseDir/$srcFile -l --partitions | sort -rk4 -n | sed -n 1p | cut -f1 -d' ')
49 | # check if the base image has the lvm named as `rhel/root`
50 | if ${VIRT_FILESYSTEMS} --lvs -a ${baseDir}/${srcFile} | grep -q "rhel/root"; then
51 | partition="/dev/rhel/root"
52 | fi
53 |
54 | # https://bugzilla.redhat.com/show_bug.cgi?id=1837765
55 | export LIBGUESTFS_MEMSIZE=2048
56 | # Interact with guestfish directly
57 | eval $(echo nokey | ${GUESTFISH} --keys-from-stdin --listen )
58 | if [ $? -ne 0 ]; then
59 | echo "${GUESTFISH} failed to start, aborting"
60 | exit 1
61 | fi
62 |
63 | ${GUESTFISH} --remote <$destDir/crc-bundle-info.json
132 | }
133 |
# If BUNDLED_PULL_SECRET_PATH is set, copy that pull secret into the bundle
# directory ($1) and record its file name in the bundle metadata json.
function eventually_add_pull_secret {
    local destDir=$1

    if [ "${BUNDLED_PULL_SECRET_PATH-}" != "" ]
    then
        cat "$BUNDLED_PULL_SECRET_PATH" > "$destDir/default-pull-secret"
        # Read the json directly instead of a useless `cat |` pipeline; go
        # through a temp file since jq cannot edit its input in place.
        ${JQ} '.clusterInfo.openshiftPullSecret = "default-pull-secret"' "$destDir/crc-bundle-info.json" \
            >"$destDir/crc-bundle-info.json.tmp"
        mv "$destDir/crc-bundle-info.json.tmp" "$destDir/crc-bundle-info.json"
    fi
}
146 |
# Assemble the common bundle payload into $2: kubeconfig, cluster ssh key,
# Linux oc client, updated metadata and (optionally) the bundled pull secret.
# $1: installer dir, $2: bundle dir, $3: VM name
function copy_additional_files {
    local srcDir=$1
    local destDir=$2
    local vm_name=$3

    # Copy the kubeconfig file (use the named local, not positional $1)
    cp "$srcDir/auth/kubeconfig" "$destDir/"

    # Copy the cluster ssh private key, readable only by its owner
    cp id_ecdsa_crc "$destDir/"
    chmod 400 "$destDir/id_ecdsa_crc"

    # Copy oc client
    cp openshift-clients/linux/oc "$destDir/"

    update_json_description "$srcDir" "$destDir" "$vm_name"

    eventually_add_pull_secret "$destDir"
}
166 |
167 | function install_additional_packages() {
168 | local vm_ip=$1
169 | shift
170 | if [[ ${BASE_OS} = "fedora-coreos" ]]; then
171 | ${SSH} core@${vm_ip} -- 'sudo sed -i -z s/enabled=0/enabled=1/g /etc/yum.repos.d/centos.repo'
172 | ${SSH} core@${vm_ip} -- "sudo rpm-ostree install --allow-inactive $ADDITIONAL_PACKAGES"
173 | ${SSH} core@${vm_ip} -- 'sudo sed -i -z s/enabled=1/enabled=0/g /etc/yum.repos.d/centos.repo'
174 | else
175 | # Download the hyperV daemons dependency on host
176 | local pkgDir=$(mktemp -d tmp-rpmXXX)
177 | mkdir -p ${pkgDir}/packages
178 | sudo yum download --downloadonly --downloaddir ${pkgDir}/packages ${ADDITIONAL_PACKAGES} --resolve --alldeps
179 |
180 | # SCP the downloaded rpms to VM
181 | ${SCP} -r ${pkgDir}/packages core@${vm_ip}:/home/core/
182 |
183 | # Create local repo of downloaded RPMs in the VM
184 | ${SSH} core@${vm_ip} 'sudo bash -x -s' < /etc/yum.repos.d/local.repo << EOF
189 | [local]
190 | name=Local repo
191 | baseurl=file:///home/core/packages/
192 | enabled=1
193 | gpgcheck=0
194 | EOF'"
195 | # Install these rpms to VM
196 | ${SSH} core@${vm_ip} -- "sudo rpm-ostree install $ADDITIONAL_PACKAGES $PRE_DOWNLOADED_ADDITIONAL_PACKAGES"
197 |
198 | # Remove the packages and repo from VM
199 | ${SSH} core@${vm_ip} -- sudo rm -fr /home/core/packages
200 | ${SSH} core@${vm_ip} -- sudo rm -fr /etc/yum.repos.d/local.repo
201 |
202 | # Cleanup up packages
203 | rm -fr ${pkgDir}
204 | fi
205 | }
206 |
207 | function prepare_hyperV() {
208 | local vm_ip=$1
209 |
210 | ADDITIONAL_PACKAGES+=" hyperv-daemons"
211 |
212 | # Adding Hyper-V vsock support
213 | ${SSH} core@${vm_ip} 'sudo bash -x -s' < /etc/udev/rules.d/90-crc-vsock.rules
215 | EOF
216 | }
217 |
# Allow qemu-guest-agent to communicate over vsock: f36+ default selinux
# policy blocks it, so build our own policy module from qemuga-vsock.te,
# install it in the VM ($1: VM IP) and enable the agent service.
function prepare_qemu_guest_agent() {
    local vm_ip=$1

    # Probe whether checkmodule supports `-c 19` (RHEL8's prints
    # "invalid option"); only pass the flag when it is accepted.
    # pipefail must be off since checkmodule is expected to fail on rhel8.
    set +o pipefail
    if checkmodule -c 19 2>&1 | grep 'invalid option' >/dev/null; then
        : # RHEL8 checkmodule: run without the module-version flag
    else
        MOD_VERSION_ARG="-c 19"
    fi
    set -o pipefail
    /usr/bin/checkmodule ${MOD_VERSION_ARG-} -M -m -o qemuga-vsock.mod qemuga-vsock.te
    /usr/bin/semodule_package -o qemuga-vsock.pp -m qemuga-vsock.mod

    # Install the policy package, then the systemd unit for the agent
    ${SCP} qemuga-vsock.pp core@${vm_ip}:
    ${SSH} core@${vm_ip} 'sudo semodule -i qemuga-vsock.pp && rm qemuga-vsock.pp'
    ${SCP} qemu-guest-agent.service core@${vm_ip}:
    ${SSH} core@${vm_ip} 'sudo mv -Z qemu-guest-agent.service /etc/systemd/system/'
    ${SSH} core@${vm_ip} 'sudo systemctl daemon-reload'
    ${SSH} core@${vm_ip} 'sudo systemctl enable qemu-guest-agent.service'
}
242 |
# Build the macOS vfkit bundle: shared macOS payload plus a raw disk image
# (vfkit consumes raw, not qcow2). $1: source dir, $2: destination dir.
function generate_vfkit_bundle {
    local srcDir=$1
    local destDir=$2

    # Common macOS bundle content (oc client, keys, metadata)
    generate_macos_bundle "vfkit" "$@"

    # Convert the cluster disk to raw and record it in the metadata
    ${QEMU_IMG} convert -f qcow2 -O raw "$srcDir/${SNC_PRODUCT_NAME}.qcow2" "$destDir/${SNC_PRODUCT_NAME}.img"
    add_disk_info_to_json_description "${destDir}" "${SNC_PRODUCT_NAME}.img" "raw"

    create_tarball "$destDir"
}
254 |
# Populate the common macOS bundle directory: kubeconfig, ssh key, mac oc
# client, and bundle metadata updated with the oc file info and driver name.
# $1: driver/bundle type (e.g. vfkit), $2: source dir, $3: destination dir.
function generate_macos_bundle {
    local bundleType=$1
    local srcDir=$2
    local destDir=$3


    mkdir -p "$destDir"
    cp "$srcDir/kubeconfig" "$destDir/"
    cp "$srcDir/id_ecdsa_crc" "$destDir/"

    # Copy oc client
    cp openshift-clients/mac/oc "$destDir/"

    ocSize=$(du -b "$destDir/oc" | awk '{print $1}')
    ocSha256Sum=$(sha256sum "$destDir/oc" | awk '{print $1}')

    # Update the bundle metadata in a single jq invocation; --arg passes the
    # values safely instead of splicing them into the filter text, and one
    # process replaces the previous six chained ones.
    ${JQ} --arg name "${destDir}" \
          --arg size "${ocSize}" \
          --arg sha "${ocSha256Sum}" \
          --arg driver "${bundleType}" \
          '.name = $name
           | .storage.fileList[0].name = "oc"
           | .storage.fileList[0].type = "oc-executable"
           | .storage.fileList[0].size = $size
           | .storage.fileList[0].sha256sum = $sha
           | .driverInfo.name = $driver' \
          "$srcDir/crc-bundle-info.json" >"$destDir/crc-bundle-info.json"
}
281 |
# Record a disk image's name, format, size and sha256 in the bundle metadata.
# $1: bundle dir containing crc-bundle-info.json, $2: image file name,
# $3: image format (raw / vhdx / qcow2 ...).
function add_disk_info_to_json_description {
    local destDir=$1
    local imageFilename=$2
    local imageFormat=$3

    diskSize=$(du -b "$destDir/$imageFilename" | awk '{print $1}')
    diskSha256Sum=$(sha256sum "$destDir/$imageFilename" | awk '{print $1}')

    # One jq invocation with --arg-injected values instead of five chained
    # processes with shell-spliced filter strings.
    ${JQ} --arg image "${imageFilename}" \
          --arg format "${imageFormat}" \
          --arg size "${diskSize}" \
          --arg sha "${diskSha256Sum}" \
          '.nodes[0].diskImage = $image
           | .storage.diskImages[0].name = $image
           | .storage.diskImages[0].format = $format
           | .storage.diskImages[0].size = $size
           | .storage.diskImages[0].sha256sum = $sha' \
          "$destDir/crc-bundle-info.json" >"$destDir/crc-bundle-info.json.tmp"
    mv "$destDir/crc-bundle-info.json.tmp" "$destDir/crc-bundle-info.json"
}
298 |
# Build the Windows Hyper-V bundle: windows oc client, metadata and a
# dynamic-subformat vhdx disk image. $1: source dir, $2: destination dir
# (must not already exist — plain mkdir is intentional, fail fast on leftovers).
function generate_hyperv_bundle {
    local srcDir=$1
    local destDir=$2

    mkdir "$destDir"

    cp "$srcDir/kubeconfig" "$destDir/"
    cp "$srcDir/id_ecdsa_crc" "$destDir/"

    # Copy oc client
    cp openshift-clients/windows/oc.exe "$destDir/"

    ocSize=$(du -b "$destDir/oc.exe" | awk '{print $1}')
    ocSha256Sum=$(sha256sum "$destDir/oc.exe" | awk '{print $1}')

    # Single jq invocation; --arg passes values without shell-escaping issues.
    ${JQ} --arg name "${destDir}" \
          --arg size "${ocSize}" \
          --arg sha "${ocSha256Sum}" \
          '.name = $name
           | .storage.fileList[0].name = "oc.exe"
           | .storage.fileList[0].type = "oc-executable"
           | .storage.fileList[0].size = $size
           | .storage.fileList[0].sha256sum = $sha
           | .driverInfo.name = "hyperv"' \
          "$srcDir/crc-bundle-info.json" >"$destDir/crc-bundle-info.json"

    ${QEMU_IMG} convert -f qcow2 -O vhdx -o subformat=dynamic "$srcDir/${SNC_PRODUCT_NAME}.qcow2" "$destDir/${SNC_PRODUCT_NAME}.vhdx"
    add_disk_info_to_json_description "${destDir}" "${SNC_PRODUCT_NAME}.vhdx" vhdx

    create_tarball "$destDir"
}
328 |
# Compress the bundle directory ($1) into "<dir>.crcbundle": a zstd-compressed
# tar with deterministic member ordering (--sort=name) and sparse handling (S).
function create_tarball {
    local bundleDir=$1

    # CRC_ZSTD_EXTRA_FLAGS stays unquoted on purpose: it may hold several flags.
    tar cSf - --sort=name "$bundleDir" | ${ZSTD} --no-progress ${CRC_ZSTD_EXTRA_FLAGS} --threads=0 -o "${bundleDir}".crcbundle
}
334 |
# MicroShift bundles keep the pull secret on disk during the build; strip it
# so it does not ship in the final image. Other bundle types need no cleanup.
function remove_pull_secret_from_disk() {
    if [ "${BUNDLE_TYPE}" == "microshift" ]; then
        ${SSH} core@${VM_IP} -- sudo rm -f /etc/crio/openshift-pull-secret
    fi
}
342 |
# Install the CRC systemd units and helper scripts into the VM and enable the
# services. For snc/okd and microshift bundles, systemd/crc-dnsmasq.sh is
# first generated from the template with the bundle-specific apps domain.
function copy_systemd_units() {
    # The apps domain is the only difference between the former duplicated
    # case branches; other bundle types get no generated dnsmasq script.
    local apps_domain=""
    case "${BUNDLE_TYPE}" in
        "snc"|"okd")
            apps_domain="apps-crc.testing"
            ;;
        "microshift")
            apps_domain="apps.crc.testing"
            ;;
    esac
    if [ -n "${apps_domain}" ]; then
        # Per-command environment assignment replaces the export/unset pair.
        APPS_DOMAIN="${apps_domain}" envsubst '${APPS_DOMAIN}' < systemd/dnsmasq.sh.template > systemd/crc-dnsmasq.sh
    fi

    ${SSH} core@${VM_IP} -- 'mkdir -p /home/core/systemd-units && mkdir -p /home/core/systemd-scripts'
    ${SCP} systemd/crc-*.service core@${VM_IP}:/home/core/systemd-units/
    ${SCP} systemd/crc-*.target core@${VM_IP}:/home/core/systemd-units/
    ${SCP} -r systemd/*.d core@${VM_IP}:/home/core/systemd-units/
    ${SCP} systemd/crc-*.sh core@${VM_IP}:/home/core/systemd-scripts/
    ${SCP} systemd/crc-*.py core@${VM_IP}:/home/core/systemd-scripts/

    # OCP-only units and scripts
    case "${BUNDLE_TYPE}" in
        "snc"|"okd")
            ${SCP} systemd/ocp-*.service core@${VM_IP}:/home/core/systemd-units/
            ${SCP} systemd/ocp-*.sh core@${VM_IP}:/home/core/systemd-scripts/
            ;;
    esac

    # Install units/scripts system-wide, make scripts executable and restore
    # selinux labels, then enable every copied service unit.
    ${SSH} core@${VM_IP} -- 'sudo cp -r /home/core/systemd-units/* /etc/systemd/system/ && sudo cp /home/core/systemd-scripts/* /usr/local/bin/'
    ${SSH} core@${VM_IP} -- 'ls /home/core/systemd-scripts/ | xargs -t -I % sudo chmod +x /usr/local/bin/%'
    ${SSH} core@${VM_IP} -- 'sudo restorecon -rv /usr/local/bin'

    ${SSH} core@${VM_IP} -- 'ls /home/core/systemd-units/*.service | xargs basename -a | xargs sudo systemctl enable'

    ${SSH} core@${VM_IP} -- 'rm -rf /home/core/systemd-units /home/core/systemd-scripts'
}
379 |
--------------------------------------------------------------------------------
/snc.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# snc.sh — build a single-node OpenShift/OKD cluster VM used to create CRC
# bundles. Relies on helpers from tools.sh and snc-library.sh.

set -exuo pipefail

# Stable locale so tool output parsed later in the script is predictable
export LC_ALL=C.UTF-8
export LANG=C.UTF-8

source tools.sh
source snc-library.sh

# kill all the child processes for this script when it exits
trap 'jobs=($(jobs -p)); [ -n "${jobs-}" ] && ((${#jobs})) && kill "${jobs[@]}" || true' EXIT

# If the user set OKD_VERSION in the environment, then use it to override OPENSHIFT_VERSION, MIRROR, and OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE
# Unless, those variables are explicitly set as well.
OKD_VERSION=${OKD_VERSION:-none}
BUNDLE_TYPE="snc"
if [[ ${OKD_VERSION} != "none" ]]
then
    OPENSHIFT_VERSION=${OKD_VERSION}
    MIRROR=${MIRROR:-https://github.com/okd-project/okd/releases/download}
    BUNDLE_TYPE="okd"
fi
24 |
# Build-time configuration, each overridable from the environment.
INSTALL_DIR=crc-tmp-install-data
SNC_PRODUCT_NAME=${SNC_PRODUCT_NAME:-crc}
# VM sizing — presumably memory in MiB, vCPU count and disk in GiB;
# TODO(review): confirm units against the create_vm helper.
SNC_CLUSTER_MEMORY=${SNC_CLUSTER_MEMORY:-14336}
SNC_CLUSTER_CPUS=${SNC_CLUSTER_CPUS:-6}
CRC_VM_DISK_SIZE=${CRC_VM_DISK_SIZE:-31}
BASE_DOMAIN=${CRC_BASE_DOMAIN:-testing}
CRC_PV_DIR="/mnt/pv-data"
# ssh/scp command lines for talking to the VM with the generated cluster key
SSH="ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i id_ecdsa_crc"
SCP="scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i id_ecdsa_crc"
MIRROR=${MIRROR:-https://mirror.openshift.com/pub/openshift-v4/$ARCH/clients/ocp}
# Cert rotation runs unless SNC_DISABLE_CERT_ROTATION is set (any value)
CERT_ROTATION=${SNC_DISABLE_CERT_ROTATION:-enabled}
USE_PATCHED_RELEASE_IMAGE=${SNC_USE_PATCHED_RELEASE_IMAGE:-disabled}
HTPASSWD_FILE='users.htpasswd'

run_preflight_checks ${BUNDLE_TYPE}
40 |
# If user defined the OPENSHIFT_VERSION environment variable then use it.
# Otherwise use the tagged version if available
if test -n "${OPENSHIFT_VERSION-}"; then
    OPENSHIFT_RELEASE_VERSION=${OPENSHIFT_VERSION}
    echo "Using release ${OPENSHIFT_RELEASE_VERSION} from OPENSHIFT_VERSION"
else
    # NOTE(review): the release channel is pinned to candidate-4.20 here;
    # this needs bumping when moving to a newer OpenShift minor.
    OPENSHIFT_RELEASE_VERSION="$(curl -L "${MIRROR}"/candidate-4.20/release.txt | sed -n 's/^ *Version: *//p')"
    if test -n "${OPENSHIFT_RELEASE_VERSION}"; then
        echo "Using release ${OPENSHIFT_RELEASE_VERSION} from the latest mirror"
    else
        echo "Unable to determine an OpenShift release version.  You may want to set the OPENSHIFT_VERSION environment variable explicitly."
        exit 1
    fi
fi
55 |
# Download the oc binary for specific OS environment
OC=./openshift-clients/linux/oc
download_oc

# Derive the release image from the mirror's release.txt unless the user
# provided OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE explicitly.
if test -z "${OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE-}"; then
    OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE="$(curl -L "${MIRROR}/${OPENSHIFT_RELEASE_VERSION}/release.txt" | sed -n 's/^Pull From: //p')"
elif test -n "${OPENSHIFT_VERSION-}"; then
    echo "Both OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE and OPENSHIFT_VERSION are set, OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE will take precedence"
    echo "OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE: $OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE"
    echo "OPENSHIFT_VERSION: $OPENSHIFT_VERSION"
fi
echo "Setting OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE to ${OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE}"
68 |
# Extract openshift-install binary if not present in current directory at the
# right release version.
# Quoted test operand: the unquoted `test -z ${VAR-}` form only worked by
# accident when the variable was unset, and broke on values with spaces.
if test -z "${OPENSHIFT_INSTALL-}"; then
    OPENSHIFT_INSTALL=./openshift-install
    if [[ ! -f "$OPENSHIFT_INSTALL" || $("$OPENSHIFT_INSTALL" version | grep -oP "${OPENSHIFT_INSTALL} \\K\\S+") != "$OPENSHIFT_RELEASE_VERSION" ]]; then
        echo "Extracting OpenShift installer binary"
        ${OC} adm release extract -a "${OPENSHIFT_PULL_SECRET_PATH}" "${OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE}" --command=openshift-install --to .
    fi
fi
77 |
# Optionally swap in the CRC release image with patched kube-apiserver /
# kube-controller-manager operator images.
if [[ ${USE_PATCHED_RELEASE_IMAGE} == "enabled" ]]
then
    echo "Using release image with patched KAO/KCMO images"
    OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE=quay.io/crcont/ocp-release:${OPENSHIFT_RELEASE_VERSION}-${yq_ARCH}
    echo "OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE set to ${OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE}"
fi

# Allow to disable debug by setting SNC_OPENSHIFT_INSTALL_NO_DEBUG in the environment
if test -z "${SNC_OPENSHIFT_INSTALL_NO_DEBUG-}"; then
    OPENSHIFT_INSTALL_EXTRA_ARGS="--log-level debug"
else
    OPENSHIFT_INSTALL_EXTRA_ARGS=""
fi
# Destroy an existing cluster and resources (best-effort)
${OPENSHIFT_INSTALL} --dir ${INSTALL_DIR} destroy cluster ${OPENSHIFT_INSTALL_EXTRA_ARGS} || echo "failed to destroy previous cluster.  Continuing anyway"
# Generate a new ssh keypair for this cluster
# Create a 521bit ECDSA Key
rm id_ecdsa_crc* || true
ssh-keygen -t ecdsa -b 521 -N "" -f id_ecdsa_crc -C "core"

# Use dnsmasq as dns in network manager config
if ! grep -iqR dns=dnsmasq /etc/NetworkManager/conf.d/ ; then
cat << EOF | sudo tee /etc/NetworkManager/conf.d/crc-snc-nm-dnsmasq.conf
[main]
dns=dnsmasq
EOF
fi

# Clean up old DNS overlay file
if [ -f /etc/NetworkManager/dnsmasq.d/openshift.conf ]; then
    sudo rm /etc/NetworkManager/dnsmasq.d/openshift.conf
fi
110 |
# Reset libvirt resources for this run (helpers defined outside the visible
# portion of snc-library.sh).
destroy_libvirt_resources rhcos-live.iso
create_libvirt_resources

# Set NetworkManager DNS overlay file
cat << EOF | sudo tee /etc/NetworkManager/dnsmasq.d/crc-snc.conf
server=/${SNC_PRODUCT_NAME}.${BASE_DOMAIN}/192.168.126.1
address=/apps-${SNC_PRODUCT_NAME}.${BASE_DOMAIN}/192.168.126.11
EOF

# Reload the NetworkManager to make DNS overlay effective
sudo systemctl reload NetworkManager

# Skew the host clock one day back so the installer's 24h certs can be
# rotated later (see renew_certificates in snc-library.sh).
if [[ ${CERT_ROTATION} == "enabled" ]]
then
    # Disable the network time sync and set the clock to past (for a day) on host
    sudo timedatectl set-ntp off
    sudo date -s '-1 day'
fi
129 |
# Create the INSTALL_DIR for the installer and copy the install-config
rm -fr ${INSTALL_DIR} && mkdir ${INSTALL_DIR} && cp install-config.yaml ${INSTALL_DIR}
# Patch arch, base domain, cluster name, pull secret and ssh key into it
${YQ} eval --inplace ".controlPlane.architecture = \"${yq_ARCH}\"" ${INSTALL_DIR}/install-config.yaml
${YQ} eval --inplace ".baseDomain = \"${BASE_DOMAIN}\"" ${INSTALL_DIR}/install-config.yaml
${YQ} eval --inplace ".metadata.name = \"${SNC_PRODUCT_NAME}\"" ${INSTALL_DIR}/install-config.yaml
replace_pull_secret ${INSTALL_DIR}/install-config.yaml
${YQ} eval ".sshKey = \"$(cat id_ecdsa_crc.pub)\"" --inplace ${INSTALL_DIR}/install-config.yaml

# Create the manifests using the INSTALL_DIR
OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE=$OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE ${OPENSHIFT_INSTALL} --dir ${INSTALL_DIR} create manifests

# Add CVO overrides before first start of the cluster. Objects declared in this file won't be created.
${YQ} eval-all --inplace 'select(fileIndex == 0) * select(filename == "cvo-overrides.yaml")' ${INSTALL_DIR}/manifests/cvo-overrides.yaml cvo-overrides.yaml

# Add custom domain to cluster-ingress
${YQ} eval --inplace ".spec.domain = \"apps-${SNC_PRODUCT_NAME}.${BASE_DOMAIN}\"" ${INSTALL_DIR}/manifests/cluster-ingress-02-config.yml
# Add network resource to lower the mtu for CNV
cp cluster-network-03-config.yaml ${INSTALL_DIR}/manifests/
# Add patch to mask the chronyd service on master
cp 99_master-chronyd-mask.yaml $INSTALL_DIR/openshift/
# Add dummy network unit file
cp 99-openshift-machineconfig-master-dummy-networks.yaml $INSTALL_DIR/openshift/
cp 99-openshift-machineconfig-master-console.yaml $INSTALL_DIR/openshift/
# Add kubelet config resource to make change in kubelet
DYNAMIC_DATA=$(base64 -w0 node-sizing-enabled.env) envsubst < 99_master-node-sizing-enabled-env.yaml.in > $INSTALL_DIR/openshift/99_master-node-sizing-enabled-env.yaml
# Add codeReadyContainer as invoker to identify it with telemeter
export OPENSHIFT_INSTALL_INVOKER="codeReadyContainers"
export KUBECONFIG=${INSTALL_DIR}/auth/kubeconfig
158 |
159 | OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE=$OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE ${OPENSHIFT_INSTALL} --dir ${INSTALL_DIR} create single-node-ignition-config ${OPENSHIFT_INSTALL_EXTRA_ARGS}
160 | # mask the chronyd service on the bootstrap node
161 | cat <<< $(${JQ} '.systemd.units += [{"mask": true, "name": "chronyd.service"}]' ${INSTALL_DIR}/bootstrap-in-place-for-live-iso.ign) > ${INSTALL_DIR}/bootstrap-in-place-for-live-iso.ign
162 |
# Download the RHCOS live ISO matching the release payload
# https://docs.openshift.com/container-platform/latest/installing/installing_sno/install-sno-installing-sno.html#install-sno-installing-sno-manually
# (Step: retrieve the RHCOS iso url)
# Fetch the stream metadata once and reuse it for both the URL and the
# checksum (the previous code invoked the slow `coreos print-stream-json`
# command twice).
STREAM_JSON=$(OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE=$OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE ${OPENSHIFT_INSTALL} coreos print-stream-json)
ISO_URL=$(echo "${STREAM_JSON}" | jq -r ".architectures.${ARCH}.artifacts.metal.formats.iso.disk.location")
ISO_CACHE_DIR=${ISO_CACHE_DIR:-$INSTALL_DIR}
if [[ "$ISO_CACHE_DIR" != "$INSTALL_DIR" ]]; then
    mkdir -p "$ISO_CACHE_DIR"

    ISO_NAME=$(basename "$ISO_URL")
    ISO_SHA256=$(echo "${STREAM_JSON}" | jq -r ".architectures.${ARCH}.artifacts.metal.formats.iso.disk.sha256")
    # Re-download only when the cached ISO is missing or its checksum does
    # not match the stream metadata. The command substitution on the right
    # of != is quoted so the checksum is compared literally, not as a glob.
    if [[ ! -f "${ISO_CACHE_DIR}/${ISO_NAME}" || "${ISO_SHA256}" != "$(sha256sum "${ISO_CACHE_DIR}/${ISO_NAME}" | cut -d' ' -f1)" ]]; then
        # --fail makes curl exit non-zero on HTTP errors instead of caching
        # an HTML error page as the ISO.
        curl --fail -L "${ISO_URL}" -o "${ISO_CACHE_DIR}/${ISO_NAME}"
    fi

    cp "${ISO_CACHE_DIR}/${ISO_NAME}" "${INSTALL_DIR}/rhcos-live.iso"
else
    curl --fail -L "${ISO_URL}" -o "${INSTALL_DIR}/rhcos-live.iso"
fi
180 |
# Embed the bootstrap-in-place ignition config into the live ISO so the node
# installs OpenShift unattended on first boot. --privileged plus the /dev and
# /run/udev mounts let coreos-installer inspect block devices; $PWD is mounted
# at /data so the container can reach ${INSTALL_DIR}.
podman run --privileged --pull always --rm \
    -v /dev:/dev -v /run/udev:/run/udev -v $PWD:/data \
    -w /data quay.io/coreos/coreos-installer:release \
    iso ignition embed --force \
    --ignition-file ${INSTALL_DIR}/bootstrap-in-place-for-live-iso.ign \
    ${INSTALL_DIR}/rhcos-live.iso

# Move the ISO where libvirt can read it (-Z relabels it for SELinux) and
# boot the single-node VM from it (create_vm is defined elsewhere in this script).
sudo mv -Z ${INSTALL_DIR}/rhcos-live.iso /var/lib/libvirt/${SNC_PRODUCT_NAME}/rhcos-live.iso
create_vm rhcos-live.iso

# Wait for the install; on failure, collect debug data with must-gather.
# NOTE(review): if must-gather succeeds, the non-zero status of wait-for is
# masked and the script keeps going; the second `wait-for install-complete`
# further below is the real gate — confirm this masking is intentional.
${OPENSHIFT_INSTALL} --dir ${INSTALL_DIR} wait-for install-complete ${OPENSHIFT_INSTALL_EXTRA_ARGS} || ${OC} adm must-gather --dest-dir ${INSTALL_DIR}
192 |
# Steps from https://www.redhat.com/en/blog/enabling-openshift-4-clusters-to-stop-and-resume-cluster-vms
# which describe how to rotate certs without waiting 24h: deploy the
# bootstrap-cred-manager daemonset, then delete the csr-signer secrets so the
# kube-controller-manager operator regenerates them.
retry ${OC} apply -f kubelet-bootstrap-cred-manager-ds.yaml
retry ${OC} delete secrets/csr-signer-signer secrets/csr-signer -n openshift-kube-controller-manager-operator
retry ${OC} adm wait-for-stable-cluster

# Optionally fast-forward certificate rotation (renew_certificates is defined
# elsewhere in this script).
if [[ ${CERT_ROTATION} == "enabled" ]]
then
    renew_certificates
fi

# Wait for install to complete; this provides another 30 mins to make resources (apis) stable
${OPENSHIFT_INSTALL} --dir ${INSTALL_DIR} wait-for install-complete ${OPENSHIFT_INSTALL_EXTRA_ARGS}

# Remove the bootstrap-cred-manager daemonset and wait till it gets deleted
retry ${OC} delete daemonset.apps/kubelet-bootstrap-cred-manager -n openshift-machine-config-operator
retry ${OC} wait --for=delete daemonset.apps/kubelet-bootstrap-cred-manager --timeout=60s -n openshift-machine-config-operator
210 |
# Set the VM static hostname to crc-xxxxx-master-0 instead of localhost.localdomain
# by promoting the transient hostname to the static one.
# NOTE(review): this assigns to HOSTNAME, shadowing bash's own HOSTNAME
# variable for the rest of the script — confirm nothing later depends on it.
HOSTNAME=$(${SSH} core@api.${SNC_PRODUCT_NAME}.${BASE_DOMAIN} hostnamectl status --transient)
${SSH} core@api.${SNC_PRODUCT_NAME}.${BASE_DOMAIN} sudo hostnamectl set-hostname ${HOSTNAME}

# Record bundle metadata (create_json_description is defined elsewhere in this script)
create_json_description ${BUNDLE_TYPE}

# Create persistent volumes (create_pvs is defined elsewhere in this script)
create_pvs ${BUNDLE_TYPE}
219 |
# Mark some of the deployments unmanaged by the cluster-version-operator (CVO)
# https://github.com/openshift/cluster-version-operator/blob/master/docs/dev/clusterversion.md#setting-objects-unmanaged
# Objects declared in this file are still created by the CVO at startup.
# The CVO won't modify these objects anymore with the following command. Hence, we can remove them afterwards.
retry ${OC} patch clusterversion version --type json -p "$(cat cvo-overrides-after-first-run.yaml)"

# Scale the router deployment from 2 to 1 (single-node cluster only needs one replica)
retry ${OC} scale --replicas=1 ingresscontroller/default -n openshift-ingress-operator

# Set managementState Image Registry Operator configuration from Removed to Managed
# because https://docs.openshift.com/container-platform/latest/registry/configuring_registry_storage/configuring-registry-storage-baremetal.html#registry-removed_configuring-registry-storage-baremetal
# Set default route for registry CRD from false to true.
retry ${OC} patch config.imageregistry.operator.openshift.io/cluster --patch '{"spec":{"managementState":"Managed","defaultRoute":true}}' --type=merge
233 |
# Generate the htpasswd file containing the admin and developer users
# (generate_htpasswd_file is defined elsewhere in this script)
generate_htpasswd_file ${INSTALL_DIR} ${HTPASSWD_FILE}

# Add a user developer with htpasswd identity provider and give it sudoer role
# Add kubeadmin user with cluster-admin role
retry ${OC} create secret generic htpass-secret --from-file=htpasswd=${HTPASSWD_FILE} -n openshift-config
retry ${OC} apply -f oauth_cr.yaml
retry ${OC} create clusterrolebinding kubeadmin --clusterrole=cluster-admin --user=kubeadmin

# Remove the installer's temporary kubeadmin secret; the htpasswd-backed
# kubeadmin user created above replaces it
retry ${OC} delete secrets kubeadmin -n kube-system

# Add security message on the web console
retry ${OC} create -f security-notice.yaml

# Reset the Cluster ID to an empty string so each consumer of the bundle
# gets a fresh one
retry ${OC} patch clusterversion version -p '{"spec":{"clusterID":""}}' --type merge
251 |
# SCP the kubeconfig file to the VM so it ships inside the bundle
${SCP} ${KUBECONFIG} core@api.${SNC_PRODUCT_NAME}.${BASE_DOMAIN}:/home/core/
${SSH} core@api.${SNC_PRODUCT_NAME}.${BASE_DOMAIN} -- 'sudo mv /home/core/kubeconfig /opt/'

# Add the exposed registry route's CA to the VM: extract the router CA and
# register it as an additional trusted CA in the cluster image config.
# NOTE(review): `oc extract --confirm` writes tls.crt into the current
# working directory — confirm that is acceptable here.
retry ${OC} extract secret/router-ca --keys=tls.crt -n openshift-ingress-operator --confirm
retry ${OC} create configmap registry-certs --from-file=default-route-openshift-image-registry.apps-${SNC_PRODUCT_NAME}.${BASE_DOMAIN}=tls.crt -n openshift-config
retry ${OC} patch image.config.openshift.io cluster -p '{"spec": {"additionalTrustedCA": {"name": "registry-certs"}}}' --type merge

# Remove the machine config for chronyd to make it active again
retry ${OC} delete mc chronyd-mask

# Wait for the cluster again to become stable because of all the patches/changes
# (wait_till_cluster_stable is defined elsewhere in this script)
wait_till_cluster_stable
266 |
# This section creates a custom OS image which has `/Users`
# For more details check https://github.com/crc-org/snc/issues/1041#issuecomment-2785928976
# This should be performed before removing the pull secret.
# Use a temp KUBECONFIG because the default kubeconfig has the `system:admin`
# user, which is not able to create a token to log in to the registry; the
# kubeadmin user is required for that.
export KUBECONFIG=/tmp/kubeconfig
# The CoreOS payload image name differs between OKD and OCP releases.
if [[ ${BUNDLE_TYPE} == "okd" ]]; then
    RHCOS_IMAGE=$(${OC} adm release info -a ${OPENSHIFT_PULL_SECRET_PATH} ${OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE} --image-for=stream-coreos)
else
    RHCOS_IMAGE=$(${OC} adm release info -a ${OPENSHIFT_PULL_SECRET_PATH} ${OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE} --image-for=rhel-coreos)
fi
# Layer a /Users -> var/Users symlink on top of the CoreOS image.
# `FROM scratch` is only a placeholder: `podman build --from` below overrides
# the base image with ${RHCOS_IMAGE}.
cat << EOF > ${INSTALL_DIR}/Containerfile
FROM scratch
RUN ln -sf var/Users /Users && mkdir /var/Users
EOF
podman build --from ${RHCOS_IMAGE} --authfile ${OPENSHIFT_PULL_SECRET_PATH} -t default-route-openshift-image-registry.apps-crc.testing/openshift-machine-config-operator/rhcos:latest --file ${INSTALL_DIR}/Containerfile .
# Log in as kubeadmin without leaking the password: the subshell disables
# xtrace, logs in (which writes the credential to /tmp/kubeconfig on disk, so
# it survives the subshell), deletes the password file, and scrubs the
# password from the installer log.
(
    set +x # disable the logging in the subshell to prevent the password leakage
    kubeadmin_pass=$(cat ${INSTALL_DIR}/auth/kubeadmin-password)
    retry ${OC} login -u kubeadmin -p "$kubeadmin_pass" --insecure-skip-tls-verify=true api.${SNC_PRODUCT_NAME}.${BASE_DOMAIN}:6443
    rm -f ${INSTALL_DIR}/auth/kubeadmin-password
    # Escape sed metacharacters in the password before using it as a pattern
    esc_pw="$(printf '%s' "$kubeadmin_pass" | sed -e 's/[\/&|\\]/\\&/g')"
    sed -i "s|$esc_pw|REDACTED|g" "${INSTALL_DIR}/.openshift_install.log"
)
# Push the custom OS image into the in-cluster registry and roll it out to
# the master pool via a MachineConfig.
retry ${OC} registry login -a ${INSTALL_DIR}/reg.json
retry podman push --authfile ${INSTALL_DIR}/reg.json --tls-verify=false default-route-openshift-image-registry.apps-crc.testing/openshift-machine-config-operator/rhcos:latest
# osImageURL uses the internal service pullspec so the node can pull the
# image without going through the external route.
cat << EOF > ${INSTALL_DIR}/custom-os-mc.yaml
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:
  labels:
    machineconfiguration.openshift.io/role: master
  name: custom-image
spec:
  osImageURL: image-registry.openshift-image-registry.svc:5000/openshift-machine-config-operator/rhcos:latest
EOF
retry ${OC} apply -f ${INSTALL_DIR}/custom-os-mc.yaml
# Give the machine-config-operator time to notice the new MachineConfig
# before we start polling the pool status.
sleep 60
# Wait till the master machine config pool reports Updated (i.e. no condition
# other than Updated is True). Sleep between polls — the original loop was a
# tight busy-wait that hammered the API server with back-to-back requests.
while retry ${OC} get mcp master -ojsonpath='{.status.conditions[?(@.type!="Updated")].status}' | grep True; do
    echo "Machine config still in updating/degrading state"
    sleep 30
done
309 |
# Capture the current set of rendered machineconfigs, blank the pull secret,
# then wait until the machine-config-operator renders a new config for it.
mc_before_removing_pullsecret=$(retry ${OC} get mc --sort-by=.metadata.creationTimestamp --no-headers -oname)
# Replace pull secret with a null json string '{}'
retry ${OC} replace -f pull-secret.yaml
mc_after_removing_pullsecret=$(retry ${OC} get mc --sort-by=.metadata.creationTimestamp --no-headers -oname)

# Sleep between polls — the original loop was a tight busy-wait that hammered
# the API server with back-to-back `oc get mc` requests.
while [ "${mc_before_removing_pullsecret}" == "${mc_after_removing_pullsecret}" ]; do
    echo "Machine config is still not rendered"
    sleep 10
    mc_after_removing_pullsecret=$(retry ${OC} get mc --sort-by=.metadata.creationTimestamp --no-headers -oname)
done
319 |
# Wait for the cluster to settle, ignoring the openshift-marketplace
# namespace (wait_till_cluster_stable is defined elsewhere in this script).
wait_till_cluster_stable openshift-marketplace

# Delete the pods which are in the Succeeded (Completed) state
retry ${OC} delete pod --field-selector=status.phase==Succeeded --all-namespaces

# Delete outdated rendered master/worker machineconfigs and just keep the latest one
${OC} adm prune renderedmachineconfigs --confirm
# Wait till the master machine config pool reports Updated. Sleep between
# polls — the original loop was a tight busy-wait that hammered the API
# server with back-to-back requests.
while retry ${OC} get mcp master -ojsonpath='{.status.conditions[?(@.type!="Updated")].status}' | grep True; do
    echo "Machine config still in updating/degrading state"
    sleep 30
done
331 |
# Create a container from the baremetal-runtimecfg image, which is consumed by
# the nodeip-configuration service, so the image is pinned in use and is not
# deleted by the `crictl rmi --prune` command below
BAREMETAL_RUNTIMECFG=$(${OC} adm release info -a ${OPENSHIFT_PULL_SECRET_PATH} ${OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE} --image-for=baremetal-runtimecfg)
${SSH} core@api.${SNC_PRODUCT_NAME}.${BASE_DOMAIN} -- "sudo podman create --name baremetal_runtimecfg ${BAREMETAL_RUNTIMECFG}"

# Remove unused images from container storage to shrink the bundle
${SSH} core@api.${SNC_PRODUCT_NAME}.${BASE_DOMAIN} -- 'sudo crictl rmi --prune'

# Remove the temporary baremetal_runtimecfg container created above
${SSH} core@api.${SNC_PRODUCT_NAME}.${BASE_DOMAIN} -- "sudo podman rm baremetal_runtimecfg"
342 |
# Create the /var/Users directory so it can become writeable
# todo: remove it once custom image able to perform it
# -p makes this idempotent: the custom OS layer's Containerfile also runs
# `mkdir /var/Users`, so a plain mkdir would fail (and abort the build)
# whenever the directory already exists on the node.
${SSH} core@api.${SNC_PRODUCT_NAME}.${BASE_DOMAIN} -- 'sudo mkdir -p /var/Users'

# Check the /Users directory is writeable (through the /Users -> var/Users symlink)
# NOTE(review): running the check under sudo proves little about non-root
# writability — confirm this is the intended check.
${SSH} core@api.${SNC_PRODUCT_NAME}.${BASE_DOMAIN} -- 'sudo mkdir /Users/foo && sudo rm -fr /Users/foo'

# Remove the image stream of the custom image and prune it from the registry
retry ${OC} delete imagestream rhcos -n openshift-machine-config-operator
retry ${OC} adm prune images --confirm --registry-url default-route-openshift-image-registry.apps-crc.testing --keep-younger-than=0s
353 |
--------------------------------------------------------------------------------