├── all.sh ├── README.md ├── config.static.sh ├── install.sh ├── Dockerfile ├── config.no-gen.sh ├── config.kv.sh └── entrypoint.sh /all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DIR=/srv/ceph 4 | VAR=/srv/ceph-var 5 | IP=10.1.4.10 6 | NET=10.1.4.0/24 7 | IMAGE=rhcs 8 | 9 | rm -rf ${VAR}/* /etc/ceph/* 10 | 11 | docker run -d --net=host -e MON_IP=${IP} -e CEPH_PUBLIC_NETWORK=${NET} -e CEPH_DAEMON=mon -v /etc/ceph:/etc/ceph -v ${VAR}:/var/lib/ceph ${IMAGE} 12 | 13 | for i in 0 1 2 14 | do 15 | umount /tmp/ceph_disk${i} 16 | dd if=/dev/zero of=${DIR}/d${i} bs=256M count=5 conv=notrunc 17 | mkfs -t xfs -f ${DIR}/d${i} 18 | mkdir -p /tmp/ceph_disk${i} 19 | mount -t xfs -o loop ${DIR}/d${i} /tmp/ceph_disk${i} 20 | docker run -d --privileged --pid=host --net=host -e MON_IP=${IP} -e CEPH_DAEMON=osd -e OSD_TYPE=directory -v /tmp/ceph_disk${i}:/var/lib/ceph/osd/ -v ${VAR}:/var/lib/ceph -v /etc/ceph:/etc/ceph ${IMAGE} 21 | done 22 | 23 | 24 | #ceph -w 25 | #docker stop $(docker ps -q) 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # RHCS containerization 2 | 3 | ## Clone 4 | 5 | git clone https://your_github_id:your_github_password@github.com/roofmonkey/rhcs 6 | 7 | ## build docker image 8 | 9 | ```console 10 | docker build -t rhcs . 
11 | ``` 12 | 13 | ## prepare 14 | 15 | ```console 16 | mkdir -p /etc/ceph 17 | # for /var/lib/ceph 18 | mkdir -p /srv/ceph-var 19 | # directory for osd 0 20 | mkdir -p /srv/ceph 21 | # directory for osd 1 22 | mkdir -p /srv/ceph-1 23 | rm -rf /etc/ceph/* /srv/ceph/* /srv/ceph-1/* 24 | ``` 25 | 26 | ## start Ceph mon 27 | ```console 28 | docker run -ti --net=host -e MON_IP=10.1.4.12 -e CEPH_PUBLIC_NETWORK=10.1.4.0/24 -e CEPH_DAEMON=mon -e -v /etc/ceph:/etc/ceph -v /srv/ceph-var:/var/lib/ceph rhcs 29 | ``` 30 | 31 | ## start Ceph osd 0 32 | 33 | ```console 34 | docker run -ti --privileged --net=host -e MON_IP=10.1.4.12 -e CEPH_PUBLIC_NETWORK=10.1.4.0/24 -e CEPH_DAEMON=osd -e OSD_TYPE=directory -v /srv/ceph:/var/lib/ceph/osd/ -v /etc/ceph:/etc/ceph -v /srv/ceph-var:/var/lib/ceph rhcs 35 | ``` 36 | 37 | ## start Ceph osd 1 38 | ```console 39 | docker run -ti --privileged --net=host -e MON_IP=10.1.4.12 -e CEPH_PUBLIC_NETWORK=10.1.4.0/24 -e CEPH_DAEMON=osd -e OSD_TYPE=directory -v /srv/ceph-1:/var/lib/ceph/osd/ -v /etc/ceph:/etc/ceph -v /srv/ceph-var:/var/lib/ceph rhcs 40 | ``` 41 | -------------------------------------------------------------------------------- /config.static.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | function get_admin_key { 5 | # No-op for static 6 | echo "static: does not generate admin key" 7 | } 8 | 9 | function get_mon_config { 10 | if [ ! -e /etc/ceph/${CLUSTER}.conf ]; then 11 | fsid=$(uuidgen) 12 | cat </etc/ceph/${CLUSTER}.conf 13 | [global] 14 | fsid = $fsid 15 | mon initial members = ${MON_NAME} 16 | mon host = ${MON_IP} 17 | auth cluster required = cephx 18 | auth service required = cephx 19 | auth client required = cephx 20 | public network = ${CEPH_PUBLIC_NETWORK} 21 | cluster network = ${CEPH_CLUSTER_NETWORK} 22 | osd journal size = ${OSD_JOURNAL_SIZE} 23 | ENDHERE 24 | 25 | if [[ ! 
-z "$(ip -6 -o a | grep scope.global | awk '/eth/ { sub ("/..", "", $4); print $4 }' | head -n1)" ]]; then 26 | echo "ms_bind_ipv6 = true" >> /etc/ceph/${CLUSTER}.conf 27 | sed -i '/mon host/d' /etc/ceph/${CLUSTER}.conf 28 | echo "mon host = ${MON_IP}" >> /etc/ceph/${CLUSTER}.conf 29 | fi 30 | 31 | # Generate administrator key 32 | ceph-authtool /etc/ceph/${CLUSTER}.client.admin.keyring --create-keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow' 33 | 34 | # Generate the mon. key 35 | ceph-authtool /etc/ceph/${CLUSTER}.mon.keyring --create-keyring --gen-key -n mon. --cap mon 'allow *' 36 | 37 | # Create bootstrap key directories 38 | mkdir -p /var/lib/ceph/bootstrap-{osd,mds,rgw} 39 | 40 | # Generate the OSD bootstrap key 41 | ceph-authtool /var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring --create-keyring --gen-key -n client.bootstrap-osd --cap mon 'allow profile bootstrap-osd' 42 | 43 | # Generate the MDS bootstrap key 44 | ceph-authtool /var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring --create-keyring --gen-key -n client.bootstrap-mds --cap mon 'allow profile bootstrap-mds' 45 | 46 | # Generate the RGW bootstrap key 47 | ceph-authtool /var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring --create-keyring --gen-key -n client.bootstrap-rgw --cap mon 'allow profile bootstrap-rgw' 48 | 49 | # Generate initial monitor map 50 | monmaptool --create --add ${MON_NAME} "${MON_IP}:6789" --fsid ${fsid} /etc/ceph/monmap 51 | fi 52 | } 53 | 54 | function get_config { 55 | # No-op for static 56 | echo "static: does not generate config" 57 | } 58 | 59 | -------------------------------------------------------------------------------- /install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | function mon_systemd { 3 | cat < ${HOST}/etc/systemd/system/multi-user.target.wants/ceph-mon@${MON_NAME}.service 4 | [Unit] 5 | Description=Ceph Monitor 6 | After=docker.service 7 | 8 | [Service] 9 
| EnvironmentFile=/etc/environment 10 | ExecStartPre=-/usr/bin/docker stop %i 11 | ExecStartPre=/usr/bin/mkdir -p /etc/ceph /var/lib/ceph/mon 12 | ExecStart=/usr/bin/docker run --rm --name %i --net=host \ 13 | -v /var/lib/ceph:/var/lib/ceph \ 14 | -v /etc/ceph:/etc/ceph \ 15 | --privileged \ 16 | -e CEPH_DAEMON=MON \ 17 | -e MON_IP=${MON_IP} \ 18 | -e CEPH_PUBLIC_NETWORK=${CEPH_PUBLIC_NETWORK} \ 19 | -e MON_NAME=${MON_NAME} \ 20 | --name=${MON_NAME} \ 21 | ${IMAGE} 22 | ExecStopPost=-/usr/bin/docker stop %i 23 | Restart=always 24 | RestartSec=10s 25 | TimeoutStartSec=120 26 | TimeoutStopSec=15 27 | 28 | [Install] 29 | WantedBy=multi-user.target 30 | EOF 31 | } 32 | 33 | function osd_systemd { 34 | cat < ${HOST}/etc/systemd/system/multi-user.target.wants/ceph-osd@${DEVICE}.service 35 | [Unit] 36 | Description=Ceph OSD 37 | After=docker.service 38 | 39 | [Service] 40 | EnvironmentFile=/etc/environment 41 | ExecStartPre=-/usr/bin/docker stop osd-dev%i 42 | ExecStartPre=-/usr/bin/docker rm osd-dev%i 43 | ExecStart=/usr/bin/docker run --rm --net=host --pid=host\ 44 | -v /var/lib/ceph:/var/lib/ceph \ 45 | -v /etc/ceph:/etc/ceph \ 46 | -v /dev:/dev \ 47 | --privileged \ 48 | -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \ 49 | -e OSD_DEVICE=/dev/%i \ 50 | --name=dev%i \ 51 | hchen/rhceph 52 | ExecStop=-/usr/bin/docker stop osd-dev%i 53 | ExecStopPost=-/usr/bin/docker rm osd-dev%i 54 | Restart=always 55 | RestartSec=10s 56 | TimeoutStartSec=120 57 | TimeoutStopSec=15 58 | 59 | [Install] 60 | WantedBy=multi-user.target 61 | EOF 62 | } 63 | 64 | 65 | # Normalize DAEMON to lowercase 66 | CEPH_DAEMON=$(echo ${CEPH_DAEMON} |tr '[:upper:]' '[:lower:]') 67 | case "$CEPH_DAEMON" in 68 | mon) 69 | mon_systemd 70 | ;; 71 | osd) 72 | DEVICE=`echo ${OSD_DEVICE}|sed 's/\/dev\///g'` 73 | osd_systemd 74 | ;; 75 | *) 76 | if [ ! -n "$CEPH_DAEMON" ]; then 77 | echo "ERROR- One of CEPH_DAEMON or a daemon parameter must be defined as the name " 78 | echo "of the daemon you want to install." 
79 | echo "Valid values for CEPH_DAEMON are MON, OSD, MDS, RGW, RESTAPI" 80 | exit 1 81 | fi 82 | ;; 83 | esac 84 | 85 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM registry.access.redhat.com/rhel7.2 2 | 3 | MAINTAINER Huamin Chen "hchen@redhat.com" 4 | 5 | ENV container docker 6 | 7 | # This need to be removed later 8 | RUN rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release 9 | 10 | RUN yum-config-manager --add=http://download.eng.blr.redhat.com/pub/rhel/released/RHEL-7/7.2/Server/x86_64/os/ 11 | RUN yum-config-manager --add=http://download.lab.bos.redhat.com/rel-eng/RHCeph/1.3-RHEL-7/latest/Server-RH7-CEPH-MON-1.3/x86_64/os/ 12 | RUN yum-config-manager --add=http://download.lab.bos.redhat.com/rel-eng/RHCeph/1.3-RHEL-7/latest/Server-RH7-CEPH-OSD-1.3/x86_64/os/ 13 | RUN yum-config-manager --add=http://download.eng.blr.redhat.com/pub/rhel/released/RHEL-7/7.2/Server-optional/x86_64/os/ 14 | 15 | RUN yum -y update; yum clean all; \ 16 | (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \ 17 | rm -f /lib/systemd/system/multi-user.target.wants/*;\ 18 | rm -f /etc/systemd/system/*.wants/*;\ 19 | rm -f /lib/systemd/system/local-fs.target.wants/*; \ 20 | rm -f /lib/systemd/system/sockets.target.wants/*udev*; \ 21 | rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \ 22 | rm -f /lib/systemd/system/basic.target.wants/*;\ 23 | rm -f /lib/systemd/system/anaconda.target.wants/*; 24 | 25 | RUN yum -y install ceph ceph-mon ceph-osd --nogpgcheck; yum clean all; 26 | 27 | # Editing /etc/redhat-storage-server release file 28 | RUN echo "Red Hat Ceph Storage Server 1.3 (Container)" > /etc/redhat-storage-release 29 | 30 | EXPOSE 6789 6800 6801 6802 6803 6804 6805 80 5000 31 | 32 | # Add volumes for Ceph config and data 33 | VOLUME ["/etc/ceph","/var/lib/ceph"] 
34 | 35 | # Add bootstrap script 36 | ADD entrypoint.sh /entrypoint.sh 37 | ADD config.*.sh / 38 | 39 | # Execute the entrypoint 40 | WORKDIR / 41 | ENTRYPOINT ["/entrypoint.sh"] 42 | 43 | # Atomic specific labels 44 | ADD install.sh /install.sh 45 | LABEL Version="1.3" Description="This image has a running Ceph daemon (RHEL 7.2 + RHCS 1.3)" 46 | LABEL RUN="/usr/bin/docker run -d --net=host --pid=host -e MON_NAME=\${MON_NAME} -e MON_IP=\${MON_IP} -e CEPH_PUBLIC_NETWORK=\${CEPH_PUBLIC_NETWORK} -e CEPH_DAEMON=\${CEPH_DAEMON} -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph \${IMAGE}" 47 | LABEL INSTALL="/usr/bin/docker run --rm --privileged -v /:/host -e MON_IP=\${MON_IP} -e CEPH_PUBLIC_NETWORK=\${CEPH_PUBLIC_NETWORK} -e CEPH_DAEMON=\${CEPH_DAEMON} -e MON_NAME=\${MON_NAME} -e OSD_DEVICE=\${OSD_DEVICE} -e HOST=/host -e IMAGE=\${IMAGE} --entrypoint=/install.sh \${IMAGE}" 48 | -------------------------------------------------------------------------------- /config.no-gen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # don't generate if file already exists 3 | set -e 4 | 5 | function get_admin_key { 6 | # No-op for static 7 | echo "static: does not generate admin key" 8 | } 9 | 10 | function get_mon_config { 11 | if [ ! -e /etc/ceph/${CLUSTER}.conf ]; then 12 | fsid=$(uuidgen) 13 | cat </etc/ceph/${CLUSTER}.conf 14 | [global] 15 | fsid = $fsid 16 | mon initial members = ${MON_NAME} 17 | mon host = ${MON_IP} 18 | auth cluster required = cephx 19 | auth service required = cephx 20 | auth client required = cephx 21 | public network = ${CEPH_PUBLIC_NETWORK} 22 | cluster network = ${CEPH_CLUSTER_NETWORK} 23 | osd journal size = ${OSD_JOURNAL_SIZE} 24 | ENDHERE 25 | 26 | if [[ ! 
-z "$(ip -6 -o a | grep scope.global | awk '/eth/ { sub ("/..", "", $4); print $4 }' | head -n1)" ]]; then 27 | echo "ms_bind_ipv6 = true" >> /etc/ceph/${CLUSTER}.conf 28 | sed -i '/mon host/d' /etc/ceph/${CLUSTER}.conf 29 | echo "mon host = ${MON_IP}" >> /etc/ceph/${CLUSTER}.conf 30 | fi 31 | else 32 | fsid=`grep "fsid" /etc/ceph/${CLUSTER}.conf |awk '{print $NF}'` 33 | fi 34 | if [ ! -e /etc/ceph/${CLUSTER}.client.admin.keyring ]; then 35 | # Generate administrator key 36 | ceph-authtool /etc/ceph/${CLUSTER}.client.admin.keyring --create-keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow' 37 | fi 38 | 39 | if [ ! -e /etc/ceph/${CLUSTER}.mon.keyring ]; then 40 | # Generate the mon. key 41 | ceph-authtool /etc/ceph/${CLUSTER}.mon.keyring --create-keyring --gen-key -n mon. --cap mon 'allow *' 42 | fi 43 | 44 | # Create bootstrap key directories 45 | mkdir -p /var/lib/ceph/bootstrap-{osd,mds,rgw} 46 | 47 | if [ ! -e /var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring ]; then 48 | # Generate the OSD bootstrap key 49 | ceph-authtool /var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring --create-keyring --gen-key -n client.bootstrap-osd --cap mon 'allow profile bootstrap-osd' 50 | fi 51 | if [ ! -e /var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring ]; then 52 | # Generate the MDS bootstrap key 53 | ceph-authtool /var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring --create-keyring --gen-key -n client.bootstrap-mds --cap mon 'allow profile bootstrap-mds' 54 | fi 55 | if [ ! 
-e /var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring ]; then 56 | # Generate the RGW bootstrap key 57 | ceph-authtool /var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring --create-keyring --gen-key -n client.bootstrap-rgw --cap mon 'allow profile bootstrap-rgw' 58 | fi 59 | } 60 | 61 | function get_config { 62 | # No-op for static 63 | echo "static: does not generate config" 64 | } 65 | 66 | -------------------------------------------------------------------------------- /config.kv.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | function get_admin_key { 5 | kviator --kvstore=${KV_TYPE} --client=${KV_IP}:${KV_PORT} get ${CLUSTER_PATH}/adminKeyring > /etc/ceph/${CLUSTER}.client.admin.keyring 6 | } 7 | 8 | function get_mon_config { 9 | 10 | CLUSTER_PATH=ceph-config/${CLUSTER} 11 | 12 | echo "Adding Mon Host - ${MON_NAME}" 13 | kviator --kvstore=${KV_TYPE} --client=${KV_IP}:${KV_PORT} put ${CLUSTER_PATH}/mon_host/${MON_NAME} ${MON_IP} > /dev/null 2>&1 14 | 15 | # Acquire lock to not run into race conditions with parallel bootstraps 16 | until kviator --kvstore=${KV_TYPE} --client=${KV_IP}:${KV_PORT} cas ${CLUSTER_PATH}/lock $MON_NAME > /dev/null 2>&1 ; do 17 | echo "Configuration is locked by another host. Waiting." 18 | sleep 1 19 | done 20 | 21 | # Update config after initial mon creation 22 | if kviator --kvstore=${KV_TYPE} --client=${KV_IP}:${KV_PORT} get ${CLUSTER_PATH}/monSetupComplete > /dev/null 2>&1 ; then 23 | echo "Configuration found for cluster ${CLUSTER}. Writing to disk." 24 | 25 | 26 | until confd -onetime -backend ${KV_TYPE} -node ${KV_IP}:${KV_PORT} -prefix="/${CLUSTER_PATH}/" ; do 27 | echo "Waiting for confd to update templates..." 
28 | sleep 1 29 | done 30 | 31 | # Check/Create bootstrap key directories 32 | mkdir -p /var/lib/ceph/bootstrap-{osd,mds,rgw} 33 | 34 | echo "Adding Keyrings" 35 | kviator --kvstore=${KV_TYPE} --client=${KV_IP}:${KV_PORT} get ${CLUSTER_PATH}/monKeyring > /etc/ceph/${CLUSTER}.mon.keyring 36 | kviator --kvstore=${KV_TYPE} --client=${KV_IP}:${KV_PORT} get ${CLUSTER_PATH}/adminKeyring > /etc/ceph/${CLUSTER}.client.admin.keyring 37 | kviator --kvstore=${KV_TYPE} --client=${KV_IP}:${KV_PORT} get ${CLUSTER_PATH}/bootstrapOsdKeyring > /var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring 38 | kviator --kvstore=${KV_TYPE} --client=${KV_IP}:${KV_PORT} get ${CLUSTER_PATH}/bootstrapMdsKeyring > /var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring 39 | kviator --kvstore=${KV_TYPE} --client=${KV_IP}:${KV_PORT} get ${CLUSTER_PATH}/bootstrapRgwKeyring > /var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring 40 | 41 | 42 | if [ ! -f /etc/ceph/monmap ]; then 43 | echo "Monmap is missing. Adding initial monmap..." 44 | kviator --kvstore=${KV_TYPE} --client=${KV_IP}:${KV_PORT} get ${CLUSTER_PATH}/monmap > /etc/ceph/monmap 45 | fi 46 | 47 | echo "Trying to get the most recent monmap..." 48 | if timeout 5 ceph ${CEPH_OPTS} mon getmap -o /etc/ceph/monmap; then 49 | echo "Monmap successfully retrieved." 50 | else 51 | echo "Peers not found, using initial monmap." 52 | fi 53 | 54 | else 55 | # Create initial Mon, keyring 56 | echo "No configuration found for cluster ${CLUSTER}. Generating." 57 | 58 | FSID=$(uuidgen) 59 | kviator --kvstore=${KV_TYPE} --client=${KV_IP}:${KV_PORT} put ${CLUSTER_PATH}/auth/fsid ${FSID} 60 | 61 | until confd -onetime -backend ${KV_TYPE} -node ${KV_IP}:${KV_PORT} -prefix="/${CLUSTER_PATH}/" ; do 62 | echo "Waiting for confd to write initial templates..." 
63 | sleep 1 64 | done 65 | 66 | echo "Creating Keyrings" 67 | ceph-authtool /etc/ceph/${CLUSTER}.client.admin.keyring --create-keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow' 68 | ceph-authtool /etc/ceph/${CLUSTER}.mon.keyring --create-keyring --gen-key -n mon. --cap mon 'allow *' 69 | 70 | # Create bootstrap key directories 71 | mkdir -p /var/lib/ceph/bootstrap-{osd,mds,rgw} 72 | 73 | # Generate the OSD bootstrap key 74 | ceph-authtool /var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring --create-keyring --gen-key -n client.bootstrap-osd --cap mon 'allow profile bootstrap-osd' 75 | 76 | # Generate the MDS bootstrap key 77 | ceph-authtool /var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring --create-keyring --gen-key -n client.bootstrap-mds --cap mon 'allow profile bootstrap-mds' 78 | 79 | # Generate the RGW bootstrap key 80 | ceph-authtool /var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring --create-keyring --gen-key -n client.bootstrap-rgw --cap mon 'allow profile bootstrap-rgw' 81 | 82 | 83 | echo "Creating Monmap" 84 | monmaptool --create --add ${MON_NAME} "${MON_IP}:6789" --fsid ${FSID} /etc/ceph/monmap 85 | 86 | echo "Importing Keyrings and Monmap to KV" 87 | kviator --kvstore=${KV_TYPE} --client=${KV_IP}:${KV_PORT} put ${CLUSTER_PATH}/monKeyring - < /etc/ceph/${CLUSTER}.mon.keyring 88 | kviator --kvstore=${KV_TYPE} --client=${KV_IP}:${KV_PORT} put ${CLUSTER_PATH}/adminKeyring - < /etc/ceph/${CLUSTER}.client.admin.keyring 89 | kviator --kvstore=${KV_TYPE} --client=${KV_IP}:${KV_PORT} put ${CLUSTER_PATH}/bootstrapOsdKeyring - < /var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring 90 | kviator --kvstore=${KV_TYPE} --client=${KV_IP}:${KV_PORT} put ${CLUSTER_PATH}/bootstrapMdsKeyring - < /var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring 91 | kviator --kvstore=${KV_TYPE} --client=${KV_IP}:${KV_PORT} put ${CLUSTER_PATH}/bootstrapRgwKeyring - < /var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring 92 | 93 | kviator --kvstore=${KV_TYPE} 
--client=${KV_IP}:${KV_PORT} put ${CLUSTER_PATH}/monmap - < /etc/ceph/monmap 94 | 95 | echo "Completed initialization for ${MON_NAME}" 96 | kviator --kvstore=${KV_TYPE} --client=${KV_IP}:${KV_PORT} put ${CLUSTER_PATH}/monSetupComplete true > /dev/null 2>&1 97 | fi 98 | 99 | # Remove lock for other clients to install 100 | echo "Removing lock for ${MON_NAME}" 101 | kviator --kvstore=${KV_TYPE} --client=${KV_IP}:${KV_PORT} del ${CLUSTER_PATH}/lock > /dev/null 2>&1 102 | 103 | } 104 | 105 | function get_config { 106 | 107 | CLUSTER_PATH=ceph-config/${CLUSTER} 108 | 109 | until kviator --kvstore=${KV_TYPE} --client=${KV_IP}:${KV_PORT} get ${CLUSTER_PATH}/monSetupComplete > /dev/null 2>&1 ; do 110 | echo "OSD: Waiting for monitor setup to complete..." 111 | sleep 5 112 | done 113 | 114 | until confd -onetime -backend ${KV_TYPE} -node ${KV_IP}:${KV_PORT} -prefix="/${CLUSTER_PATH}/" ; do 115 | echo "Waiting for confd to update templates..." 116 | sleep 1 117 | done 118 | 119 | # Check/Create bootstrap key directories 120 | mkdir -p /var/lib/ceph/bootstrap-{osd,mds,rgw} 121 | 122 | echo "Adding bootstrap keyrings" 123 | kviator --kvstore=${KV_TYPE} --client=${KV_IP}:${KV_PORT} get ${CLUSTER_PATH}/bootstrapOsdKeyring > /var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring 124 | kviator --kvstore=${KV_TYPE} --client=${KV_IP}:${KV_PORT} get ${CLUSTER_PATH}/bootstrapMdsKeyring > /var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring 125 | kviator --kvstore=${KV_TYPE} --client=${KV_IP}:${KV_PORT} get ${CLUSTER_PATH}/bootstrapRgwKeyring > /var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring 126 | 127 | } 128 | 129 | -------------------------------------------------------------------------------- /entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | : ${CLUSTER:=ceph} 5 | : ${CEPH_CLUSTER_NETWORK:=${CEPH_PUBLIC_NETWORK}} 6 | : ${CEPH_DAEMON:=${1}} # default daemon to first argument 7 | : ${CEPH_GET_ADMIN_KEY:=0} 8 | : 
${HOSTNAME:=$(hostname -s)} 9 | : ${MON_NAME:=${HOSTNAME}} 10 | : ${MON_IP_AUTO_DETECT:=0} 11 | : ${MDS_NAME:=mds-${HOSTNAME}} 12 | : ${OSD_FORCE_ZAP:=0} 13 | : ${OSD_JOURNAL_SIZE:=100} 14 | : ${CRUSH_LOCATION:=root=default host=${HOSTNAME}} 15 | : ${CEPHFS_CREATE:=0} 16 | : ${CEPHFS_NAME:=cephfs} 17 | : ${CEPHFS_DATA_POOL:=${CEPHFS_NAME}_data} 18 | : ${CEPHFS_DATA_POOL_PG:=8} 19 | : ${CEPHFS_METADATA_POOL:=${CEPHFS_NAME}_metadata} 20 | : ${CEPHFS_METADATA_POOL_PG:=8} 21 | : ${RGW_NAME:=${HOSTNAME}} 22 | : ${RGW_CIVETWEB_PORT:=80} 23 | : ${RGW_REMOTE_CGI:=0} 24 | : ${RGW_REMOTE_CGI_PORT:=9000} 25 | : ${RGW_REMOTE_CGI_HOST:=0.0.0.0} 26 | : ${RESTAPI_IP:=0.0.0.0} 27 | : ${RESTAPI_PORT:=5000} 28 | : ${RESTAPI_BASE_URL:=/api/v0.1} 29 | : ${RESTAPI_LOG_LEVEL:=warning} 30 | : ${RESTAPI_LOG_FILE:=/var/log/ceph/ceph-restapi.log} 31 | : ${KV_TYPE:=none} # valid options: consul, etcd or none 32 | : ${KV_IP:=127.0.0.1} 33 | : ${KV_PORT:=4001} # PORT 8500 for Consul 34 | 35 | CEPH_OPTS="--cluster ${CLUSTER}" 36 | 37 | # ceph config file exists or die 38 | function check_config { 39 | if [[ ! -e /etc/ceph/${CLUSTER}.conf ]]; then 40 | echo "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon" 41 | exit 1 42 | fi 43 | } 44 | 45 | # ceph admin key exists or die 46 | function check_admin_key { 47 | if [[ ! -e /etc/ceph/${CLUSTER}.client.admin.keyring ]]; then 48 | echo "ERROR- /etc/ceph/${CLUSTER}.client.admin.keyring must exist; get it from your existing mon" 49 | exit 1 50 | fi 51 | } 52 | 53 | ########################### 54 | # Configuration generator # 55 | ########################### 56 | 57 | # Load in the bootstrapping routines 58 | # based on the data store 59 | case "$KV_TYPE" in 60 | etcd|consul) 61 | source /config.kv.sh 62 | ;; 63 | static) 64 | source /config.static.sh 65 | ;; 66 | *) 67 | source /config.no-gen.sh 68 | ;; 69 | esac 70 | 71 | 72 | ####### 73 | # MON # 74 | ####### 75 | 76 | function start_mon { 77 | if [ ! 
-n "$CEPH_PUBLIC_NETWORK" ]; then 78 | echo "ERROR- CEPH_PUBLIC_NETWORK must be defined as the name of the network for the OSDs" 79 | exit 1 80 | fi 81 | 82 | if [ ${MON_IP_AUTO_DETECT} -eq 1 ]; then 83 | MON_IP=$(ip -6 -o a | grep scope.global | awk '/eth/ { sub ("/..", "", $4); print $4 }' | head -n1) 84 | if [ -z "$MON_IP" ]; then 85 | MON_IP=$(ip -4 -o a | awk '/eth/ { sub ("/..", "", $4); print $4 }') 86 | fi 87 | elif [ ${MON_IP_AUTO_DETECT} -eq 4 ]; then 88 | MON_IP=$(ip -4 -o a | awk '/eth/ { sub ("/..", "", $4); print $4 }') 89 | elif [ ${MON_IP_AUTO_DETECT} -eq 6 ]; then 90 | MON_IP=$(ip -6 -o a | grep scope.global | awk '/eth/ { sub ("/..", "", $4); print $4 }' | head -n1) 91 | fi 92 | 93 | if [ ! -n "$MON_IP" ]; then 94 | echo "ERROR- MON_IP must be defined as the IP address of the monitor" 95 | exit 1 96 | fi 97 | 98 | # get_mon_config is also responsible for bootstrapping the 99 | # cluster, if necessary 100 | get_mon_config 101 | 102 | # If we don't have a monitor keyring, this is a new monitor 103 | if [ ! -e /var/lib/ceph/mon/${CLUSTER}-${MON_NAME}/keyring ]; then 104 | 105 | if [ ! -e /etc/ceph/${CLUSTER}.mon.keyring ]; then 106 | echo "ERROR- /etc/ceph/${CLUSTER}.mon.keyring must exist. You can extract it from your current monitor by running 'ceph auth get mon. -o /etc/ceph/${CLUSTER}.mon.keyring' or use a KV Store" 107 | exit 1 108 | fi 109 | 110 | monmapOpt="--monmap /etc/ceph/monmap" 111 | if [ ! -e /etc/ceph/monmap ]; then 112 | echo "/etc/ceph/monmap must exist. 
You can extract it from your current monitor by running 'ceph mon getmap -o /etc/ceph/monmap' or use a KV Store" 113 | monmapOpt="" 114 | fi 115 | 116 | # Testing if it's not the first monitor, if one key doesn't exist we assume none of them exist 117 | ceph-authtool /tmp/${CLUSTER}.mon.keyring --create-keyring --import-keyring /etc/ceph/${CLUSTER}.client.admin.keyring 118 | ceph-authtool /tmp/${CLUSTER}.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring 119 | ceph-authtool /tmp/${CLUSTER}.mon.keyring --import-keyring /var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring 120 | ceph-authtool /tmp/${CLUSTER}.mon.keyring --import-keyring /var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring 121 | ceph-authtool /tmp/${CLUSTER}.mon.keyring --import-keyring /etc/ceph/${CLUSTER}.mon.keyring 122 | 123 | # Make the monitor directory 124 | mkdir -p /var/lib/ceph/mon/${CLUSTER}-${MON_NAME} 125 | 126 | # Prepare the monitor daemon's directory with the map and keyring 127 | ceph-mon --mkfs -i ${MON_NAME} ${monmapOpt} --keyring /tmp/${CLUSTER}.mon.keyring 128 | 129 | # Clean up the temporary key 130 | rm /tmp/${CLUSTER}.mon.keyring 131 | fi 132 | 133 | # start MON 134 | exec /usr/bin/ceph-mon ${CEPH_OPTS} -d -i ${MON_NAME} --public-addr "${MON_IP}:6789" 135 | } 136 | 137 | 138 | ################ 139 | # OSD (common) # 140 | ################ 141 | 142 | function start_osd { 143 | get_config 144 | check_config 145 | 146 | if [ ${CEPH_GET_ADMIN_KEY} -eq "1" ]; then 147 | get_admin_key 148 | check_admin_key 149 | fi 150 | 151 | case "$OSD_TYPE" in 152 | directory) 153 | osd_directory 154 | ;; 155 | disk) 156 | osd_disk 157 | ;; 158 | activate) 159 | osd_activate 160 | ;; 161 | prepare) 162 | osd_disk_prepare 163 | ;; 164 | *) 165 | if [[ ! 
-d /var/lib/ceph/osd || -n "$(find /var/lib/ceph/osd -prune -empty)" ]]; then 166 | echo "No bootstrapped OSDs found; trying ceph-disk" 167 | osd_disk 168 | else 169 | if [ -z "${OSD_DEVICE}" ]; then 170 | echo "Bootstrapped OSD(s) found; using OSD directory" 171 | osd_directory 172 | else 173 | echo "Bootstrapped OSD(s) found; using ${OSD_DEVICE}" 174 | osd_activate 175 | fi 176 | fi 177 | ;; 178 | esac 179 | } 180 | 181 | 182 | ################# 183 | # OSD_DIRECTORY # 184 | ################# 185 | 186 | function osd_directory { 187 | if [[ ! -d /var/lib/ceph/osd ]]; then 188 | echo "ERROR- could not find any OSD, did you bind mount the OSD data directory?" 189 | echo "ERROR- use -v :" 190 | exit 1 191 | fi 192 | 193 | # check if anything is there, if not create an osd with directory 194 | if [[ -n "$(find /var/lib/ceph/osd -prune -empty)" ]]; then 195 | echo "Creating osd with ceph osd create" 196 | OSD_ID=$(ceph osd create) 197 | if [ "$OSD_ID" -eq "$OSD_ID" ] 2>/dev/null; then 198 | echo "OSD created with ID: ${OSD_ID}" 199 | else 200 | echo "OSD creation failed: ${OSD_ID}" 201 | exit 1 202 | fi 203 | 204 | # create the folder 205 | mkdir -p /var/lib/ceph/osd/${CLUSTER}-${OSD_ID} 206 | echo "created folder /var/lib/ceph/osd/${CLUSTER}-${OSD_ID}" 207 | fi 208 | 209 | for OSD_ID in $(ls /var/lib/ceph/osd | awk 'BEGIN { FS = "-" } ; { print $2 }'); do 210 | if [ -n "${JOURNAL_DIR}" ]; then 211 | OSD_J="${JOURNAL_DIR}/journal.${OSD_ID}" 212 | else 213 | if [ -n "${JOURNAL}" ]; then 214 | OSD_J=${JOURNAL} 215 | else 216 | OSD_J=/var/lib/ceph/osd/${CLUSTER}-${OSD_ID}/journal 217 | fi 218 | fi 219 | 220 | # Check to see if our OSD has been initialized 221 | if [ ! -e /var/lib/ceph/osd/${CLUSTER}-${OSD_ID}/keyring ]; then 222 | # Create OSD key and file structure 223 | ceph-osd ${CEPH_OPTS} -i $OSD_ID --mkfs --mkkey --mkjournal --osd-journal ${OSD_J} 224 | 225 | if [ ! 
-e /var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring ]; then 226 | ceph auth get client.bootstrap-osd -o /var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring 227 | fi 228 | 229 | timeout 10 ceph ${CEPH_OPTS} --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring health || exit 1 230 | 231 | # Add the OSD key 232 | ceph ${CEPH_OPTS} --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring auth add osd.${OSD_ID} -i /var/lib/ceph/osd/${CLUSTER}-${OSD_ID}/keyring osd 'allow *' mon 'allow profile osd' 233 | 234 | # Add the OSD to the CRUSH map 235 | if [ ! -n "${HOSTNAME}" ]; then 236 | echo "HOSTNAME not set; cannot add OSD to CRUSH map" 237 | exit 1 238 | fi 239 | OSD_WEIGHT=$(df -P -k /var/lib/ceph/osd/${CLUSTER}-$OSD_ID/ | tail -1 | awk '{ d= $2/1073741824 ; r = sprintf("%.2f", d); print r }') 240 | ceph ${CEPH_OPTS} --name=osd.${OSD_ID} --keyring=/var/lib/ceph/osd/${CLUSTER}-${OSD_ID}/keyring osd crush create-or-move -- ${OSD_ID} ${OSD_WEIGHT} ${CRUSH_LOCATION} 241 | fi 242 | 243 | #TODO: use systemd instead 244 | #service ceph start osd 245 | exec ceph-osd ${CEPH_OPTS} -f -d -i ${OSD_ID} --osd-journal ${OSD_J} -k /var/lib/ceph/osd/ceph-${OSD_ID}/keyring 246 | done 247 | 248 | 249 | } 250 | 251 | ######################### 252 | # OSD_CEPH_DISK_PREPARE # 253 | ######################### 254 | 255 | function osd_disk_prepare { 256 | if [[ -z "${OSD_DEVICE}" ]];then 257 | echo "ERROR- You must provide a device to build your OSD ie: /dev/sdb" 258 | exit 1 259 | fi 260 | 261 | if [ ! -e /var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring ]; then 262 | echo "ERROR- /var/lib/ceph/bootstrap-ods/${CLUSTER}.keyring must exist. 
You can extract it from your current monitor by running 'ceph auth get client.bootstrap-ods -o /var/lib/ceph/bootstrap-ods/${CLUSTER}.keyring'" 263 | exit 1 264 | fi 265 | 266 | timeout 10 ceph ${CEPH_OPTS} --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/${CLUSTER}.keyring health || exit 1 267 | 268 | mkdir -p /var/lib/ceph/osd 269 | 270 | # TODO: 271 | # - add device format check (make sure only one device is passed 272 | 273 | if [[ "$(parted --script ${OSD_DEVICE} print | egrep '^ 1.*ceph data')" && ${OSD_FORCE_ZAP} -ne "1" ]]; then 274 | echo "ERROR- It looks like this device is an OSD, set OSD_FORCE_ZAP=1 to use this device anyway and zap its content" 275 | exit 1 276 | elif [[ "$(parted --script ${OSD_DEVICE} print | egrep '^ 1.*ceph data')" && ${OSD_FORCE_ZAP} -eq "1" ]]; then 277 | ceph-disk -v zap ${OSD_DEVICE} 278 | fi 279 | 280 | if [[ ! -z "${OSD_JOURNAL}" ]]; then 281 | ceph-disk -v prepare ${OSD_DEVICE} ${OSD_JOURNAL} 282 | else 283 | ceph-disk -v prepare ${OSD_DEVICE} 284 | fi 285 | } 286 | 287 | ################# 288 | # OSD_CEPH_DISK # 289 | ################# 290 | 291 | function osd_disk { 292 | osd_disk_prepare 293 | osd_activate 294 | } 295 | 296 | 297 | ########################## 298 | # OSD_CEPH_DISK_ACTIVATE # 299 | ########################## 300 | 301 | function osd_activate { 302 | if [[ -z "${OSD_DEVICE}" ]];then 303 | echo "ERROR- You must provide a device to build your OSD ie: /dev/sdb" 304 | exit 1 305 | fi 306 | timeout 10 bash -c "while [ ! 
-e ${OSD_DEVICE}1 ]; do sleep 1; done" 307 | mkdir -p /var/lib/ceph/osd 308 | ceph-disk -v activate ${OSD_DEVICE}1 309 | OSD_ID=$(cat /var/lib/ceph/osd/$(ls -ltr /var/lib/ceph/osd/ | tail -n1 | awk -v pattern="$CLUSTER" '$0 ~ pattern {print $9}')/whoami) 310 | OSD_WEIGHT=$(df -P -k /var/lib/ceph/osd/${CLUSTER}-$OSD_ID/ | tail -1 | awk '{ d= $2/1073741824 ; r = sprintf("%.2f", d); print r }') 311 | ceph ${CEPH_OPTS} --name=osd.${OSD_ID} --keyring=/var/lib/ceph/osd/${CLUSTER}-${OSD_ID}/keyring osd crush create-or-move -- ${OSD_ID} ${OSD_WEIGHT} ${CRUSH_LOCATION} 312 | 313 | # ceph-disk activiate has exec'ed /usr/bin/ceph-osd ${CEPH_OPTS} -f -d -i ${OSD_ID} 314 | # wait till ceph-osd exit 315 | OSD_PID=$(ps -ef |grep ceph-osd |grep osd.${OSD_ID} |awk '{print $2}') 316 | if [ ! -z ${OSD_ID} ]; then 317 | while [ -e /proc/${OSD_PID} ]; do sleep 1;done 318 | else 319 | exec /usr/bin/ceph-osd ${CEPH_OPTS} -f -d -i ${OSD_ID} 320 | fi 321 | } 322 | 323 | ####### 324 | # MDS # 325 | ####### 326 | 327 | function start_mds { 328 | get_config 329 | check_config 330 | 331 | # Check to see if we are a new MDS 332 | if [ ! -e /var/lib/ceph/mds/${CLUSTER}-${MDS_NAME}/keyring ]; then 333 | 334 | mkdir -p /var/lib/ceph/mds/${CLUSTER}-${MDS_NAME} 335 | 336 | if [ -e /etc/ceph/${CLUSTER}.client.admin.keyring ]; then 337 | KEYRING_OPT="--name client.admin --keyring /etc/ceph/${CLUSTER}.client.admin.keyring" 338 | elif [ -e /var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring ]; then 339 | KEYRING_OPT="--name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring" 340 | else 341 | echo "ERROR- Failed to bootstrap MDS: could not find admin or bootstrap-mds keyring. 
You can extract it from your current monitor by running 'ceph auth get client.bootstrap-mds -o /var/lib/ceph/bootstrap-mds/${CLUSTER}.keyring'"
      exit 1
    fi

    # Fail fast if the monitor is unreachable with the chosen keyring.
    timeout 10 ceph ${CEPH_OPTS} $KEYRING_OPT health || exit 1

    # Generate the MDS key
    ceph ${CEPH_OPTS} $KEYRING_OPT auth get-or-create mds.$MDS_NAME osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/${CLUSTER}-${MDS_NAME}/keyring

  fi

  # NOTE (leseb): having the admin keyring is really a security issue
  # If we need to bootstrap a MDS we should probably create the following on the monitors
  # I understand that this handy to do this here
  # but having the admin key inside every container is a concern

  # Create the Ceph filesystem, if necessary
  # (quoted so an unset CEPHFS_CREATE does not blow up the numeric test)
  if [ "$CEPHFS_CREATE" -eq 1 ]; then

    get_admin_key
    check_admin_key

    # Only create the filesystem when no fs of that name exists yet.
    if [[ "$(ceph fs ls | grep -c name:.${CEPHFS_NAME},)" -eq "0" ]]; then
      # Make sure the specified data pool exists
      if ! ceph ${CEPH_OPTS} osd pool stats ${CEPHFS_DATA_POOL} > /dev/null 2>&1; then
        ceph ${CEPH_OPTS} osd pool create ${CEPHFS_DATA_POOL} ${CEPHFS_DATA_POOL_PG}
      fi

      # Make sure the specified metadata pool exists
      if ! ceph ${CEPH_OPTS} osd pool stats ${CEPHFS_METADATA_POOL} > /dev/null 2>&1; then
        ceph ${CEPH_OPTS} osd pool create ${CEPHFS_METADATA_POOL} ${CEPHFS_METADATA_POOL_PG}
      fi

      ceph ${CEPH_OPTS} fs new ${CEPHFS_NAME} ${CEPHFS_METADATA_POOL} ${CEPHFS_DATA_POOL}
    fi
  fi

  # NOTE: prefixing this with exec causes it to die (commit suicide)
  /usr/bin/ceph-mds ${CEPH_OPTS} -d -i ${MDS_NAME}
}


#######
# RGW #
#######

# Bootstrap (if needed) and run a rados gateway named ${RGW_NAME}.
function start_rgw {
  get_config
  check_config

  # Quoted so an unset CEPH_GET_ADMIN_KEY does not blow up the numeric test.
  if [ "${CEPH_GET_ADMIN_KEY}" -eq 1 ]; then
    get_admin_key
    check_admin_key
  fi

  # Check to see if our RGW has been initialized
  if [ ! -e /var/lib/ceph/radosgw/${RGW_NAME}/keyring ]; then

    mkdir -p /var/lib/ceph/radosgw/${RGW_NAME}

    if [ ! -e /var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring ]; then
      echo "ERROR- /var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring must exist. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-rgw -o /var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring'"
      exit 1
    fi

    # Fail fast if the monitor is unreachable with the bootstrap-rgw key.
    timeout 10 ceph ${CEPH_OPTS} --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring health || exit 1

    # Generate the RGW key
    ceph ${CEPH_OPTS} --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/${CLUSTER}.keyring auth get-or-create client.rgw.${RGW_NAME} osd 'allow rwx' mon 'allow rw' -o /var/lib/ceph/radosgw/${RGW_NAME}/keyring
  fi

  # Remote CGI mode serves FastCGI on a socket host/port; default is civetweb.
  if [ "$RGW_REMOTE_CGI" -eq 1 ]; then
    /usr/bin/radosgw -d ${CEPH_OPTS} -n client.rgw.${RGW_NAME} -k /var/lib/ceph/radosgw/$RGW_NAME/keyring --rgw-socket-path="" --rgw-frontends="fastcgi socket_port=$RGW_REMOTE_CGI_PORT socket_host=$RGW_REMOTE_CGI_HOST"
  else
    /usr/bin/radosgw -d ${CEPH_OPTS} -n client.rgw.${RGW_NAME} -k /var/lib/ceph/radosgw/$RGW_NAME/keyring --rgw-socket-path="" \
--rgw-frontends="civetweb port=$RGW_CIVETWEB_PORT"
  fi
}


###########
# RESTAPI #
###########

# Append a [client.restapi] section to the cluster conf (once) and exec
# ceph-rest-api in the foreground as client.admin.
function start_restapi {
  get_config
  check_config

  # Ensure we have the admin key
  get_admin_key
  check_admin_key

  # Check to see if we need to add a [client.restapi] section; add, if necessary
  if ! egrep -q "\[client.restapi\]" /etc/ceph/${CLUSTER}.conf; then
    cat <<ENDHERE >>/etc/ceph/${CLUSTER}.conf

[client.restapi]
  public addr = ${RESTAPI_IP}:${RESTAPI_PORT}
  restapi base url = ${RESTAPI_BASE_URL}
  restapi log level = ${RESTAPI_LOG_LEVEL}
  log file = ${RESTAPI_LOG_FILE}
ENDHERE
  fi

  # start ceph-rest-api
  exec /usr/bin/ceph-rest-api ${CEPH_OPTS} -n client.admin

}

###############
# CEPH_DAEMON #
###############

# Normalize DAEMON to lowercase
CEPH_DAEMON=$(echo ${CEPH_DAEMON} |tr '[:upper:]' '[:lower:]')

# If we are given a valid first argument, set the
# CEPH_DAEMON variable from it
case "$CEPH_DAEMON" in
  mds)
    start_mds
    ;;
  mon)
    start_mon
    ;;
  osd)
    start_osd
    ;;
  osd_directory)
    OSD_TYPE="directory"
    start_osd
    ;;
  osd_ceph_disk)
    OSD_TYPE="disk"
    start_osd
    ;;
  osd_ceph_disk_prepare)
    OSD_TYPE="prepare"
    start_osd
    ;;
  osd_ceph_disk_activate)
    OSD_TYPE="activate"
    start_osd
    ;;
  rgw)
    start_rgw
    ;;
  restapi)
    start_restapi
    ;;
  *)
    # Unknown or empty daemon selector: print usage and bail out.
    if [ -z "$CEPH_DAEMON" ]; then
      echo "ERROR- One of CEPH_DAEMON or a daemon parameter must be defined as the name "
      echo "of the daemon you want to deploy."
      echo "Valid values for CEPH_DAEMON are MON, OSD, OSD_DIRECTORY, OSD_CEPH_DISK, OSD_CEPH_DISK_PREPARE, OSD_CEPH_DISK_ACTIVATE, MDS, RGW, RESTAPI"
      echo "Valid values for the daemon parameter are mon, osd, osd_directory, osd_ceph_disk, osd_ceph_disk_prepare, osd_ceph_disk_activate, mds, rgw, restapi"
      exit 1
    fi
    ;;
esac

exit 0