155 lines
3.9 KiB
Bash
Executable file
155 lines
3.9 KiB
Bash
Executable file
#!/bin/bash
#
# Attach an external Ceph cluster as Kubernetes cluster storage:
# creates the rbd admin/user secrets, RBAC for the rbd-provisioner,
# and a StorageClass. Pass "rollback" as $1 to delete all of it again.
#
# Requires: ./config and ./config-coreapps next to this script, providing
# ADMIN_KUBECONFIG, CEPH_ADMIN_KEY, CEPH_USER_KEY and CEPH_MONITOR_1..3.

echo ""
echo "... ] ATTACHING EXTERNAL CEPH AS CLUSTER STORAGE [ ..."
echo ""

# Directory this script lives in; config files are sourced relative to it.
# NOTE(review): this clobbers the real $HOME for the rest of the run —
# kept for compatibility because the sourced config files may rely on it,
# but a dedicated SCRIPT_DIR variable would be safer. TODO confirm.
HOME=$( cd "$(dirname "$0")" && pwd )

# Base cluster settings.
source "$HOME/config"

# Core-apps settings are mandatory; abort early when the file is missing.
if [ -f "$HOME/config-coreapps" ]; then
	echo "config-coreapps file FOUND :)"
	source "$HOME/config-coreapps"
else
	echo "config-coreapps file is missing."
	exit 1
fi
# Talk to the cluster with admin credentials from config-coreapps.
export KUBECONFIG="$ADMIN_KUBECONFIG"

# Default action installs everything; "rollback" as first argument
# deletes the same objects instead.
kcmd='create'
# Quoted, default-expanded test: the original unquoted `[ ! -z $1 ]` /
# `[ $1 = 'rollback' ]` errors out on empty or multi-word arguments.
if [ "${1-}" = 'rollback' ]; then
	kcmd='delete'
fi
# The Ceph admin key is required to create the ceph-secret used for
# provisioning; bail out with retrieval instructions when unset.
# (Expansion quoted: unquoted `[ -z $var ]` misbehaves on spaced values.)
if [ -z "${CEPH_ADMIN_KEY}" ]; then
	echo "Please provide ceph admin key using the command: "
	echo "sudo ceph --cluster ceph auth get-key client.admin"
	exit 1
fi
# The restricted "kube" pool user key is required for the mount secret;
# bail out with pool/user creation instructions when unset.
if [ -z "${CEPH_USER_KEY}" ]; then
	echo "Please provide ceph user key using the command: "
	echo "Use https://ceph.com/pgcalc/ to calculate the placement groups number"
	echo "sudo ceph --cluster ceph osd pool create kube 1024 1024"
	echo "sudo ceph --cluster ceph auth get-or-create client.kube mon 'allow r' osd 'allow rwx pool=kube'"
	echo "sudo ceph --cluster ceph auth get-key client.kube"
	exit 1
fi
# Install (or remove, on rollback) the two rbd secrets: the admin key used
# by the provisioner and the restricted pool-user key used for mounting.
# Key material is quoted — base64 keys can contain characters the shell
# would otherwise mangle when unquoted.
if [ "$kcmd" = 'create' ]; then
	kubectl "$kcmd" -n kube-system secret generic ceph-secret --type="kubernetes.io/rbd" --from-literal=key="${CEPH_ADMIN_KEY}"
	kubectl "$kcmd" -n kube-system secret generic ceph-secret-kube --type="kubernetes.io/rbd" --from-literal=key="${CEPH_USER_KEY}"
else
	kubectl "$kcmd" -n kube-system secret ceph-secret
	kubectl "$kcmd" -n kube-system secret ceph-secret-kube
fi
# RBAC for the rbd-provisioner: a ClusterRole/-Binding for cluster-scoped
# resources (PVs, PVCs, StorageClasses, events, DNS discovery) plus a
# namespaced Role/-Binding to read the ceph secrets, all bound to the
# rbd-provisioner ServiceAccount in kube-system.
# Heredoc delimiter is quoted — the manifest contains no shell expansions.
# FIX: the Role previously used rbac.authorization.k8s.io/v1beta1, which
# was removed in Kubernetes 1.22; aligned to v1 like its siblings.
cat <<'EOF' | kubectl "$kcmd" -n kube-system -f -
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns","coredns"]
    verbs: ["list", "get"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-provisioner
---
EOF
#if [ $kcmd = 'create' ]; then
#	#wait for the pod to start
#	echo -n "] Waiting for the control pod to start..."
#	while true; do
#		#currentstatus=$($kubectl get pod $k8x_name -o jsonpath="{.status.phase}")
#		currentstatus=$(kubectl -n kube-system get pods -l app=rbd-provisioner -o jsonpath="{.items[].status.phase}")
#		if [ "$currentstatus" = "Running" ]; then
#			echo -n ". done!"
#			echo ""
#			break
#		fi
#		sleep 1
#		echo -n "."
#	done
#fi
# StorageClass backed by the external Ceph cluster via the in-tree rbd
# provisioner. Cluster-scoped, so no namespace flag is needed.
# Heredoc delimiter intentionally UNquoted: the monitor addresses are
# shell variables from config-coreapps.
# FIX: removed the spaces after the commas in the monitors list — the rbd
# plugin splits on "," and leading spaces yield unresolvable hostnames.
cat <<EOF | kubectl "$kcmd" -f -
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: rados-block
provisioner: kubernetes.io/rbd
parameters:
  monitors: ${CEPH_MONITOR_1}:6789,${CEPH_MONITOR_2}:6789,${CEPH_MONITOR_3}:6789
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: kube-system
  pool: kube
  userId: kube
  userSecretName: ceph-secret-kube
  userSecretNamespace: kube-system
  fsType: ext4
  imageFormat: "2"
  imageFeatures: layering
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true
EOF