import from rtg

deflax 2024-05-18 04:45:52 +03:00
commit de790177af
98 changed files with 34963 additions and 0 deletions

15
.gitignore vendored Normal file
@@ -0,0 +1,15 @@
tmp/
.rnd
.kube/
.wget-hsts
__pycache__
#helm
.cache/
.config/
#k8x
cryptopack.*
config
config-coreapps
config-api.ini

58
README.md Normal file
@@ -0,0 +1,58 @@
# k8x - deploys k8s with 3 masters
Tested OS:
- Debian 10
# Configuration
config.dist - copy to config and set up the cluster-deployment-specific settings.
config-coreapps.dist - copy to config-coreapps and set up the core-application-specific settings.
# Installation of cluster nodes
./build_crypto.sh - run once to generate the certificate authorities and pack them into a cryptopack.b64 file, which must be shared with all nodes.
./init.sh - installs k8s master or worker capabilities on each Kubernetes node. Requires the node's hostname to match one of those listed in the config file. Run it simultaneously on all masters to set up etcd, then once on each worker node; see the sketch below.
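A minimal bootstrap sketch, assuming the config files are already in place (the echo command is printed by build_crypto.sh; the base64 blob is a placeholder):

    ./build_crypto.sh                       # on the first master: generates cryptopack.b64
    echo "<base64 blob>" > cryptopack.b64   # on every other node, using the command printed above
    ./init.sh                               # on all masters simultaneously, then once per worker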
# Installation of core applications
Note: Run the scripts from a node that has a config-coreapps config file.
./deploy_system.sh - installs the Calico SDN, CoreDNS and the metrics server into the kube-system namespace; sets up Helm and kubelet RBAC.
./deploy_cert_manager.sh - installs the Let's Encrypt ACME certificate manager into the cert-manager namespace.
./deploy_ingress_nginx.sh - installs the ingress-nginx ingress controller into the ingress-nginx namespace.
./attach_storage_ceph.sh - creates the Ceph storage keys in the kube-system namespace and the rados-block storage class, which PVCs can attach to for dynamic provisioning.
./deploy_monitoring.sh - installs Prometheus, Loki, Grafana and the Zabbix agent.
# Installation of additional environments
./deploy_env.sh staging - installs the staging namespace with wildcard SSL.
./deploy_env.sh develop - installs the develop namespace with wildcard SSL and a common Docker registry.
./deploy_env.sh live - installs the production namespace.
# Tools
./health.sh - displays the cluster health.
./health.sh all - displays the cluster health plus the state of all cluster objects (matches the "all" check in the script).
k9s - starts the Kubernetes ncurses admin panel.
watch -n 5 "k get pods --sort-by=.status.startTime --all-namespaces -o wide | tac" - watches for cluster pod state changes.
source config ; export KUBECONFIG=$ADMIN_KUBECONFIG - loads the cluster admin kubeconfig so the kubectl (or k) command works cluster-wide.
# Additional Tasks
1. Set up Grafana to access kube-apiserver in order to fetch pod metrics
Plugin: https://grafana.com/plugins/grafana-kubernetes-app
Data Source: https://kubernetes.default.svc.cluster.local
Auth: With Credentials, With CA Cert, TLS Client Auth

55
attach_private_registry.sh Executable file
@@ -0,0 +1,55 @@
#!/bin/bash
echo ""
echo "... ] ATTACHING PRIVATE DOCKER REGISTRY [ ..."
echo ""
HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/config
export KUBECONFIG=$ADMIN_KUBECONFIG
#TODO: Installing the default private registry (the one we use to host and reuse it as default on kubelet worker installation)
# could also be provided with this script as a secret and use it with ImagePullSecret.
if [ -z "$1" ]; then
echo "] Usage: ./attach_private_registry.sh <secret-name>"
exit 2
fi
REG_NAME="$1"
echo -n "] Target secret namespace: "
read NSPACE
if [ -z "$NSPACE" ]; then
echo "] No namespace"
exit 1
fi
echo -n "] Enter Docker registry user: "
read REGISTRY_USER
echo -n "] Enter Docker registry password (token): "
read REGISTRY_PASS
echo -n "] Enter Docker registry email: "
read REGISTRY_EMAIL
echo -n "] Enter Docker registry url (empty for docker hub): "
read REGISTRY_URL
if [ -z "$REGISTRY_URL" ]; then
CONFIG_URL="--docker-server=https://index.docker.io/v2/"
else
CONFIG_URL="--docker-server=https://${REGISTRY_URL}/v2/"
fi
SECRET_NAME="registry-${NSPACE}-${REG_NAME}"
CONFIG_SECRET="${SECRET_NAME} ${CONFIG_URL} --docker-username=${REGISTRY_USER} --docker-password=${REGISTRY_PASS} --docker-email=${REGISTRY_EMAIL}"
CMD="/usr/local/bin/k -n ${NSPACE} create secret docker-registry ${CONFIG_SECRET}"
echo ""
echo "Executing command: ${CMD}"
echo -n "Is that okay [y/n]? "
read -r answer
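# proceed only if the answer starts with Y or y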
if [ "$answer" != "${answer#[Yy]}" ]; then
${CMD}
fi
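The printed secret name can then be referenced from a pod spec in the same namespace; a minimal sketch (pod, secret and image names are placeholders):

apiVersion: v1
kind: Pod
metadata:
  name: example
spec:
  imagePullSecrets:
  - name: registry-develop-myreg    # SECRET_NAME as printed by the script
  containers:
  - name: app
    image: registry.example.tld/app:latest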

155
attach_storage_ceph.sh Executable file
@@ -0,0 +1,155 @@
#!/bin/bash
echo ""
echo "... ] ATTACHING EXTERNAL CEPH AS CLUSTER STORAGE [ ..."
echo ""
HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/config
if [ -f $HOME/config-coreapps ]; then
echo "config-coreapps file FOUND :)"
source $HOME/config-coreapps
else
echo "config-coreapps file is missing."
exit 1
fi
export KUBECONFIG=$ADMIN_KUBECONFIG
kcmd='create'
if [ -n "$1" ]; then
if [ "$1" = 'rollback' ]; then
kcmd='delete'
fi
fi
if [ -z "${CEPH_ADMIN_KEY}" ]; then
echo "Please provide the ceph admin key in config-coreapps. Obtain it with: "
echo "sudo ceph --cluster ceph auth get-key client.admin"
exit 1;
fi
if [ -z "${CEPH_USER_KEY}" ]; then
echo "Please provide the ceph user key in config-coreapps. Create the pool and user, then fetch the key: "
echo "Use https://ceph.com/pgcalc/ to calculate the placement groups number"
echo "sudo ceph --cluster ceph osd pool create kube 1024 1024"
echo "sudo ceph --cluster ceph auth get-or-create client.kube mon 'allow r' osd 'allow rwx pool=kube'"
echo "sudo ceph --cluster ceph auth get-key client.kube"
exit 1;
fi
if [ "$kcmd" = 'create' ]; then
kubectl $kcmd -n kube-system secret generic ceph-secret --type="kubernetes.io/rbd" --from-literal=key=${CEPH_ADMIN_KEY}
kubectl $kcmd -n kube-system secret generic ceph-secret-kube --type="kubernetes.io/rbd" --from-literal=key=${CEPH_USER_KEY}
else
kubectl $kcmd -n kube-system secret ceph-secret
kubectl $kcmd -n kube-system secret ceph-secret-kube
fi
cat <<EOF | kubectl $kcmd -n kube-system -f -
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "update", "patch"]
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["kube-dns","coredns"]
  verbs: ["list", "get"]
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
- kind: ServiceAccount
  name: rbd-provisioner
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
- kind: ServiceAccount
  name: rbd-provisioner
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-provisioner
---
EOF
#if [ $kcmd = 'create' ]; then
# #wait for the pod to start
# echo -n "] Waiting for the control pod to start..."
# while true; do
# #currentstatus=$($kubectl get pod $k8x_name -o jsonpath="{.status.phase}")
# currentstatus=$(kubectl -n kube-system get pods -l app=rbd-provisioner -o jsonpath="{.items[].status.phase}")
# if [ "$currentstatus" = "Running" ]; then
# echo -n ". done!"
# echo ""
# break
# fi
# sleep 1
# echo -n "."
# done
#fi
cat <<EOF | kubectl $kcmd -f -
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: rados-block
provisioner: kubernetes.io/rbd
parameters:
  monitors: ${CEPH_MONITOR_1}:6789, ${CEPH_MONITOR_2}:6789, ${CEPH_MONITOR_3}:6789
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: kube-system
  pool: kube
  userId: kube
  userSecretName: ceph-secret-kube
  userSecretNamespace: kube-system
  fsType: ext4
  imageFormat: "2"
  imageFeatures: layering
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true
EOF
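A PVC can then request dynamic provisioning from the new class; a minimal sketch (name, namespace and size are placeholders):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data-pvc
  namespace: staging
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: rados-block
  resources:
    requests:
      storage: 10Gi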

BIN
blobs/apiextensions-apiserver.gz Executable file

Binary file not shown.

BIN
blobs/calicoctl.gz Executable file

Binary file not shown.

BIN
blobs/etcd.gz Executable file

Binary file not shown.

BIN
blobs/etcdctl.gz Executable file

Binary file not shown.

BIN
blobs/helm.gz Executable file

Binary file not shown.

BIN
blobs/k9s.gz Executable file

Binary file not shown.

BIN
blobs/kube-aggregator.gz Executable file

Binary file not shown.

BIN
blobs/kube-apiserver.gz Executable file

Binary file not shown.

BIN
blobs/kube-controller-manager.gz Executable file

Binary file not shown.

BIN
blobs/kube-proxy.gz Executable file

Binary file not shown.

BIN
blobs/kube-scheduler.gz Executable file

Binary file not shown.

BIN
blobs/kubeadm.gz Executable file

Binary file not shown.

BIN
blobs/kubectl.gz Executable file

Binary file not shown.

BIN
blobs/kubelet.gz Executable file

Binary file not shown.

BIN
blobs/mounter.gz Executable file

Binary file not shown.

195
build_crypto.sh Executable file
@@ -0,0 +1,195 @@
#!/bin/bash
echo "... ] BUILDING THE CRYPTOPACK.B64 FILE [ ..."
HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/config
apt update -q
apt install -y sharutils openssl
SSL_REPO=/tmp/k8x-cryptogen
mkdir -p ${SSL_REPO}
mkdir -p ${CONF_DIR}/{kube-controller-manager,kubelet,kube-proxy,kube-scheduler}
mkdir -p /var/lib/{kube-controller-manager,kubelet,kube-proxy,kube-scheduler}
#checks if we have the cryptopack file
if [ -f $HOME/cryptopack.b64 ]; then
echo "] cryptopack.b64 already generated. rebuilding..."
TSTAMP=`date +%s`
mv -v ${HOME}/cryptopack.b64 ${HOME}/cryptopack.b64.${TSTAMP}
fi
if [ -f ${CA_DIR}/ca-openssl.cnf ]; then
cp -v ${CA_DIR}/ca-openssl.cnf ${SSL_REPO}/ca-openssl.cnf
else
cat <<EOF | tee ${SSL_REPO}/ca-openssl.cnf
[req]
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_ca ]
basicConstraints = critical, CA:TRUE
keyUsage = critical, digitalSignature, keyEncipherment, keyCertSign
[ v3_req_helm ]
basicConstraints = CA:FALSE
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth, clientAuth
[ v3_req_etcd ]
basicConstraints = CA:FALSE
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth, clientAuth
subjectAltName = @alt_names_etcd
[ alt_names_etcd ]
DNS.1 = ${MASTER_1_NAME}
DNS.2 = ${MASTER_2_NAME}
DNS.3 = ${MASTER_3_NAME}
DNS.4 = ${CLUSTER_NAME}.${CLUSTER_DOMAIN}
DNS.5 = ${MASTER_1_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN}
DNS.6 = ${MASTER_2_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN}
DNS.7 = ${MASTER_3_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN}
IP.1 = ${ETCD_1_IP}
IP.2 = ${ETCD_2_IP}
IP.3 = ${ETCD_3_IP}
EOF
fi
#generate tokens
for object in admin kubelet kube-proxy kube-controller-manager kube-scheduler
do
if [ -f ${CA_DIR}/${object}.token ]; then
cp -v ${CA_DIR}/${object}.token ${SSL_REPO}/${object}.token
else
dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 -w 0 | tr -d "=+/" | dd bs=256 count=1 2>/dev/null > ${SSL_REPO}/${object}.token
fi
done
printf "\n] generating certificate authorities..."
#generate kube certificate authority
if [ -f ${CA_DIR}/ca.key ] && [ -f ${CA_DIR}/ca.crt ]; then
cp -v ${CA_DIR}/ca.key ${SSL_REPO}/ca.key
cp -v ${CA_DIR}/ca.crt ${SSL_REPO}/ca.crt
else
openssl ecparam -name secp521r1 -genkey -noout -out ${SSL_REPO}/ca.key
chmod 0600 ${SSL_REPO}/ca.key
openssl req -x509 -new -nodes -key ${SSL_REPO}/ca.key -days 20000 -out ${SSL_REPO}/ca.crt -subj "/CN=kubernetes-ca" -extensions v3_ca -config ${SSL_REPO}/ca-openssl.cnf
fi
#generate helm certificate authority
if [ -f ${CA_DIR}/helm-ca.key ] && [ -f ${CA_DIR}/helm-ca.crt ]; then
cp -v ${CA_DIR}/helm-ca.key ${SSL_REPO}/helm-ca.key
cp -v ${CA_DIR}/helm-ca.crt ${SSL_REPO}/helm-ca.crt
else
openssl ecparam -name secp521r1 -genkey -noout -out ${SSL_REPO}/helm-ca.key
chmod 0600 ${SSL_REPO}/helm-ca.key
openssl req -x509 -new -nodes -key ${SSL_REPO}/helm-ca.key -days 20000 -out ${SSL_REPO}/helm-ca.crt -subj "/CN=helm-ca" -extensions v3_ca -config ${SSL_REPO}/ca-openssl.cnf
fi
#generate etcd certificate authority
if [ -f ${CA_DIR}/etcd-ca.key ] && [ -f ${CA_DIR}/etcd-ca.crt ]; then
cp -v ${CA_DIR}/etcd-ca.key ${SSL_REPO}/etcd-ca.key
cp -v ${CA_DIR}/etcd-ca.crt ${SSL_REPO}/etcd-ca.crt
else
openssl ecparam -name secp521r1 -genkey -noout -out ${SSL_REPO}/etcd-ca.key
chmod 0600 ${SSL_REPO}/etcd-ca.key
openssl req -x509 -new -nodes -key ${SSL_REPO}/etcd-ca.key -days 20000 -out ${SSL_REPO}/etcd-ca.crt -subj "/CN=etcd-ca" -extensions v3_ca -config ${SSL_REPO}/ca-openssl.cnf
fi
#generate aggregator certificate authority
if [ -f ${CA_DIR}/aggregator-ca.key ] && [ -f ${CA_DIR}/aggregator-ca.crt ]; then
cp -v ${CA_DIR}/aggregator-ca.key ${SSL_REPO}/aggregator-ca.key
cp -v ${CA_DIR}/aggregator-ca.crt ${SSL_REPO}/aggregator-ca.crt
else
openssl ecparam -name secp521r1 -genkey -noout -out ${SSL_REPO}/aggregator-ca.key
chmod 0600 ${SSL_REPO}/aggregator-ca.key
openssl req -x509 -new -nodes -key ${SSL_REPO}/aggregator-ca.key -days 20000 -out ${SSL_REPO}/aggregator-ca.crt -subj "/CN=aggregator-ca" -extensions v3_ca -config ${SSL_REPO}/ca-openssl.cnf
fi
printf "\n] generating certificates..."
#create etcd certificate
if [ -f ${CA_DIR}/etcd.key ] && [ -f ${CA_DIR}/etcd.crt ]; then
cp -v ${CA_DIR}/etcd.key ${SSL_REPO}/etcd.key
cp -v ${CA_DIR}/etcd.crt ${SSL_REPO}/etcd.crt
cp -v ${CA_DIR}/etcd.csr ${SSL_REPO}/etcd.csr
else
openssl ecparam -name secp521r1 -genkey -noout -out ${SSL_REPO}/etcd.key
chmod 0600 ${SSL_REPO}/etcd.key
openssl req -new -key ${SSL_REPO}/etcd.key -subj "/CN=etcd" -out ${SSL_REPO}/etcd.csr
openssl x509 -req -in ${SSL_REPO}/etcd.csr -CA ${SSL_REPO}/etcd-ca.crt -CAkey ${SSL_REPO}/etcd-ca.key -CAcreateserial -out ${SSL_REPO}/etcd.crt -days 20000 -extensions v3_req_etcd -extfile ${SSL_REPO}/ca-openssl.cnf
fi
#create etcd peer certificate
if [ -f ${CA_DIR}/etcd-peer.key ] && [ -f ${CA_DIR}/etcd-peer.crt ]; then
cp -v ${CA_DIR}/etcd-peer.key ${SSL_REPO}/etcd-peer.key
cp -v ${CA_DIR}/etcd-peer.crt ${SSL_REPO}/etcd-peer.crt
cp -v ${CA_DIR}/etcd-peer.csr ${SSL_REPO}/etcd-peer.csr
else
openssl ecparam -name secp521r1 -genkey -noout -out ${SSL_REPO}/etcd-peer.key
chmod 0600 ${SSL_REPO}/etcd-peer.key
openssl req -new -key ${SSL_REPO}/etcd-peer.key -subj "/CN=etcd-peer" -out ${SSL_REPO}/etcd-peer.csr
openssl x509 -req -in ${SSL_REPO}/etcd-peer.csr -CA ${SSL_REPO}/etcd-ca.crt -CAkey ${SSL_REPO}/etcd-ca.key -CAcreateserial -out ${SSL_REPO}/etcd-peer.crt -days 20000 -extensions v3_req_etcd -extfile ${SSL_REPO}/ca-openssl.cnf
fi
#create helm server (tiller) certificate
if [ -f ${CA_DIR}/tiller.key ] && [ -f ${CA_DIR}/tiller.crt ]; then
cp -v ${CA_DIR}/tiller.key ${SSL_REPO}/tiller.key
cp -v ${CA_DIR}/tiller.crt ${SSL_REPO}/tiller.crt
cp -v ${CA_DIR}/tiller.csr ${SSL_REPO}/tiller.csr
else
openssl ecparam -name secp521r1 -genkey -noout -out ${SSL_REPO}/tiller.key
chmod 0600 ${SSL_REPO}/tiller.key
openssl req -new -key ${SSL_REPO}/tiller.key -subj "/CN=tiller" -out ${SSL_REPO}/tiller.csr
openssl x509 -req -in ${SSL_REPO}/tiller.csr -CA ${SSL_REPO}/helm-ca.crt -CAkey ${SSL_REPO}/helm-ca.key -CAcreateserial -out ${SSL_REPO}/tiller.crt -days 20000 -extensions v3_req_helm -extfile ${SSL_REPO}/ca-openssl.cnf
fi
#create helm client certificate
if [ -f ${CA_DIR}/helm.key ] && [ -f ${CA_DIR}/helm.crt ]; then
cp -v ${CA_DIR}/helm.key ${SSL_REPO}/helm.key
cp -v ${CA_DIR}/helm.crt ${SSL_REPO}/helm.crt
cp -v ${CA_DIR}/helm.csr ${SSL_REPO}/helm.csr
else
openssl ecparam -name secp521r1 -genkey -noout -out ${SSL_REPO}/helm.key
chmod 0600 ${SSL_REPO}/helm.key
openssl req -new -key ${SSL_REPO}/helm.key -subj "/CN=helm" -out ${SSL_REPO}/helm.csr
openssl x509 -req -in ${SSL_REPO}/helm.csr -CA ${SSL_REPO}/helm-ca.crt -CAkey ${SSL_REPO}/helm-ca.key -CAcreateserial -out ${SSL_REPO}/helm.crt -days 20000 -extensions v3_req_helm -extfile ${SSL_REPO}/ca-openssl.cnf
fi
#create aggregator proxy certificate
if [ -f ${CA_DIR}/aggregator.key ] && [ -f ${CA_DIR}/aggregator.crt ]; then
cp -v ${CA_DIR}/aggregator.key ${SSL_REPO}/aggregator.key
cp -v ${CA_DIR}/aggregator.crt ${SSL_REPO}/aggregator.crt
cp -v ${CA_DIR}/aggregator.csr ${SSL_REPO}/aggregator.csr
else
openssl ecparam -name secp521r1 -genkey -noout -out ${SSL_REPO}/aggregator.key
chmod 0600 ${SSL_REPO}/aggregator.key
openssl req -new -key ${SSL_REPO}/aggregator.key -subj "/CN=aggregator" -out ${SSL_REPO}/aggregator.csr
openssl x509 -req -in ${SSL_REPO}/aggregator.csr -CA ${SSL_REPO}/aggregator-ca.crt -CAkey ${SSL_REPO}/aggregator-ca.key -CAcreateserial -out ${SSL_REPO}/aggregator.crt -days 20000 -extensions v3_req_helm -extfile ${SSL_REPO}/ca-openssl.cnf
fi
printf "\n] generating root service account keypair..."
#generate root ServiceAccount public and private key
if [ -f ${CA_DIR}/sa.key ] && [ -f ${CA_DIR}/sa.pub ]; then
cp -v ${CA_DIR}/sa.key ${SSL_REPO}/sa.key
cp -v ${CA_DIR}/sa.pub ${SSL_REPO}/sa.pub
else
openssl ecparam -name secp521r1 -genkey -noout -out ${SSL_REPO}/sa.key
openssl ec -in ${SSL_REPO}/sa.key -outform PEM -pubout -out ${SSL_REPO}/sa.pub
chmod 0600 ${SSL_REPO}/sa.key
fi
printf "\n] packing the crypto files..."
tar cvf $HOME/cryptopack.tar ${SSL_REPO}/*
gzip -9 $HOME/cryptopack.tar
cat $HOME/cryptopack.tar.gz | base64 -w 0 > $HOME/cryptopack.b64
rm $HOME/cryptopack.tar.gz
rm -fr ${SSL_REPO}
clear
echo "exec the following command on the rest of the nodes to distribute the keys"
echo ;
packdata=`cat ${HOME}/cryptopack.b64`
echo "echo \"${packdata}\" > cryptopack.b64"

9
config-api.ini.dist Normal file
@@ -0,0 +1,9 @@
[k8x-cluster]
admin_kubeconfig = /etc/kubernetes/kubeconfig
cluster_api_url = https://10.15.0.2:16443
cluster_name = kube.virtual.local
le_cert_domain = example.com
[k8x-api]
authtoken = hackme
DEBUG = off

23
config-coreapps.dist Normal file
@@ -0,0 +1,23 @@
#TIME
NTP_SERVER=10.15.8.80
#STORAGE
CEPH_ADMIN_KEY=""
CEPH_USER_KEY=""
CEPH_MONITOR_1="10.15.8.91"
CEPH_MONITOR_2="10.15.8.92"
CEPH_MONITOR_3="10.15.8.93"
#REGISTRY
REGISTRY_SERVER=registry.example.tld
REGISTRY_USER=deployer
REGISTRY_PASS=pass123
#MONITORING
ZABBIX_SERVER="10.15.0.2"
ZABBIX_PSK=asdqwe123
ZABBIX_PSK_ID=PSK
GRAFANA_SMTP_HOST=email-smtp.eu-west-1.amazonaws.com
GRAFANA_SMTP_USER=user
GRAFANA_SMTP_PASSWORD="asdqwe123"
GRAFANA_SMTP_FROM_ADDRESS="no-reply@example.com"

75
config.dist Normal file
@@ -0,0 +1,75 @@
#K8X CONFIG
CLUSTER_NAME=kube
CLUSTER_DOMAIN=staging.example.com
MASTERS_DOMAIN=virtualkube.example.com
ADMIN_EMAIL=admin@example.com
CONF_DIR=/etc/kubernetes
CLOUDFLARE_API_KEY=000
HAPROXY_VRRP_AUTH=Pass
HAPROXY_STATS_AUTH=admin:Pass
CERT_MODE=true
#NETWORK
CNI_NET=172.16.0.0/16
SERVICE_NET=172.18.0.0/16
SERVICE_FIP=172.18.0.1
NODE_INTERFACE=tun0
ETCD_INTERFACE=tun0
MASTER_LB_IP=10.15.0.2
MASTER_LB_MASK=16
MASTER_1_NAME=master01
MASTER_1_IP=10.15.8.11
MASTER_2_NAME=master02
MASTER_2_IP=10.15.8.12
MASTER_3_NAME=master03
MASTER_3_IP=10.15.8.13
NODE_1_NAME=worker01
NODE_1_IP=10.15.8.21
NODE_2_NAME=worker02
NODE_2_IP=10.15.8.22
NODE_3_NAME=worker03
NODE_3_IP=10.15.8.23
NODE_4_NAME=worker04
NODE_4_IP=10.15.8.24
NODE_5_NAME=worker05
NODE_5_IP=10.15.8.25
NODE_6_NAME=worker06
NODE_6_IP=10.15.8.26
NODE_7_NAME=worker07
NODE_7_IP=10.15.8.27
NODE_8_NAME=worker08
NODE_8_IP=10.15.8.28
NODE_9_NAME=worker09
NODE_9_IP=10.15.8.29
NODE_10_NAME=worker10
NODE_10_IP=10.15.8.30
NODE_11_NAME=worker11
NODE_11_IP=10.15.8.31
NODE_12_NAME=worker12
NODE_12_IP=10.15.8.32
NODE_13_NAME=worker13
NODE_13_IP=10.15.8.33
NODE_14_NAME=worker14
NODE_14_IP=10.15.8.34
NODE_15_NAME=worker15
NODE_15_IP=10.15.8.35
ETCD_1_NAME=${MASTER_1_NAME}
ETCD_1_IP=10.15.8.11
ETCD_2_NAME=${MASTER_2_NAME}
ETCD_2_IP=10.15.8.12
ETCD_3_NAME=${MASTER_3_NAME}
ETCD_3_IP=10.15.8.13
ADMIN_KUBECONFIG=${CONF_DIR}/kubeconfig
CA_DIR=${CONF_DIR}/pki
FUNC_PATH="systemd"
NODE_IP=$(ip addr show dev ${NODE_INTERFACE} | grep 'inet ' | awk '{print $2}' | cut -d '/' -f 1 | grep -v ${MASTER_LB_IP})
NODE_NAME=$(hostname)
NODE_NAME_SHORT=$(hostname -s)
ETCD_IP=$(ip addr show dev ${ETCD_INTERFACE} | grep 'inet ' | awk '{print $2}' | cut -d '/' -f 1)

43
deploy_cert_manager.sh Executable file
@@ -0,0 +1,43 @@
#!/bin/bash
echo ""
echo "... ] DEPLOYING SYSTEM SERVICES [ ..."
HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/config
if [ -f $HOME/config-coreapps ]; then
echo "config-coreapps file FOUND :)"
source $HOME/config-coreapps
else
echo "config-coreapps file is missing."
exit 1
fi
export KUBECONFIG=$ADMIN_KUBECONFIG
export HELM_CACHE_HOME=~/.cache/helm
export HELM_CONFIG_HOME=~/.config/helm
export HELM_DATA_HOME=~/.local/share/helm
# Setup ACME
if $CERT_MODE; then
printf "\ndeploying cert-manager helm chart...\n"
helm repo add jetstack https://charts.jetstack.io
helm repo update
kubectl create namespace cert-manager
kubectl -n cert-manager create secret generic cf-api-secret --from-literal=cf-api-key=${CLOUDFLARE_API_KEY}
kubectl apply -f yaml/cert-manager/cert-manager.crds.yaml
helm install \
cert-manager jetstack/cert-manager \
--namespace cert-manager \
--version v1.1.0 \
-f yaml/cert-manager/values.yaml
printf "\nwaiting for cert-manager to finish installation...\n"
sleep 30
cat yaml/cert-manager/letsencrypt-staging-clusterissuer.yaml | sed "s/k8x_acme_email/${ADMIN_EMAIL}/" | kubectl -n kube-system apply -f -
cat yaml/cert-manager/letsencrypt-staging-dns-clusterissuer.yaml | sed "s/k8x_acme_email/${ADMIN_EMAIL}/" | kubectl -n kube-system apply -f -
cat yaml/cert-manager/letsencrypt-production-clusterissuer.yaml | sed "s/k8x_acme_email/${ADMIN_EMAIL}/" | kubectl -n kube-system apply -f -
cat yaml/cert-manager/letsencrypt-production-dns-clusterissuer.yaml | sed "s/k8x_acme_email/${ADMIN_EMAIL}/" | kubectl -n kube-system apply -f -
fi

72
deploy_env.sh Executable file
@@ -0,0 +1,72 @@
#!/bin/bash
echo ""
echo "... ] PREPARING ENVS [ ..."
HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/config
if [ -f $HOME/config-coreapps ]; then
echo "config-coreapps file FOUND :)"
source $HOME/config-coreapps
export CEPH_MONITOR_1
export CEPH_MONITOR_2
export CEPH_MONITOR_3
else
echo "config-coreapps file is missing."
exit 1
fi
export KUBECONFIG=$ADMIN_KUBECONFIG
nspace=$1
if [ -z "$nspace" ]; then
echo "] Usage: ./deploy_env.sh <staging|develop|live>"
exit 1
fi
export nspace
# Setup namespace, Service Accounts, RBAC, Limit and namespace keypair
printf "\nsetting up ${nspace}... \n"
cat yaml/system/namespace.yaml | envsubst | kubectl apply -f -
printf "\nsetting up Service Accounts... \n"
/bin/bash tools/add_service_account.sh admin-sa ${nspace}
/bin/bash tools/add_service_account.sh backup-agent-sa ${nspace}
sleep 5
printf "\nsetting up RBAC... \n"
cat yaml/system/sa-rbac.yaml | envsubst | kubectl apply -f -
cat yaml/system/sa-rbac-backup-agent.yaml | envsubst | kubectl apply -f -
sleep 5
printf "\nsetting up resource limits... \n"
kubectl -n $nspace apply -f yaml/system/default-resource-limits.yaml
printf "\nsetting up shared keypair secret... \n"
openssl ecparam -genkey -name prime256v1 -noout -out /tmp/${nspace}_id_ecdsa
openssl ec -in /tmp/${nspace}_id_ecdsa -pubout -out /tmp/${nspace}_id_ecdsa.pub
kubectl -n $nspace create secret generic auth-keypair --from-file=id_ecdsa=/tmp/${nspace}_id_ecdsa --from-file=id_ecdsa.pub=/tmp/${nspace}_id_ecdsa.pub
rm /tmp/${nspace}_id_ecdsa
rm /tmp/${nspace}_id_ecdsa.pub
if [ "$nspace" = "develop" ]; then
# Setup the private docker registry
printf "\nsetting up Docker Registry... \n"
#create secret for the registry
if [ -f /tmp/regsecret ]; then
rm /tmp/regsecret
fi
htpasswd -Bb -c /tmp/regsecret $REGISTRY_USER $REGISTRY_PASS
regpassstr=`cat /tmp/regsecret | base64 -w 0`
cat <<EOF | kubectl -n develop apply -f -
apiVersion: v1
kind: Secret
metadata:
  name: registry-sec
type: Opaque
data:
  HTPASSWD: ${regpassstr}
EOF
kubectl -n ${nspace} apply -f env/registry-pv.yaml
kubectl -n ${nspace} apply -f env/registry.yaml
fi
if [ "$nspace" = "live" ]; then
exit 0
fi
printf "\ncreate a wildcard certificate secret with letsencrypt for the defined namespace...\n"
cat env/nspace-wild-cert.yaml | envsubst | kubectl -n ${nspace} apply -f -
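A quick way to verify the result, e.g. for the staging namespace (the certificate check assumes cert-manager is already deployed):

kubectl -n staging get sa,limitrange,secret
kubectl -n staging get certificate wildcard-staging-cert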

42
deploy_ingress_nginx.sh Executable file
@@ -0,0 +1,42 @@
#!/bin/bash
echo ""
echo "... ] DEPLOYING SYSTEM SERVICES [ ..."
HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/config
if [ -f $HOME/config-coreapps ]; then
echo "config-coreapps file FOUND :)"
source $HOME/config-coreapps
else
echo "config-coreapps file is missing."
exit 1
fi
export KUBECONFIG=$ADMIN_KUBECONFIG
export HELM_CACHE_HOME=~/.cache/helm
export HELM_CONFIG_HOME=~/.config/helm
export HELM_DATA_HOME=~/.local/share/helm
# Setup Ingress-Nginx
kubectl create namespace ingress-nginx
if $CERT_MODE; then
printf "\ndeploying ingress-nginx helm chart WITH TLS termination in TCP/PROXY mode...\n"
k8x_proxy_mode="true"
else
printf "\ndeploying ingress-nginx helm chart WITHOUT TLS termination in HTTP mode...\n"
k8x_proxy_mode="false"
fi
k8x_maxmind_lic="4rD1ICHnexjd6KaY"
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
helm install \
ingress-nginx ingress-nginx/ingress-nginx \
--namespace ingress-nginx \
--set-string controller.config.use-proxy-protocol="${k8x_proxy_mode}" \
--set-string controller.maxmindLicenseKey="${k8x_maxmind_lic}" \
--version 3.15.2 \
-f yaml/ingress-nginx/values.yaml

73
deploy_monitoring.sh Executable file
@@ -0,0 +1,73 @@
#!/bin/bash
echo ""
echo "... ] DEPLOYING MONITORING [ ..."
HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/config
if [ -f $HOME/config-coreapps ]; then
echo "config-coreapps file FOUND :)"
source $HOME/config-coreapps
export CEPH_MONITOR_1
export CEPH_MONITOR_2
export CEPH_MONITOR_3
else
echo "config-coreapps file is missing."
exit 1
fi
export KUBECONFIG=$ADMIN_KUBECONFIG
export HELM_CACHE_HOME=~/.cache/helm
export HELM_CONFIG_HOME=~/.config/helm
export HELM_DATA_HOME=~/.local/share/helm
kubectl create namespace monitoring
printf "\ndeploying zabbix-agent for host monitoring...\n"
kubectl -n monitoring create secret generic zabbix-psk-secret --from-literal=zabbix_agentd.psk=${ZABBIX_PSK}
kubectl -n monitoring create secret generic zabbix-psk-id-secret --from-literal=zabbix_psk_id=${ZABBIX_PSK_ID}
cat yaml/monitoring/zabbix-agent-daemonset.yaml | sed "s/k8x_zabbix_server/${ZABBIX_SERVER}/" | kubectl -n monitoring apply -f -
printf "\ndeploying prometheus for metrics aggregation...\n"
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
cat yaml/monitoring/prometheus-volumes.yaml | envsubst | kubectl apply -f -
helm install \
prometheus prometheus-community/prometheus \
--namespace monitoring \
-f yaml/monitoring/prometheus-values.yaml
printf "\ndeploying grafana for monitoring dashboard...\n"
helm repo add grafana https://grafana.github.io/helm-charts
helm repo update
cat yaml/monitoring/grafana-volumes.yaml | envsubst | kubectl apply -f -
helm install \
grafana grafana/grafana \
--namespace monitoring \
-f yaml/monitoring/grafana-values.yaml \
--set env.GF_SMTP_ENABLED=true,env.GF_SMTP_HOST=${GRAFANA_SMTP_HOST},env.GF_SMTP_FROM_ADDRESS=${GRAFANA_SMTP_FROM_ADDRESS},env.GF_SMTP_USER=${GRAFANA_SMTP_USER},env.GF_SMTP_PASSWORD=${GRAFANA_SMTP_PASSWORD},env.GF_SMTP_SKIP_VERIFY=true
printf '\ngrafana login:\nuser: admin \npass: ' ; kubectl get secret --namespace monitoring grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
#kubectl -n monitoring create secret generic monitoring-http-secret --from-file=yaml/monitoring/auth
if $CERT_MODE; then
cat yaml/monitoring/grafana-cert.yaml | sed "s/k8x_domain/${CLUSTER_NAME}.${CLUSTER_DOMAIN}/" | kubectl -n monitoring apply -f -
cat yaml/monitoring/grafana-ingress-secure.yaml | sed "s/k8x_domain/${CLUSTER_NAME}.${CLUSTER_DOMAIN}/" | kubectl -n monitoring apply -f -
else
cat yaml/monitoring/grafana-ingress.yaml | sed "s/k8x_domain/${CLUSTER_NAME}.${CLUSTER_DOMAIN}/" | kubectl -n monitoring apply -f -
fi
exit 0
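# NOTE: the exit above makes the Loki/promtail section below unreachable; remove it to re-enable log aggregation.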
###
printf "\ndeploying loki for logs aggregation..."
cat yaml/monitoring/loki-volumes.yaml | envsubst | kubectl apply -f -
helm repo add loki https://grafana.github.io/loki/charts
helm repo update
helm install \
loki loki/loki \
--namespace monitoring \
-f yaml/monitoring/loki-values.yaml
helm install \
promtail loki/promtail \
--namespace monitoring \
--set "loki.serviceName=loki"

52
deploy_system.sh Executable file
@@ -0,0 +1,52 @@
#!/bin/bash
echo ""
echo "... ] DEPLOYING SYSTEM SERVICES [ ..."
HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/config
if [ -f $HOME/config-coreapps ]; then
echo "config-coreapps file FOUND :)"
source $HOME/config-coreapps
else
echo "config-coreapps file is missing."
exit 1
fi
export KUBECONFIG=$ADMIN_KUBECONFIG
export HELM_CACHE_HOME=~/.cache/helm
export HELM_CONFIG_HOME=~/.config/helm
export HELM_DATA_HOME=~/.local/share/helm
# Setup kube-apiserver RBAC for kubelet authorization
kubectl apply -f yaml/system/kube-apiserver-to-kubelet-clusterrole.yaml
kubectl apply -f yaml/system/kube-apiserver-to-kubelet-clusterrolebinding.yaml
# Setup Calico SDN
kubectl apply -f yaml/calico/rbac-calico-etcd.yaml
printf "\ndeploying calico sdn...\n"
ECA64=$( base64 -w0 ${CA_DIR}/etcd-ca.crt )
ECERT64=$( base64 -w0 ${CA_DIR}/etcd.crt )
EKEY64=$( base64 -w0 ${CA_DIR}/etcd.key )
cat yaml/calico/calico-etcd.yaml | \
sed -e "s@k8x_calico_etcd_endpoint@https://${ETCD_1_IP}:2379,https://${ETCD_2_IP}:2379,https://${ETCD_3_IP}:2379@g" | \
sed -e "s@#\ etcd-ca:\ null@etcd-ca:\ ${ECA64}@g" | \
sed -e "s@#\ etcd-cert:\ null@etcd-cert:\ ${ECERT64}@g" | \
sed -e "s@#\ etcd-key:\ null@etcd-key:\ ${EKEY64}@g" | \
sed -e "s@k8x_calico_pool@${CNI_NET}@g" | kubectl apply -f -
# Setup Helm package manager
printf "\nsetting up kubernetes helm repos...\n"
helm repo add stable https://charts.helm.sh/stable
helm repo add incubator https://charts.helm.sh/incubator
helm repo update
# Setup DNS
printf "\ndeploying coredns...\n"
kubectl apply -f yaml/coredns/coredns.yaml
# Setup Metrics provider
printf "\ndeploying metrics-server...\n"
kubectl apply -f yaml/metrics-server/components.yaml

@@ -0,0 +1,26 @@
#setup iscsi server:
apt install tgt
mkdir /var/lib/docker/iscsi
cd /var/lib/docker/iscsi
dd if=/dev/zero of=prometheusdata.img bs=1M count=8K
dd if=/dev/zero of=prometheusalert.img bs=1M count=2K
dd if=/dev/zero of=grafana.img bs=1M count=10K
tgtadm --lld iscsi --op new --mode target --tid 1 -T iqn.2019-01.example.tld:prometheusdata
tgtadm --lld iscsi --op new --mode target --tid 2 -T iqn.2019-01.example.tld:prometheusalert
tgtadm --lld iscsi --op new --mode target --tid 3 -T iqn.2019-01.example.tld:grafana
tgtadm --lld iscsi --op new --mode target --tid 4 -T iqn.2019-02.example.tld:db-staging.img
tgtadm --lld iscsi --op new --mode logicalunit --tid 1 --lun 1 -b /var/lib/docker/iscsi/prometheusdata.img
tgtadm --lld iscsi --op new --mode logicalunit --tid 2 --lun 1 -b /var/lib/docker/iscsi/prometheusalert.img
tgtadm --lld iscsi --op new --mode logicalunit --tid 3 --lun 1 -b /var/lib/docker/iscsi/grafana.img
tgtadm --lld iscsi --op new --mode logicalunit --tid 4 --lun 1 -b /var/lib/docker/iscsi/db-staging.img
for i in $(seq 1 4); do tgtadm --lld iscsi --op bind --mode target --tid $i -I 10.15.8.21; done
for i in $(seq 1 4); do tgtadm --lld iscsi --op bind --mode target --tid $i -I 10.15.8.22; done
tgt-admin --dump | tee /etc/tgt/conf.d/sysiscsi.conf
#test client:
apt install open-iscsi
iscsiadm --mode discovery -t st -p 10.15.0.2
iscsiadm --mode node --targetname iqn.2019-01.example.tld:grafana -p 10.15.0.2 -l
cat /proc/partitions
iscsiadm --mode node --targetname iqn.2019-01.example.tld:grafana -p 10.15.0.2 -u
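Once logged in, the LUN appears as a regular block device; formatting and mounting it would look like this (the device name /dev/sdb is an assumption, check dmesg or /proc/partitions):

mkfs.ext4 /dev/sdb
mount /dev/sdb /mnt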

36
docs/notes Normal file
@@ -0,0 +1,36 @@
#Links
kube-apiserver haproxy stats:
- http://clustername.clusterdomain.tld:9000/stats
- https://kubernetes.io/blog/2016/08/security-best-practices-kubernetes-deployment/
#ceph common commands:
https://tracker.ceph.com/projects/ceph/wiki/10_Commands_Every_Ceph_Administrator_Should_Know
#ceph remove dead osd:
https://www.sebastien-han.fr/blog/2015/12/11/ceph-properly-remove-an-osd/
http://www.abeji.de/weave-vs-calico/
#patch crd to delete stuck termination
kubectl -n rook-ceph patch clusters.ceph.rook.io rook-ceph -p '{"metadata":{"finalizers": []}}' --type=merge
kubectl edit crd clusters.ceph.rook.io
#access dashboard via kubectl proxy:
kubectl proxy
http://127.0.0.1:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/
*node*
systemctl stop etcd.service
rm -fr /var/lib/etcd/member
vim /opt/etcd/config/etcd.conf
ETCD_INITIAL_CLUSTER_STATE=existing
*master*
/opt/etcd/bin/etcdctl member remove c02630faf3a2dd3d
/opt/etcd/bin/etcdctl member list
/opt/etcd/bin/etcdctl member add master03 http://192.168.8.13:2380
*node*
systemctl stop etcd.service

@@ -0,0 +1,27 @@
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: backup-agent-role
  namespace: ${nspace}
rules:
- apiGroups: [""]
  resources: ["pods", "pods/log"]
  verbs: ["get", "list"]
- apiGroups: [""]
  resources: ["pods/exec"]
  verbs: ["create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: backup-agent-rolebinding
  namespace: ${nspace}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: backup-agent-role
subjects:
- kind: ServiceAccount
  name: backup-agent-sa
  namespace: ${nspace}

70
env/deprecated/sa-rbac.yaml vendored Normal file
@@ -0,0 +1,70 @@
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: admin-role
  namespace: ${nspace}
rules:
- apiGroups: [ "", "extensions", "apps", "batch", "autoscaling" ]
  resources: [ "pods", "daemonsets", "deployments", "services", "replicasets", "replicationcontrollers", "statefulsets", "horizontalpodautoscalers", "jobs", "cronjobs", "events", "ingresses", "persistentvolumeclaims", "certificates", "configmaps", "secrets", "logs", "pods/log", "pods/exec", "pods/portforward" ]
  verbs: [ "get", "list", "watch", "create", "update", "delete", "patch" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: admin-rolebinding
  namespace: ${nspace}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: admin-role
subjects:
- kind: ServiceAccount
  name: admin-sa
  namespace: ${nspace}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: admin-cert-role
  namespace: ${nspace}
rules:
- apiGroups: [ "certmanager.k8s.io" ]
  resources: [ "issuers", "certificates" ]
  verbs: [ "get", "list", "watch", "create", "update", "delete" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: admin-cert-rolebinding
  namespace: ${nspace}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: admin-cert-role
subjects:
- kind: ServiceAccount
  name: admin-sa
  namespace: ${nspace}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: admin-${nspace}-clusterrole
rules:
- apiGroups: [ "" ]
  resources: [ "persistentvolumes" ]
  verbs: [ "get", "list", "watch", "create", "update", "delete", "patch" ]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: admin-${nspace}-clusterrolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: admin-${nspace}-clusterrole
subjects:
- kind: ServiceAccount
  name: admin-sa
  namespace: ${nspace}

17
env/nspace-wild-cert.yaml vendored Normal file
@@ -0,0 +1,17 @@
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: wildcard-${nspace}-cert
  namespace: ${nspace}
spec:
  secretName: wildcard-${nspace}-tls
  issuerRef:
    name: letsencrypt-production-dns
    kind: ClusterIssuer
  commonName: '*.${nspace}.example.com'
  dnsNames:
  - '${nspace}.example.com'
  - '*.${nspace}.example.com'
  privateKey:
    algorithm: ECDSA

27
env/registry-pv.yaml vendored Normal file
@@ -0,0 +1,27 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: registry-data-pv
spec:
  capacity:
    storage: 420Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    path: "/srv/registry/data"
    server: 10.15.8.10
    readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: registry-data-pvc
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 420Gi
  volumeName: "registry-data-pv"

117
env/registry.yaml vendored Normal file
@@ -0,0 +1,117 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: registry-config-cm
data:
  registry-config.yml: |
    version: 0.1
    log:
      fields:
        service: registry
    storage:
      cache:
        blobdescriptor: inmemory
      filesystem:
        rootdirectory: /var/lib/registry
    http:
      addr: :5000
      headers:
        X-Content-Type-Options: [nosniff]
    auth:
      htpasswd:
        realm: basic-realm
        path: /auth/htpasswd
    health:
      storagedriver:
        enabled: true
        interval: 10s
        threshold: 3
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: registry
  labels:
    app: registry
spec:
  replicas: 1
  selector:
    matchLabels:
      app: registry
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: registry
    spec:
      containers:
      - name: registry
        image: "registry:latest"
        imagePullPolicy: IfNotPresent
        securityContext:
          privileged: false
        ports:
        - name: http
          containerPort: 5000
          protocol: TCP
        volumeMounts:
        - name: registry-data
          mountPath: "/var/lib/registry"
        - name: registry-config
          mountPath: "/etc/docker/registry"
          readOnly: true
        - name: registry-htpasswd
          mountPath: "/auth"
          readOnly: true
      volumes:
      - name: registry-data
        persistentVolumeClaim:
          claimName: registry-data-pvc
      - name: registry-config
        configMap:
          name: registry-config-cm
          items:
          - key: registry-config.yml
            path: config.yml
      - name: registry-htpasswd
        secret:
          secretName: registry-sec
          items:
          - key: HTPASSWD
            path: htpasswd
---
apiVersion: v1
kind: Service
metadata:
  name: registry
spec:
  ports:
  - port: 5000
  selector:
    app: registry
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: registry-ingress
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/proxy-body-size: 2048m
spec:
  rules:
  - host: registry.develop.example.com
    http:
      paths:
      - pathType: Prefix
        path: /
        backend:
          service:
            name: registry
            port:
              number: 5000
  tls:
  - secretName: wildcard-develop-tls
    hosts:
    - registry.develop.example.com
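With the registry and its ingress up, pushing an image from a workstation would look like this (credentials come from config-coreapps; the image name is a placeholder):

docker login registry.develop.example.com -u deployer
docker tag app:latest registry.develop.example.com/app:latest
docker push registry.develop.example.com/app:latest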

49
health.sh Executable file
@@ -0,0 +1,49 @@
#!/bin/bash
HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/config
export KUBECONFIG=$ADMIN_KUBECONFIG
export HELM_CACHE_HOME=~/.cache/helm
export HELM_CONFIG_HOME=~/.config/helm
export HELM_DATA_HOME=~/.local/share/helm
kubectl cluster-info
printf "\n [ etcd cluster health: ]\n"
/usr/local/bin/etcdctl --endpoints="https://${ETCD_1_IP}:2379,https://${ETCD_2_IP}:2379,https://${ETCD_3_IP}:2379" --cert ${CA_DIR}/etcd.crt --key ${CA_DIR}/etcd.key --cacert ${CA_DIR}/etcd-ca.crt endpoint status
printf "\n [ kubernetes components health: ]\n"
/usr/local/bin/kubectl get componentstatuses
printf "\n [ kubernetes nodes: ]\n"
kubectl get nodes -o wide
printf "\n [ helm releases: ]\n"
helm ls --all-namespaces
if [ "$1" = "all" ]; then
printf "\n [ kubernetes deployments: ]\n"
/usr/local/bin/kubectl get deployments --all-namespaces
printf "\n [ kubernetes services: ]\n"
/usr/local/bin/kubectl get services --all-namespaces -o wide
printf "\n [ kubernetes ingresses: ]\n"
/usr/local/bin/kubectl get ingresses.v1.networking.k8s.io --all-namespaces
printf "\n [ kubernates storage claims: ]\n"
/usr/local/bin/kubectl get pvc --all-namespaces
printf "\n [ kubernetes pods: ]\n"
/usr/local/bin/kubectl get pods --all-namespaces -o wide
fi
if [ "$1" = "ceph" ];
then
printf "\n [ ceph status: ]\n"
/usr/local/bin/kubectl -n rook-ceph exec -it rook-ceph-tools -- ceph osd status
/usr/local/bin/kubectl -n rook-ceph exec -it rook-ceph-tools -- ceph df
/usr/local/bin/kubectl -n rook-ceph exec -it rook-ceph-tools -- ceph status
fi
if [ -n "$1" ] && [ "$1" != "all" ] && [ "$1" != "ceph" ]; then
printf "\n [ $1 status: ]\n"
/usr/local/bin/kubectl get $1 --all-namespaces -o wide
fi

157
init.sh Executable file
@@ -0,0 +1,157 @@
#!/bin/bash
HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/config
export KUBECONFIG=$ADMIN_KUBECONFIG
# checks if we have the cryptopack file
if [ -f $HOME/cryptopack.b64 ]; then
echo "] cryptopack.b64 FOUND :)"
else
echo "] cryptopack.b64 is not found."
exit 1
fi
# checks if we have the necessary config files
if [ -f $HOME/config ]; then
echo "] config file FOUND :)"
else
echo "] config file is missing."
exit 1
fi
if [ -f $HOME/config-coreapps ]; then
echo "] config-coreapps file FOUND :)"
else
echo "] config-coreapps file is missing."
exit 1
fi
# checks if we have a system with nftables and reverts it to iptables-legacy, since Docker doesn't deal with nftables yet.
OS_RELEASE=$(lsb_release -cs)
if [ "$OS_RELEASE" = "buster" ]; then
if [ "$(readlink /etc/alternatives/iptables)" = "/usr/sbin/iptables-nft" ]; then
update-alternatives --set iptables /usr/sbin/iptables-legacy
update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
update-alternatives --set arptables /usr/sbin/arptables-legacy
update-alternatives --set ebtables /usr/sbin/ebtables-legacy
systemctl mask nftables.service
echo "] PLEASE REBOOT FOR THIS CHANGES TO TAKE EFFECT."
exit
else
echo "] iptables seems to point to iptables-legacy. :)"
fi
fi
#this one could be done better for sure...
case "${NODE_NAME_SHORT}" in
${MASTER_1_NAME})
NODETYPE='controller'
NODEIP=${MASTER_1_IP}
;;
${MASTER_2_NAME})
NODETYPE='controller'
NODEIP=${MASTER_2_IP}
;;
${MASTER_3_NAME})
NODETYPE='controller'
NODEIP=${MASTER_3_IP}
;;
${NODE_1_NAME})
NODETYPE='worker'
NODEIP=${NODE_1_IP}
;;
${NODE_2_NAME})
NODETYPE='worker'
NODEIP=${NODE_2_IP}
;;
${NODE_3_NAME})
NODETYPE='worker'
NODEIP=${NODE_3_IP}
;;
${NODE_4_NAME})
NODETYPE='worker'
NODEIP=${NODE_4_IP}
;;
${NODE_5_NAME})
NODETYPE='worker'
NODEIP=${NODE_5_IP}
;;
${NODE_6_NAME})
NODETYPE='worker'
NODEIP=${NODE_6_IP}
;;
${NODE_7_NAME})
NODETYPE='worker'
NODEIP=${NODE_7_IP}
;;
${NODE_8_NAME})
NODETYPE='worker'
NODEIP=${NODE_8_IP}
;;
${NODE_9_NAME})
NODETYPE='worker'
NODEIP=${NODE_9_IP}
;;
${NODE_10_NAME})
NODETYPE='worker'
NODEIP=${NODE_10_IP}
;;
${NODE_11_NAME})
NODETYPE='worker'
NODEIP=${NODE_11_IP}
;;
${NODE_12_NAME})
NODETYPE='worker'
NODEIP=${NODE_12_IP}
;;
${NODE_13_NAME})
NODETYPE='worker'
NODEIP=${NODE_13_IP}
;;
${NODE_14_NAME})
NODETYPE='worker'
NODEIP=${NODE_14_IP}
;;
${NODE_15_NAME})
NODETYPE='worker'
NODEIP=${NODE_15_IP}
;;
*)
echo '] Node hostname not defined.'
exit 1
;;
esac
echo "... ] SETTING UP ${NODETYPE} NODE ${NODE_NAME_SHORT} WITH ${NODEIP} [ ..."
echo "] Setting up base ..."
/bin/bash $FUNC_PATH/init_hostname.sh
/bin/bash $FUNC_PATH/init_metal.sh
/bin/bash $FUNC_PATH/install_docker.sh
/bin/bash $FUNC_PATH/install_chrony.sh
/bin/bash ssl/install_kube_crypto.sh
/bin/bash ssl/create_admin.sh
if [ "$NODETYPE" == "controller" ]; then
echo "] Setting up master ..."
/bin/bash ssl/create_master.sh
/bin/bash $FUNC_PATH/install_etcd.sh
/bin/bash $FUNC_PATH/install_kube_apiserver.sh
/bin/bash $FUNC_PATH/install_kube_scheduler.sh
/bin/bash $FUNC_PATH/install_kube_controller_manager.sh
#/bin/bash $FUNC_PATH/install_kube_keepalived.sh
/bin/bash $FUNC_PATH/install_k8x_api.sh
fi
echo "] Setting up kube-api localhost loadbalacing ..."
/bin/bash $FUNC_PATH/install_kube_haproxy.sh
echo "] Setting up worker ..."
/bin/bash ssl/create_worker.sh
/bin/bash $FUNC_PATH/install_kubelet.sh
/bin/bash $FUNC_PATH/install_kube_proxy.sh
#label node
kubectl label nodes ${NODE_NAME_SHORT} nodetype=${NODETYPE} --overwrite
echo "... ] DONE :) [ ..."

29
ssl/create_admin.sh Executable file
@@ -0,0 +1,29 @@
#!/bin/bash
HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/../config
cat <<EOF | tee $CA_DIR/admin-openssl.cnf
[req]
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req_client ]
basicConstraints = CA:FALSE
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth
EOF
#generate admin cert
openssl ecparam -name secp521r1 -genkey -noout -out ${CA_DIR}/admin.key
chmod 0600 ${CA_DIR}/admin.key
openssl req -new -key ${CA_DIR}/admin.key -subj "/CN=kubernetes-admin/O=system:masters" -out ${CA_DIR}/admin.csr
openssl x509 -req -in ${CA_DIR}/admin.csr -CA ${CA_DIR}/ca.crt -CAkey ${CA_DIR}/ca.key -CAcreateserial -out ${CA_DIR}/admin.crt -days 20000 -extensions v3_req_client -extfile ${CA_DIR}/admin-openssl.cnf
#generate admin kubeconfig
TOKEN=`cat ${CA_DIR}/admin.token`
kubectl config set-cluster ${CLUSTER_NAME}.virtual.local --certificate-authority=${CA_DIR}/ca.crt --embed-certs=true --server=https://localhost:16443 --kubeconfig=$ADMIN_KUBECONFIG
kubectl config set-credentials admin --client-certificate=${CA_DIR}/admin.crt --client-key=${CA_DIR}/admin.key --embed-certs=true --token=$TOKEN --kubeconfig=$ADMIN_KUBECONFIG
kubectl config set-context ${CLUSTER_NAME}.virtual.local --cluster=${CLUSTER_NAME}.virtual.local --user=admin --kubeconfig=$ADMIN_KUBECONFIG
kubectl config use-context ${CLUSTER_NAME}.virtual.local --kubeconfig=$ADMIN_KUBECONFIG
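The resulting admin kubeconfig can be used directly; for example (path per ADMIN_KUBECONFIG in config.dist):

export KUBECONFIG=/etc/kubernetes/kubeconfig
kubectl get nodes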

85
ssl/create_master.sh Executable file
@@ -0,0 +1,85 @@
#!/bin/bash
HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/../config
cat <<EOF | tee $CA_DIR/master-openssl.cnf
[req]
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req_client ]
basicConstraints = CA:FALSE
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth
[ v3_req_server ]
basicConstraints = CA:FALSE
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
[ v3_req_apiserver ]
basicConstraints = CA:FALSE
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names_cluster
[ alt_names_cluster ]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.cluster.local
DNS.5 = ${CLUSTER_NAME}.virtual.local
DNS.6 = ${CLUSTER_NAME}-api.virtual.local
DNS.7 = ${MASTER_1_NAME}.virtual.local
DNS.8 = ${MASTER_2_NAME}.virtual.local