import from rtg
commit de790177af
98 changed files with 34963 additions and 0 deletions
.gitignore (vendored, new file, 15 lines)
@@ -0,0 +1,15 @@
tmp/
.rnd
.kube/
.wget-hsts
__pycache__

#helm
.cache/
.config/

#k8x
cryptopack.*
config
config-coreapps
config-api.ini
README.md (new file, 58 lines)
@@ -0,0 +1,58 @@
# k8x - deploys k8s with 3 masters

tested OS:
- debian 10

# Configuration

config.dist - copy to config and set up the cluster-deployment-specific settings.

config-coreapps.dist - copy to config-coreapps and set up the core-application-specific settings.

# Installation of cluster nodes

./build_crypto.sh - run once to generate a CA and pack it as a cryptopack.b64 file, which should be shared on all nodes.

./init.sh - installs k8s master or worker capabilities on each kubernetes node. Requires the initial hostname to be set to one of those listed in the config file. Run simultaneously on all masters to set up etcd, and then once on each worker node.

# Installation of core applications

Note: Run the scripts from a node which has a config-coreapps config file.

./deploy_system.sh - installs the calico sdn, coredns and the metrics server into the kube-system namespace, and sets up helm and kubelet rbac.

./deploy_cert_manager.sh - installs the letsencrypt acme ssl certificate manager into the cert-manager namespace.

./deploy_ingress_nginx.sh - installs the ingress-nginx ingress controller into the ingress-nginx namespace.

./attach_storage_ceph.sh - creates the ceph storage keys in the kube-system namespace and the rados-block storage class, which pvcs can attach to for dynamic provisioning.

./deploy_monitoring.sh - installs prometheus, loki, grafana and zabbix.

# Installation of additional environments

./deploy_env.sh staging - installs the staging namespace with wildcard ssl.

./deploy_env.sh develop - installs the develop namespace with wildcard ssl and a common docker registry.

./deploy_env.sh live - installs the production namespace.

# Tools

./health.sh - displays the cluster health.

./health.sh all - displays the cluster health together with the health of the cluster objects.

k9s - starts the kubernetes ncurses admin panel.

watch -n 5 "k get pods --sort-by=.status.startTime --all-namespaces -o wide | tac" - watches for cluster pod state changes.

source config ; export KUBECONFIG=$ADMIN_KUBECONFIG - exports the cluster admin kubeconfig so that the kubectl (or k) command works cluster-wide.

# Additional Tasks

1. Set up Grafana to access kube-apiserver in order to fetch pod metrics.
Plugin: https://grafana.com/plugins/grafana-kubernetes-app
Data Source: https://kubernetes.default.svc.cluster.local
Auth: With Credentials, With CA Cert, TLS Client Auth
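A typical first deployment, assuming both config files are filled in and every node hostname already matches the config, might run the scripts in this order (a sketch of the sequence, not a script from this repo):

./build_crypto.sh                # once, then distribute cryptopack.b64 to every node
./init.sh                        # simultaneously on all three masters, then once per worker
./deploy_system.sh
./deploy_cert_manager.sh
./deploy_ingress_nginx.sh
./attach_storage_ceph.sh         # only when external ceph storage is configured
./deploy_monitoring.sh
./deploy_env.sh staging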
attach_private_registry.sh (executable, new file, 55 lines)
@@ -0,0 +1,55 @@
#!/bin/bash

echo ""
echo "... ] ATTACHING PRIVATE DOCKER REGISTRY [ ..."
echo ""

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/config

export KUBECONFIG=$ADMIN_KUBECONFIG

#TODO: Installing the default private registry (the one we use to host and reuse it as default on kubelet worker installation)
# could also be provided with this script as a secret and use it with ImagePullSecret.

if [ -z "$1" ]; then
    echo "] Usage: ./attach_private_registry.sh <secret-name>"
    exit 2
fi
REG_NAME="$1"

echo -n "] Target secret namespace: "
read NSPACE
if [ -z "$NSPACE" ]; then
    echo "] No namespace"
    exit 1
fi

echo -n "] Enter Docker registry user: "
read REGISTRY_USER

echo -n "] Enter Docker registry password (token): "
read REGISTRY_PASS

echo -n "] Enter Docker registry email: "
read REGISTRY_EMAIL

echo -n "] Enter Docker registry url (empty for docker hub): "
read REGISTRY_URL
if [ -z "$REGISTRY_URL" ]; then
    CONFIG_URL="--docker-server=https://index.docker.io/v2/"
else
    CONFIG_URL="--docker-server=https://${REGISTRY_URL}/v2/"
fi

SECRET_NAME="registry-${NSPACE}-${REG_NAME}"
CONFIG_SECRET="${SECRET_NAME} ${CONFIG_URL} --docker-username=${REGISTRY_USER} --docker-password=${REGISTRY_PASS} --docker-email=${REGISTRY_EMAIL}"
CMD="/usr/local/bin/k -n ${NSPACE} create secret docker-registry ${CONFIG_SECRET}"

echo ""
echo "Executing command: ${CMD}"
echo -n "Is that okay [y/n]? "
read answer
if [ "$answer" != "${answer#[Yy]}" ]; then
    ${CMD}
fi
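The secret created above (named registry-<namespace>-<secret-name>) can then be referenced from a workload so kubelet can pull from the private registry. A minimal sketch, with a hypothetical pod name and secret name:

cat <<EOF | kubectl -n develop apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: myapp
spec:
  imagePullSecrets:
    - name: registry-develop-hub
  containers:
    - name: myapp
      image: registry.example.tld/myapp:latest
EOF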
attach_storage_ceph.sh (executable, new file, 155 lines)
@@ -0,0 +1,155 @@
#!/bin/bash

echo ""
echo "... ] ATTACHING EXTERNAL CEPH AS CLUSTER STORAGE [ ..."
echo ""

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/config

if [ -f $HOME/config-coreapps ]; then
    echo "config-coreapps file FOUND :)"
    source $HOME/config-coreapps
else
    echo "config-coreapps file is missing."
    exit 1
fi

export KUBECONFIG=$ADMIN_KUBECONFIG

kcmd='create'
if [ ! -z $1 ]; then
    if [ $1 = 'rollback' ]; then
        kcmd='delete'
    fi
fi

if [ -z ${CEPH_ADMIN_KEY} ]; then
    echo "Please provide the ceph admin key using the command: "
    echo "sudo ceph --cluster ceph auth get-key client.admin"
    exit 1
fi

if [ -z ${CEPH_USER_KEY} ]; then
    echo "Please provide the ceph user key using the commands: "
    echo "Use https://ceph.com/pgcalc/ to calculate the placement groups number"
    echo "sudo ceph --cluster ceph osd pool create kube 1024 1024"
    echo "sudo ceph --cluster ceph auth get-or-create client.kube mon 'allow r' osd 'allow rwx pool=kube'"
    echo "sudo ceph --cluster ceph auth get-key client.kube"
    exit 1
fi

if [ $kcmd = 'create' ]; then
    kubectl $kcmd -n kube-system secret generic ceph-secret --type="kubernetes.io/rbd" --from-literal=key=${CEPH_ADMIN_KEY}
    kubectl $kcmd -n kube-system secret generic ceph-secret-kube --type="kubernetes.io/rbd" --from-literal=key=${CEPH_USER_KEY}
else
    kubectl $kcmd -n kube-system secret ceph-secret
    kubectl $kcmd -n kube-system secret ceph-secret-kube
fi

cat <<EOF | kubectl $kcmd -n kube-system -f -
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns","coredns"]
    verbs: ["list", "get"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-provisioner
---
EOF

#if [ $kcmd = 'create' ]; then
#    #wait for the pod to start
#    echo -n "] Waiting for the control pod to start..."
#    while true; do
#        #currentstatus=$($kubectl get pod $k8x_name -o jsonpath="{.status.phase}")
#        currentstatus=$(kubectl -n kube-system get pods -l app=rbd-provisioner -o jsonpath="{.items[].status.phase}")
#        if [ "$currentstatus" = "Running" ]; then
#            echo -n ". done!"
#            echo ""
#            break
#        fi
#        sleep 1
#        echo -n "."
#    done
#fi

cat <<EOF | kubectl $kcmd -f -
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: rados-block
provisioner: kubernetes.io/rbd
parameters:
  monitors: ${CEPH_MONITOR_1}:6789, ${CEPH_MONITOR_2}:6789, ${CEPH_MONITOR_3}:6789
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: kube-system
  pool: kube
  userId: kube
  userSecretName: ceph-secret-kube
  userSecretNamespace: kube-system
  fsType: ext4
  imageFormat: "2"
  imageFeatures: layering
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true
EOF
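With the rados-block class in place, a namespace can request a dynamically provisioned rbd volume through an ordinary claim. A minimal sketch (the claim name and size are hypothetical):

cat <<EOF | kubectl -n kube-system apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-rbd-pvc
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: rados-block
  resources:
    requests:
      storage: 1Gi
EOF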
blobs/apiextensions-apiserver.gz (binary, executable, not shown)
blobs/calicoctl.gz (binary, executable, not shown)
blobs/etcd.gz (binary, executable, not shown)
blobs/etcdctl.gz (binary, executable, not shown)
blobs/helm.gz (binary, executable, not shown)
blobs/k9s.gz (binary, executable, not shown)
blobs/kube-aggregator.gz (binary, executable, not shown)
blobs/kube-apiserver.gz (binary, executable, not shown)
blobs/kube-controller-manager.gz (binary, executable, not shown)
blobs/kube-proxy.gz (binary, executable, not shown)
blobs/kube-scheduler.gz (binary, executable, not shown)
blobs/kubeadm.gz (binary, executable, not shown)
blobs/kubectl.gz (binary, executable, not shown)
blobs/kubelet.gz (binary, executable, not shown)
blobs/mounter.gz (binary, executable, not shown)
build_crypto.sh (executable, new file, 195 lines)
@@ -0,0 +1,195 @@
#!/bin/bash

echo "... ] BUILDING THE CRYPTOPACK.B64 FILE [ ..."

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/config

apt update -q
apt install -y sharutils openssl

SSL_REPO=/tmp/k8x-cryptogen
mkdir -p ${SSL_REPO}
mkdir -p ${CONF_DIR}/{kube-controller-manager,kubelet,kube-proxy,kube-scheduler}
mkdir -p /var/lib/{kube-controller-manager,kubelet,kube-proxy,kube-scheduler}

#checks if we have the cryptopack file
if [ -f $HOME/cryptopack.b64 ]; then
    echo "] cryptopack.b64 already generated. rebuilding..."
    TSTAMP=`date +%s`
    mv -v ${HOME}/cryptopack.b64 ${HOME}/cryptopack.b64.${TSTAMP}
fi

if [ -f ${CA_DIR}/ca-openssl.cnf ]; then
    cp -v ${CA_DIR}/ca-openssl.cnf ${SSL_REPO}/ca-openssl.cnf
else
    cat <<EOF | tee ${SSL_REPO}/ca-openssl.cnf
[req]
distinguished_name = req_distinguished_name

[req_distinguished_name]

[ v3_ca ]
basicConstraints = critical, CA:TRUE
keyUsage = critical, digitalSignature, keyEncipherment, keyCertSign

[ v3_req_helm ]
basicConstraints = CA:FALSE
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth, clientAuth

[ v3_req_etcd ]
basicConstraints = CA:FALSE
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth, clientAuth
subjectAltName = @alt_names_etcd

[ alt_names_etcd ]
DNS.1 = ${MASTER_1_NAME}
DNS.2 = ${MASTER_2_NAME}
DNS.3 = ${MASTER_3_NAME}
DNS.4 = ${CLUSTER_NAME}.${CLUSTER_DOMAIN}
DNS.5 = ${MASTER_1_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN}
DNS.6 = ${MASTER_2_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN}
DNS.7 = ${MASTER_3_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN}
IP.1 = ${ETCD_1_IP}
IP.2 = ${ETCD_2_IP}
IP.3 = ${ETCD_3_IP}
EOF
fi

#generate tokens
for object in admin kubelet kube-proxy kube-controller-manager kube-scheduler
do
    if [ -f ${CA_DIR}/${object}.token ]; then
        cp -v ${CA_DIR}/${object}.token ${SSL_REPO}/${object}.token
    else
        dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 -w 0 | tr -d "=+/" | dd bs=256 count=1 2>/dev/null > ${SSL_REPO}/${object}.token
    fi
done

printf "\n] generating certificate authorities..."
#generate kube certificate authority
if [ -f ${CA_DIR}/ca.key ] && [ -f ${CA_DIR}/ca.crt ]; then
    cp -v ${CA_DIR}/ca.key ${SSL_REPO}/ca.key
    cp -v ${CA_DIR}/ca.crt ${SSL_REPO}/ca.crt
else
    openssl ecparam -name secp521r1 -genkey -noout -out ${SSL_REPO}/ca.key
    chmod 0600 ${SSL_REPO}/ca.key
    openssl req -x509 -new -nodes -key ${SSL_REPO}/ca.key -days 20000 -out ${SSL_REPO}/ca.crt -subj "/CN=kubernetes-ca" -extensions v3_ca -config ${SSL_REPO}/ca-openssl.cnf
fi

#generate helm certificate authority
if [ -f ${CA_DIR}/helm-ca.key ] && [ -f ${CA_DIR}/helm-ca.crt ]; then
    cp -v ${CA_DIR}/helm-ca.key ${SSL_REPO}/helm-ca.key
    cp -v ${CA_DIR}/helm-ca.crt ${SSL_REPO}/helm-ca.crt
else
    openssl ecparam -name secp521r1 -genkey -noout -out ${SSL_REPO}/helm-ca.key
    chmod 0600 ${SSL_REPO}/helm-ca.key
    openssl req -x509 -new -nodes -key ${SSL_REPO}/helm-ca.key -days 20000 -out ${SSL_REPO}/helm-ca.crt -subj "/CN=helm-ca" -extensions v3_ca -config ${SSL_REPO}/ca-openssl.cnf
fi

#generate etcd certificate authority
if [ -f ${CA_DIR}/etcd-ca.key ] && [ -f ${CA_DIR}/etcd-ca.crt ]; then
    cp -v ${CA_DIR}/etcd-ca.key ${SSL_REPO}/etcd-ca.key
    cp -v ${CA_DIR}/etcd-ca.crt ${SSL_REPO}/etcd-ca.crt
else
    openssl ecparam -name secp521r1 -genkey -noout -out ${SSL_REPO}/etcd-ca.key
    chmod 0600 ${SSL_REPO}/etcd-ca.key
    openssl req -x509 -new -nodes -key ${SSL_REPO}/etcd-ca.key -days 20000 -out ${SSL_REPO}/etcd-ca.crt -subj "/CN=etcd-ca" -extensions v3_ca -config ${SSL_REPO}/ca-openssl.cnf
fi

#generate aggregator certificate authority
if [ -f ${CA_DIR}/aggregator-ca.key ] && [ -f ${CA_DIR}/aggregator-ca.crt ]; then
    cp -v ${CA_DIR}/aggregator-ca.key ${SSL_REPO}/aggregator-ca.key
    cp -v ${CA_DIR}/aggregator-ca.crt ${SSL_REPO}/aggregator-ca.crt
else
    openssl ecparam -name secp521r1 -genkey -noout -out ${SSL_REPO}/aggregator-ca.key
    chmod 0600 ${SSL_REPO}/aggregator-ca.key
    openssl req -x509 -new -nodes -key ${SSL_REPO}/aggregator-ca.key -days 20000 -out ${SSL_REPO}/aggregator-ca.crt -subj "/CN=aggregator-ca" -extensions v3_ca -config ${SSL_REPO}/ca-openssl.cnf
fi

printf "\n] generating certificates..."
#create etcd certificate
if [ -f ${CA_DIR}/etcd.key ] && [ -f ${CA_DIR}/etcd.crt ]; then
    cp -v ${CA_DIR}/etcd.key ${SSL_REPO}/etcd.key
    cp -v ${CA_DIR}/etcd.crt ${SSL_REPO}/etcd.crt
    cp -v ${CA_DIR}/etcd.csr ${SSL_REPO}/etcd.csr
else
    openssl ecparam -name secp521r1 -genkey -noout -out ${SSL_REPO}/etcd.key
    chmod 0600 ${SSL_REPO}/etcd.key
    openssl req -new -key ${SSL_REPO}/etcd.key -subj "/CN=etcd" -out ${SSL_REPO}/etcd.csr
    openssl x509 -req -in ${SSL_REPO}/etcd.csr -CA ${SSL_REPO}/etcd-ca.crt -CAkey ${SSL_REPO}/etcd-ca.key -CAcreateserial -out ${SSL_REPO}/etcd.crt -days 20000 -extensions v3_req_etcd -extfile ${SSL_REPO}/ca-openssl.cnf
fi

#create etcd peer certificate
if [ -f ${CA_DIR}/etcd-peer.key ] && [ -f ${CA_DIR}/etcd-peer.crt ]; then
    cp -v ${CA_DIR}/etcd-peer.key ${SSL_REPO}/etcd-peer.key
    cp -v ${CA_DIR}/etcd-peer.crt ${SSL_REPO}/etcd-peer.crt
    cp -v ${CA_DIR}/etcd-peer.csr ${SSL_REPO}/etcd-peer.csr
else
    openssl ecparam -name secp521r1 -genkey -noout -out ${SSL_REPO}/etcd-peer.key
    chmod 0600 ${SSL_REPO}/etcd-peer.key
    openssl req -new -key ${SSL_REPO}/etcd-peer.key -subj "/CN=etcd-peer" -out ${SSL_REPO}/etcd-peer.csr
    openssl x509 -req -in ${SSL_REPO}/etcd-peer.csr -CA ${SSL_REPO}/etcd-ca.crt -CAkey ${SSL_REPO}/etcd-ca.key -CAcreateserial -out ${SSL_REPO}/etcd-peer.crt -days 20000 -extensions v3_req_etcd -extfile ${SSL_REPO}/ca-openssl.cnf
fi

#create helm server (tiller) certificate
if [ -f ${CA_DIR}/tiller.key ] && [ -f ${CA_DIR}/tiller.crt ]; then
    cp -v ${CA_DIR}/tiller.key ${SSL_REPO}/tiller.key
    cp -v ${CA_DIR}/tiller.crt ${SSL_REPO}/tiller.crt
    cp -v ${CA_DIR}/tiller.csr ${SSL_REPO}/tiller.csr
else
    openssl ecparam -name secp521r1 -genkey -noout -out ${SSL_REPO}/tiller.key
    chmod 0600 ${SSL_REPO}/tiller.key
    openssl req -new -key ${SSL_REPO}/tiller.key -subj "/CN=tiller" -out ${SSL_REPO}/tiller.csr
    openssl x509 -req -in ${SSL_REPO}/tiller.csr -CA ${SSL_REPO}/helm-ca.crt -CAkey ${SSL_REPO}/helm-ca.key -CAcreateserial -out ${SSL_REPO}/tiller.crt -days 20000 -extensions v3_req_helm -extfile ${SSL_REPO}/ca-openssl.cnf
fi

#create helm client certificate
if [ -f ${CA_DIR}/helm.key ] && [ -f ${CA_DIR}/helm.crt ]; then
    cp -v ${CA_DIR}/helm.key ${SSL_REPO}/helm.key
    cp -v ${CA_DIR}/helm.crt ${SSL_REPO}/helm.crt
    cp -v ${CA_DIR}/helm.csr ${SSL_REPO}/helm.csr
else
    openssl ecparam -name secp521r1 -genkey -noout -out ${SSL_REPO}/helm.key
    chmod 0600 ${SSL_REPO}/helm.key
    openssl req -new -key ${SSL_REPO}/helm.key -subj "/CN=helm" -out ${SSL_REPO}/helm.csr
    openssl x509 -req -in ${SSL_REPO}/helm.csr -CA ${SSL_REPO}/helm-ca.crt -CAkey ${SSL_REPO}/helm-ca.key -CAcreateserial -out ${SSL_REPO}/helm.crt -days 20000 -extensions v3_req_helm -extfile ${SSL_REPO}/ca-openssl.cnf
fi

#create aggregator proxy certificate
if [ -f ${CA_DIR}/aggregator.key ] && [ -f ${CA_DIR}/aggregator.crt ]; then
    cp -v ${CA_DIR}/aggregator.key ${SSL_REPO}/aggregator.key
    cp -v ${CA_DIR}/aggregator.crt ${SSL_REPO}/aggregator.crt
    cp -v ${CA_DIR}/aggregator.csr ${SSL_REPO}/aggregator.csr
else
    openssl ecparam -name secp521r1 -genkey -noout -out ${SSL_REPO}/aggregator.key
    chmod 0600 ${SSL_REPO}/aggregator.key
    openssl req -new -key ${SSL_REPO}/aggregator.key -subj "/CN=aggregator" -out ${SSL_REPO}/aggregator.csr
    openssl x509 -req -in ${SSL_REPO}/aggregator.csr -CA ${SSL_REPO}/aggregator-ca.crt -CAkey ${SSL_REPO}/aggregator-ca.key -CAcreateserial -out ${SSL_REPO}/aggregator.crt -days 20000 -extensions v3_req_helm -extfile ${SSL_REPO}/ca-openssl.cnf
fi

printf "\n] generating root service account keypair..."
#generate root ServiceAccount public and private key
if [ -f ${CA_DIR}/sa.key ] && [ -f ${CA_DIR}/sa.pub ]; then
    cp -v ${CA_DIR}/sa.key ${SSL_REPO}/sa.key
    cp -v ${CA_DIR}/sa.pub ${SSL_REPO}/sa.pub
else
    openssl ecparam -name secp521r1 -genkey -noout -out ${SSL_REPO}/sa.key
    openssl ec -in ${SSL_REPO}/sa.key -outform PEM -pubout -out ${SSL_REPO}/sa.pub
    chmod 0600 ${SSL_REPO}/sa.key
fi

printf "\n] packing the crypto files..."
tar cvf $HOME/cryptopack.tar ${SSL_REPO}/*
gzip -9 $HOME/cryptopack.tar
cat $HOME/cryptopack.tar.gz | base64 -w 0 > $HOME/cryptopack.b64
rm $HOME/cryptopack.tar.gz
rm -fr ${SSL_REPO}
clear
echo "exec the following command on the rest of the nodes to distribute the keys"
echo ;
packdata=`cat ${HOME}/cryptopack.b64`
echo "echo \"${packdata}\" > cryptopack.b64"
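cryptopack.b64 is just a base64-encoded gzipped tar, so on a receiving node unpacking it (which the ssl/ install scripts presumably automate) comes down to something like:

base64 -d cryptopack.b64 > cryptopack.tar.gz
tar xzvf cryptopack.tar.gz   # extracts tmp/k8x-cryptogen/* relative to the current directory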
config-api.ini.dist (new file, 9 lines)
@@ -0,0 +1,9 @@
[k8x-cluster]
admin_kubeconfig = /etc/kubernetes/kubeconfig
cluster_api_url = https://10.15.0.2:16443
cluster_name = kube.virtual.local
le_cert_domain = example.com

[k8x-api]
authtoken = hackme
DEBUG = off
config-coreapps.dist (new file, 23 lines)
@@ -0,0 +1,23 @@
#TIME
NTP_SERVER=10.15.8.80

#STORAGE
CEPH_ADMIN_KEY=""
CEPH_USER_KEY=""
CEPH_MONITOR_1="10.15.8.91"
CEPH_MONITOR_2="10.15.8.92"
CEPH_MONITOR_3="10.15.8.93"

#REGISTRY
REGISTRY_SERVER=registry.example.tld
REGISTRY_USER=deployer
REGISTRY_PASS=pass123

#MONITORING
ZABBIX_SERVER="10.15.0.2"
ZABBIX_PSK=asdqwe123
ZABBIX_PSK_ID=PSK
GRAFANA_SMTP_HOST=email-smtp.eu-west-1.amazonaws.com
GRAFANA_SMTP_USER=user
GRAFANA_SMTP_PASSWORD="asdqwe123"
GRAFANA_SMTP_FROM_ADDRESS="no-reply@example.com"
config.dist (new file, 75 lines)
@@ -0,0 +1,75 @@
#K8X CONFIG
CLUSTER_NAME=kube
CLUSTER_DOMAIN=staging.example.com
MASTERS_DOMAIN=virtualkube.example.com
ADMIN_EMAIL=admin@example.com
CONF_DIR=/etc/kubernetes
CLOUDFLARE_API_KEY=000
HAPROXY_VRRP_AUTH=Pass
HAPROXY_STATS_AUTH=admin:Pass
CERT_MODE=true

#NETWORK
CNI_NET=172.16.0.0/16
SERVICE_NET=172.18.0.0/16
SERVICE_FIP=172.18.0.1
NODE_INTERFACE=tun0
ETCD_INTERFACE=tun0

MASTER_LB_IP=10.15.0.2
MASTER_LB_MASK=16

MASTER_1_NAME=master01
MASTER_1_IP=10.15.8.11
MASTER_2_NAME=master02
MASTER_2_IP=10.15.8.12
MASTER_3_NAME=master03
MASTER_3_IP=10.15.8.13

NODE_1_NAME=worker01
NODE_1_IP=10.15.8.21
NODE_2_NAME=worker02
NODE_2_IP=10.15.8.22
NODE_3_NAME=worker03
NODE_3_IP=10.15.8.23
NODE_4_NAME=worker04
NODE_4_IP=10.15.8.24
NODE_5_NAME=worker05
NODE_5_IP=10.15.8.25
NODE_6_NAME=worker06
NODE_6_IP=10.15.8.26
NODE_7_NAME=worker07
NODE_7_IP=10.15.8.27
NODE_8_NAME=worker08
NODE_8_IP=10.15.8.28
NODE_9_NAME=worker09
NODE_9_IP=10.15.8.29
NODE_10_NAME=worker10
NODE_10_IP=10.15.8.30
NODE_11_NAME=worker11
NODE_11_IP=10.15.8.31
NODE_12_NAME=worker12
NODE_12_IP=10.15.8.32
NODE_13_NAME=worker13
NODE_13_IP=10.15.8.33
NODE_14_NAME=worker14
NODE_14_IP=10.15.8.34
NODE_15_NAME=worker15
NODE_15_IP=10.15.8.35

ETCD_1_NAME=${MASTER_1_NAME}
ETCD_1_IP=10.15.8.11
ETCD_2_NAME=${MASTER_2_NAME}
ETCD_2_IP=10.15.8.12
ETCD_3_NAME=${MASTER_3_NAME}
ETCD_3_IP=10.15.8.13

ADMIN_KUBECONFIG=${CONF_DIR}/kubeconfig
CA_DIR=${CONF_DIR}/pki
FUNC_PATH="systemd"

NODE_IP=$(ip addr show dev ${NODE_INTERFACE} | grep 'inet ' | awk '{print $2}' | cut -d '/' -f 1 | grep -v ${MASTER_LB_IP})
NODE_NAME=$(hostname)
NODE_NAME_SHORT=$(hostname -s)

ETCD_IP=$(ip addr show dev ${ETCD_INTERFACE} | grep 'inet ' | awk '{print $2}' | cut -d '/' -f 1)
deploy_cert_manager.sh (executable, new file, 43 lines)
@@ -0,0 +1,43 @@
#!/bin/bash

echo ""
echo "... ] DEPLOYING CERT-MANAGER [ ..."

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/config

if [ -f $HOME/config-coreapps ]; then
    echo "config-coreapps file FOUND :)"
    source $HOME/config-coreapps
else
    echo "config-coreapps file is missing."
    exit 1
fi

export KUBECONFIG=$ADMIN_KUBECONFIG
export HELM_CACHE_HOME=~/.cache/helm
export HELM_CONFIG_HOME=~/.config/helm
export HELM_DATA_HOME=~/.local/share/helm

# Setup ACME
if $CERT_MODE; then
    printf "\ndeploying cert-manager helm chart...\n"
    helm repo add jetstack https://charts.jetstack.io
    helm repo update
    kubectl create namespace cert-manager
    kubectl -n cert-manager create secret generic cf-api-secret --from-literal=cf-api-key=${CLOUDFLARE_API_KEY}
    kubectl apply -f yaml/cert-manager/cert-manager.crds.yaml
    helm install \
        cert-manager jetstack/cert-manager \
        --namespace cert-manager \
        --version v1.1.0 \
        -f yaml/cert-manager/values.yaml

    printf "\nwaiting for cert-manager to finish installation...\n"
    sleep 30
    cat yaml/cert-manager/letsencrypt-staging-clusterissuer.yaml | sed "s/k8x_acme_email/${ADMIN_EMAIL}/" | kubectl -n kube-system apply -f -
    cat yaml/cert-manager/letsencrypt-staging-dns-clusterissuer.yaml | sed "s/k8x_acme_email/${ADMIN_EMAIL}/" | kubectl -n kube-system apply -f -
    cat yaml/cert-manager/letsencrypt-production-clusterissuer.yaml | sed "s/k8x_acme_email/${ADMIN_EMAIL}/" | kubectl -n kube-system apply -f -
    cat yaml/cert-manager/letsencrypt-production-dns-clusterissuer.yaml | sed "s/k8x_acme_email/${ADMIN_EMAIL}/" | kubectl -n kube-system apply -f -
fi
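Judging by the manifest file names, the resulting ClusterIssuers are letsencrypt-staging, letsencrypt-staging-dns, letsencrypt-production and letsencrypt-production-dns. An ingress can then request a certificate through the standard cert-manager annotation; a sketch with a hypothetical service and host:

cat <<EOF | kubectl -n staging apply -f -
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: myapp-ingress
  annotations:
    kubernetes.io/ingress.class: nginx
    cert-manager.io/cluster-issuer: letsencrypt-production
spec:
  rules:
    - host: myapp.staging.example.com
      http:
        paths:
          - pathType: Prefix
            path: /
            backend:
              service:
                name: myapp
                port:
                  number: 80
  tls:
    - secretName: myapp-tls
      hosts:
        - myapp.staging.example.com
EOF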
deploy_env.sh (executable, new file, 72 lines)
@@ -0,0 +1,72 @@
#!/bin/bash

echo ""
echo "... ] PREPARING ENVS [ ..."

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/config

if [ -f $HOME/config-coreapps ]; then
    echo "config-coreapps file FOUND :)"
    source $HOME/config-coreapps
    export CEPH_MONITOR_1
    export CEPH_MONITOR_2
    export CEPH_MONITOR_3
else
    echo "config-coreapps file is missing."
    exit 1
fi

export KUBECONFIG=$ADMIN_KUBECONFIG

nspace=$1
export nspace
if [ -z "$nspace" ]; then
    echo "] Usage: ./deploy_env.sh <staging|develop|live>"
    exit 1
fi

# Setup namespace, Service Accounts, RBAC, Limit and namespace keypair
printf "\nsetting up ${nspace}... \n"
cat yaml/system/namespace.yaml | envsubst | kubectl apply -f -
printf "\nsetting up Service Accounts... \n"
/bin/bash tools/add_service_account.sh admin-sa ${nspace}
/bin/bash tools/add_service_account.sh backup-agent-sa ${nspace}
sleep 5
printf "\nsetting up RBAC... \n"
cat yaml/system/sa-rbac.yaml | envsubst | kubectl apply -f -
cat yaml/system/sa-rbac-backup-agent.yaml | envsubst | kubectl apply -f -
sleep 5
printf "\nsetting up resource limits... \n"
kubectl -n $nspace apply -f yaml/system/default-resource-limits.yaml
printf "\nsetting up shared keypair secret... \n"
openssl ecparam -genkey -name prime256v1 -noout -out /tmp/${nspace}_id_ecdsa
openssl ec -in /tmp/${nspace}_id_ecdsa -pubout -out /tmp/${nspace}_id_ecdsa.pub
kubectl -n $nspace create secret generic auth-keypair --from-file=id_ecdsa=/tmp/${nspace}_id_ecdsa --from-file=id_ecdsa.pub=/tmp/${nspace}_id_ecdsa.pub
rm /tmp/${nspace}_id_ecdsa
rm /tmp/${nspace}_id_ecdsa.pub

if [ "$nspace" = "develop" ]; then
    # Setup the private docker registry
    printf "\nsetting up Docker Registry... \n"
    #create the htpasswd secret for the registry
    if [ -f /tmp/regsecret ]; then
        rm /tmp/regsecret
    fi
    htpasswd -Bb -c /tmp/regsecret $REGISTRY_USER $REGISTRY_PASS
    regpassstr=`cat /tmp/regsecret | base64 -w 0`
    cat <<EOF | kubectl -n develop apply -f -
apiVersion: v1
kind: Secret
metadata:
  name: registry-sec
type: Opaque
data:
  HTPASSWD: ${regpassstr}
EOF
    kubectl -n ${nspace} apply -f env/registry-pv.yaml
    kubectl -n ${nspace} apply -f env/registry.yaml
fi

if [ "$nspace" = "live" ]; then
    exit 0
fi

printf "\ncreate a wildcard certificate secret with letsencrypt for the defined namespace...\n"
cat env/nspace-wild-cert.yaml | envsubst | kubectl -n ${nspace} apply -f -
deploy_ingress_nginx.sh (executable, new file, 42 lines)
@@ -0,0 +1,42 @@
#!/bin/bash

echo ""
echo "... ] DEPLOYING INGRESS-NGINX [ ..."

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/config

if [ -f $HOME/config-coreapps ]; then
    echo "config-coreapps file FOUND :)"
    source $HOME/config-coreapps
else
    echo "config-coreapps file is missing."
    exit 1
fi

export KUBECONFIG=$ADMIN_KUBECONFIG
export HELM_CACHE_HOME=~/.cache/helm
export HELM_CONFIG_HOME=~/.config/helm
export HELM_DATA_HOME=~/.local/share/helm

# Setup Ingress-Nginx
kubectl create namespace ingress-nginx
if $CERT_MODE; then
    printf "\ndeploying ingress-nginx helm chart WITH TLS termination in TCP/PROXY mode...\n"
    k8x_proxy_mode="true"
else
    printf "\ndeploying ingress-nginx helm chart WITHOUT TLS termination in HTTP mode...\n"
    k8x_proxy_mode="false"
fi
k8x_maxmind_lic="4rD1ICHnexjd6KaY"

helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
helm install \
    ingress-nginx ingress-nginx/ingress-nginx \
    --namespace ingress-nginx \
    --set-string controller.config.use-proxy-protocol="${k8x_proxy_mode}" \
    --set-string controller.maxmindLicenseKey="${k8x_maxmind_lic}" \
    --version 3.15.2 \
    -f yaml/ingress-nginx/values.yaml
deploy_monitoring.sh (executable, new file, 73 lines)
@@ -0,0 +1,73 @@
#!/bin/bash

echo ""
echo "... ] DEPLOYING MONITORING [ ..."

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/config

if [ -f $HOME/config-coreapps ]; then
    echo "config-coreapps file FOUND :)"
    source $HOME/config-coreapps
    export CEPH_MONITOR_1
    export CEPH_MONITOR_2
    export CEPH_MONITOR_3
else
    echo "config-coreapps file is missing."
    exit 1
fi

export KUBECONFIG=$ADMIN_KUBECONFIG
export HELM_CACHE_HOME=~/.cache/helm
export HELM_CONFIG_HOME=~/.config/helm
export HELM_DATA_HOME=~/.local/share/helm

kubectl create namespace monitoring

printf "\ndeploying zabbix-agent for host monitoring...\n"
kubectl -n monitoring create secret generic zabbix-psk-secret --from-literal=zabbix_agentd.psk=${ZABBIX_PSK}
kubectl -n monitoring create secret generic zabbix-psk-id-secret --from-literal=zabbix_psk_id=${ZABBIX_PSK_ID}
cat yaml/monitoring/zabbix-agent-daemonset.yaml | sed "s/k8x_zabbix_server/${ZABBIX_SERVER}/" | kubectl -n monitoring apply -f -

printf "\ndeploying prometheus for metrics aggregation...\n"
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
cat yaml/monitoring/prometheus-volumes.yaml | envsubst | kubectl apply -f -
helm install \
    prometheus prometheus-community/prometheus \
    --namespace monitoring \
    -f yaml/monitoring/prometheus-values.yaml

printf "\ndeploying grafana for the monitoring dashboard...\n"
helm repo add grafana https://grafana.github.io/helm-charts
helm repo update
cat yaml/monitoring/grafana-volumes.yaml | envsubst | kubectl apply -f -
helm install \
    grafana grafana/grafana \
    --namespace monitoring \
    -f yaml/monitoring/grafana-values.yaml \
    --set env.GF_SMTP_ENABLED=true,env.GF_SMTP_HOST=${GRAFANA_SMTP_HOST},env.GF_SMTP_FROM_ADDRESS=${GRAFANA_SMTP_FROM_ADDRESS},env.GF_SMTP_USER=${GRAFANA_SMTP_USER},env.GF_SMTP_PASSWORD=${GRAFANA_SMTP_PASSWORD},env.GF_SMTP_SKIP_VERIFY=true
printf '\ngrafana login:\nuser: admin \npass: ' ; kubectl get secret --namespace monitoring grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo

#kubectl -n monitoring create secret generic monitoring-http-secret --from-file=yaml/monitoring/auth
if $CERT_MODE; then
    cat yaml/monitoring/grafana-cert.yaml | sed "s/k8x_domain/${CLUSTER_NAME}.${CLUSTER_DOMAIN}/" | kubectl -n monitoring apply -f -
    cat yaml/monitoring/grafana-ingress-secure.yaml | sed "s/k8x_domain/${CLUSTER_NAME}.${CLUSTER_DOMAIN}/" | kubectl -n monitoring apply -f -
else
    cat yaml/monitoring/grafana-ingress.yaml | sed "s/k8x_domain/${CLUSTER_NAME}.${CLUSTER_DOMAIN}/" | kubectl -n monitoring apply -f -
fi

exit 0

### the loki/promtail deployment below is currently disabled by the exit above
printf "\ndeploying loki for logs aggregation..."
cat yaml/monitoring/loki-volumes.yaml | envsubst | kubectl apply -f -
helm repo add loki https://grafana.github.io/loki/charts
helm repo update
helm install \
    loki loki/loki \
    --namespace monitoring \
    -f yaml/monitoring/loki-values.yaml
helm install \
    promtail loki/promtail \
    --namespace monitoring \
    --set "loki.serviceName=loki"
deploy_system.sh (executable, new file, 52 lines)
@@ -0,0 +1,52 @@
#!/bin/bash

echo ""
echo "... ] DEPLOYING SYSTEM SERVICES [ ..."

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/config

if [ -f $HOME/config-coreapps ]; then
    echo "config-coreapps file FOUND :)"
    source $HOME/config-coreapps
else
    echo "config-coreapps file is missing."
    exit 1
fi

export KUBECONFIG=$ADMIN_KUBECONFIG
export HELM_CACHE_HOME=~/.cache/helm
export HELM_CONFIG_HOME=~/.config/helm
export HELM_DATA_HOME=~/.local/share/helm

# Setup kube-apiserver RBAC for kubelet authorization
kubectl apply -f yaml/system/kube-apiserver-to-kubelet-clusterrole.yaml
kubectl apply -f yaml/system/kube-apiserver-to-kubelet-clusterrolebinding.yaml

# Setup Calico SDN
kubectl apply -f yaml/calico/rbac-calico-etcd.yaml
printf "\ndeploying calico sdn...\n"
ECA64=$( base64 -w0 ${CA_DIR}/etcd-ca.crt )
ECERT64=$( base64 -w0 ${CA_DIR}/etcd.crt )
EKEY64=$( base64 -w0 ${CA_DIR}/etcd.key )
cat yaml/calico/calico-etcd.yaml | \
    sed -e "s@k8x_calico_etcd_endpoint@https://${ETCD_1_IP}:2379,https://${ETCD_2_IP}:2379,https://${ETCD_3_IP}:2379@g" | \
    sed -e "s@#\ etcd-ca:\ null@etcd-ca:\ ${ECA64}@g" | \
    sed -e "s@#\ etcd-cert:\ null@etcd-cert:\ ${ECERT64}@g" | \
    sed -e "s@#\ etcd-key:\ null@etcd-key:\ ${EKEY64}@g" | \
    sed -e "s@k8x_calico_pool@${CNI_NET}@g" | kubectl apply -f -

# Setup Helm package manager
printf "\nsetting up kubernetes helm repos...\n"
helm repo add stable https://kubernetes-charts.storage.googleapis.com
helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com
helm repo update

# Setup DNS
printf "\ndeploying coredns...\n"
kubectl apply -f yaml/coredns/coredns.yaml

# Setup Metrics provider
printf "\ndeploying metrics-server...\n"
kubectl apply -f yaml/metrics-server/components.yaml
docs/iscsi-server-howto.txt (new file, 26 lines)
@@ -0,0 +1,26 @@
#setup iscsi server:
apt install tgt
mkdir /var/lib/docker/iscsi
cd /var/lib/docker/iscsi
dd if=/dev/zero of=prometheusdata.img bs=1M count=8K
dd if=/dev/zero of=prometheusalert.img bs=1M count=2K
dd if=/dev/zero of=grafana.img bs=1M count=10K
tgtadm --lld iscsi --op new --mode target --tid 1 -T iqn.2019-01.example.tld:prometheusdata
tgtadm --lld iscsi --op new --mode target --tid 2 -T iqn.2019-01.example.tld:prometheusalert
tgtadm --lld iscsi --op new --mode target --tid 3 -T iqn.2019-01.example.tld:grafana
tgtadm --lld iscsi --op new --mode target --tid 4 -T iqn.2019-02.example.tld:db-staging.img
tgtadm --lld iscsi --op new --mode logicalunit --tid 1 --lun 1 -b /var/lib/docker/iscsi/prometheusdata.img
tgtadm --lld iscsi --op new --mode logicalunit --tid 2 --lun 1 -b /var/lib/docker/iscsi/prometheusalert.img
tgtadm --lld iscsi --op new --mode logicalunit --tid 3 --lun 1 -b /var/lib/docker/iscsi/grafana.img
tgtadm --lld iscsi --op new --mode logicalunit --tid 4 --lun 1 -b /var/lib/docker/iscsi/db-staging.img
for i in $(seq 1 4); do tgtadm --lld iscsi --op bind --mode target --tid $i -I 10.15.8.21; done
for i in $(seq 1 4); do tgtadm --lld iscsi --op bind --mode target --tid $i -I 10.15.8.22; done

tgt-admin --dump | tee /etc/tgt/conf.d/sysiscsi.conf

#test client:
apt install open-iscsi
iscsiadm --mode discovery -t st -p 10.15.0.2
iscsiadm --mode node --targetname iqn.2019-01.example.tld:grafana -p 10.15.0.2 -l
cat /proc/partitions
iscsiadm --mode node --targetname iqn.2019-01.example.tld:grafana -p 10.15.0.2 -u
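After the login step the LUN appears as a new local block device (visible in /proc/partitions). To actually use it, the client would format and mount it, roughly as follows (the device name is an assumption; check which device showed up first):

mkfs.ext4 /dev/sdb        # assuming the new LUN appeared as /dev/sdb
mkdir -p /mnt/grafana
mount /dev/sdb /mnt/grafana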
docs/notes (new file, 36 lines)
@@ -0,0 +1,36 @@
#Links

kube-apiserver haproxy stats:
- http://clustername.clusterdomain.tld:9000/stats

- https://kubernetes.io/blog/2016/08/security-best-practices-kubernetes-deployment/

#ceph common commands:
https://tracker.ceph.com/projects/ceph/wiki/10_Commands_Every_Ceph_Administrator_Should_Know

#ceph remove dead osd:
https://www.sebastien-han.fr/blog/2015/12/11/ceph-properly-remove-an-osd/

http://www.abeji.de/weave-vs-calico/

#patch crd to delete stuck termination
kubectl -n rook-ceph patch clusters.ceph.rook.io rook-ceph -p '{"metadata":{"finalizers": []}}' --type=merge
kubectl edit crd clusters.ceph.rook.io

#access dashboard via kubectl proxy:
kubectl proxy
http://127.0.0.1:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/

#replace a failed etcd member:
*node*
systemctl stop etcd.service
rm -fr /var/lib/etcd/member
vim /opt/etcd/config/etcd.conf
ETCD_INITIAL_CLUSTER_STATE=existing

*master*
/opt/etcd/bin/etcdctl member remove c02630faf3a2dd3d
/opt/etcd/bin/etcdctl member list
/opt/etcd/bin/etcdctl member add master03 http://192.168.8.13:2380

*node*
systemctl stop etcd.service
env/deprecated/sa-rbac-backup-agent.yaml (vendored, new file, 27 lines)
@@ -0,0 +1,27 @@
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: backup-agent-role
  namespace: ${nspace}
rules:
  - apiGroups: [""]
    resources: ["pods", "pods/log"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["pods/exec"]
    verbs: ["create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: backup-agent-rolebinding
  namespace: ${nspace}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: backup-agent-role
subjects:
  - kind: ServiceAccount
    name: backup-agent-sa
    namespace: ${nspace}
env/deprecated/sa-rbac.yaml (vendored, new file, 70 lines)
@@ -0,0 +1,70 @@
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: admin-role
  namespace: ${nspace}
rules:
  - apiGroups: [ "", "extensions", "apps", "batch", "autoscaling" ]
    resources: [ "pods", "daemonsets", "deployments", "services", "replicasets", "replicationcontrollers", "statefulsets", "horizontalpodautoscalers", "jobs", "cronjobs", "events", "ingresses", "persistentvolumeclaims", "certificates", "configmaps", "secrets", "logs", "pods/log", "pods/exec", "pods/portforward" ]
    verbs: [ "get", "list", "watch", "create", "update", "delete", "patch" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: admin-rolebinding
  namespace: ${nspace}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: admin-role
subjects:
  - kind: ServiceAccount
    name: admin-sa
    namespace: ${nspace}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: admin-cert-role
  namespace: ${nspace}
rules:
  - apiGroups: [ "certmanager.k8s.io" ]
    resources: [ "issuers", "certificates" ]
    verbs: [ "get", "list", "watch", "create", "update", "delete" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: admin-cert-rolebinding
  namespace: ${nspace}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: admin-cert-role
subjects:
  - kind: ServiceAccount
    name: admin-sa
    namespace: ${nspace}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: admin-${nspace}-clusterrole
rules:
  - apiGroups: [ "" ]
    resources: [ "persistentvolumes" ]
    verbs: [ "get", "list", "watch", "create", "update", "delete", "patch" ]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: admin-${nspace}-clusterrolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: admin-${nspace}-clusterrole
subjects:
  - kind: ServiceAccount
    name: admin-sa
    namespace: ${nspace}
env/nspace-wild-cert.yaml (vendored, new file, 17 lines)
@@ -0,0 +1,17 @@
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: wildcard-${nspace}-cert
  namespace: ${nspace}
spec:
  secretName: wildcard-${nspace}-tls
  issuerRef:
    name: letsencrypt-production-dns
    kind: ClusterIssuer
  commonName: '*.${nspace}.example.com'
  dnsNames:
    - '${nspace}.example.com'
    - '*.${nspace}.example.com'
  privateKey:
    algorithm: ECDSA
env/registry-pv.yaml (vendored, new file, 27 lines)
@@ -0,0 +1,27 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: registry-data-pv
spec:
  capacity:
    storage: 420Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    path: "/srv/registry/data"
    server: 10.15.8.10
    readOnly: false
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: registry-data-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 420Gi
  volumeName: "registry-data-pv"
env/registry.yaml (vendored, new file, 117 lines)
@@ -0,0 +1,117 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: registry-config-cm
data:
  registry-config.yml: |
    version: 0.1
    log:
      fields:
        service: registry
    storage:
      cache:
        blobdescriptor: inmemory
      filesystem:
        rootdirectory: /var/lib/registry
    http:
      addr: :5000
      headers:
        X-Content-Type-Options: [nosniff]
    auth:
      htpasswd:
        realm: basic-realm
        path: /auth/htpasswd
    health:
      storagedriver:
        enabled: true
        interval: 10s
        threshold: 3
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: registry
  labels:
    app: registry
spec:
  replicas: 1
  selector:
    matchLabels:
      app: registry
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: registry
    spec:
      containers:
        - name: registry
          image: "registry:latest"
          imagePullPolicy: IfNotPresent
          securityContext:
            privileged: false
          ports:
            - name: http
              containerPort: 5000
              protocol: TCP
          volumeMounts:
            - name: registry-data
              mountPath: "/var/lib/registry"
            - name: registry-config
              mountPath: "/etc/docker/registry"
              readOnly: true
            - name: registry-htpasswd
              mountPath: "/auth"
              readOnly: true
      volumes:
        - name: registry-data
          persistentVolumeClaim:
            claimName: registry-data-pvc
        - name: registry-config
          configMap:
            name: registry-config-cm
            items:
              - key: registry-config.yml
                path: config.yml
        - name: registry-htpasswd
          secret:
            secretName: registry-sec
            items:
              - key: HTPASSWD
                path: htpasswd
---
apiVersion: v1
kind: Service
metadata:
  name: registry
spec:
  ports:
    - port: 5000
  selector:
    app: registry
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: registry-ingress
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/proxy-body-size: 2048m
spec:
  rules:
    - host: registry.develop.example.com
      http:
        paths:
          - pathType: Prefix
            path: /
            backend:
              service:
                name: registry
                port:
                  number: 5000
  tls:
    - secretName: wildcard-develop-tls
      hosts:
        - registry.develop.example.com
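Once the develop environment is up, the registry behind this ingress is used like any htpasswd-protected docker registry; a sketch with the credentials from config-coreapps and a hypothetical image name:

docker login registry.develop.example.com -u deployer
docker tag myapp:latest registry.develop.example.com/myapp:latest
docker push registry.develop.example.com/myapp:latest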
health.sh (executable, new file, 49 lines)
@@ -0,0 +1,49 @@
#!/bin/bash

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/config

export KUBECONFIG=$ADMIN_KUBECONFIG
export HELM_CACHE_HOME=~/.cache/helm
export HELM_CONFIG_HOME=~/.config/helm
export HELM_DATA_HOME=~/.local/share/helm

kubectl cluster-info

printf "\n [ etcd cluster health: ]\n"
/usr/local/bin/etcdctl --endpoints="https://${ETCD_1_IP}:2379,https://${ETCD_2_IP}:2379,https://${ETCD_3_IP}:2379" --cert ${CA_DIR}/etcd.crt --key ${CA_DIR}/etcd.key --cacert ${CA_DIR}/etcd-ca.crt endpoint status

printf "\n [ kubernetes components health: ]\n"
/usr/local/bin/kubectl get componentstatuses

printf "\n [ kubernetes nodes: ]\n"
kubectl get nodes -o wide

printf "\n [ helm releases: ]\n"
helm ls --all-namespaces

if [ "$1" = "all" ]; then
    printf "\n [ kubernetes deployments: ]\n"
    /usr/local/bin/kubectl get deployments --all-namespaces
    printf "\n [ kubernetes services: ]\n"
    /usr/local/bin/kubectl get services --all-namespaces -o wide
    printf "\n [ kubernetes ingresses: ]\n"
    /usr/local/bin/kubectl get ingresses.v1.networking.k8s.io --all-namespaces
    printf "\n [ kubernetes storage claims: ]\n"
    /usr/local/bin/kubectl get pvc --all-namespaces
    printf "\n [ kubernetes pods: ]\n"
    /usr/local/bin/kubectl get pods --all-namespaces -o wide
fi

if [ "$1" = "ceph" ]; then
    printf "\n [ ceph status: ]\n"
    /usr/local/bin/kubectl -n rook-ceph exec -it rook-ceph-tools -- ceph osd status
    /usr/local/bin/kubectl -n rook-ceph exec -it rook-ceph-tools -- ceph df
    /usr/local/bin/kubectl -n rook-ceph exec -it rook-ceph-tools -- ceph status
fi

if [ ! -z "$1" ]; then
    printf "\n [ $1 status: ]\n"
    /usr/local/bin/kubectl get $1 --all-namespaces -o wide
fi
157
init.sh
Executable file
@@ -0,0 +1,157 @@
#!/bin/bash

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/config

export KUBECONFIG=$ADMIN_KUBECONFIG

# checks if we have the cryptopack file
if [ -f $HOME/cryptopack.b64 ]; then
echo "] cryptopack.b64 FOUND :)"
else
echo "] cryptopack.b64 is not found."
exit 1
fi

# checks if we have the necessary config files
if [ -f $HOME/config ]; then
echo "] config file FOUND :)"
else
echo "] config file is missing."
exit 1
fi

if [ -f $HOME/config-coreapps ]; then
echo "] config-coreapps file FOUND :)"
else
echo "] config-coreapps file is missing."
exit 1
fi

# checks if we are on a system with nftables and reverts it to iptables-legacy, since docker doesn't support nftables yet
OS_RELEASE=`lsb_release -cs`
if [ "$OS_RELEASE" = "buster" ]; then
if [ `readlink /etc/alternatives/iptables` == "/usr/sbin/iptables-nft" ]; then
update-alternatives --set iptables /usr/sbin/iptables-legacy
update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
update-alternatives --set arptables /usr/sbin/arptables-legacy
update-alternatives --set ebtables /usr/sbin/ebtables-legacy
systemctl mask nftables.service
echo "] PLEASE REBOOT FOR THESE CHANGES TO TAKE EFFECT."
exit
else
echo "] iptables seems to point to iptables-legacy. :)"
fi
fi

#this one could be done better for sure...
case "${NODE_NAME_SHORT}" in
${MASTER_1_NAME})
NODETYPE='controller'
NODEIP=${MASTER_1_IP}
;;
${MASTER_2_NAME})
NODETYPE='controller'
NODEIP=${MASTER_2_IP}
;;
${MASTER_3_NAME})
NODETYPE='controller'
NODEIP=${MASTER_3_IP}
;;
${NODE_1_NAME})
NODETYPE='worker'
NODEIP=${NODE_1_IP}
;;
${NODE_2_NAME})
NODETYPE='worker'
NODEIP=${NODE_2_IP}
;;
${NODE_3_NAME})
NODETYPE='worker'
NODEIP=${NODE_3_IP}
;;
${NODE_4_NAME})
NODETYPE='worker'
NODEIP=${NODE_4_IP}
;;
${NODE_5_NAME})
NODETYPE='worker'
NODEIP=${NODE_5_IP}
;;
${NODE_6_NAME})
NODETYPE='worker'
NODEIP=${NODE_6_IP}
;;
${NODE_7_NAME})
NODETYPE='worker'
NODEIP=${NODE_7_IP}
;;
${NODE_8_NAME})
NODETYPE='worker'
NODEIP=${NODE_8_IP}
;;
${NODE_9_NAME})
NODETYPE='worker'
NODEIP=${NODE_9_IP}
;;
${NODE_10_NAME})
NODETYPE='worker'
NODEIP=${NODE_10_IP}
;;
${NODE_11_NAME})
NODETYPE='worker'
NODEIP=${NODE_11_IP}
;;
${NODE_12_NAME})
NODETYPE='worker'
NODEIP=${NODE_12_IP}
;;
${NODE_13_NAME})
NODETYPE='worker'
NODEIP=${NODE_13_IP}
;;
${NODE_14_NAME})
NODETYPE='worker'
NODEIP=${NODE_14_IP}
;;
${NODE_15_NAME})
NODETYPE='worker'
NODEIP=${NODE_15_IP}
;;
*)
echo '] Node hostname not defined.'
exit 1
;;
esac
echo "... ] SETTING UP ${NODETYPE} NODE ${NODE_NAME_SHORT} WITH ${NODEIP} [ ..."

echo "] Setting up base ..."
/bin/bash $FUNC_PATH/init_hostname.sh
/bin/bash $FUNC_PATH/init_metal.sh
/bin/bash $FUNC_PATH/install_docker.sh
/bin/bash $FUNC_PATH/install_chrony.sh

/bin/bash ssl/install_kube_crypto.sh
/bin/bash ssl/create_admin.sh

if [ "$NODETYPE" == "controller" ]; then
echo "] Setting up master ..."
/bin/bash ssl/create_master.sh
/bin/bash $FUNC_PATH/install_etcd.sh
/bin/bash $FUNC_PATH/install_kube_apiserver.sh
/bin/bash $FUNC_PATH/install_kube_scheduler.sh
/bin/bash $FUNC_PATH/install_kube_controller_manager.sh
#/bin/bash $FUNC_PATH/install_kube_keepalived.sh
/bin/bash $FUNC_PATH/install_k8x_api.sh
fi

echo "] Setting up kube-api localhost loadbalancing ..."
/bin/bash $FUNC_PATH/install_kube_haproxy.sh

echo "] Setting up worker ..."
/bin/bash ssl/create_worker.sh
/bin/bash $FUNC_PATH/install_kubelet.sh
/bin/bash $FUNC_PATH/install_kube_proxy.sh

#label node
kubectl label nodes ${NODE_NAME_SHORT} nodetype=${NODETYPE} --overwrite
echo "... ] DONE :) [ ..."
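After init.sh has run on every machine, the nodetype label applied in the last step makes it easy to confirm that each host registered with its intended role; a quick check, assuming the admin kubeconfig from the config file:

source config ; export KUBECONFIG=$ADMIN_KUBECONFIG
kubectl get nodes -L nodetype -o wide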
29
ssl/create_admin.sh
Executable file
@@ -0,0 +1,29 @@
#!/bin/bash

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/../config

cat <<EOF | tee $CA_DIR/admin-openssl.cnf
[req]
distinguished_name = req_distinguished_name

[req_distinguished_name]

[ v3_req_client ]
basicConstraints = CA:FALSE
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth
EOF

#generate admin cert
openssl ecparam -name secp521r1 -genkey -noout -out ${CA_DIR}/admin.key
chmod 0600 ${CA_DIR}/admin.key
openssl req -new -key ${CA_DIR}/admin.key -subj "/CN=kubernetes-admin/O=system:masters" -out ${CA_DIR}/admin.csr
openssl x509 -req -in ${CA_DIR}/admin.csr -CA ${CA_DIR}/ca.crt -CAkey ${CA_DIR}/ca.key -CAcreateserial -out ${CA_DIR}/admin.crt -days 20000 -extensions v3_req_client -extfile ${CA_DIR}/admin-openssl.cnf

#generate admin kubeconfig
TOKEN=`cat ${CA_DIR}/admin.token`
kubectl config set-cluster ${CLUSTER_NAME}.virtual.local --certificate-authority=${CA_DIR}/ca.crt --embed-certs=true --server=https://localhost:16443 --kubeconfig=$ADMIN_KUBECONFIG
kubectl config set-credentials admin --client-certificate=${CA_DIR}/admin.crt --client-key=${CA_DIR}/admin.key --embed-certs=true --token=$TOKEN --kubeconfig=$ADMIN_KUBECONFIG
kubectl config set-context ${CLUSTER_NAME}.virtual.local --cluster=${CLUSTER_NAME}.virtual.local --user=admin --kubeconfig=$ADMIN_KUBECONFIG
kubectl config use-context ${CLUSTER_NAME}.virtual.local --kubeconfig=$ADMIN_KUBECONFIG
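The /O=system:masters organization in the subject is what grants this certificate cluster-admin rights through RBAC, so inspecting the issued cert is a cheap sanity check (plain openssl flags):

openssl x509 -in ${CA_DIR}/admin.crt -noout -subject -enddate
kubectl --kubeconfig=$ADMIN_KUBECONFIG config current-context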
85
ssl/create_master.sh
Executable file
@@ -0,0 +1,85 @@
#!/bin/bash

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/../config

cat <<EOF | tee $CA_DIR/master-openssl.cnf
[req]
distinguished_name = req_distinguished_name

[req_distinguished_name]

[ v3_req_client ]
basicConstraints = CA:FALSE
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth

[ v3_req_server ]
basicConstraints = CA:FALSE
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth

[ v3_req_apiserver ]
basicConstraints = CA:FALSE
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names_cluster

[ alt_names_cluster ]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.cluster.local
DNS.5 = ${CLUSTER_NAME}.virtual.local
DNS.6 = ${CLUSTER_NAME}-api.virtual.local
DNS.7 = ${MASTER_1_NAME}.virtual.local
DNS.8 = ${MASTER_2_NAME}.virtual.local
DNS.9 = ${MASTER_3_NAME}.virtual.local
DNS.10 = ${MASTER_1_NAME}
DNS.11 = ${MASTER_2_NAME}
DNS.12 = ${MASTER_3_NAME}
DNS.13 = ${CLUSTER_NAME}.${CLUSTER_DOMAIN}
DNS.14 = ${MASTER_1_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN}
DNS.15 = ${MASTER_2_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN}
DNS.16 = ${MASTER_3_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN}
DNS.17 = localhost
DNS.18 = ${MASTERS_DOMAIN}
IP.1 = 127.0.0.1
IP.2 = ${SERVICE_FIP}
IP.3 = ${MASTER_LB_IP}
IP.4 = ${MASTER_1_IP}
IP.5 = ${MASTER_2_IP}
IP.6 = ${MASTER_3_IP}
EOF

#include all known tokens into the master
rm ${CA_DIR}/known_tokens.csv
for object in admin kube-proxy kubelet kube-controller-manager kube-scheduler
do
TOKEN=`cat ${CA_DIR}/${object}.token`
echo "$TOKEN,$object,$object" >> ${CA_DIR}/known_tokens.csv
done

#create cert for kube-apiserver
openssl ecparam -name secp521r1 -genkey -noout -out ${CA_DIR}/kube-apiserver.key
chmod 0600 ${CA_DIR}/kube-apiserver.key
openssl req -new -key ${CA_DIR}/kube-apiserver.key -subj "/CN=kube-apiserver" -out ${CA_DIR}/kube-apiserver.csr -config ${CA_DIR}/master-openssl.cnf
openssl x509 -req -in ${CA_DIR}/kube-apiserver.csr -CA ${CA_DIR}/ca.crt -CAkey ${CA_DIR}/ca.key -CAcreateserial -out ${CA_DIR}/kube-apiserver.crt -days 20000 -extensions v3_req_apiserver -extfile ${CA_DIR}/master-openssl.cnf

#create cert for kube-apiserver kubelet client
openssl ecparam -name secp521r1 -genkey -noout -out ${CA_DIR}/kube-apiserver-kubelet-client.key
chmod 0600 ${CA_DIR}/kube-apiserver-kubelet-client.key
openssl req -new -key ${CA_DIR}/kube-apiserver-kubelet-client.key -subj "/CN=kube-apiserver-kubelet-client/O=system:masters" -out ${CA_DIR}/kube-apiserver-kubelet-client.csr
openssl x509 -req -in ${CA_DIR}/kube-apiserver-kubelet-client.csr -CA ${CA_DIR}/ca.crt -CAkey ${CA_DIR}/ca.key -CAcreateserial -out ${CA_DIR}/kube-apiserver-kubelet-client.crt -days 20000 -extensions v3_req_client -extfile ${CA_DIR}/master-openssl.cnf

#create cert for kube-scheduler
openssl ecparam -name secp521r1 -genkey -noout -out ${CA_DIR}/kube-scheduler.key
chmod 0600 ${CA_DIR}/kube-scheduler.key
openssl req -new -key ${CA_DIR}/kube-scheduler.key -subj "/CN=system:kube-scheduler" -out ${CA_DIR}/kube-scheduler.csr
openssl x509 -req -in ${CA_DIR}/kube-scheduler.csr -CA ${CA_DIR}/ca.crt -CAkey ${CA_DIR}/ca.key -CAcreateserial -out ${CA_DIR}/kube-scheduler.crt -days 20000 -extensions v3_req_client -extfile ${CA_DIR}/master-openssl.cnf

#create cert for kube-controller-manager with service account key
cp -av ${CA_DIR}/sa.key ${CA_DIR}/kube-controller-manager.key
chmod 0600 ${CA_DIR}/kube-controller-manager.key
openssl req -new -key ${CA_DIR}/kube-controller-manager.key -subj "/CN=system:kube-controller-manager" -out ${CA_DIR}/kube-controller-manager.csr
openssl x509 -req -in ${CA_DIR}/kube-controller-manager.csr -CA ${CA_DIR}/ca.crt -CAkey ${CA_DIR}/ca.key -CAcreateserial -out ${CA_DIR}/kube-controller-manager.crt -days 20000 -extensions v3_req_client -extfile ${CA_DIR}/master-openssl.cnf
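Every kubectl, kubelet and haproxy health check validates the apiserver certificate against one of the names above, so it is worth confirming the full SAN list made it into the issued cert (standard openssl inspection):

openssl x509 -in ${CA_DIR}/kube-apiserver.crt -noout -text | grep -A 2 "Subject Alternative Name"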
40
ssl/create_worker.sh
Executable file
@@ -0,0 +1,40 @@
#!/bin/bash

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/../config

#DOCKER_IP=$(ip addr show dev docker0 | grep 'inet ' | cut -d: -f2 | awk '{print $2}' | cut -d '/' -f 1)

cat <<EOF | tee $CA_DIR/node-openssl.cnf
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name

[req_distinguished_name]

[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names

[alt_names]
DNS.1 = ${NODE_NAME}
DNS.2 = ${NODE_NAME_SHORT}.${CLUSTER_NAME}.${CLUSTER_DOMAIN}
DNS.3 = ${NODE_NAME_SHORT}
DNS.4 = ${NODE_NAME_SHORT}.virtual.local
IP.1 = ${NODE_IP}
EOF
#IP.2 = ${DOCKER_IP}

#generate cert for kubelet
openssl ecparam -name secp521r1 -genkey -noout -out ${CA_DIR}/kubelet.key
chmod 0600 ${CA_DIR}/kubelet.key
#openssl req -new -key $CA_DIR/kubelet.key -subj "/CN=system:node:${NODE_NAME_SHORT}.${CLUSTER_NAME}.${CLUSTER_DOMAIN}/O=system:nodes" -out $CA_DIR/kubelet.csr -config ${CA_DIR}/node-openssl.cnf
openssl req -new -key $CA_DIR/kubelet.key -subj "/CN=system:node:${NODE_NAME_SHORT}/O=system:nodes" -out $CA_DIR/kubelet.csr -config ${CA_DIR}/node-openssl.cnf
openssl x509 -req -in $CA_DIR/kubelet.csr -CA $CA_DIR/ca.crt -CAkey $CA_DIR/ca.key -CAcreateserial -out $CA_DIR/kubelet.crt -days 20000 -extensions v3_req -extfile $CA_DIR/node-openssl.cnf

#generate cert for kube-proxy
openssl ecparam -name secp521r1 -genkey -noout -out ${CA_DIR}/kube-proxy.key
chmod 0600 ${CA_DIR}/kube-proxy.key
openssl req -new -key ${CA_DIR}/kube-proxy.key -subj "/CN=system:kube-proxy/O=system:node-proxier" -out ${CA_DIR}/kube-proxy.csr -config ${CA_DIR}/node-openssl.cnf
openssl x509 -req -in ${CA_DIR}/kube-proxy.csr -CA ${CA_DIR}/ca.crt -CAkey ${CA_DIR}/ca.key -CAcreateserial -out ${CA_DIR}/kube-proxy.crt -days 20000 -extensions v3_req -extfile ${CA_DIR}/node-openssl.cnf
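The Node authorizer only admits kubelets whose certificate subject follows the system:node:&lt;name&gt; convention used above, so verifying the subject of the issued cert catches naming mistakes early:

openssl x509 -in ${CA_DIR}/kubelet.crt -noout -subject
# expected output, roughly: subject=CN = system:node:<node short name>, O = system:nodes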
15
ssl/install_kube_crypto.sh
Executable file
@@ -0,0 +1,15 @@
#!/bin/bash

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/../config

mkdir -p $CA_DIR

cat $HOME/../cryptopack.b64 | base64 -d > $HOME/../cryptopack.tar.gz
gzip -d $HOME/../cryptopack.tar.gz
mkdir -p /tmp/k8x-cryptounpack
tar -xvf $HOME/../cryptopack.tar -C /tmp/k8x-cryptounpack
rm $HOME/../cryptopack.tar
rsync -a -v /tmp/k8x-cryptounpack/tmp/k8x-cryptogen/* ${CA_DIR}/
rm -rf /tmp/k8x-cryptounpack

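Since cryptopack.b64 is just a base64-wrapped gzipped tarball, its contents can be listed without unpacking anything to disk; an illustrative one-liner:

base64 -d cryptopack.b64 | tar -tzf - | head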
32
systemd/init_hostname.sh
Executable file
@@ -0,0 +1,32 @@
#!/bin/bash

echo ""
echo "... ] INIT HOSTNAME [ ..."

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/../config

#setup the /etc/hosts file
cat <<EOF | tee /etc/hosts
127.0.0.1 localhost
${MASTER_LB_IP} ${CLUSTER_NAME}.virtual.local ${MASTERS_DOMAIN} ${CLUSTER_NAME}.${CLUSTER_DOMAIN} ${CLUSTER_NAME}-api.virtual.local ${CLUSTER_NAME}-api ${CLUSTER_NAME}-etcd
${MASTER_1_IP} ${MASTER_1_NAME}.virtual.local ${MASTER_1_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN} ${MASTER_1_NAME}
${MASTER_2_IP} ${MASTER_2_NAME}.virtual.local ${MASTER_2_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN} ${MASTER_2_NAME}
${MASTER_3_IP} ${MASTER_3_NAME}.virtual.local ${MASTER_3_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN} ${MASTER_3_NAME}

${NODE_1_IP} ${NODE_1_NAME}.virtual.local ${NODE_1_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN} ${NODE_1_NAME}
${NODE_2_IP} ${NODE_2_NAME}.virtual.local ${NODE_2_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN} ${NODE_2_NAME}
${NODE_3_IP} ${NODE_3_NAME}.virtual.local ${NODE_3_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN} ${NODE_3_NAME}
${NODE_4_IP} ${NODE_4_NAME}.virtual.local ${NODE_4_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN} ${NODE_4_NAME}
${NODE_5_IP} ${NODE_5_NAME}.virtual.local ${NODE_5_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN} ${NODE_5_NAME}
${NODE_6_IP} ${NODE_6_NAME}.virtual.local ${NODE_6_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN} ${NODE_6_NAME}
${NODE_7_IP} ${NODE_7_NAME}.virtual.local ${NODE_7_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN} ${NODE_7_NAME}
${NODE_8_IP} ${NODE_8_NAME}.virtual.local ${NODE_8_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN} ${NODE_8_NAME}
${NODE_9_IP} ${NODE_9_NAME}.virtual.local ${NODE_9_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN} ${NODE_9_NAME}
${NODE_10_IP} ${NODE_10_NAME}.virtual.local ${NODE_10_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN} ${NODE_10_NAME}
${NODE_11_IP} ${NODE_11_NAME}.virtual.local ${NODE_11_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN} ${NODE_11_NAME}
${NODE_12_IP} ${NODE_12_NAME}.virtual.local ${NODE_12_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN} ${NODE_12_NAME}
${NODE_13_IP} ${NODE_13_NAME}.virtual.local ${NODE_13_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN} ${NODE_13_NAME}
${NODE_14_IP} ${NODE_14_NAME}.virtual.local ${NODE_14_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN} ${NODE_14_NAME}
${NODE_15_IP} ${NODE_15_NAME}.virtual.local ${NODE_15_NAME}.${CLUSTER_NAME}.${CLUSTER_DOMAIN} ${NODE_15_NAME}
EOF
84
systemd/init_metal.sh
Executable file
@@ -0,0 +1,84 @@
#!/bin/bash

echo ""
echo "... ] INIT METAL \\m/ [ ..."

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/../config

echo "] prepare system repositories"
apt update -q
apt upgrade -y

echo "] installing common tools"
apt install -y sudo software-properties-common apt-transport-https daemonize htop iotop strace mc vim-nox openssl ca-certificates gnupg2 attr jq tree apache2-utils

echo "] install network tools"
apt install -y iptables arptables ebtables conntrack ipset bridge-utils socat ipip rsync iftop nmap dnsutils wget curl

echo "] installing network storage clients"
apt install -y open-iscsi nfs-common lvm2 thin-provisioning-tools ceph-common

echo "] remove swap because kubernetes doesn't like it"
swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab

echo "] execute modprobe on node - workaround for heketi gluster"
modprobe dm_snapshot
modprobe dm_thin_pool
modprobe dm_mirror

echo "] prepare iptables"
modprobe ipip
modprobe xt_set

cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward=1
net.ipv4.conf.all.forwarding=1
net.ipv4.ip_nonlocal_bind=1
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.netfilter.nf_conntrack_max=1000000

net.ipv6.conf.all.forwarding=1

# Disable IPv6
net.ipv6.conf.all.disable_ipv6=1
net.ipv6.conf.default.disable_ipv6=1
net.ipv6.conf.lo.disable_ipv6=1

# Increase the inotify limits
fs.inotify.max_user_instances=1024
fs.inotify.max_user_watches=65536
EOF
sysctl --system

echo "] unpacking kubectl"
gzip -v -c -d $HOME/../blobs/kubectl.gz > /usr/local/bin/kubectl
chmod +x /usr/local/bin/kubectl

echo "] unpacking etcdctl"
gzip -v -c -d $HOME/../blobs/etcdctl.gz > /usr/local/bin/etcdctl
chmod +x /usr/local/bin/etcdctl

echo "] unpacking calicoctl"
gzip -v -c -d $HOME/../blobs/calicoctl.gz > /usr/local/bin/calicoctl
chmod +x /usr/local/bin/calicoctl

echo "] unpacking helm chart manager"
gzip -v -c -d $HOME/../blobs/helm.gz > /usr/local/bin/helm
chmod +x /usr/local/bin/helm

echo "] unpacking k9s cli tool"
gzip -v -c -d $HOME/../blobs/k9s.gz > /usr/local/bin/k9s
chmod +x /usr/local/bin/k9s

echo "] copy some additional scripts"
cp -v $HOME/../tools/k /usr/local/bin
cp -v $HOME/../tools/logs-proxy /usr/local/bin
cp -v $HOME/../tools/tcp-proxy /usr/local/bin

echo "] create k8s service dirs"
mkdir -p ${CONF_DIR}/{kube-controller-manager,kubelet,kube-proxy,kube-scheduler}
mkdir -p ${CONF_DIR}
mkdir -p ${CONF_DIR}/manifests
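After sysctl --system, the forwarding and bridge settings that calico and kube-proxy depend on should report as enabled; a quick spot check (the net.bridge keys only resolve once a bridge module is loaded):

sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables fs.inotify.max_user_watches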
28
systemd/install_chrony.sh
Executable file
@@ -0,0 +1,28 @@
#!/bin/bash

echo ""
echo "... ] INSTALLING CHRONY NTP CLIENT [ ..."

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/../config

systemctl stop chrony.service

apt-get update -q
apt-get install -y chrony

if [ ! -z ${NTP_SERVER} ]; then
cat <<EOF > /etc/chrony/chrony.conf
server ${NTP_SERVER} iburst
keyfile /etc/chrony/chrony.keys
driftfile /var/lib/chrony/chrony.drift
#log tracking measurements statistics
logdir /var/log/chrony
maxupdateskew 100.0
rtcsync
makestep 1 3
EOF
fi

systemctl enable chrony.service
systemctl start chrony.service
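Clock skew breaks TLS validation and etcd leader elections long before anything else complains, so verifying synchronization right after install is cheap insurance:

chronyc tracking
chronyc sources -v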
34
systemd/install_docker.sh
Executable file
@@ -0,0 +1,34 @@
#!/bin/bash

echo ""
echo "... ] INSTALLING DOCKER [ ..."

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/../config

# remove lxcfs, as we don't need it if we're doing docker
service lxcfs stop
apt-get remove -y -q lxc-common lxcfs lxd lxd-client

#setup docker from the official repo
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add -
DEBIANVER=`lsb_release -cs`
cat <<EOF > /etc/apt/sources.list.d/docker.list
deb [arch=amd64] https://download.docker.com/linux/$(. /etc/os-release; echo "$ID") ${DEBIANVER} stable nightly
EOF

apt-mark unhold containerd.io
apt-mark unhold docker-ce
apt-mark unhold docker-ce-cli

apt-get update
apt-get install --allow-downgrades -y containerd.io=1.4.1-1 docker-ce=5:19.03.13~3-0~debian-${DEBIANVER} docker-ce-cli=5:19.03.13~3-0~debian-${DEBIANVER}

systemctl enable docker
systemctl start docker

#prevents auto updating of docker package
apt-mark hold containerd.io
apt-mark hold docker-ce
apt-mark hold docker-ce-cli
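Because the package versions are pinned and then held, upgrades have to be explicit; the holds and the running engine version can be confirmed with:

apt-mark showhold
docker version --format '{{.Server.Version}}'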
56
systemd/install_etcd.sh
Executable file
@@ -0,0 +1,56 @@
#!/bin/bash

echo ""
echo "... ] INSTALLING ETCD [ ..."

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/../config

systemctl stop etcd.service

gzip -v -c -d $HOME/../blobs/etcd.gz > /usr/local/bin/etcd
chmod +x /usr/local/bin/etcd

#create new db dir
mkdir -p /var/lib/etcd/

cat <<EOF | tee /etc/systemd/system/etcd.service
[Unit]
Description=etcd
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target

[Service]
User=root
Type=notify
ExecStart=/usr/local/bin/etcd \\
  --name=${NODE_NAME_SHORT} \\
  --listen-client-urls=https://${ETCD_IP}:2379,https://127.0.0.1:2379 \\
  --advertise-client-urls=https://${ETCD_IP}:2379 \\
  --data-dir=/var/lib/etcd \\
  --cert-file=${CA_DIR}/etcd.crt \\
  --key-file=${CA_DIR}/etcd.key \\
  --peer-cert-file=${CA_DIR}/etcd-peer.crt \\
  --peer-key-file=${CA_DIR}/etcd-peer.key \\
  --trusted-ca-file=${CA_DIR}/etcd-ca.crt \\
  --peer-trusted-ca-file=${CA_DIR}/etcd-ca.crt \\
  --peer-client-cert-auth \\
  --listen-peer-urls=https://${ETCD_IP}:2380 \\
  --client-cert-auth \\
  --initial-advertise-peer-urls=https://${ETCD_IP}:2380 \\
  --initial-cluster-token="k8x-21b5f25de1" \\
  --initial-cluster=${ETCD_1_NAME}=https://${ETCD_1_IP}:2380,${ETCD_2_NAME}=https://${ETCD_2_IP}:2380,${ETCD_3_NAME}=https://${ETCD_3_IP}:2380 \\
  --initial-cluster-state=new

Restart=always
RestartSec=10s
LimitNOFILE=40000

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable etcd.service
systemctl start etcd.service

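Once the unit is running on all three masters, cluster health can be verified with the same certificates the server uses (mirroring the flags used in tools/etcd_backup.sh):

ETCDCTL_API=3 /usr/local/bin/etcdctl \
  --endpoints="https://${ETCD_1_IP}:2379,https://${ETCD_2_IP}:2379,https://${ETCD_3_IP}:2379" \
  --cert="${CA_DIR}/etcd.crt" --key="${CA_DIR}/etcd.key" --cacert="${CA_DIR}/etcd-ca.crt" \
  endpoint health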
75
systemd/install_kube_apiserver.sh
Executable file
@@ -0,0 +1,75 @@
#!/bin/bash

echo ""
echo "... ] INSTALLING KUBE APISERVER [ ..."

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/../config

systemctl stop kube-apiserver.service

gzip -v -c -d $HOME/../blobs/kube-apiserver.gz > /usr/local/bin/kube-apiserver
chmod +x /usr/local/bin/kube-apiserver

cat <<EOF | tee /etc/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
User=root
ExecStart=/usr/local/bin/kube-apiserver \\
  --advertise-address=${NODE_IP} \\
  --bind-address=${NODE_IP} \\
  --secure-port=6443 \\
  --allow-privileged=true \\
  --anonymous-auth=false \\
  --apiserver-count=3 \\
  --audit-log-maxage=30 \\
  --audit-log-maxbackup=3 \\
  --audit-log-maxsize=100 \\
  --audit-log-path=/var/log/kube-audit.log \\
  --authorization-mode=Node,RBAC \\
  --client-ca-file=${CA_DIR}/ca.crt \\
  --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,AlwaysPullImages \\
  --enable-swagger-ui=false \\
  --etcd-cafile="${CA_DIR}/etcd-ca.crt" \\
  --etcd-certfile="${CA_DIR}/etcd.crt" \\
  --etcd-keyfile="${CA_DIR}/etcd.key" \\
  --etcd-servers="https://${ETCD_1_IP}:2379,https://${ETCD_2_IP}:2379,https://${ETCD_3_IP}:2379" \\
  --event-ttl=1h \\
  --enable-bootstrap-token-auth \\
  --kubelet-certificate-authority=${CA_DIR}/ca.crt \\
  --kubelet-client-certificate=${CA_DIR}/kube-apiserver-kubelet-client.crt \\
  --kubelet-client-key=${CA_DIR}/kube-apiserver-kubelet-client.key \\
  --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\
  --proxy-client-key-file=${CA_DIR}/aggregator.key \\
  --proxy-client-cert-file=${CA_DIR}/aggregator.crt \\
  --kubelet-https=true \\
  --runtime-config=api/all=true \\
  --service-account-lookup=true \\
  --service-account-key-file=${CA_DIR}/sa.pub \\
  --service-cluster-ip-range=${SERVICE_NET} \\
  --service-node-port-range=30000-32767 \\
  --tls-cert-file=${CA_DIR}/kube-apiserver.crt \\
  --tls-private-key-file=${CA_DIR}/kube-apiserver.key \\
  --requestheader-client-ca-file=${CA_DIR}/aggregator-ca.crt \\
  --requestheader-allowed-names=aggregator \\
  --requestheader-username-headers=X-Remote-User \\
  --requestheader-group-headers=X-Remote-Group \\
  --requestheader-extra-headers-prefix=X-Remote-Extra- \\
  --logtostderr=true \\
  --v=2

Restart=on-failure
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
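A minimal liveness probe once the unit is up, assuming ssl/create_admin.sh has already produced the admin kubeconfig:

kubectl --kubeconfig=$ADMIN_KUBECONFIG get --raw /healthz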
56
systemd/install_kube_controller_manager.sh
Executable file
@@ -0,0 +1,56 @@
#!/bin/bash

echo ""
echo "... ] INSTALLING KUBE CONTROLLER MANAGER [ ..."

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/../config

systemctl stop kube-controller-manager

gzip -v -c -d $HOME/../blobs/kube-controller-manager.gz > /usr/local/bin/kube-controller-manager
chmod +x /usr/local/bin/kube-controller-manager

#generate kube-controller-manager's kubeconfig
rm ${CONF_DIR}/kube-controller-manager/kubeconfig
TOKEN=`cat ${CA_DIR}/kube-controller-manager.token`
kubectl config set-cluster ${CLUSTER_NAME}.virtual.local --certificate-authority=${CA_DIR}/ca.crt --embed-certs=true --server=https://${NODE_IP}:6443 --kubeconfig=${CONF_DIR}/kube-controller-manager/kubeconfig
kubectl config set-credentials system:kube-controller-manager --client-certificate=${CA_DIR}/kube-controller-manager.crt --client-key=${CA_DIR}/kube-controller-manager.key --embed-certs=true --token=$TOKEN --kubeconfig=${CONF_DIR}/kube-controller-manager/kubeconfig
kubectl config set-context ${CLUSTER_NAME}.virtual.local --cluster=${CLUSTER_NAME}.virtual.local --user=system:kube-controller-manager --kubeconfig=${CONF_DIR}/kube-controller-manager/kubeconfig
kubectl config use-context ${CLUSTER_NAME}.virtual.local --kubeconfig=${CONF_DIR}/kube-controller-manager/kubeconfig

cat <<EOF | tee /etc/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
User=root
ExecStart=/usr/local/bin/kube-controller-manager \\
  --allocate-node-cidrs=true \\
  --bind-address=${NODE_IP} \\
  --secure-port=10257 \\
  --configure-cloud-routes=false \\
  --cluster-cidr=${CNI_NET} \\
  --cluster-name=${CLUSTER_NAME}.virtual.local \\
  --cluster-signing-cert-file=${CA_DIR}/ca.crt \\
  --cluster-signing-key-file=${CA_DIR}/ca.key \\
  --kubeconfig=${CONF_DIR}/kube-controller-manager/kubeconfig \\
  --leader-elect=true \\
  --root-ca-file=${CA_DIR}/ca.crt \\
  --service-account-private-key-file=${CA_DIR}/sa.key \\
  --service-cluster-ip-range=${SERVICE_NET} \\
  --use-service-account-credentials=true \\
  --logtostderr=true \\
  --v=2

Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
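With three masters running this unit and --leader-elect=true, only one controller-manager is active at a time; on recent releases the holder is recorded in a coordination Lease, though the lock object kind varies by Kubernetes version, so treat this as a sketch:

kubectl -n kube-system get lease kube-controller-manager -o jsonpath='{.spec.holderIdentity}'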
95
systemd/install_kube_haproxy.sh
Executable file
@@ -0,0 +1,95 @@
#!/bin/bash

echo ""
echo "... ] INSTALLING KUBE-API HAPROXY [ ..."

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/../config

#installing haproxy
apt-get update -q
apt-get install -y haproxy

#configuring haproxy
systemctl stop haproxy.service
mv /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.old.cfg

cat <<EOF > /etc/haproxy/haproxy.cfg
global
    log /dev/log local0
    log /dev/log local1 notice
    chroot /var/lib/haproxy
    stats socket /run/haproxy/admin.sock mode 660 level admin
    stats timeout 30s
    user haproxy
    group haproxy
    daemon

    # Default SSL material locations
    ca-base /etc/ssl/certs
    crt-base /etc/ssl/private

    # Default ciphers to use on SSL-enabled listening sockets.
    # For more information, see ciphers(1SSL). This list is from:
    # https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
    # An alternative list with additional directives can be obtained from
    # https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=haproxy
    ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS
    ssl-default-bind-options no-sslv3

defaults
    log global
    mode http
    option httplog
    option dontlognull
    timeout connect 5s
    timeout client 1d
    timeout server 1d
    timeout http-request 60s
    timeout http-keep-alive 60s
    errorfile 400 /etc/haproxy/errors/400.http
    errorfile 403 /etc/haproxy/errors/403.http
    errorfile 408 /etc/haproxy/errors/408.http
    errorfile 500 /etc/haproxy/errors/500.http
    errorfile 502 /etc/haproxy/errors/502.http
    errorfile 503 /etc/haproxy/errors/503.http
    errorfile 504 /etc/haproxy/errors/504.http

frontend monitor-in
    bind 127.0.0.1:33305
    mode http
    option httplog
    monitor-uri /monitor

listen stats
    bind 127.0.0.1:9000
    mode http
    stats enable
    stats hide-version
    stats uri /stats
    stats refresh 30s
    stats realm Haproxy\ Statistics
    stats auth ${HAPROXY_STATS_AUTH}

frontend ${CLUSTER_NAME}-api
    bind *:16443
    bind 127.0.0.1:16443
    mode tcp
    option tcplog
    tcp-request inspect-delay 5s
    default_backend ${CLUSTER_NAME}-api

backend ${CLUSTER_NAME}-api
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    server ${MASTER_1_NAME}-api ${MASTER_1_IP}:6443 check
    server ${MASTER_2_NAME}-api ${MASTER_2_IP}:6443 check
    server ${MASTER_3_NAME}-api ${MASTER_3_IP}:6443 check
EOF

systemctl daemon-reload
systemctl enable haproxy.service
systemctl start haproxy.service
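The config exposes two local diagnostic endpoints: the monitor URI returns 200 as long as haproxy itself is alive, and the stats page is protected by the ${HAPROXY_STATS_AUTH} user:password pair:

curl -s -o /dev/null -w '%{http_code}\n' http://127.0.0.1:33305/monitor
curl -s -u "${HAPROXY_STATS_AUTH}" http://127.0.0.1:9000/stats | head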
59
systemd/install_kube_keepalived.sh
Normal file
@@ -0,0 +1,59 @@
#!/bin/bash

echo ""
echo "... ] INSTALLING KUBE-API KEEPALIVED [ ..."

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/../config

#installing keepalived
apt-get update -q
apt-get install -y keepalived

systemctl stop keepalived.service

#configure keepalived
cat <<EOF > /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
    notification_email {
        root@localhost
    }
    notification_email_from vrrp@${NODE_NAME}
    smtp_server localhost
    smtp_connect_timeout 30
}

vrrp_script haproxy-check {
    script "/usr/bin/killall -0 haproxy"
    interval 2
    weight 20
}

vrrp_instance haproxy-vip {
    interface ${NODE_INTERFACE} #interconnect
    state BACKUP
    priority 101
    virtual_router_id 51
    nopreempt
    advert_int 3

    authentication {
        auth_type PASS
        auth_pass ${HAPROXY_VRRP_AUTH}
    }

    virtual_ipaddress {
        ${MASTER_LB_IP}/${MASTER_LB_MASK} dev ${NODE_INTERFACE}
    }

    track_script {
        haproxy-check weight 20
    }
}
EOF

systemctl daemon-reload
systemctl enable keepalived.service
systemctl start keepalived.service
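The vrrp_script probes haproxy with killall -0 every two seconds and bumps the node's priority while it passes, so the VIP follows a healthy haproxy. Whether this node currently holds the address can be checked with:

ip -4 addr show dev ${NODE_INTERFACE} | grep ${MASTER_LB_IP}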
85
systemd/install_kube_proxy.sh
Executable file
@@ -0,0 +1,85 @@
#!/bin/bash

echo ""
echo "... ] INSTALLING KUBE PROXY [ ..."

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/../config

systemctl stop kube-proxy

gzip -v -c -d $HOME/../blobs/kube-proxy.gz > /usr/local/bin/kube-proxy
chmod +x /usr/local/bin/kube-proxy

#generate kube-proxy's kubeconfig file
rm ${CONF_DIR}/kube-proxy/kubeconfig
TOKEN=`cat ${CA_DIR}/kube-proxy.token`
kubectl config set-cluster ${CLUSTER_NAME}.virtual.local --certificate-authority=${CA_DIR}/ca.crt --embed-certs=true --server=https://localhost:16443 --kubeconfig=${CONF_DIR}/kube-proxy/kubeconfig
kubectl config set-credentials kube-proxy --client-certificate=${CA_DIR}/kube-proxy.crt --client-key=${CA_DIR}/kube-proxy.key --embed-certs=true --token=$TOKEN --kubeconfig=${CONF_DIR}/kube-proxy/kubeconfig
kubectl config set-context ${CLUSTER_NAME}.virtual.local --cluster=${CLUSTER_NAME}.virtual.local --user=kube-proxy --kubeconfig=${CONF_DIR}/kube-proxy/kubeconfig
kubectl config use-context ${CLUSTER_NAME}.virtual.local --kubeconfig=${CONF_DIR}/kube-proxy/kubeconfig

#generate kube-proxy's config file
rm ${CONF_DIR}/kube-proxy/kube-proxy-config.yaml
cat <<EOF | tee ${CONF_DIR}/kube-proxy/kube-proxy-config.yaml
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: ${NODE_IP}
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: "${CONF_DIR}/kube-proxy/kubeconfig"
  qps: 5
configSyncPeriod: 15m0s
conntrack:
  max: 0
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: ${NODE_IP}:10256
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  excludeCIDRs: null
  minSyncPeriod: 0s
  scheduler: ""
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "iptables"
clusterCIDR: "${CNI_NET}"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
resourceContainer: /kube-proxy
udpIdleTimeout: 250ms
EOF

cat <<EOF | tee /etc/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-proxy \\
  --config=${CONF_DIR}/kube-proxy/kube-proxy-config.yaml \\
  --master=https://localhost:16443 \\
  --logtostderr=true \\
  --v=2

Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy
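kube-proxy exposes a plain-HTTP health endpoint on the address configured above, which is also what external load balancers typically probe:

curl -s http://${NODE_IP}:10256/healthz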
88
systemd/install_kube_scheduler.sh
Executable file
@@ -0,0 +1,88 @@
#!/bin/bash

echo ""
echo "... ] INSTALLING KUBE SCHEDULER [ ..."

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/../config

systemctl stop kube-scheduler

gzip -v -c -d $HOME/../blobs/kube-scheduler.gz > /usr/local/bin/kube-scheduler
chmod +x /usr/local/bin/kube-scheduler

#generate kube-scheduler's kubeconfig
rm ${CONF_DIR}/kube-scheduler/kubeconfig
TOKEN=`cat ${CA_DIR}/kube-scheduler.token`
kubectl config set-cluster ${CLUSTER_NAME}.virtual.local --certificate-authority=${CA_DIR}/ca.crt --embed-certs=true --server=https://${NODE_IP}:6443 --kubeconfig=${CONF_DIR}/kube-scheduler/kubeconfig
kubectl config set-credentials system:kube-scheduler --client-certificate=${CA_DIR}/kube-scheduler.crt --client-key=${CA_DIR}/kube-scheduler.key --embed-certs=true --token=$TOKEN --kubeconfig=${CONF_DIR}/kube-scheduler/kubeconfig
kubectl config set-context ${CLUSTER_NAME}.virtual.local --cluster=${CLUSTER_NAME}.virtual.local --user=system:kube-scheduler --kubeconfig=${CONF_DIR}/kube-scheduler/kubeconfig
kubectl config use-context ${CLUSTER_NAME}.virtual.local --kubeconfig=${CONF_DIR}/kube-scheduler/kubeconfig

#generate kube-scheduler's config file
rm ${CONF_DIR}/kube-scheduler/kube-scheduler-config.yaml
cat << EOF | tee ${CONF_DIR}/kube-scheduler/kube-scheduler-config.yaml
apiVersion: kubescheduler.config.k8s.io/v1beta1
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: "${CONF_DIR}/kube-scheduler/kubeconfig"
leaderElection:
  leaderElect: true
EOF

#cat << EOF | tee ${CONF_DIR}/kube-scheduler/kube-scheduler-config.yaml
#algorithmSource:
#  provider: DefaultProvider
#apiVersion: kubescheduler.config.k8s.io/v1alpha1
#bindTimeoutSeconds: 600
#clientConnection:
#  acceptContentTypes: ""
#  burst: 100
#  contentType: application/vnd.kubernetes.protobuf
#  kubeconfig: "${CONF_DIR}/kube-scheduler/kubeconfig"
#  qps: 50
#disablePreemption: false
#enableContentionProfiling: false
#enableProfiling: false
#failureDomains: kubernetes.io/hostname,failure-domain.beta.kubernetes.io/zone,failure-domain.beta.kubernetes.io/region
#hardPodAffinitySymmetricWeight: 1
#healthzBindAddress: 127.0.0.1:10251
#kind: KubeSchedulerConfiguration
#leaderElection:
#  leaderElect: true
#  leaseDuration: 15s
#  lockObjectName: kube-scheduler
#  lockObjectNamespace: kube-system
#  renewDeadline: 10s
#  resourceLock: endpoints
#  retryPeriod: 2s
#metricsBindAddress: 127.0.0.1:10251
#percentageOfNodesToScore: 50
#schedulerName: default-scheduler
#EOF

# Setup systemd unit
cat <<EOF | tee /etc/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
User=root
ExecStart=/usr/local/bin/kube-scheduler \\
  --config=${CONF_DIR}/kube-scheduler/kube-scheduler-config.yaml \\
  --bind-address=${NODE_IP} \\
  --secure-port=10259 \\
  --logtostderr=true \\
  --v=2

Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
107
systemd/install_kubelet.sh
Executable file
@@ -0,0 +1,107 @@
#!/bin/bash

echo ""
echo "... ] INSTALLING KUBELET WORKER [ ..."

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/../config

if [ -f $HOME/../config-coreapps ]; then
echo "config-coreapps file FOUND :)"
source $HOME/../config-coreapps
else
echo "config-coreapps file is missing."
exit 1
fi

systemctl stop kubelet

gzip -v -c -d $HOME/../blobs/kubelet.gz > /usr/local/bin/kubelet
chmod +x /usr/local/bin/kubelet

#generate kubelet's kubeconfig file
rm ${CONF_DIR}/kubelet/kubeconfig
TOKEN=`cat ${CA_DIR}/kubelet.token`
kubectl config set-cluster ${CLUSTER_NAME}.virtual.local --certificate-authority=${CA_DIR}/ca.crt --embed-certs=true --server=https://localhost:16443 --kubeconfig=${CONF_DIR}/kubelet/kubeconfig
kubectl config set-credentials kubelet --client-certificate=${CA_DIR}/kubelet.crt --client-key=${CA_DIR}/kubelet.key --embed-certs=true --token=$TOKEN --kubeconfig=${CONF_DIR}/kubelet/kubeconfig
kubectl config set-context ${CLUSTER_NAME}.virtual.local --cluster=${CLUSTER_NAME}.virtual.local --user=kubelet --kubeconfig=${CONF_DIR}/kubelet/kubeconfig
kubectl config use-context ${CLUSTER_NAME}.virtual.local --kubeconfig=${CONF_DIR}/kubelet/kubeconfig

#generate kubelet's config file
rm ${CONF_DIR}/kubelet/kubelet-config.yaml
cat <<EOF | tee ${CONF_DIR}/kubelet/kubelet-config.yaml
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: ${NODE_IP}
readOnlyPort: 0
authentication:
  anonymous:
    enabled: false
  webhook:
    enabled: true
  x509:
    clientCAFile: "${CA_DIR}/ca.crt"
authorization:
  mode: Webhook
tlsCertFile: "${CA_DIR}/kubelet.crt"
tlsPrivateKeyFile: "${CA_DIR}/kubelet.key"
clusterDomain: "cluster.local"
clusterDNS:
  - "172.18.0.10"
eventRecordQPS: 0
enableDebuggingHandlers: true
runtimeRequestTimeout: "15m"
staticPodPath: "${CONF_DIR}/manifests"
evictionHard:
  memory.available: "200Mi"
  nodefs.available: "10%"
  nodefs.inodesFree: "10%"
  imagefs.available: "15%"
EOF

cat <<EOF | tee /etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
ExecStart=/usr/local/bin/kubelet \\
  --config=${CONF_DIR}/kubelet/kubelet-config.yaml \\
  --kubeconfig=${CONF_DIR}/kubelet/kubeconfig \\
  --container-runtime=docker \\
  --container-runtime-endpoint=unix:///var/run/dockershim.sock \\
  --image-pull-progress-deadline=2m \\
  --network-plugin=cni \\
  --register-node=true \\
  --allowed-unsafe-sysctls 'net.*' \\
  --hostname-override=${NODE_NAME_SHORT} \\
  --logtostderr=true \\
  --v=2

Restart=on-failure
RestartSec=5
KillMode=process

[Install]
WantedBy=multi-user.target
EOF

#generate config.json inside kubelet dir to access private registries
mkdir -p /var/lib/kubelet
chmod 750 /var/lib/kubelet
CONF64=`echo -n "$REGISTRY_USER:$REGISTRY_PASS" | base64 -w0 `
echo '{
  "auths": {
    "k8x_registry": {
      "auth": "k8x_auth64"
    }
  },
  "HttpHeaders": {
    "User-Agent": "Docker-Client/18.09.6-ce (linux)"
  }
}' | sed -e "s/k8x_registry/${REGISTRY_SERVER}/g" | sed -e "s/k8x_auth64/${CONF64}/g" > /var/lib/kubelet/config.json

systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
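Two quick checks after the unit starts: the generated registry auth file must be valid JSON, and the node should be reachable through the local haproxy endpoint using its own kubeconfig (the Node authorizer permits a kubelet to read its own node object):

jq . /var/lib/kubelet/config.json
kubectl --kubeconfig=${CONF_DIR}/kubelet/kubeconfig get node ${NODE_NAME_SHORT}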
23
tools/SCRAM/scram_master_processes.sh
Executable file
@@ -0,0 +1,23 @@
#!/bin/bash

echo "Executing SCRAM on a master node..."

systemctl stop kube-apiserver.service
systemctl disable kube-apiserver.service

systemctl stop kube-scheduler.service
systemctl disable kube-scheduler.service

systemctl stop kube-controller-manager.service
systemctl disable kube-controller-manager.service

systemctl stop etcd.service
systemctl disable etcd.service

systemctl stop haproxy.service
systemctl disable haproxy.service

systemctl stop keepalived.service
systemctl disable keepalived.service

systemctl daemon-reload
17
tools/SCRAM/scram_system.sh
Executable file
@@ -0,0 +1,17 @@
#!/bin/bash

echo "Executing SCRAM on kube services..."

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/../../config

export KUBECONFIG=$ADMIN_KUBECONFIG

helm delete --purge ingress-nginx
kubectl delete ns ingress-nginx

kubectl -n kube-system delete -f yaml/metrics-server/components.yaml

kubectl -n kube-system delete -f yaml/coredns/coredns.yaml

kubectl -n kube-system delete -f yaml/calico/calico-etcd.yaml
14
tools/SCRAM/scram_worker_processes.sh
Executable file
@@ -0,0 +1,14 @@
#!/bin/bash

echo "Executing SCRAM on a worker node..."

systemctl stop kubelet.service
systemctl disable kubelet.service

systemctl stop kube-proxy.service
systemctl disable kube-proxy.service

systemctl stop docker.service
systemctl disable docker.service

systemctl daemon-reload
92
tools/add_service_account.sh
Executable file
@@ -0,0 +1,92 @@
#!/bin/bash
set -e
set -o pipefail

# Add user to k8s using service account, no RBAC (must create RBAC after this script)
if [[ -z "$1" ]] || [[ -z "$2" ]]; then
  echo "usage: $0 <service_account_name> <namespace>"
  exit 1
fi

SERVICE_ACCOUNT_NAME=$1
NAMESPACE="$2"
KUBECFG_FILE_NAME="/tmp/kube/k8s-${SERVICE_ACCOUNT_NAME}-${NAMESPACE}-conf"
TARGET_FOLDER="/tmp/kube"

create_target_folder() {
  echo -n "Creating target directory to hold files in ${TARGET_FOLDER}..."
  mkdir -p "${TARGET_FOLDER}"
  printf "done"
}

create_service_account() {
  echo -e "\\nCreating a service account: ${SERVICE_ACCOUNT_NAME} on namespace: ${NAMESPACE}"
  kubectl create sa "${SERVICE_ACCOUNT_NAME}" --namespace "${NAMESPACE}"
}

get_secret_name_from_service_account() {
  echo -e "\\nGetting secret of service account ${SERVICE_ACCOUNT_NAME}-${NAMESPACE}"
  SECRET_NAME=$(kubectl get sa "${SERVICE_ACCOUNT_NAME}" --namespace "${NAMESPACE}" -o json | jq -r '.secrets[].name')
  echo "Secret name: ${SECRET_NAME}"
}

extract_ca_crt_from_secret() {
  echo -e -n "\\nExtracting ca.crt from secret..."
  kubectl get secret "${SECRET_NAME}" --namespace "${NAMESPACE}" -o json | jq \
  -r '.data["ca.crt"]' | base64 -d > "${TARGET_FOLDER}/ca.crt"
  printf "done"
}

get_user_token_from_secret() {
  echo -e -n "\\nGetting user token from secret..."
  USER_TOKEN=$(kubectl get secret "${SECRET_NAME}" \
  --namespace "${NAMESPACE}" -o json | jq -r '.data["token"]' | base64 -d)
  printf "done"
}

set_kube_config_values() {
  context=$(kubectl config current-context)
  echo -e "\\nSetting current context to: $context"

  CLUSTER_NAME=$(kubectl config get-contexts "$context" | awk '{print $3}' | tail -n 1)
  echo "Cluster name: ${CLUSTER_NAME}"

  ENDPOINT=$(kubectl config view \
  -o jsonpath="{.clusters[?(@.name == \"${CLUSTER_NAME}\")].cluster.server}")
  echo "Endpoint: ${ENDPOINT}"

  # Set up the config
  echo -e "\\nPreparing k8s-${SERVICE_ACCOUNT_NAME}-${NAMESPACE}-conf"
  echo -n "Setting a cluster entry in kubeconfig..."
  kubectl config set-cluster "${CLUSTER_NAME}" \
  --kubeconfig="${KUBECFG_FILE_NAME}" \
  --server="${ENDPOINT}" \
  --certificate-authority="${TARGET_FOLDER}/ca.crt" \
  --embed-certs=true

  echo -n "Setting token credentials entry in kubeconfig..."
  kubectl config set-credentials \
  "${SERVICE_ACCOUNT_NAME}-${NAMESPACE}-${CLUSTER_NAME}" \
  --kubeconfig="${KUBECFG_FILE_NAME}" \
  --token="${USER_TOKEN}"

  echo -n "Setting a context entry in kubeconfig..."
  kubectl config set-context \
  "${SERVICE_ACCOUNT_NAME}-${NAMESPACE}-${CLUSTER_NAME}" \
  --kubeconfig="${KUBECFG_FILE_NAME}" \
  --cluster="${CLUSTER_NAME}" \
  --user="${SERVICE_ACCOUNT_NAME}-${NAMESPACE}-${CLUSTER_NAME}" \
  --namespace="${NAMESPACE}"

  echo -n "Setting the current-context in the kubeconfig file..."
  kubectl config use-context "${SERVICE_ACCOUNT_NAME}-${NAMESPACE}-${CLUSTER_NAME}" \
  --kubeconfig="${KUBECFG_FILE_NAME}"
}

create_target_folder
create_service_account
sleep 10
get_secret_name_from_service_account
extract_ca_crt_from_secret
get_user_token_from_secret
set_kube_config_values
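A hypothetical run for a deployer account in the staging namespace (the account and binding names below are illustrative); as the script's own comment notes, the account has no rights until an RBAC binding is created afterwards:

./add_service_account.sh deployer staging
kubectl -n staging create rolebinding deployer-edit --clusterrole=edit --serviceaccount=staging:deployer
kubectl --kubeconfig=/tmp/kube/k8s-deployer-staging-conf -n staging get pods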
18
tools/etcd_backup.sh
Executable file
@@ -0,0 +1,18 @@
#!/bin/bash

HOME=$( cd "$(dirname "$0")" && pwd )
source $HOME/../config

rm -fr /var/lib/etcd-backup
mkdir -p /var/lib/etcd-backup

#etcd v2
/usr/local/bin/etcdctl backup --data-dir /var/lib/etcd --backup-dir /var/lib/etcd-backup

echo ${CA_DIR}

#etcd v3
ETCDCTL_API=3 /usr/local/bin/etcdctl --debug --endpoints="https://${ETCD_1_IP}:2379,https://${ETCD_2_IP}:2379,https://${ETCD_3_IP}:2379" --cert="${CA_DIR}/etcd.crt" --key="${CA_DIR}/etcd.key" --cacert="${CA_DIR}/etcd-ca.crt" snapshot save /var/lib/etcd-backup/v3snapshot.db

##RESTORE
#ETCDCTL_API=3 /usr/local/bin/etcdctl snapshot restore v3snapshot.db --name=master03 --initial-cluster=master01=https://192.168.8.11:2380,master02=https://192.168.8.12:2380,master03=https://192.168.8.13:2380 --initial-advertise-peer-urls=https://192.168.8.13:2380 --initial-cluster-token="k8x-21b5f25de1"
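Before a v3 snapshot is relied on, its integrity can be checked; etcdctl reports the hash, revision and size:

ETCDCTL_API=3 /usr/local/bin/etcdctl snapshot status /var/lib/etcd-backup/v3snapshot.db -w table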
40
tools/etcd_member_replace.sh
Executable file
@@ -0,0 +1,40 @@
#!/bin/bash

# Usage: etcd_member_replace <member_id>
#
# This script removes a (faulty) member id from the existing etcd cluster and then defines a new id.
# The new etcd member should be started initially with an empty datadir
# and synced once, before we start it again from the systemd unit

HOME=$( cd "$(dirname "$0")" && pwd )
source "$HOME/../config"

systemctl stop etcd

/usr/local/bin/etcdctl --endpoints="https://${ETCD_1_IP}:2379,https://${ETCD_2_IP}:2379,https://${ETCD_3_IP}:2379" --cert ${CA_DIR}/etcd.crt --key ${CA_DIR}/etcd.key --cacert ${CA_DIR}/etcd-ca.crt member remove "$1"
sleep 10

/usr/local/bin/etcdctl --endpoints="https://${ETCD_1_IP}:2379,https://${ETCD_2_IP}:2379,https://${ETCD_3_IP}:2379" --cert ${CA_DIR}/etcd.crt --key ${CA_DIR}/etcd.key --cacert ${CA_DIR}/etcd-ca.crt member add ${NODE_NAME_SHORT} --peer-urls="https://${NODE_IP}:2380"

/usr/local/bin/etcdctl --endpoints="https://${ETCD_1_IP}:2379,https://${ETCD_2_IP}:2379,https://${ETCD_3_IP}:2379" --cert ${CA_DIR}/etcd.crt --key ${CA_DIR}/etcd.key --cacert ${CA_DIR}/etcd-ca.crt member list

#rm -fr /var/lib/etcd/member/

/usr/local/bin/etcd \
	--name=${NODE_NAME_SHORT} \
	--listen-client-urls=https://${NODE_IP}:2379,https://127.0.0.1:2379 \
	--advertise-client-urls=https://${NODE_IP}:2379 \
	--data-dir=/var/lib/etcd \
	--cert-file=${CA_DIR}/etcd.crt \
	--key-file=${CA_DIR}/etcd.key \
	--peer-cert-file=${CA_DIR}/etcd-peer.crt \
	--peer-key-file=${CA_DIR}/etcd-peer.key \
	--trusted-ca-file=${CA_DIR}/etcd-ca.crt \
	--peer-trusted-ca-file=${CA_DIR}/etcd-ca.crt \
	--peer-client-cert-auth \
	--listen-peer-urls=https://${NODE_IP}:2380 \
	--client-cert-auth \
	--initial-advertise-peer-urls=https://${NODE_IP}:2380 \
	--initial-cluster-token="k8x-21b5f25de1" \
	--initial-cluster=${ETCD_1_NAME}=https://${ETCD_1_IP}:2380,${ETCD_2_NAME}=https://${ETCD_2_IP}:2380,${ETCD_3_NAME}=https://${ETCD_3_IP}:2380 \
	--initial-cluster-state=existing
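A typical run (a sketch; the hex member id is hypothetical and is taken from the first column of `member list` on a healthy node):

/usr/local/bin/etcdctl --endpoints="https://${ETCD_1_IP}:2379" --cert ${CA_DIR}/etcd.crt --key ${CA_DIR}/etcd.key --cacert ${CA_DIR}/etcd-ca.crt member list
./etcd_member_replace.sh 8e9e05c52164694d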
13
tools/get_calico_info.sh
Executable file
@@ -0,0 +1,13 @@
#!/bin/bash

export ETCD_ENDPOINTS=https://master01:2379,https://master02:2379,https://master03:2379
export ETCD_KEY_FILE=/etc/kubernetes/pki/etcd.key
export ETCD_CERT_FILE=/etc/kubernetes/pki/etcd.crt
export ETCD_CA_CERT_FILE=/etc/kubernetes/pki/etcd-ca.crt

/usr/local/bin/calicoctl get profile -o wide
/usr/local/bin/calicoctl get bgppeers -o wide
/usr/local/bin/calicoctl get nodes -o wide
/usr/local/bin/calicoctl node status

#/usr/local/bin/calicoctl ipam show --ip 172.16.241.70
16
tools/get_kube_keys.sh
Executable file
@@ -0,0 +1,16 @@
#!/bin/bash

HOME=$( cd "$(dirname "$0")" && pwd )
source "$HOME/../config"

echo "press enter for api v2"
read
/usr/local/bin/etcdctl --endpoints="https://${ETCD_1_IP}:2379,https://${ETCD_2_IP}:2379,https://${ETCD_3_IP}:2379" --cert-file="${CA_DIR}/etcd.crt" --key-file="${CA_DIR}/etcd.key" --ca-file="${CA_DIR}/etcd-ca.crt" ls --recursive
echo ""
echo "press enter for api v3"
read
ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints="https://${ETCD_1_IP}:2379,https://${ETCD_2_IP}:2379,https://${ETCD_3_IP}:2379" --cert="${CA_DIR}/etcd.crt" --key="${CA_DIR}/etcd.key" --cacert="${CA_DIR}/etcd-ca.crt" get "" --prefix --keys-only

#/usr/local/bin/etcdctl --endpoints="https://192.168.8.11:2379" --cert-file="/etc/kubernetes/pki/etcd.crt" --key-file="/etc/kubernetes/pki/etcd.key" --ca-file="/etc/kubernetes/pki/etcd-ca.crt" ls --recursive
#ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints="https://192.168.8.11:2379" --cert="/etc/kubernetes/pki/etcd.crt" --key="/etc/kubernetes/pki/etcd.key" --cacert="/etc/kubernetes/pki/etcd-ca.crt" get / --prefix --keys-only
5
tools/k
Executable file
@@ -0,0 +1,5 @@
#!/bin/bash

export KUBECONFIG=/etc/kubernetes/kubeconfig

kubectl "$@"
50
tools/logs-proxy
Executable file
@@ -0,0 +1,50 @@
#!/bin/sh

if [ -z "$SSH_ORIGINAL_COMMAND" ] ; then
	echo ""
	echo "Usage: ssh logs <namespace> <container_name_pattern> <lines> [grep pattern]"
	echo "Example: ssh logs shared matches-front 100"
	echo ""
	exit
fi

NSPACE=`echo "$SSH_ORIGINAL_COMMAND" | awk '{print $1}'`

if [ "$NSPACE" = "kube-system" ] || [ "$NSPACE" = "monitoring" ]; then
	echo "Access denied."
	exit
fi

SERVICE=`echo "$SSH_ORIGINAL_COMMAND" | awk '{print $2}'`

if [ -z "$SERVICE" ]; then
	KUBECONFIG=/home/logs/k8s-admin-conf /usr/local/bin/kubectl -n ${NSPACE} get pods
	exit
fi

CONTAINER_NAME=`KUBECONFIG=/home/logs/k8s-admin-conf /usr/local/bin/kubectl -n ${NSPACE} get pods | grep "${SERVICE}" | awk '{print $1}'`
num_lines=$(echo "$CONTAINER_NAME" | wc -l)
if [ "$num_lines" -gt 1 ]; then
	echo "Specify exact container name from:"
	echo ""
	echo "$CONTAINER_NAME"
	echo ""
	echo "Usage: ssh logs ${NSPACE} $SERVICE-rnd123 <lines>"
	exit
fi
echo "$CONTAINER_NAME"

TAIL=`echo "$SSH_ORIGINAL_COMMAND" | awk '{print $3}'`
if [ -n "$TAIL" ] && [ "$TAIL" -eq "$TAIL" ] 2>/dev/null; then
	TAIL="--tail $TAIL"
else
	TAIL=''
fi

GREP_PATTERN=`echo "$SSH_ORIGINAL_COMMAND" | awk '{print $4}'`

if [ -n "$GREP_PATTERN" ]; then
	KUBECONFIG=/home/logs/k8s-admin-conf /usr/local/bin/kubectl -n ${NSPACE} logs --timestamps --follow $TAIL $CONTAINER_NAME --all-containers | grep -E "$GREP_PATTERN"
else
	KUBECONFIG=/home/logs/k8s-admin-conf /usr/local/bin/kubectl -n ${NSPACE} logs --timestamps --follow $TAIL $CONTAINER_NAME --all-containers
fi
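Like tools/tcp-proxy below, this script is meant to run as an SSH forced command for a dedicated user. A minimal authorized_keys sketch for the logs user (the install path is an assumption):

command="/usr/local/bin/logs-proxy",no-user-rc,no-x11-forwarding,no-agent-forwarding,no-pty ssh-rsa <KEY> user@host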
31
tools/memoryusage.sh
Executable file
@@ -0,0 +1,31 @@
#!/bin/bash

# This script reproduces what the kubelet does
# to calculate memory.available relative to root cgroup.

# current memory usage
memory_capacity_in_kb=$(cat /proc/meminfo | grep MemTotal | awk '{print $2}')
memory_capacity_in_bytes=$((memory_capacity_in_kb * 1024))
memory_usage_in_bytes=$(cat /sys/fs/cgroup/memory/memory.usage_in_bytes)
memory_total_inactive_file=$(cat /sys/fs/cgroup/memory/memory.stat | grep total_inactive_file | awk '{print $2}')

memory_working_set=${memory_usage_in_bytes}
if [ "$memory_working_set" -lt "$memory_total_inactive_file" ]; then
	memory_working_set=0
else
	memory_working_set=$((memory_usage_in_bytes - memory_total_inactive_file))
fi

memory_available_in_bytes=$((memory_capacity_in_bytes - memory_working_set))
memory_available_in_kb=$((memory_available_in_bytes / 1024))
memory_available_in_mb=$((memory_available_in_kb / 1024))

echo "memory.capacity_in_bytes $memory_capacity_in_bytes"
echo "memory.usage_in_bytes $memory_usage_in_bytes"
echo "memory.total_inactive_file $memory_total_inactive_file"
echo "memory.working_set $memory_working_set"
echo "memory.available_in_bytes $memory_available_in_bytes"
echo "memory.available_in_kb $memory_available_in_kb"
echo "memory.available_in_mb $memory_available_in_mb"
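A worked example with hypothetical numbers, following the script's formula:

# capacity 8 GiB = 8589934592 bytes, usage_in_bytes = 4294967296, total_inactive_file = 1073741824
# working_set = 4294967296 - 1073741824 = 3221225472 bytes
# available   = 8589934592 - 3221225472 = 5368709120 bytes (5 GiB)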
37
tools/tcp-proxy
Executable file
@@ -0,0 +1,37 @@
#!/bin/bash

# For each user who needs to use this script you may create the .authorized_keys file using the following pattern:
#command="/usr/local/bin/tcp-proxy",no-user-rc,no-x11-forwarding,no-agent-forwarding,no-pty,permitopen="127.0.0.1:23306",permitopen="127.0.0.1:21443" ssh-rsa <KEY> user@host

APP="$SSH_ORIGINAL_COMMAND"

case "$APP" in
	db)
		USERPORT=23306
		TARGETPORT=3306
		;;
	mssql)
		USERPORT=21443
		TARGETPORT=1433
		;;
	*)
		echo "Usage: ssh remotecon@master01.staging.example.com -L3306:127.0.0.1:23306 <SERVICE_NAME>"
		echo -e "Available services:\nmssql\ndb"
		exit
esac

export KUBECONFIG=/home/remotecon/k8s-admin-sa-staging-conf

SVC=`kubectl get svc $APP --output=go-template --template='{{.metadata.name}}'`
echo "Port forwarding $SVC:$TARGETPORT to 127.0.0.1:$USERPORT ..."

FWDPID=`ps ax | grep "svc\/$SVC" | awk '{print $1}'`
if [ -z "$FWDPID" ] ; then
	/usr/sbin/daemonize /usr/local/bin/kubectl port-forward svc/$SVC $USERPORT:$TARGETPORT
	FWDPID=`ps ax | grep "svc\/$SVC" | awk '{print $1}'`
	echo "Spawning new forwarder at pid $FWDPID."
else
	echo "Using the running forwarder at pid $FWDPID."
fi
echo "Press any key to end the session..."
read X
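Client-side usage sketch (the hostname comes from the usage string above and is illustrative): open the tunnel with the service name as the SSH command, then connect through the local forward:

ssh remotecon@master01.staging.example.com -L3306:127.0.0.1:23306 db
mysql -h 127.0.0.1 -P 3306 -u <user> -p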
528
yaml/calico/calico-etcd.yaml
Normal file
@@ -0,0 +1,528 @@
---
# Source: calico/templates/calico-etcd-secrets.yaml
# The following contains k8s Secrets for use with a TLS enabled etcd cluster.
# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: calico-etcd-secrets
  namespace: kube-system
data:
  # Populate the following with etcd TLS configuration if desired, but leave blank if
  # not using TLS for etcd.
  # The keys below should be uncommented and the values populated with the base64
  # encoded contents of each file that would be associated with the TLS data.
  # Example command for encoding a file contents: cat <file> | base64 -w 0
  # etcd-key: null
  # etcd-cert: null
  # etcd-ca: null
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # Configure this with the location of your etcd cluster.
  etcd_endpoints: "k8x_calico_etcd_endpoint"
  # If you're using TLS enabled etcd uncomment the following.
  # You must also populate the Secret below with these files.
  #etcd_ca: ""   # "/calico-secrets/etcd-ca"
  #etcd_cert: "" # "/calico-secrets/etcd-cert"
  #etcd_key: ""  # "/calico-secrets/etcd-key"
  etcd_ca: "/calico-secrets/etcd-ca"
  etcd_cert: "/calico-secrets/etcd-cert"
  etcd_key: "/calico-secrets/etcd-key"

  # Typha is disabled.
  typha_service_name: "none"
  # Configure the backend to use.
  calico_backend: "bird"

  # Configure the MTU to use
  veth_mtu: "1410"

  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "calico",
          "log_level": "info",
          "etcd_endpoints": "__ETCD_ENDPOINTS__",
          "etcd_key_file": "__ETCD_KEY_FILE__",
          "etcd_cert_file": "__ETCD_CERT_FILE__",
          "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
          "mtu": __CNI_MTU__,
          "ipam": {
              "type": "calico-ipam"
          },
          "policy": {
              "type": "k8s"
          },
          "kubernetes": {
              "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        }
      ]
    }

---
# Source: calico/templates/rbac.yaml

# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
rules:
  # Pods are monitored for changing labels.
  # The node controller monitors Kubernetes nodes.
  # Namespace and serviceaccount labels are used for policy.
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
      - serviceaccounts
    verbs:
      - watch
      - list
  # Watch for changes to Kubernetes NetworkPolicies.
  - apiGroups: ["networking.k8s.io"]
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-kube-controllers
subjects:
- kind: ServiceAccount
  name: calico-kube-controllers
  namespace: kube-system
---
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-node
rules:
  # The CNI plugin needs to get pods, nodes, and namespaces.
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - endpoints
      - services
    verbs:
      # Used to discover service IPs for advertisement.
      - watch
      - list
  - apiGroups: [""]
    resources:
      - nodes/status
    verbs:
      # Needed for clearing NodeNetworkUnavailable flag.
      - patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-node
subjects:
- kind: ServiceAccount
  name: calico-node
  namespace: kube-system

---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node
      annotations:
        # This, along with the CriticalAddonsOnly toleration below,
        # marks the pod as a critical add-on, ensuring it gets
        # priority scheduling and that its resources are reserved
        # if it ever gets evicted.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      nodeSelector:
        beta.kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        # Make sure calico-node gets scheduled on all nodes.
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      serviceAccountName: calico-node
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      priorityClassName: system-node-critical
      initContainers:
        # This container installs the CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: calico/cni:v3.8.4
          command: ["/install-cni.sh"]
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
            # The location of the etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # CNI MTU Config variable
            - name: CNI_MTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Prevents the container from sleeping forever.
            - name: SLEEP
              value: "false"
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
            - mountPath: /calico-secrets
              name: etcd-certs
          securityContext:
            privileged: true
        # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
        # to communicate with Felix over the Policy Sync API.
        - name: flexvol-driver
          image: calico/pod2daemon-flexvol:v3.8.4
          volumeMounts:
            - name: flexvol-driver-host
              mountPath: /host/driver
          securityContext:
            privileged: true
      containers:
        # Runs calico-node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: calico/node:v3.8.4
          env:
            # The location of the etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
            # Set noderef for node controller.
            - name: CALICO_K8S_NODE_REF
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Choose the backend to use.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              value: "k8s,bgp"
            # Auto-detect the BGP IP address.
            - name: IP
              value: "autodetect"
            # Enable IPIP
            - name: CALICO_IPV4POOL_IPIP
              value: "Always"
            # Set MTU for tunnel device used if ipip is enabled
            - name: FELIX_IPINIPMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # The default IPv4 pool to create on startup if none exists. Pod IPs will be
            # chosen from this range. Changing this value after installation will have
            # no effect. This should fall within `--cluster-cidr`.
            - name: CALICO_IPV4POOL_CIDR
              value: "k8x_calico_pool"
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Set Felix logging to "info"
            - name: FELIX_LOGSEVERITYSCREEN
              value: "info"
            - name: FELIX_HEALTHENABLED
              value: "true"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 250m
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9099
              host: localhost
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            exec:
              command:
              - /bin/calico-node
              - -bird-ready
              - -felix-ready
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /run/xtables.lock
              name: xtables-lock
              readOnly: false
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
            - mountPath: /calico-secrets
              name: etcd-certs
            - name: policysync
              mountPath: /var/run/nodeagent
      volumes:
        # Used by calico-node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        # Mount in the etcd TLS secrets with mode 400.
        # See https://kubernetes.io/docs/concepts/configuration/secret/
        - name: etcd-certs
          secret:
            secretName: calico-etcd-secrets
            defaultMode: 0400
        # Used to create per-pod Unix Domain Sockets
        - name: policysync
          hostPath:
            type: DirectoryOrCreate
            path: /var/run/nodeagent
        # Used to install Flex Volume Driver
        - name: flexvol-driver-host
          hostPath:
            type: DirectoryOrCreate
            path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-node
  namespace: kube-system

---
# Source: calico/templates/calico-kube-controllers.yaml

# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
spec:
  # The controllers can only have a single active instance.
  replicas: 1
  selector:
    matchLabels:
      k8s-app: calico-kube-controllers
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-kube-controllers
      namespace: kube-system
      labels:
        k8s-app: calico-kube-controllers
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      nodeSelector:
        beta.kubernetes.io/os: linux
      tolerations:
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      serviceAccountName: calico-kube-controllers
      priorityClassName: system-cluster-critical
      # The controllers must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      containers:
        - name: calico-kube-controllers
          image: calico/kube-controllers:v3.8.4
          env:
            # The location of the etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
            # Choose which controllers to run.
            - name: ENABLED_CONTROLLERS
              value: policy,namespace,serviceaccount,workloadendpoint,node
          volumeMounts:
            # Mount in the etcd TLS secrets.
            - mountPath: /calico-secrets
              name: etcd-certs
          readinessProbe:
            exec:
              command:
              - /usr/bin/check-status
              - -r
      volumes:
        # Mount in the etcd TLS secrets with mode 400.
        # See https://kubernetes.io/docs/concepts/configuration/secret/
        - name: etcd-certs
          secret:
            secretName: calico-etcd-secrets
            defaultMode: 0400

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-kube-controllers
  namespace: kube-system
---
# Source: calico/templates/calico-typha.yaml

---
# Source: calico/templates/configure-canal.yaml

---
# Source: calico/templates/kdd-crds.yaml
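The k8x_calico_etcd_endpoint and k8x_calico_pool values are k8x template placeholders rather than literal settings; presumably the deploy scripts substitute them from the config file before applying the manifest. A rough sketch of such a substitution (the variable names here are assumptions, not the actual deploy_system.sh logic):

sed -e "s#k8x_calico_etcd_endpoint#https://${ETCD_1_IP}:2379,https://${ETCD_2_IP}:2379,https://${ETCD_3_IP}:2379#g" \
    -e "s#k8x_calico_pool#${POD_CIDR}#g" \
    yaml/calico/calico-etcd.yaml | kubectl apply -f -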
52
yaml/calico/calicoctl-etcd.yaml
Normal file
@@ -0,0 +1,52 @@
# Calico Version v3.8.4
# https://docs.projectcalico.org/v3.8/releases#v3.8.4
# This manifest includes the following component versions:
#   calico/ctl:v3.8.4

apiVersion: v1
kind: Pod
metadata:
  name: calicoctl
  namespace: kube-system
spec:
  nodeSelector:
    beta.kubernetes.io/os: linux
  hostNetwork: true
  containers:
  - name: calicoctl
    image: calico/ctl:v3.8.4
    command: ["/bin/sh", "-c", "while true; do sleep 3600; done"]
    env:
    - name: ETCD_ENDPOINTS
      valueFrom:
        configMapKeyRef:
          name: calico-config
          key: etcd_endpoints
    # If you're using TLS enabled etcd uncomment the following.
    # Location of the CA certificate for etcd.
    # - name: ETCD_CA_CERT_FILE
    #   valueFrom:
    #     configMapKeyRef:
    #       name: calico-config
    #       key: etcd_ca
    # Location of the client key for etcd.
    # - name: ETCD_KEY_FILE
    #   valueFrom:
    #     configMapKeyRef:
    #       name: calico-config
    #       key: etcd_key
    # Location of the client certificate for etcd.
    # - name: ETCD_CERT_FILE
    #   valueFrom:
    #     configMapKeyRef:
    #       name: calico-config
    #       key: etcd_cert
    # volumeMounts:
    # - mountPath: /calico-secrets
    #   name: etcd-certs
  volumes:
    # If you're using TLS enabled etcd uncomment the following.
    # - name: etcd-certs
    #   secret:
    #     secretName: calico-etcd-secrets
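With this pod running, calicoctl commands can be executed in-cluster (a sketch following the upstream calicoctl-as-a-pod pattern):

kubectl exec -ti -n kube-system calicoctl -- /calicoctl get nodes -o wide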
402
yaml/calico/deprecated/calico-etcd.yaml
Normal file
@@ -0,0 +1,402 @@
# Calico Version v3.3.1
# https://docs.projectcalico.org/v3.3/releases#v3.3.1
# This manifest includes the following component versions:
#   calico/node:v3.3.1
#   calico/cni:v3.3.1
#   calico/kube-controllers:v3.3.1

# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # Configure this with the location of your etcd cluster.
  etcd_endpoints: "k8x_calico_etcd_endpoint"

  # If you're using TLS enabled etcd uncomment the following.
  # You must also populate the Secret below with these files.
  etcd_ca: "/calico-secrets/etcd-ca"
  etcd_cert: "/calico-secrets/etcd-cert"
  etcd_key: "/calico-secrets/etcd-key"
  # Configure the Calico backend to use.
  calico_backend: "bird"

  # Configure the MTU to use
  veth_mtu: "1410"

  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.0",
      "plugins": [
        {
          "type": "calico",
          "log_level": "warning",
          "etcd_endpoints": "__ETCD_ENDPOINTS__",
          "etcd_key_file": "__ETCD_KEY_FILE__",
          "etcd_cert_file": "__ETCD_CERT_FILE__",
          "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
          "mtu": __CNI_MTU__,
          "ipam": {
              "type": "calico-ipam"
          },
          "policy": {
              "type": "k8s"
          },
          "kubernetes": {
              "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        }
      ]
    }

---


# The following contains k8s Secrets for use with a TLS enabled etcd cluster.
# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: calico-etcd-secrets
  namespace: kube-system
data:
  # Populate the following files with etcd TLS configuration if desired, but leave blank if
  # not using TLS for etcd.
  # This self-hosted install expects three files with the following names. The values
  # should be base64 encoded strings of the entire contents of each file.
  # etcd-key: null
  # etcd-cert: null
  # etcd-ca: null

---

# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node
      annotations:
        # This, along with the CriticalAddonsOnly toleration below,
        # marks the pod as a critical add-on, ensuring it gets
        # priority scheduling and that its resources are reserved
        # if it ever gets evicted.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      nodeSelector:
        beta.kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        # Make sure calico-node gets scheduled on all nodes.
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      serviceAccountName: calico-node
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      containers:
        # Runs calico/node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: quay.io/calico/node:v3.3.1
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
            # Set noderef for node controller.
            - name: CALICO_K8S_NODE_REF
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Choose the backend to use.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              value: "k8s,bgp"
            # Auto-detect the BGP IP address.
            - name: IP
              value: "autodetect"
            # Enable IPIP
            - name: CALICO_IPV4POOL_IPIP
              value: "Always"
            # Set MTU for tunnel device used if ipip is enabled
            - name: FELIX_IPINIPMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # The default IPv4 pool to create on startup if none exists. Pod IPs will be
            # chosen from this range. Changing this value after installation will have
            # no effect. This should fall within `--cluster-cidr`.
            - name: CALICO_IPV4POOL_CIDR
              value: "k8x_calico_pool"
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Set Felix logging to "warning"
            - name: FELIX_LOGSEVERITYSCREEN
              value: "warning"
            - name: FELIX_HEALTHENABLED
              value: "true"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 250m
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9099
              host: localhost
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            exec:
              command:
              - /bin/calico-node
              - -bird-ready
              - -felix-ready
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /run/xtables.lock
              name: xtables-lock
              readOnly: false
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
            - mountPath: /calico-secrets
              name: etcd-certs
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: quay.io/calico/cni:v3.3.1
          command: ["/install-cni.sh"]
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
            # CNI MTU Config variable
            - name: CNI_MTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
            - mountPath: /calico-secrets
              name: etcd-certs
      volumes:
        # Used by calico/node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        # Mount in the etcd TLS secrets with mode 400.
        # See https://kubernetes.io/docs/concepts/configuration/secret/
        - name: etcd-certs
          secret:
            secretName: calico-etcd-secrets
            defaultMode: 0400
---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-node
  namespace: kube-system

---

# This manifest deploys the Calico Kubernetes controllers.
# See https://github.com/projectcalico/kube-controllers
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
  # The controllers can only have a single active instance.
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-kube-controllers
      namespace: kube-system
      labels:
        k8s-app: calico-kube-controllers
    spec:
      nodeSelector:
        beta.kubernetes.io/os: linux
      # The controllers must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      tolerations:
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      serviceAccountName: calico-kube-controllers
      containers:
        - name: calico-kube-controllers
          image: quay.io/calico/kube-controllers:v3.3.1
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
            # Choose which controllers to run.
            - name: ENABLED_CONTROLLERS
              value: policy,namespace,serviceaccount,workloadendpoint,node
          volumeMounts:
            # Mount in the etcd TLS secrets.
            - mountPath: /calico-secrets
              name: etcd-certs
          readinessProbe:
            exec:
              command:
              - /usr/bin/check-status
              - -r
      volumes:
        # Mount in the etcd TLS secrets with mode 400.
        # See https://kubernetes.io/docs/concepts/configuration/secret/
        - name: etcd-certs
          secret:
            secretName: calico-etcd-secrets
            defaultMode: 0400

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-kube-controllers
  namespace: kube-system
72
yaml/calico/deprecated/rbac.yaml
Normal file
@@ -0,0 +1,72 @@
# Calico Version v3.3.1
# https://docs.projectcalico.org/v3.3/releases#v3.3.1

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-kube-controllers
rules:
  - apiGroups:
    - ""
    - extensions
    resources:
      - pods
      - namespaces
      - networkpolicies
      - nodes
      - serviceaccounts
    verbs:
      - watch
      - list
  - apiGroups:
    - networking.k8s.io
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-kube-controllers
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-kube-controllers
subjects:
- kind: ServiceAccount
  name: calico-kube-controllers
  namespace: kube-system

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-node
rules:
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get

---

apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: calico-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-node
subjects:
- kind: ServiceAccount
  name: calico-node
  namespace: kube-system
572
yaml/calico/future/calico-etcd.yaml
Normal file
@@ -0,0 +1,572 @@
---
# Source: calico/templates/calico-etcd-secrets.yaml
# The following contains k8s Secrets for use with a TLS enabled etcd cluster.
# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: calico-etcd-secrets
  namespace: kube-system
data:
  # Populate the following with etcd TLS configuration if desired, but leave blank if
  # not using TLS for etcd.
  # The keys below should be uncommented and the values populated with the base64
  # encoded contents of each file that would be associated with the TLS data.
  # Example command for encoding a file contents: cat <file> | base64 -w 0
  # etcd-key: null
  # etcd-cert: null
  # etcd-ca: null
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # Configure this with the location of your etcd cluster.
  etcd_endpoints: "k8x_calico_etcd_endpoint"
  # If you're using TLS enabled etcd uncomment the following.
  # You must also populate the Secret below with these files.
  etcd_ca: "/calico-secrets/etcd-ca"
  etcd_cert: "/calico-secrets/etcd-cert"
  etcd_key: "/calico-secrets/etcd-key"
  # Typha is disabled.
  typha_service_name: "none"
  # Configure the backend to use.
  calico_backend: "bird"
  # Configure the MTU to use for workload interfaces and tunnels.
  # - If Wireguard is enabled, set to your network MTU - 60
  # - Otherwise, if VXLAN or BPF mode is enabled, set to your network MTU - 50
  # - Otherwise, if IPIP is enabled, set to your network MTU - 20
  # - Otherwise, if not using any encapsulation, set to your network MTU.
  veth_mtu: "1410"

  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "calico",
          "log_level": "info",
          "log_file_path": "/var/log/calico/cni/cni.log",
          "etcd_endpoints": "__ETCD_ENDPOINTS__",
          "etcd_key_file": "__ETCD_KEY_FILE__",
          "etcd_cert_file": "__ETCD_CERT_FILE__",
          "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
          "mtu": __CNI_MTU__,
          "ipam": {
              "type": "calico-ipam"
          },
          "policy": {
              "type": "k8s"
          },
          "kubernetes": {
              "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        },
        {
          "type": "bandwidth",
          "capabilities": {"bandwidth": true}
        }
      ]
    }

---
# Source: calico/templates/calico-kube-controllers-rbac.yaml

# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
rules:
  # Pods are monitored for changing labels.
  # The node controller monitors Kubernetes nodes.
  # Namespace and serviceaccount labels are used for policy.
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
      - serviceaccounts
    verbs:
      - watch
      - list
      - get
  # Watch for changes to Kubernetes NetworkPolicies.
  - apiGroups: ["networking.k8s.io"]
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-kube-controllers
subjects:
- kind: ServiceAccount
  name: calico-kube-controllers
  namespace: kube-system
---

---
# Source: calico/templates/calico-node-rbac.yaml
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-node
rules:
  # The CNI plugin needs to get pods, nodes, and namespaces.
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - endpoints
      - services
    verbs:
      # Used to discover service IPs for advertisement.
      - watch
      - list
  # Pod CIDR auto-detection on kubeadm needs access to config maps.
  - apiGroups: [""]
    resources:
      - configmaps
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - nodes/status
    verbs:
      # Needed for clearing NodeNetworkUnavailable flag.
      - patch

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-node
subjects:
- kind: ServiceAccount
  name: calico-node
  namespace: kube-system

---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        # Make sure calico-node gets scheduled on all nodes.
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      serviceAccountName: calico-node
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      priorityClassName: system-node-critical
      initContainers:
        # This container installs the CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: calico/cni:v3.16.5
          command: ["/opt/cni/bin/install"]
          envFrom:
          - configMapRef:
              # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
              name: kubernetes-services-endpoint
              optional: true
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
            # The location of the etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # CNI MTU Config variable
            - name: CNI_MTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Prevents the container from sleeping forever.
            - name: SLEEP
              value: "false"
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
            - mountPath: /calico-secrets
              name: etcd-certs
          securityContext:
            privileged: true
        # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
        # to communicate with Felix over the Policy Sync API.
        - name: flexvol-driver
          image: calico/pod2daemon-flexvol:v3.16.5
          volumeMounts:
            - name: flexvol-driver-host
              mountPath: /host/driver
          securityContext:
            privileged: true
      containers:
        # Runs calico-node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: calico/node:v3.16.5
          envFrom:
          - configMapRef:
              # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
              name: kubernetes-services-endpoint
              optional: true
          env:
            # The location of the etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
            # Set noderef for node controller.
            - name: CALICO_K8S_NODE_REF
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Choose the backend to use.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              value: "k8s,bgp"
            # Auto-detect the BGP IP address.
            - name: IP
              value: "autodetect"
            # Enable IPIP
            - name: CALICO_IPV4POOL_IPIP
              value: "Always"
            # Enable or Disable VXLAN on the default IP pool.
            - name: CALICO_IPV4POOL_VXLAN
              value: "Never"
            # Set MTU for tunnel device used if ipip is enabled
            - name: FELIX_IPINIPMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Set MTU for the VXLAN tunnel device.
            - name: FELIX_VXLANMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Set MTU for the Wireguard tunnel device.
            - name: FELIX_WIREGUARDMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # The default IPv4 pool to create on startup if none exists. Pod IPs will be
            # chosen from this range. Changing this value after installation will have
            # no effect. This should fall within `--cluster-cidr`.
            - name: CALICO_IPV4POOL_CIDR
              value: "k8x_calico_pool"
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Set Felix logging to "info"
            - name: FELIX_LOGSEVERITYSCREEN
              value: "info"
            - name: FELIX_HEALTHENABLED
              value: "true"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 250m
          livenessProbe:
            exec:
              command:
              - /bin/calico-node
              - -felix-live
              - -bird-live
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            exec:
              command:
              - /bin/calico-node
              - -felix-ready
              - -bird-ready
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /run/xtables.lock
              name: xtables-lock
              readOnly: false
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
            - mountPath: /calico-secrets
              name: etcd-certs
            - name: policysync
              mountPath: /var/run/nodeagent
            # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the
            # parent directory.
            - name: sysfs
              mountPath: /sys/fs/
              # Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host.
              # If the host is known to mount that filesystem already then Bidirectional can be omitted.
              mountPropagation: Bidirectional
      volumes:
        # Used by calico-node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
        - name: sysfs
          hostPath:
            path: /sys/fs/
            type: DirectoryOrCreate
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
|
||||||
|
path: /opt/cni/bin
|
||||||
|
- name: cni-net-dir
|
||||||
|
hostPath:
|
||||||
|
path: /etc/cni/net.d
|
||||||
|
# Mount in the etcd TLS secrets with mode 400.
|
||||||
|
# See https://kubernetes.io/docs/concepts/configuration/secret/
|
||||||
|
- name: etcd-certs
|
||||||
|
secret:
|
||||||
|
secretName: calico-etcd-secrets
|
||||||
|
defaultMode: 0400
|
||||||
|
# Used to create per-pod Unix Domain Sockets
|
||||||
|
- name: policysync
|
||||||
|
hostPath:
|
||||||
|
type: DirectoryOrCreate
|
||||||
|
path: /var/run/nodeagent
|
||||||
|
# Used to install Flex Volume Driver
|
||||||
|
- name: flexvol-driver-host
|
||||||
|
hostPath:
|
||||||
|
type: DirectoryOrCreate
|
||||||
|
path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
|
||||||
|
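Once this DaemonSet is applied, a quick way to confirm calico-node came up on every node is a plain kubectl sketch, assuming the admin kubeconfig is already exported and the manifest's k8s-app=calico-node pod label:

kubectl -n kube-system rollout status daemonset/calico-node
kubectl -n kube-system get pods -l k8s-app=calico-node -o wide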
---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-node
  namespace: kube-system

---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
spec:
  # The controllers can only have a single active instance.
  replicas: 1
  selector:
    matchLabels:
      k8s-app: calico-kube-controllers
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-kube-controllers
      namespace: kube-system
      labels:
        k8s-app: calico-kube-controllers
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      tolerations:
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      serviceAccountName: calico-kube-controllers
      priorityClassName: system-cluster-critical
      # The controllers must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      containers:
        - name: calico-kube-controllers
          image: calico/kube-controllers:v3.16.5
          env:
            # The location of the etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
            # Choose which controllers to run.
            - name: ENABLED_CONTROLLERS
              value: policy,namespace,serviceaccount,workloadendpoint,node
          volumeMounts:
            # Mount in the etcd TLS secrets.
            - mountPath: /calico-secrets
              name: etcd-certs
          readinessProbe:
            exec:
              command:
                - /usr/bin/check-status
                - -r
      volumes:
        # Mount in the etcd TLS secrets with mode 400.
        # See https://kubernetes.io/docs/concepts/configuration/secret/
        - name: etcd-certs
          secret:
            secretName: calico-etcd-secrets
            defaultMode: 0400

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-kube-controllers
  namespace: kube-system

---
# Source: calico/templates/calico-typha.yaml

---
# Source: calico/templates/configure-canal.yaml

---
# Source: calico/templates/kdd-crds.yaml
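CALICO_IPV4POOL_CIDR above carries the literal string "k8x_calico_pool", which only works as a template placeholder; presumably the deploy script substitutes the real pod CIDR from the cluster config before applying. A minimal sketch of such a substitution (the file name calico.yaml and the example CIDR 10.244.0.0/16 are illustrative assumptions, not taken from this commit):

sed 's|k8x_calico_pool|10.244.0.0/16|g' calico.yaml | kubectl apply -f -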
87
yaml/calico/rbac-etcd-calico.yaml
Normal file
@@ -0,0 +1,87 @@
---
# Source: calico/templates/rbac.yaml

# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
rules:
  # Pods are monitored for changing labels.
  # The node controller monitors Kubernetes nodes.
  # Namespace and serviceaccount labels are used for policy.
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
      - serviceaccounts
    verbs:
      - watch
      - list
  # Watch for changes to Kubernetes NetworkPolicies.
  - apiGroups: ["networking.k8s.io"]
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-kube-controllers
subjects:
  - kind: ServiceAccount
    name: calico-kube-controllers
    namespace: kube-system
---
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-node
rules:
  # The CNI plugin needs to get pods, nodes, and namespaces.
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - endpoints
      - services
    verbs:
      # Used to discover service IPs for advertisement.
      - watch
      - list
  - apiGroups: [""]
    resources:
      - nodes/status
    verbs:
      # Needed for clearing NodeNetworkUnavailable flag.
      - patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-node
subjects:
  - kind: ServiceAccount
    name: calico-node
    namespace: kube-system
25358
yaml/cert-manager/cert-manager.crds.yaml
Normal file
File diff suppressed because it is too large
20
yaml/cert-manager/letsencrypt-production-clusterissuer.yaml
Normal file
@@ -0,0 +1,20 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-production
spec:
  acme:
    # The ACME server URL
    server: https://acme-v02.api.letsencrypt.org/directory
    # Email address used for ACME registration
    email: k8x_acme_email
    # Name of a secret used to store the ACME account private key
    privateKeySecretRef:
      name: letsencrypt-sec-production
    # Enable the HTTP-01 challenge provider
    solvers:
      # An empty 'selector' means that this solver matches all domains
      - selector: {}
        http01:
          ingress:
            class: nginx
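For reference, an Ingress opts into this issuer through the standard cert-manager annotation; ingress-shim then solves HTTP-01 via the nginx class configured above and stores the certificate in the secret named by the Ingress tls section. A minimal sketch (ingress name and namespace are placeholders):

kubectl -n staging annotate ingress example-app cert-manager.io/cluster-issuer=letsencrypt-production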
25
yaml/cert-manager/letsencrypt-production-dns-clusterissuer.yaml
Normal file
@@ -0,0 +1,25 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-production-dns
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: k8x_acme_email

    # Name of a secret used to store the ACME account private key
    privateKeySecretRef:
      name: letsencrypt-sec-production-dns

    # ACME DNS-01 provider configurations
    solvers:
      # An empty 'selector' means that this solver matches all domains
      - selector: {}
        dns01:
          cloudflare:
            email: k8x_acme_email
            # !! Remember to create a k8s secret before
            # kubectl create secret generic cloudflare-api-key-secret
            apiKeySecretRef:
              name: cf-api-secret
              key: cf-api-key
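The reminder comment above trails off mid-command, and the name it mentions (cloudflare-api-key-secret) does not match the cf-api-secret/cf-api-key pair the solver actually references. A sketch of a command that does match (the namespace is assumed to be the one cert-manager reads cluster-scoped issuer credentials from, and the literal value is a placeholder):

kubectl -n cert-manager create secret generic cf-api-secret --from-literal=cf-api-key='<cloudflare-global-api-key>'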
20
yaml/cert-manager/letsencrypt-staging-clusterissuer.yaml
Normal file
@@ -0,0 +1,20 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-staging
spec:
  acme:
    # The ACME server URL
    server: https://acme-staging-v02.api.letsencrypt.org/directory
    # Email address used for ACME registration
    email: k8x_acme_email
    # Name of a secret used to store the ACME account private key
    privateKeySecretRef:
      name: letsencrypt-sec-staging
    # Enable the HTTP-01 challenge provider
    solvers:
      # An empty 'selector' means that this solver matches all domains
      - selector: {}
        http01:
          ingress:
            class: nginx
25
yaml/cert-manager/letsencrypt-staging-dns-clusterissuer.yaml
Normal file
@@ -0,0 +1,25 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-staging-dns
spec:
  acme:
    server: https://acme-staging-v02.api.letsencrypt.org/directory
    email: k8x_acme_email

    # Name of a secret used to store the ACME account private key
    privateKeySecretRef:
      name: letsencrypt-sec-staging-dns

    # ACME DNS-01 provider configurations
    solvers:
      # An empty 'selector' means that this solver matches all domains
      - selector: {}
        dns01:
          cloudflare:
            email: k8x_acme_email
            # !! Remember to create a k8s secret before
            # kubectl create secret generic cloudflare-api-key-secret
            apiKeySecretRef:
              name: cf-api-secret
              key: cf-api-key
354
yaml/cert-manager/values.yaml
Normal file
@@ -0,0 +1,354 @@
# Default values for cert-manager.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
  ## Reference to one or more secrets to be used when pulling images
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ##
  imagePullSecrets: []
  # - name: "image-pull-secret"

  # Optional priority class to be used for the cert-manager pods
  priorityClassName: ""
  rbac:
    create: true

  podSecurityPolicy:
    enabled: false
    useAppArmor: true

  # Set the verbosity of cert-manager. Range of 0 - 6 with 6 being the most verbose.
  logLevel: 2

  leaderElection:
    # Override the namespace used to store the ConfigMap for leader election
    namespace: "kube-system"

installCRDs: false

replicaCount: 1

strategy: {}
  # type: RollingUpdate
  # rollingUpdate:
  #   maxSurge: 0
  #   maxUnavailable: 1

# Comma separated list of feature gates that should be enabled on the
# controller pod.
featureGates: ""

image:
  repository: quay.io/jetstack/cert-manager-controller
  # You can manage a registry with
  # registry: quay.io
  # repository: jetstack/cert-manager-controller

  # Override the image tag to deploy by setting this variable.
  # If no value is set, the chart's appVersion will be used.
  # tag: canary

  # Setting a digest will override any tag
  # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
  pullPolicy: IfNotPresent

# Override the namespace used to store DNS provider credentials etc. for ClusterIssuer
# resources. By default, the same namespace as cert-manager is deployed within is
# used. This namespace will not be automatically created by the Helm chart.
clusterResourceNamespace: ""

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  # name: ""
  # Optional additional annotations to add to the controller's ServiceAccount
  # annotations: {}

# Optional additional arguments
extraArgs: []
  # Use this flag to set a namespace that cert-manager will use to store
  # supporting resources required for each ClusterIssuer (default is kube-system)
  # - --cluster-resource-namespace=kube-system
  # When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted
  # - --enable-certificate-owner-ref=true

extraEnv: []
# - name: SOME_VAR
#   value: 'some value'

resources: {}
  # requests:
  #   cpu: 10m
  #   memory: 32Mi

# Pod Security Context
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext: {}
# legacy securityContext parameter format: if enabled is set to true, only fsGroup and runAsUser are supported
# securityContext:
#   enabled: false
#   fsGroup: 1001
#   runAsUser: 1001
# to support additional securityContext parameters, omit the `enabled` parameter and simply specify the parameters
# you want to set, e.g.
# securityContext:
#   fsGroup: 1000
#   runAsUser: 1000
#   runAsNonRoot: true

# Container Security Context to be set on the controller component container
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
containerSecurityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true


volumes: []

volumeMounts: []

# Optional additional annotations to add to the controller Deployment
# deploymentAnnotations: {}

# Optional additional annotations to add to the controller Pods
# podAnnotations: {}

podLabels: {}

# Optional DNS settings, useful if you have a public and private DNS zone for
# the same domain on Route 53. What follows is an example of ensuring
# cert-manager can access an ingress or DNS TXT records at all times.
# NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for
# the cluster to work.
# podDnsPolicy: "None"
# podDnsConfig:
#   nameservers:
#     - "1.1.1.1"
#     - "8.8.8.8"

nodeSelector: {}

ingressShim: {}
  # defaultIssuerName: ""
  # defaultIssuerKind: ""
  # defaultIssuerGroup: ""

prometheus:
  enabled: true
  servicemonitor:
    enabled: false
    prometheusInstance: default
    targetPort: 9402
    path: /metrics
    interval: 60s
    scrapeTimeout: 30s
    labels: {}

# Use these variables to configure the HTTP_PROXY environment variables
# http_proxy: "http://proxy:8080"
# https_proxy: "https://proxy:8080"
# no_proxy: 127.0.0.1,localhost

# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
# for example:
#   affinity:
#     nodeAffinity:
#      requiredDuringSchedulingIgnoredDuringExecution:
#        nodeSelectorTerms:
#        - matchExpressions:
#          - key: foo.bar.com/role
#            operator: In
#            values:
#            - master
affinity: {}

# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
# for example:
#   tolerations:
#   - key: foo.bar.com/role
#     operator: Equal
#     value: master
#     effect: NoSchedule
tolerations: []

webhook:
  replicaCount: 1
  timeoutSeconds: 10

  strategy: {}
    # type: RollingUpdate
    # rollingUpdate:
    #   maxSurge: 0
    #   maxUnavailable: 1

  securityContext: {}

  # Container Security Context to be set on the webhook component container
  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  containerSecurityContext: {}
    # capabilities:
    #   drop:
    #   - ALL
    # readOnlyRootFilesystem: true
    # runAsNonRoot: true

  # Optional additional annotations to add to the webhook Deployment
  # deploymentAnnotations: {}

  # Optional additional annotations to add to the webhook Pods
  # podAnnotations: {}

  # Optional additional annotations to add to the webhook MutatingWebhookConfiguration
  # mutatingWebhookConfigurationAnnotations: {}

  # Optional additional annotations to add to the webhook ValidatingWebhookConfiguration
  # validatingWebhookConfigurationAnnotations: {}

  # Optional additional arguments for webhook
  extraArgs: []

  resources: {}
    # requests:
    #   cpu: 10m
    #   memory: 32Mi

  ## Liveness and readiness probe values
  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
  ##
  livenessProbe:
    failureThreshold: 3
    initialDelaySeconds: 60
    periodSeconds: 10
    successThreshold: 1
    timeoutSeconds: 1
  readinessProbe:
    failureThreshold: 3
    initialDelaySeconds: 5
    periodSeconds: 5
    successThreshold: 1
    timeoutSeconds: 1

  nodeSelector: {}

  affinity: {}

  tolerations: []

  # Optional additional labels to add to the Webhook Pods
  podLabels: {}

  image:
    repository: quay.io/jetstack/cert-manager-webhook
    # You can manage a registry with
    # registry: quay.io
    # repository: jetstack/cert-manager-webhook

    # Override the image tag to deploy by setting this variable.
    # If no value is set, the chart's appVersion will be used.
    # tag: canary

    # Setting a digest will override any tag
    # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20

    pullPolicy: IfNotPresent

  serviceAccount:
    # Specifies whether a service account should be created
    create: true
    # The name of the service account to use.
    # If not set and create is true, a name is generated using the fullname template
    # name: ""
    # Optional additional annotations to add to the controller's ServiceAccount
    # annotations: {}

  # The port that the webhook should listen on for requests.
  # In GKE private clusters, by default kubernetes apiservers are allowed to
  # talk to the cluster nodes only on 443 and 10250. so configuring
  # securePort: 10250, will work out of the box without needing to add firewall
  # rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000
  securePort: 10260

  # Specifies if the webhook should be started in hostNetwork mode.
  #
  # Required for use in some managed kubernetes clusters (such as AWS EKS) with custom
  # CNI (such as calico), because control-plane managed by AWS cannot communicate
  # with pods' IP CIDR and admission webhooks are not working
  #
  # Since the default port for the webhook conflicts with kubelet on the host
  # network, `webhook.securePort` should be changed to an available port if
  # running in hostNetwork mode.
  hostNetwork: true

cainjector:
  enabled: true
  replicaCount: 1

  strategy: {}
    # type: RollingUpdate
    # rollingUpdate:
    #   maxSurge: 0
    #   maxUnavailable: 1

  securityContext: {}

  # Container Security Context to be set on the cainjector component container
  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  containerSecurityContext: {}
    # capabilities:
    #   drop:
    #   - ALL
    # readOnlyRootFilesystem: true
    # runAsNonRoot: true


  # Optional additional annotations to add to the cainjector Deployment
  # deploymentAnnotations: {}

  # Optional additional annotations to add to the cainjector Pods
  # podAnnotations: {}

  # Optional additional arguments for cainjector
  extraArgs: []

  resources: {}
    # requests:
    #   cpu: 10m
    #   memory: 32Mi

  nodeSelector: {}

  affinity: {}

  tolerations: []

  # Optional additional labels to add to the CA Injector Pods
  podLabels: {}

  image:
    repository: quay.io/jetstack/cert-manager-cainjector
    # You can manage a registry with
    # registry: quay.io
    # repository: jetstack/cert-manager-cainjector

    # Override the image tag to deploy by setting this variable.
    # If no value is set, the chart's appVersion will be used.
    # tag: canary

    # Setting a digest will override any tag
    # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20

    pullPolicy: IfNotPresent

  serviceAccount:
    # Specifies whether a service account should be created
    create: true
    # The name of the service account to use.
    # If not set and create is true, a name is generated using the fullname template
    # name: ""
    # Optional additional annotations to add to the controller's ServiceAccount
    # annotations: {}
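These values feed the upstream jetstack/cert-manager chart, and with installCRDs: false the CRDs file above must be applied separately. A sketch of the sequence (release name and namespace are assumptions; with Helm v2, which the Tiller RBAC file below suggests, the release is named with --name instead):

kubectl apply -f yaml/cert-manager/cert-manager.crds.yaml
helm repo add jetstack https://charts.jetstack.io
helm install cert-manager jetstack/cert-manager --namespace cert-manager -f yaml/cert-manager/values.yaml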
192
yaml/coredns/coredns.yaml
Normal file
@@ -0,0 +1,192 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
  - apiGroups:
    - ""
    resources:
    - endpoints
    - services
    - pods
    - namespaces
    verbs:
    - list
    - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . 1.1.1.1 8.8.8.8 {
            max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  # Upstream leaves replicas unspecified (defaulting to 1, and tunable in real
  # time if DNS horizontal auto-scaling is turned on); it is pinned to 6 here.
  replicas: 6
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                  - key: k8s-app
                    operator: In
                    values: ["kube-dns"]
              topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: coredns/coredns:1.7.0
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 172.18.0.10
  ports:
    - name: dns
      port: 53
      protocol: UDP
    - name: dns-tcp
      port: 53
      protocol: TCP
    - name: metrics
      port: 9153
      protocol: TCP
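The Service pins the cluster DNS address to 172.18.0.10; every kubelet must be started with a matching cluster-dns setting or pods get a resolv.conf that points nowhere. A quick end-to-end check (image choice is arbitrary):

kubectl run -it --rm dnstest --image=busybox:1.32 --restart=Never -- nslookup kubernetes.default.svc.cluster.local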
18
yaml/helm/helm-rbac-config.yaml
Normal file
@@ -0,0 +1,18 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tiller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: tiller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: tiller
    namespace: kube-system
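This binding grants cluster-admin to Helm v2's Tiller; assuming Helm v2 is in use, the server side is then typically set up with:

kubectl apply -f yaml/helm/helm-rbac-config.yaml
helm init --service-account tiller

Note that rbac.authorization.k8s.io/v1beta1 was removed in Kubernetes v1.22, so on newer clusters the ClusterRoleBinding needs apiVersion: rbac.authorization.k8s.io/v1 (and Helm v3 drops Tiller entirely).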
696
yaml/ingress-nginx/values-3.7.1.yaml
Normal file
@@ -0,0 +1,696 @@
## nginx configuration
## Ref: https://github.com/kubernetes/ingress-nginx/blob/master/controllers/nginx/configuration.md
##
controller:
  image:
    repository: k8s.gcr.io/ingress-nginx/controller
    tag: "v0.40.2"
    digest: sha256:e6019e536cfb921afb99408d5292fa88b017c49dd29d05fc8dbc456aa770d590
    pullPolicy: IfNotPresent
    # www-data -> uid 101
    runAsUser: 101
    allowPrivilegeEscalation: true

  # Configures the ports the nginx-controller listens on
  containerPort:
    http: 80
    https: 443

  # Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/
  #config: {}
  config:
    use-http2: "false"
    use-proxy-protocol: "false"
    client-header-buffer-size: "8k"
    large-client-header-buffers: "4 16k"
    use-forwarded-headers: "true"
    use-geoip: "true"
    use-geoip2: "true"

  ## Annotations to be added to the controller config configuration configmap
  ##
  configAnnotations: {}

  # Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers
  #proxySetHeaders: {}
  proxySetHeaders:
    X-Country-Code: $geoip_country_code

  # Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers
  addHeaders: {}

  # Optionally customize the pod dnsConfig.
  dnsConfig: {}

  # Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'.
  # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller
  # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet.
  #dnsPolicy: ClusterFirst
  dnsPolicy: ClusterFirstWithHostNet

  # Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network
  # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply
  reportNodeInternalIp: false

  # Required for use with CNI based kubernetes installations (such as ones set up by kubeadm),
  # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920
  # is merged
  #hostNetwork: false
  hostNetwork: true

  ## Use host ports 80 and 443
  ## Disabled by default
  ##
  hostPort:
    enabled: false
    ports:
      http: 80
      https: 443

  ## Election ID to use for status update
  ##
  electionID: ingress-controller-leader

  ## Name of the ingress class to route through this controller
  ##
  ingressClass: nginx

  # labels to add to the pod container metadata
  podLabels: {}
  #  key: value

  ## Security Context policies for controller pods
  ##
  podSecurityContext: {}

  ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for
  ## notes on enabling and using sysctls
  ###
  sysctls: {}
  # sysctls:
  #   "net.core.somaxconn": "8192"

  ## Allows customization of the source of the IP address or FQDN to report
  ## in the ingress status field. By default, it reads the information provided
  ## by the service. If disabled, the status field reports the IP address of the
  ## node or nodes where an ingress controller pod is running.
publishService:
|
||||||
|
enabled: true
|
||||||
|
## Allows overriding of the publish service to bind to
|
||||||
|
## Must be <namespace>/<service_name>
|
||||||
|
##
|
||||||
|
pathOverride: ""
|
||||||
|
|
||||||
|
## Limit the scope of the controller
|
||||||
|
##
|
||||||
|
scope:
|
||||||
|
enabled: false
|
||||||
|
namespace: "" # defaults to .Release.Namespace
|
||||||
|
|
||||||
|
## Allows customization of the configmap / nginx-configmap namespace
|
||||||
|
##
|
||||||
|
configMapNamespace: "" # defaults to .Release.Namespace
|
||||||
|
|
||||||
|
## Allows customization of the tcp-services-configmap
|
||||||
|
##
|
||||||
|
tcp:
|
||||||
|
configMapNamespace: "" # defaults to .Release.Namespace
|
||||||
|
## Annotations to be added to the tcp config configmap
|
||||||
|
annotations: {}
|
||||||
|
|
||||||
|
## Allows customization of the udp-services-configmap
|
||||||
|
##
|
||||||
|
udp:
|
||||||
|
configMapNamespace: "" # defaults to .Release.Namespace
|
||||||
|
## Annotations to be added to the udp config configmap
|
||||||
|
annotations: {}
|
||||||
|
|
||||||
|
## Additional command line arguments to pass to nginx-ingress-controller
|
||||||
|
## E.g. to specify the default SSL certificate you can use
|
||||||
|
## extraArgs:
|
||||||
|
## default-ssl-certificate: "<namespace>/<secret_name>"
|
||||||
|
extraArgs: {}
|
||||||
|
|
||||||
|
## Additional environment variables to set
|
||||||
|
extraEnvs: []
|
||||||
|
# extraEnvs:
|
||||||
|
# - name: FOO
|
||||||
|
# valueFrom:
|
||||||
|
# secretKeyRef:
|
||||||
|
# key: FOO
|
||||||
|
# name: secret-resource
|
||||||
|
|
||||||
|
## DaemonSet or Deployment
|
||||||
|
##
|
||||||
|
#kind: Deployment
|
||||||
|
kind: DaemonSet
|
||||||
|
|
||||||
|
## Annotations to be added to the controller Deployment or DaemonSet
|
||||||
|
##
|
||||||
|
annotations: {}
|
||||||
|
# keel.sh/pollSchedule: "@every 60m"
|
||||||
|
|
||||||
|
## Labels to be added to the controller Deployment or DaemonSet
|
||||||
|
##
|
||||||
|
labels: {}
|
||||||
|
# keel.sh/policy: patch
|
||||||
|
# keel.sh/trigger: poll
|
||||||
|
|
||||||
|
|
||||||
|
# The update strategy to apply to the Deployment or DaemonSet
|
||||||
|
##
|
||||||
|
updateStrategy: {}
|
||||||
|
# rollingUpdate:
|
||||||
|
# maxUnavailable: 1
|
||||||
|
# type: RollingUpdate
|
||||||
|
|
||||||
|
# minReadySeconds to avoid killing pods before we are ready
|
||||||
|
##
|
||||||
|
minReadySeconds: 0
|
||||||
|
|
||||||
|
|
||||||
|
## Node tolerations for server scheduling to nodes with taints
|
||||||
|
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||||
|
##
|
||||||
|
tolerations: []
|
||||||
|
# - key: "key"
|
||||||
|
# operator: "Equal|Exists"
|
||||||
|
# value: "value"
|
||||||
|
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
|
||||||
|
|
||||||
|
## Affinity and anti-affinity
|
||||||
|
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
||||||
|
##
|
||||||
|
affinity: {}
|
||||||
|
# # An example of preferred pod anti-affinity, weight is in the range 1-100
|
||||||
|
# podAntiAffinity:
|
||||||
|
# preferredDuringSchedulingIgnoredDuringExecution:
|
||||||
|
# - weight: 100
|
||||||
|
# podAffinityTerm:
|
||||||
|
# labelSelector:
|
||||||
|
# matchExpressions:
|
||||||
|
# - key: app.kubernetes.io/name
|
||||||
|
# operator: In
|
||||||
|
# values:
|
||||||
|
# - ingress-nginx
|
||||||
|
# - key: app.kubernetes.io/instance
|
||||||
|
# operator: In
|
||||||
|
# values:
|
||||||
|
# - ingress-nginx
|
||||||
|
# - key: app.kubernetes.io/component
|
||||||
|
# operator: In
|
||||||
|
# values:
|
||||||
|
# - controller
|
||||||
|
# topologyKey: kubernetes.io/hostname
|
||||||
|
|
||||||
|
# # An example of required pod anti-affinity
|
||||||
|
# podAntiAffinity:
|
||||||
|
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||||
|
# - labelSelector:
|
||||||
|
# matchExpressions:
|
||||||
|
# - key: app.kubernetes.io/name
|
||||||
|
# operator: In
|
||||||
|
# values:
|
||||||
|
# - ingress-nginx
|
||||||
|
# - key: app.kubernetes.io/instance
|
||||||
|
# operator: In
|
||||||
|
# values:
|
||||||
|
# - ingress-nginx
|
||||||
|
# - key: app.kubernetes.io/component
|
||||||
|
# operator: In
|
||||||
|
# values:
|
||||||
|
# - controller
|
||||||
|
# topologyKey: "kubernetes.io/hostname"
|
||||||
|
|
||||||
|
## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in.
|
||||||
|
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
|
||||||
|
##
|
||||||
|
topologySpreadConstraints: []
|
||||||
|
# - maxSkew: 1
|
||||||
|
# topologyKey: failure-domain.beta.kubernetes.io/zone
|
||||||
|
# whenUnsatisfiable: DoNotSchedule
|
||||||
|
# labelSelector:
|
||||||
|
# matchLabels:
|
||||||
|
# app.kubernetes.io/instance: ingress-nginx-internal
|
||||||
|
|
||||||
|
## terminationGracePeriodSeconds
|
||||||
|
## wait up to five minutes for the drain of connections
|
||||||
|
##
|
||||||
|
terminationGracePeriodSeconds: 300
|
||||||
|
|
||||||
|
## Node labels for controller pod assignment
|
||||||
|
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
|
||||||
|
##
|
||||||
|
nodeSelector:
|
||||||
|
kubernetes.io/os: linux
|
||||||
|
|
||||||
|
## Liveness and readiness probe values
|
||||||
|
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
|
||||||
|
##
|
||||||
|
livenessProbe:
|
||||||
|
failureThreshold: 5
|
||||||
|
initialDelaySeconds: 10
|
||||||
|
periodSeconds: 10
|
||||||
|
successThreshold: 1
|
||||||
|
timeoutSeconds: 1
|
||||||
|
port: 10254
|
||||||
|
readinessProbe:
|
||||||
|
failureThreshold: 3
|
||||||
|
initialDelaySeconds: 10
|
||||||
|
periodSeconds: 10
|
||||||
|
successThreshold: 1
|
||||||
|
timeoutSeconds: 1
|
||||||
|
port: 10254
|
||||||
|
|
||||||
|
# Path of the health check endpoint. All requests received on the port defined by
|
||||||
|
# the healthz-port parameter are forwarded internally to this path.
|
||||||
|
healthCheckPath: "/healthz"
|
||||||
|
|
||||||
|
## Annotations to be added to controller pods
|
||||||
|
##
|
||||||
|
podAnnotations: {}
|
||||||
|
|
||||||
|
replicaCount: 1
|
||||||
|
|
||||||
|
minAvailable: 1
|
||||||
|
|
||||||
|
# Define requests resources to avoid probe issues due to CPU utilization in busy nodes
|
||||||
|
# ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903
|
||||||
|
# Ideally, there should be no limits.
|
||||||
|
# https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/
|
||||||
|
resources:
|
||||||
|
# limits:
|
||||||
|
# cpu: 100m
|
||||||
|
# memory: 90Mi
|
||||||
|
requests:
|
||||||
|
cpu: 100m
|
||||||
|
memory: 90Mi
|
||||||
|
|
||||||
|
autoscaling:
|
||||||
|
enabled: false
|
||||||
|
minReplicas: 1
|
||||||
|
maxReplicas: 11
|
||||||
|
targetCPUUtilizationPercentage: 50
|
||||||
|
targetMemoryUtilizationPercentage: 50
|
||||||
|
|
||||||
|
autoscalingTemplate: []
|
||||||
|
# Custom or additional autoscaling metrics
|
||||||
|
# ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics
|
||||||
|
# - type: Pods
|
||||||
|
# pods:
|
||||||
|
# metric:
|
||||||
|
# name: nginx_ingress_controller_nginx_process_requests_total
|
||||||
|
# target:
|
||||||
|
# type: AverageValue
|
||||||
|
# averageValue: 10000m
|
||||||
|
|
||||||
|
## Enable mimalloc as a drop-in replacement for malloc.
|
||||||
|
## ref: https://github.com/microsoft/mimalloc
|
||||||
|
##
|
||||||
|
enableMimalloc: true
|
||||||
|
|
||||||
|
## Override NGINX template
|
||||||
|
customTemplate:
|
||||||
|
configMapName: ""
|
||||||
|
configMapKey: ""
|
||||||
|
|
||||||
|
service:
|
||||||
|
enabled: true
|
||||||
|
|
||||||
|
annotations: {}
|
||||||
|
labels: {}
|
||||||
|
# clusterIP: ""
|
||||||
|
|
||||||
|
## List of IP addresses at which the controller services are available
|
||||||
|
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
|
||||||
|
##
|
||||||
|
externalIPs: []
|
||||||
|
|
||||||
|
# loadBalancerIP: ""
|
||||||
|
loadBalancerSourceRanges: []
|
||||||
|
|
||||||
|
enableHttp: true
|
||||||
|
enableHttps: true
|
||||||
|
|
||||||
|
## Set external traffic policy to: "Local" to preserve source IP on
|
||||||
|
## providers supporting it
|
||||||
|
## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
|
||||||
|
# externalTrafficPolicy: ""
|
||||||
|
|
||||||
|
# Must be either "None" or "ClientIP" if set. Kubernetes will default to "None".
|
||||||
|
# Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
|
||||||
|
# sessionAffinity: ""
|
||||||
|
|
||||||
|
# specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified,
|
||||||
|
# the service controller allocates a port from your cluster’s NodePort range.
|
||||||
|
# Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
|
||||||
|
# healthCheckNodePort: 0
|
||||||
|
|
||||||
|
ports:
|
||||||
|
http: 80
|
||||||
|
https: 443
|
||||||
|
|
||||||
|
targetPorts:
|
||||||
|
http: http
|
||||||
|
https: https
|
||||||
|
|
||||||
|
#type: LoadBalancer
|
||||||
|
|
||||||
|
type: NodePort
|
||||||
|
# nodePorts:
|
||||||
|
# http: 32080
|
||||||
|
# https: 32443
|
||||||
|
# tcp:
|
||||||
|
# 8080: 32808
|
||||||
|
nodePorts:
|
||||||
|
http: ""
|
||||||
|
https: ""
|
||||||
|
tcp: {}
|
||||||
|
udp: {}
|
||||||
|
|
||||||
|
## Enables an additional internal load balancer (besides the external one).
|
||||||
|
## Annotations are mandatory for the load balancer to come up. Varies with the cloud service.
|
||||||
|
internal:
|
||||||
|
enabled: false
|
||||||
|
annotations: {}
|
||||||
|
|
||||||
|
## Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0.
|
||||||
|
loadBalancerSourceRanges: []
|
||||||
|
|
||||||
|
## Set external traffic policy to: "Local" to preserve source IP on
|
||||||
|
## providers supporting it
|
||||||
|
## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
|
||||||
|
# externalTrafficPolicy: ""
|
||||||
|
|
||||||
|
extraContainers: []
|
||||||
|
## Additional containers to be added to the controller pod.
|
||||||
|
## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example.
|
||||||
|
# - name: my-sidecar
|
||||||
|
# image: nginx:latest
|
||||||
|
# - name: lemonldap-ng-controller
|
||||||
|
# image: lemonldapng/lemonldap-ng-controller:0.2.0
|
||||||
|
# args:
|
||||||
|
# - /lemonldap-ng-controller
|
||||||
|
# - --alsologtostderr
|
||||||
|
# - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration
|
||||||
|
# env:
|
||||||
|
# - name: POD_NAME
|
||||||
|
# valueFrom:
|
||||||
|
# fieldRef:
|
||||||
|
# fieldPath: metadata.name
|
||||||
|
# - name: POD_NAMESPACE
|
||||||
|
# valueFrom:
|
||||||
|
# fieldRef:
|
||||||
|
# fieldPath: metadata.namespace
|
||||||
|
# volumeMounts:
|
||||||
|
# - name: copy-portal-skins
|
||||||
|
# mountPath: /srv/var/lib/lemonldap-ng/portal/skins
|
||||||
|
|
||||||
|
extraVolumeMounts: []
|
||||||
|
## Additional volumeMounts to the controller main container.
|
||||||
|
# - name: copy-portal-skins
|
||||||
|
# mountPath: /var/lib/lemonldap-ng/portal/skins
|
||||||
|
|
||||||
|
extraVolumes: []
|
||||||
|
## Additional volumes to the controller pod.
|
||||||
|
# - name: copy-portal-skins
|
||||||
|
# emptyDir: {}
|
||||||
|
|
||||||
|
extraInitContainers: []
|
||||||
|
## Containers, which are run before the app containers are started.
|
||||||
|
# - name: init-myservice
|
||||||
|
# image: busybox
|
||||||
|
# command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;']
|
||||||
|
|
||||||
|
admissionWebhooks:
|
||||||
|
annotations: {}
|
||||||
|
enabled: true
|
||||||
|
failurePolicy: Fail
|
||||||
|
# timeoutSeconds: 10
|
||||||
|
port: 8443
|
||||||
|
certificate: "/usr/local/certificates/cert"
|
||||||
|
key: "/usr/local/certificates/key"
|
||||||
|
namespaceSelector: {}
|
||||||
|
objectSelector: {}
|
||||||
|
|
||||||
|
service:
|
||||||
|
annotations: {}
|
||||||
|
# clusterIP: ""
|
||||||
|
externalIPs: []
|
||||||
|
# loadBalancerIP: ""
|
||||||
|
loadBalancerSourceRanges: []
|
||||||
|
servicePort: 443
|
||||||
|
type: ClusterIP
|
||||||
|
|
||||||
|
patch:
|
||||||
|
enabled: true
|
||||||
|
image:
|
||||||
|
repository: docker.io/jettech/kube-webhook-certgen
|
||||||
|
tag: v1.5.0
|
||||||
|
pullPolicy: IfNotPresent
|
||||||
|
## Provide a priority class name to the webhook patching job
|
||||||
|
##
|
||||||
|
priorityClassName: ""
|
||||||
|
podAnnotations: {}
|
||||||
|
nodeSelector: {}
|
||||||
|
tolerations: []
|
||||||
|
runAsUser: 2000
|
||||||
|
|
||||||
|
metrics:
|
||||||
|
port: 10254
|
||||||
|
# if this port is changed, change healthz-port: in extraArgs: accordingly
|
||||||
|
enabled: false
|
||||||
|
|
||||||
|
service:
|
||||||
|
annotations: {}
|
||||||
|
# prometheus.io/scrape: "true"
|
||||||
|
# prometheus.io/port: "10254"
|
||||||
|
|
||||||
|
# clusterIP: ""
|
||||||
|
|
||||||
|
## List of IP addresses at which the stats-exporter service is available
|
||||||
|
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
|
||||||
|
##
|
||||||
|
externalIPs: []
|
||||||
|
|
||||||
|
# loadBalancerIP: ""
|
||||||
|
loadBalancerSourceRanges: []
|
||||||
|
servicePort: 9913
|
||||||
|
type: ClusterIP
|
||||||
|
# externalTrafficPolicy: ""
|
||||||
|
# nodePort: ""
|
||||||
|
|
||||||
|
serviceMonitor:
|
||||||
|
enabled: false
|
||||||
|
additionalLabels: {}
|
||||||
|
namespace: ""
|
||||||
|
namespaceSelector: {}
|
||||||
|
# Default: scrape .Release.Namespace only
|
||||||
|
# To scrape all, use the following:
|
||||||
|
# namespaceSelector:
|
||||||
|
# any: true
|
||||||
|
scrapeInterval: 30s
|
||||||
|
# honorLabels: true
|
||||||
|
targetLabels: []
|
||||||
|
metricRelabelings: []
|
||||||
|
|
||||||
|
prometheusRule:
|
||||||
|
enabled: false
|
||||||
|
additionalLabels: {}
|
||||||
|
# namespace: ""
|
||||||
|
rules: []
|
||||||
|
# # These are just examples rules, please adapt them to your needs
|
||||||
|
# - alert: NGINXConfigFailed
|
||||||
|
# expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0
|
||||||
|
# for: 1s
|
||||||
|
# labels:
|
||||||
|
# severity: critical
|
||||||
|
# annotations:
|
||||||
|
# description: bad ingress config - nginx config test failed
|
||||||
|
# summary: uninstall the latest ingress changes to allow config reloads to resume
|
||||||
|
# - alert: NGINXCertificateExpiry
|
||||||
|
# expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800
|
||||||
|
# for: 1s
|
||||||
|
# labels:
|
||||||
|
# severity: critical
|
||||||
|
# annotations:
|
||||||
|
# description: ssl certificate(s) will expire in less then a week
|
||||||
|
# summary: renew expiring certificates to avoid downtime
|
||||||
|
# - alert: NGINXTooMany500s
|
||||||
|
# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
|
||||||
|
# for: 1m
|
||||||
|
# labels:
|
||||||
|
# severity: warning
|
||||||
|
# annotations:
|
||||||
|
# description: Too many 5XXs
|
||||||
|
# summary: More than 5% of all requests returned 5XX, this requires your attention
|
||||||
|
# - alert: NGINXTooMany400s
|
||||||
|
# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
|
||||||
|
# for: 1m
|
||||||
|
# labels:
|
||||||
|
# severity: warning
|
||||||
|
# annotations:
|
||||||
|
# description: Too many 4XXs
|
||||||
|
# summary: More than 5% of all requests returned 4XX, this requires your attention
|
||||||
|
|
||||||
|
## Improve connection draining when ingress controller pod is deleted using a lifecycle hook:
|
||||||
|
## With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds
|
||||||
|
## to 300, allowing the draining of connections up to five minutes.
|
||||||
|
## If the active connections end before that, the pod will terminate gracefully at that time.
|
||||||
|
## To effectively take advantage of this feature, the Configmap feature
|
||||||
|
## worker-shutdown-timeout new value is 240s instead of 10s.
|
||||||
|
##
|
||||||
|
lifecycle:
|
||||||
|
preStop:
|
||||||
|
exec:
|
||||||
|
command:
|
||||||
|
- /wait-shutdown

priorityClassName: ""

## Rollback limit
##
revisionHistoryLimit: 10

# Maxmind license key to download GeoLite2 Databases
# https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases
maxmindLicenseKey: ""

## Default 404 backend
##
defaultBackend:
##
enabled: false

image:
repository: k8s.gcr.io/defaultbackend-amd64
tag: "1.5"
pullPolicy: IfNotPresent
# nobody user -> uid 65534
runAsUser: 65534
runAsNonRoot: true
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false

extraArgs: {}

serviceAccount:
create: true
name:
## Additional environment variables to set for defaultBackend pods
extraEnvs: []

port: 8080

## Readiness and liveness probes for default backend
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
##
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
readinessProbe:
failureThreshold: 6
initialDelaySeconds: 0
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 5

## Node tolerations for server scheduling to nodes with taints
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
##
tolerations: []
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"

affinity: {}

## Security Context policies for controller pods
## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for
## notes on enabling and using sysctls
##
podSecurityContext: {}

# labels to add to the pod container metadata
podLabels: {}
# key: value

## Node labels for default backend pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}

## Annotations to be added to default backend pods
##
podAnnotations: {}

replicaCount: 1

minAvailable: 1

resources: {}
# limits:
# cpu: 10m
# memory: 20Mi
# requests:
# cpu: 10m
# memory: 20Mi

autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 2
targetCPUUtilizationPercentage: 50
targetMemoryUtilizationPercentage: 50

service:
annotations: {}

# clusterIP: ""

## List of IP addresses at which the default backend service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []

# loadBalancerIP: ""
loadBalancerSourceRanges: []
servicePort: 80
type: ClusterIP

priorityClassName: ""

## Enable RBAC as per https://github.com/kubernetes/ingress/tree/master/examples/rbac/nginx and https://github.com/kubernetes/ingress/issues/266
rbac:
create: true
scope: false

# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
enabled: false

serviceAccount:
create: true
name:

## Optional array of imagePullSecrets containing private registry credentials
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# - name: secretName

# TCP service key:value pairs
# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp
##
tcp: {}
# 8080: "default/example-tcp-svc:9000"

# UDP service key:value pairs
# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp
##
udp: {}
# 53: "kube-system/kube-dns:53"
738
yaml/ingress-nginx/values.yaml
Normal file
@ -0,0 +1,738 @@
## nginx configuration
## Ref: https://github.com/kubernetes/ingress-nginx/blob/master/controllers/nginx/configuration.md
##

## Overrides for generated resource names
# See templates/_helpers.tpl
# nameOverride:
# fullnameOverride:

controller:
name: controller
image:
repository: k8s.gcr.io/ingress-nginx/controller
tag: "v0.41.2"
digest: sha256:1f4f402b9c14f3ae92b11ada1dfe9893a88f0faeb0b2f4b903e2c67a0c3bf0de
pullPolicy: IfNotPresent
# www-data -> uid 101
runAsUser: 101
allowPrivilegeEscalation: true

# Configures the ports the nginx-controller listens on
containerPort:
http: 80
https: 443

# Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/
#config: {}
config:
use-proxy-protocol: "false"
client-header-buffer-size: "8k"
large-client-header-buffers: "4 16k"
use-forwarded-headers: "true"
use-geoip: "true"
use-geoip2: "true"

## Annotations to be added to the controller configuration configmap
##
configAnnotations: {}

# Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers
#proxySetHeaders: {}
proxySetHeaders:
X-Country-Code: $geoip_country_code

# Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers
addHeaders: {}

# Optionally customize the pod dnsConfig.
dnsConfig: {}

# Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'.
# By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller
# to keep resolving names inside the k8s network, use ClusterFirstWithHostNet.
#dnsPolicy: ClusterFirst
dnsPolicy: ClusterFirstWithHostNet

# Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network
# Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network; the default --publish-service flag used in standard cloud setups does not apply
reportNodeInternalIp: false

# Required for use with CNI based kubernetes installations (such as ones set up by kubeadm),
# since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920
# is merged
#hostNetwork: false
hostNetwork: true

## Use host ports 80 and 443
## Disabled by default
##
hostPort:
enabled: false
ports:
http: 80
https: 443

## Election ID to use for status update
##
electionID: ingress-controller-leader

## Name of the ingress class to route through this controller
##
ingressClass: nginx

# labels to add to the pod container metadata
podLabels: {}
# key: value

## Security Context policies for controller pods
##
podSecurityContext: {}

## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for
## notes on enabling and using sysctls
###
sysctls: {}
# sysctls:
# "net.core.somaxconn": "8192"

## Allows customization of the source of the IP address or FQDN to report
## in the ingress status field. By default, it reads the information provided
## by the service. If disabled, the status field reports the IP address of the
## node or nodes where an ingress controller pod is running.
publishService:
enabled: true
## Allows overriding of the publish service to bind to
## Must be <namespace>/<service_name>
##
pathOverride: ""
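# For illustration only, a hypothetical override in the format described
# above (both the namespace and the service name are assumptions):
# pathOverride: "ingress-nginx/ingress-nginx-controller"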

## Limit the scope of the controller
##
scope:
enabled: false
namespace: "" # defaults to .Release.Namespace

## Allows customization of the configmap / nginx-configmap namespace
##
configMapNamespace: "" # defaults to .Release.Namespace

## Allows customization of the tcp-services-configmap
##
tcp:
configMapNamespace: "" # defaults to .Release.Namespace
## Annotations to be added to the tcp config configmap
annotations: {}

## Allows customization of the udp-services-configmap
##
udp:
configMapNamespace: "" # defaults to .Release.Namespace
## Annotations to be added to the udp config configmap
annotations: {}

# Maxmind license key to download GeoLite2 Databases
# https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases
maxmindLicenseKey: ""

## Additional command line arguments to pass to nginx-ingress-controller
## E.g. to specify the default SSL certificate you can use
## extraArgs:
## default-ssl-certificate: "<namespace>/<secret_name>"
extraArgs: {}

## Additional environment variables to set
extraEnvs: []
# extraEnvs:
# - name: FOO
# valueFrom:
# secretKeyRef:
# key: FOO
# name: secret-resource

## DaemonSet or Deployment
##
#kind: Deployment
kind: DaemonSet

## Annotations to be added to the controller Deployment or DaemonSet
##
annotations: {}
# keel.sh/pollSchedule: "@every 60m"

## Labels to be added to the controller Deployment or DaemonSet
##
labels: {}
# keel.sh/policy: patch
# keel.sh/trigger: poll


# The update strategy to apply to the Deployment or DaemonSet
##
updateStrategy: {}
# rollingUpdate:
# maxUnavailable: 1
# type: RollingUpdate

# minReadySeconds to avoid killing pods before we are ready
##
minReadySeconds: 0


## Node tolerations for server scheduling to nodes with taints
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
##
tolerations: []
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"

## Affinity and anti-affinity
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
# # An example of preferred pod anti-affinity, weight is in the range 1-100
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 100
# podAffinityTerm:
# labelSelector:
# matchExpressions:
# - key: app.kubernetes.io/name
# operator: In
# values:
# - ingress-nginx
# - key: app.kubernetes.io/instance
# operator: In
# values:
# - ingress-nginx
# - key: app.kubernetes.io/component
# operator: In
# values:
# - controller
# topologyKey: kubernetes.io/hostname

# # An example of required pod anti-affinity
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: app.kubernetes.io/name
# operator: In
# values:
# - ingress-nginx
# - key: app.kubernetes.io/instance
# operator: In
# values:
# - ingress-nginx
# - key: app.kubernetes.io/component
# operator: In
# values:
# - controller
# topologyKey: "kubernetes.io/hostname"

## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in.
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
##
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: failure-domain.beta.kubernetes.io/zone
# whenUnsatisfiable: DoNotSchedule
# labelSelector:
# matchLabels:
# app.kubernetes.io/instance: ingress-nginx-internal

## terminationGracePeriodSeconds
## wait up to five minutes for the drain of connections
##
terminationGracePeriodSeconds: 300

## Node labels for controller pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector:
kubernetes.io/os: linux

## Liveness and readiness probe values
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
port: 10254
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
port: 10254

# Path of the health check endpoint. All requests received on the port defined by
# the healthz-port parameter are forwarded internally to this path.
healthCheckPath: "/healthz"

## Annotations to be added to controller pods
##
podAnnotations: {}

replicaCount: 1

minAvailable: 1

# Define requests resources to avoid probe issues due to CPU utilization in busy nodes
# ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903
# Ideally, there should be no limits.
# https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/
resources:
# limits:
# cpu: 100m
# memory: 90Mi
requests:
cpu: 100m
memory: 90Mi

# Mutually exclusive with keda autoscaling
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 11
targetCPUUtilizationPercentage: 50
targetMemoryUtilizationPercentage: 50

autoscalingTemplate: []
# Custom or additional autoscaling metrics
# ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics
# - type: Pods
# pods:
# metric:
# name: nginx_ingress_controller_nginx_process_requests_total
# target:
# type: AverageValue
# averageValue: 10000m

# Mutually exclusive with hpa autoscaling
keda:
apiVersion: "keda.sh/v1alpha1"
# apiVersion changes with keda 1.x vs 2.x
# 2.x = keda.sh/v1alpha1
# 1.x = keda.k8s.io/v1alpha1
enabled: false
minReplicas: 1
maxReplicas: 11
pollingInterval: 30
cooldownPeriod: 300
restoreToOriginalReplicaCount: false
triggers: []
# - type: prometheus
# metadata:
# serverAddress: http://<prometheus-host>:9090
# metricName: http_requests_total
# threshold: '100'
# query: sum(rate(http_requests_total{deployment="my-deployment"}[2m]))

behavior: {}
# scaleDown:
# stabilizationWindowSeconds: 300
# policies:
# - type: Pods
# value: 1
# periodSeconds: 180
# scaleUp:
# stabilizationWindowSeconds: 300
# policies:
# - type: Pods
# value: 2
# periodSeconds: 60

## Enable mimalloc as a drop-in replacement for malloc.
## ref: https://github.com/microsoft/mimalloc
##
enableMimalloc: true

## Override NGINX template
customTemplate:
configMapName: ""
configMapKey: ""

service:
enabled: true

annotations: {}
labels: {}
# clusterIP: ""

## List of IP addresses at which the controller services are available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []

# loadBalancerIP: ""
loadBalancerSourceRanges: []

enableHttp: true
enableHttps: true

## Set external traffic policy to: "Local" to preserve source IP on
## providers supporting it
## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
# externalTrafficPolicy: ""

# Must be either "None" or "ClientIP" if set. Kubernetes will default to "None".
# Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
# sessionAffinity: ""

# specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified,
# the service controller allocates a port from your cluster’s NodePort range.
# Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
# healthCheckNodePort: 0

ports:
http: 80
https: 443

targetPorts:
http: http
https: https

#type: LoadBalancer

type: NodePort
# nodePorts:
# http: 32080
# https: 32443
# tcp:
# 8080: 32808
nodePorts:
http: ""
https: ""
tcp: {}
udp: {}

## Enables an additional internal load balancer (besides the external one).
## Annotations are mandatory for the load balancer to come up. Varies with the cloud service.
internal:
enabled: false
annotations: {}

## Restrict access for LoadBalancer service. Defaults to 0.0.0.0/0.
loadBalancerSourceRanges: []

## Set external traffic policy to: "Local" to preserve source IP on
## providers supporting it
## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
# externalTrafficPolicy: ""

extraContainers: []
## Additional containers to be added to the controller pod.
## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example.
# - name: my-sidecar
# image: nginx:latest
# - name: lemonldap-ng-controller
# image: lemonldapng/lemonldap-ng-controller:0.2.0
# args:
# - /lemonldap-ng-controller
# - --alsologtostderr
# - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration
# env:
# - name: POD_NAME
# valueFrom:
# fieldRef:
# fieldPath: metadata.name
# - name: POD_NAMESPACE
# valueFrom:
# fieldRef:
# fieldPath: metadata.namespace
# volumeMounts:
# - name: copy-portal-skins
# mountPath: /srv/var/lib/lemonldap-ng/portal/skins

extraVolumeMounts: []
## Additional volumeMounts to the controller main container.
# - name: copy-portal-skins
# mountPath: /var/lib/lemonldap-ng/portal/skins

extraVolumes: []
## Additional volumes to the controller pod.
# - name: copy-portal-skins
# emptyDir: {}

extraInitContainers: []
## Containers, which are run before the app containers are started.
# - name: init-myservice
# image: busybox
# command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;']

admissionWebhooks:
annotations: {}
enabled: true
failurePolicy: Fail
# timeoutSeconds: 10
port: 8443
certificate: "/usr/local/certificates/cert"
key: "/usr/local/certificates/key"
namespaceSelector: {}
objectSelector: {}

service:
annotations: {}
# clusterIP: ""
externalIPs: []
# loadBalancerIP: ""
loadBalancerSourceRanges: []
servicePort: 443
type: ClusterIP

patch:
enabled: true
image:
repository: docker.io/jettech/kube-webhook-certgen
tag: v1.5.0
pullPolicy: IfNotPresent
## Provide a priority class name to the webhook patching job
##
priorityClassName: ""
podAnnotations: {}
nodeSelector: {}
tolerations: []
runAsUser: 2000

metrics:
port: 10254
# if this port is changed, change healthz-port: in extraArgs: accordingly
enabled: false
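# Hedged illustration of the matching change mentioned in the comment above,
# should the metrics port ever be moved (the port value is an assumption):
# extraArgs:
#   healthz-port: "10255"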

service:
annotations: {}
# prometheus.io/scrape: "true"
# prometheus.io/port: "10254"

# clusterIP: ""

## List of IP addresses at which the stats-exporter service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []

# loadBalancerIP: ""
loadBalancerSourceRanges: []
servicePort: 9913
type: ClusterIP
# externalTrafficPolicy: ""
# nodePort: ""

serviceMonitor:
enabled: false
additionalLabels: {}
namespace: ""
namespaceSelector: {}
# Default: scrape .Release.Namespace only
# To scrape all, use the following:
# namespaceSelector:
# any: true
scrapeInterval: 30s
# honorLabels: true
targetLabels: []
metricRelabelings: []

prometheusRule:
enabled: false
additionalLabels: {}
# namespace: ""
rules: []
# # These are just example rules, please adapt them to your needs
# - alert: NGINXConfigFailed
# expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0
# for: 1s
# labels:
# severity: critical
# annotations:
# description: bad ingress config - nginx config test failed
# summary: uninstall the latest ingress changes to allow config reloads to resume
# - alert: NGINXCertificateExpiry
# expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800
# for: 1s
# labels:
# severity: critical
# annotations:
# description: ssl certificate(s) will expire in less than a week
# summary: renew expiring certificates to avoid downtime
# - alert: NGINXTooMany500s
# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
# for: 1m
# labels:
# severity: warning
# annotations:
# description: Too many 5XXs
# summary: More than 5% of all requests returned 5XX, this requires your attention
# - alert: NGINXTooMany400s
# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
# for: 1m
# labels:
# severity: warning
# annotations:
# description: Too many 4XXs
# summary: More than 5% of all requests returned 4XX, this requires your attention

## Improve connection draining when ingress controller pod is deleted using a lifecycle hook:
## With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds
## to 300, allowing the draining of connections up to five minutes.
## If the active connections end before that, the pod will terminate gracefully at that time.
## To take full advantage of this feature, the ConfigMap option
## worker-shutdown-timeout is raised from its 10s default to 240s.
##
lifecycle:
preStop:
exec:
command:
- /wait-shutdown

priorityClassName: ""

## Rollback limit
##
revisionHistoryLimit: 10

## Default 404 backend
##
defaultBackend:
##
enabled: false

name: defaultbackend
image:
repository: k8s.gcr.io/defaultbackend-amd64
tag: "1.5"
pullPolicy: IfNotPresent
# nobody user -> uid 65534
runAsUser: 65534
runAsNonRoot: true
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false

extraArgs: {}

serviceAccount:
create: true
name:
## Additional environment variables to set for defaultBackend pods
extraEnvs: []

port: 8080

## Readiness and liveness probes for default backend
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
##
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
readinessProbe:
failureThreshold: 6
initialDelaySeconds: 0
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 5

## Node tolerations for server scheduling to nodes with taints
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
##
tolerations: []
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"

affinity: {}

## Security Context policies for controller pods
## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for
## notes on enabling and using sysctls
##
podSecurityContext: {}

# labels to add to the pod container metadata
podLabels: {}
# key: value

## Node labels for default backend pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}

## Annotations to be added to default backend pods
##
podAnnotations: {}

replicaCount: 1

minAvailable: 1

resources: {}
# limits:
# cpu: 10m
# memory: 20Mi
# requests:
# cpu: 10m
# memory: 20Mi

autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 2
targetCPUUtilizationPercentage: 50
targetMemoryUtilizationPercentage: 50

service:
annotations: {}

# clusterIP: ""

## List of IP addresses at which the default backend service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []

# loadBalancerIP: ""
loadBalancerSourceRanges: []
servicePort: 80
type: ClusterIP

priorityClassName: ""

## Enable RBAC as per https://github.com/kubernetes/ingress/tree/master/examples/rbac/nginx and https://github.com/kubernetes/ingress/issues/266
rbac:
create: true
scope: false

# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
enabled: false

serviceAccount:
create: true
name:

## Optional array of imagePullSecrets containing private registry credentials
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# - name: secretName

# TCP service key:value pairs
# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp
##
tcp: {}
# 8080: "default/example-tcp-svc:9000"

# UDP service key:value pairs
# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp
##
udp: {}
# 53: "kube-system/kube-dns:53"
186
yaml/metrics-server/components.yaml
Normal file
@ -0,0 +1,186 @@
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: metrics-server
strategy:
rollingUpdate:
maxUnavailable: 0
template:
metadata:
labels:
k8s-app: metrics-server
spec:
containers:
- args:
- --cert-dir=/tmp
- --secure-port=4443
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
image: k8s.gcr.io/metrics-server/metrics-server:v0.4.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 4443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
periodSeconds: 10
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
- emptyDir: {}
name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
labels:
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100
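Once the v1beta1.metrics.k8s.io APIService above is registered, pod and node resource metrics become available to the HorizontalPodAutoscaler. As a minimal sketch only (the name, namespace and target Deployment below are assumptions, not objects shipped by this repo), an HPA consuming these metrics could look like:

apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: example-app-hpa          # hypothetical name
  namespace: default             # hypothetical namespace
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: example-app            # hypothetical scale target
  minReplicas: 1
  maxReplicas: 4
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 50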
16
yaml/monitoring/grafana-cert.yaml
Normal file
@ -0,0 +1,16 @@
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: grafana-cert
namespace: monitoring
spec:
secretName: grafana-tls
issuerRef:
name: letsencrypt-production-dns
kind: ClusterIssuer
commonName: 'monitoring.k8x_domain'
dnsNames:
- 'monitoring.k8x_domain'
privateKey:
algorithm: ECDSA
23
yaml/monitoring/grafana-ingress-secure.yaml
Normal file
@ -0,0 +1,23 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: grafana-ingress
annotations:
kubernetes.io/ingress.class: nginx
spec:
rules:
- host: monitoring.k8x_domain
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: grafana
port:
number: 80
tls:
- secretName: grafana-tls
hosts:
- monitoring.k8x_domain
19
yaml/monitoring/grafana-ingress.yaml
Normal file
@ -0,0 +1,19 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: grafana-ingress
annotations:
kubernetes.io/ingress.class: nginx
spec:
rules:
- host: monitoring.k8x_domain
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: grafana
port:
number: 80
663
yaml/monitoring/grafana-values.yaml
Normal file
@ -0,0 +1,663 @@
rbac:
create: true
## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true)
# useExistingRole: name-of-some-(cluster)role
pspEnabled: true
pspUseAppArmor: true
namespaced: false
extraRoleRules: []
# - apiGroups: []
# resources: []
# verbs: []
extraClusterRoleRules: []
# - apiGroups: []
# resources: []
# verbs: []
serviceAccount:
create: true
name:
nameTest:
# annotations:
# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here

replicas: 1

## See `kubectl explain poddisruptionbudget.spec` for more
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
podDisruptionBudget: {}
# minAvailable: 1
# maxUnavailable: 1

## See `kubectl explain deployment.spec.strategy` for more
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
deploymentStrategy:
type: RollingUpdate

readinessProbe:
httpGet:
path: /api/health
port: 3000

livenessProbe:
httpGet:
path: /api/health
port: 3000
initialDelaySeconds: 60
timeoutSeconds: 30
failureThreshold: 10

## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName: "default-scheduler"

image:
repository: grafana/grafana
tag: 7.2.1
sha: ""
pullPolicy: IfNotPresent

## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName

testFramework:
enabled: true
image: "bats/bats"
tag: "v1.1.0"
imagePullPolicy: IfNotPresent
securityContext: {}

securityContext:
runAsUser: 472
runAsGroup: 472
fsGroup: 472


extraConfigmapMounts: []
# - name: certs-configmap
# mountPath: /etc/grafana/ssl/
# subPath: certificates.crt # (optional)
# configMap: certs-configmap
# readOnly: true


extraEmptyDirMounts: []
# - name: provisioning-notifiers
# mountPath: /etc/grafana/provisioning/notifiers


## Assign a PriorityClassName to pods if set
# priorityClassName:

downloadDashboardsImage:
repository: curlimages/curl
tag: 7.70.0
sha: ""
pullPolicy: IfNotPresent

downloadDashboards:
env: {}
resources: {}

## Pod Annotations
# podAnnotations: {}

## Pod Labels
# podLabels: {}

podPortName: grafana

## Deployment annotations
# annotations: {}

## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service).
## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
type: ClusterIP
port: 80
targetPort: 3000
# targetPort: 4181 To be used with a proxy extraContainer
annotations: {}
labels: {}
portName: service

serviceMonitor:
## If true, a ServiceMonitor CRD is created for a prometheus operator
## https://github.com/coreos/prometheus-operator
##
enabled: false
path: /metrics
# namespace: monitoring (defaults to use the namespace this chart is deployed to)
labels: {}
interval: 1m
scrapeTimeout: 30s
relabelings: []

extraExposePorts: []
# - name: keycloak
# port: 8080
# targetPort: 8080
# type: ClusterIP

# overrides pod.spec.hostAliases in the grafana deployment's pods
hostAliases: []
# - ip: "1.2.3.4"
# hostnames:
# - "my.host.com"

ingress:
enabled: false
# Values can be templated
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
path: /
hosts:
- chart-example.local
## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
extraPaths: []
# - path: /*
# backend:
# serviceName: ssl-redirect
# servicePort: use-annotation
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local

resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi

## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
#
nodeSelector: {}

## Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []

## Affinity for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}

extraInitContainers: []

## Enable and specify a container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod
extraContainers: |
# - name: proxy
# image: quay.io/gambol99/keycloak-proxy:latest
# args:
# - -provider=github
# - -client-id=
# - -client-secret=
# - -github-org=<ORG_NAME>
# - -email-domain=*
# - -cookie-secret=
# - -http-address=http://0.0.0.0:4181
# - -upstream-url=http://127.0.0.1:3000
# ports:
# - name: proxy-web
# containerPort: 4181

## Volumes that can be used in init containers that will not be mounted to deployment pods
extraContainerVolumes: []
# - name: volume-from-secret
# secret:
# secretName: secret-to-mount
# - name: empty-dir-volume
# emptyDir: {}

## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
type: pvc
enabled: true
# storageClassName: default
accessModes:
- ReadWriteOnce
size: 8Gi
# annotations: {}
finalizers:
- kubernetes.io/pvc-protection
subPath: ""
existingClaim: "monitoring-grafana-data-pvc"
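# The claim named above must exist before this chart is installed; a minimal
# sketch of such a claim (the storage class is cluster-specific, and matching
# the "size: 8Gi" above is an assumption):
#
# apiVersion: v1
# kind: PersistentVolumeClaim
# metadata:
#   name: monitoring-grafana-data-pvc
#   namespace: monitoring
# spec:
#   accessModes:
#     - ReadWriteOnce
#   resources:
#     requests:
#       storage: 8Gi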
|
||||||
|
|
||||||
|
initChownData:
|
||||||
|
## If false, data ownership will not be reset at startup
|
||||||
|
## This allows the prometheus-server to be run with an arbitrary user
|
||||||
|
##
|
||||||
|
enabled: true
|
||||||
|
|
||||||
|
## initChownData container image
|
||||||
|
##
|
||||||
|
image:
|
||||||
|
repository: busybox
|
||||||
|
tag: "1.31.1"
|
||||||
|
sha: ""
|
||||||
|
pullPolicy: IfNotPresent
|
||||||
|
|
||||||
|
## initChownData resource requests and limits
|
||||||
|
## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
|
||||||
|
##
|
||||||
|
resources: {}
|
||||||
|
# limits:
|
||||||
|
# cpu: 100m
|
||||||
|
# memory: 128Mi
|
||||||
|
# requests:
|
||||||
|
# cpu: 100m
|
||||||
|
# memory: 128Mi
|
||||||
|
|
||||||
|
|
||||||
|
# Administrator credentials when not using an existing secret (see below)
|
||||||
|
adminUser: admin
|
||||||
|
# adminPassword: strongpassword
|
||||||
|
|
||||||
|
# Use an existing secret for the admin user.
|
||||||
|
admin:
|
||||||
|
existingSecret: ""
|
||||||
|
userKey: admin-user
|
||||||
|
passwordKey: admin-password
|
||||||
|
|
||||||
|
## Define command to be executed at startup by grafana container
|
||||||
|
## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/)
|
||||||
|
## Default is "run.sh" as defined in grafana's Dockerfile
|
||||||
|
# command:
|
||||||
|
# - "sh"
|
||||||
|
# - "/run.sh"
|
||||||
|
|
||||||
|
## Use an alternate scheduler, e.g. "stork".
|
||||||
|
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
|
||||||
|
##
|
||||||
|
# schedulerName:
|
||||||
|
|
||||||
|
## Extra environment variables that will be pass onto deployment pods
|
||||||
|
##
|
||||||
|
## to provide grafana with access to CloudWatch on AWS EKS:
|
||||||
|
## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later)
|
||||||
|
## 2. edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the
|
||||||
|
## same oidc eks provider as noted before (same as the existing line)
|
||||||
|
## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name
|
||||||
|
##
|
||||||
|
## "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana",
|
||||||
|
##
|
||||||
|
## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess
|
||||||
|
## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name)
|
||||||
|
##
|
||||||
|
## env:
|
||||||
|
## AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here
|
||||||
|
## AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
|
||||||
|
## AWS_REGION: us-east-1
|
||||||
|
##
|
||||||
|
## 5. uncomment the EKS section in extraSecretMounts: below
|
||||||
|
## 6. uncomment the annotation section in the serviceAccount: above
|
||||||
|
## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn
|
||||||
|
|
||||||
|
env: {}
|
||||||
|
|
||||||
|
## "valueFrom" environment variable references that will be added to deployment pods
|
||||||
|
## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core
|
||||||
|
## Renders in container spec as:
|
||||||
|
## env:
|
||||||
|
## ...
|
||||||
|
## - name: <key>
|
||||||
|
## valueFrom:
|
||||||
|
## <value rendered as YAML>
|
||||||
|
envValueFrom: {}
|
||||||
|
|
||||||
|
## The name of a secret in the same kubernetes namespace which contain values to be added to the environment
|
||||||
|
## This can be useful for auth tokens, etc. Value is templated.
|
||||||
|
envFromSecret: ""
|
||||||
|
|
||||||
|
## Sensible environment variables that will be rendered as new secret object
|
||||||
|
## This can be useful for auth tokens, etc
|
||||||
|
envRenderSecret: {}
|
||||||
|
|
||||||
|
## Additional grafana server secret mounts
|
||||||
|
# Defines additional mounts with secrets. Secrets must be manually created in the namespace.
|
||||||
|
extraSecretMounts: []
|
||||||
|
# - name: secret-files
|
||||||
|
# mountPath: /etc/secrets
|
||||||
|
# secretName: grafana-secret-files
|
||||||
|
# readOnly: true
|
||||||
|
# subPath: ""
|
||||||
|
#
|
||||||
|
# for AWS EKS (cloudwatch) use the following (see also instruction in env: above)
|
||||||
|
# - name: aws-iam-token
|
||||||
|
# mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount
|
||||||
|
# readOnly: true
|
||||||
|
# projected:
|
||||||
|
# defaultMode: 420
|
||||||
|
# sources:
|
||||||
|
# - serviceAccountToken:
|
||||||
|
# audience: sts.amazonaws.com
|
||||||
|
# expirationSeconds: 86400
|
||||||
|
# path: token
|
||||||
|
|
||||||
|
## Additional grafana server volume mounts
|
||||||
|
# Defines additional volume mounts.
|
||||||
|
extraVolumeMounts: []
|
||||||
|
# - name: extra-volume
|
||||||
|
# mountPath: /mnt/volume
|
||||||
|
# readOnly: true
|
||||||
|
# existingClaim: volume-claim
|
||||||
|
|
||||||
|
## Pass the plugins you want installed as a list.
|
||||||
|
##
|
||||||
|
plugins: []
|
||||||
|
# - digrich-bubblechart-panel
|
||||||
|
# - grafana-clock-panel
|
||||||
|
|
||||||
|
## Configure grafana datasources
|
||||||
|
## ref: http://docs.grafana.org/administration/provisioning/#datasources
|
||||||
|
##
|
||||||
|
#datasources: {}
|
||||||
|
# datasources.yaml:
|
||||||
|
# apiVersion: 1
|
||||||
|
# datasources:
|
||||||
|
# - name: Prometheus
|
||||||
|
# type: prometheus
|
||||||
|
# url: http://prometheus-prometheus-server
|
||||||
|
# access: proxy
|
||||||
|
# isDefault: true
|
||||||
|
# - name: CloudWatch
|
||||||
|
# type: cloudwatch
|
||||||
|
# access: proxy
|
||||||
|
# uid: cloudwatch
|
||||||
|
# editable: false
|
||||||
|
# jsonData:
|
||||||
|
# authType: credentials
|
||||||
|
# defaultRegion: us-east-1
|
||||||
|
|
||||||
|
datasources:
|
||||||
|
datasources.yaml:
|
||||||
|
apiVersion: 1
|
||||||
|
datasources:
|
||||||
|
- name: Prometheus
|
||||||
|
type: prometheus
|
||||||
|
url: http://prometheus-server.monitoring.svc.cluster.local
|
||||||
|
access: proxy
|
||||||
|
isDefault: true
|
||||||
|
- name: Loki
|
||||||
|
type: loki
|
||||||
|
url: http://loki.monitoring.svc.cluster.local:3100
|
||||||
|
accesS: proxy
|
||||||
|
isDefault: false
|
||||||
|
|
||||||
|
## Configure notifiers
|
||||||
|
## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels
|
||||||
|
##
|
||||||
|
notifiers: {}
|
||||||
|
# notifiers.yaml:
|
||||||
|
# notifiers:
|
||||||
|
# - name: email-notifier
|
||||||
|
# type: email
|
||||||
|
# uid: email1
|
||||||
|
# # either:
|
||||||
|
# org_id: 1
|
||||||
|
# # or
|
||||||
|
# org_name: Main Org.
|
||||||
|
# is_default: true
|
||||||
|
# settings:
|
||||||
|
# addresses: an_email_address@example.com
|
||||||
|
# delete_notifiers:
|
||||||
|
|
||||||
|
## Configure grafana dashboard providers
|
||||||
|
## ref: http://docs.grafana.org/administration/provisioning/#dashboards
|
||||||
|
##
|
||||||
|
## `path` must be /var/lib/grafana/dashboards/<provider_name>
|
||||||
|
##
|
||||||
|
dashboardProviders: {}
|
||||||
|
# dashboardproviders.yaml:
|
||||||
|
# apiVersion: 1
|
||||||
|
# providers:
|
||||||
|
# - name: 'default'
|
||||||
|
# orgId: 1
|
||||||
|
# folder: ''
|
||||||
|
# type: file
|
||||||
|
# disableDeletion: false
|
||||||
|
# editable: true
|
||||||
|
# options:
|
||||||
|
# path: /var/lib/grafana/dashboards/default
|
||||||
|
|
||||||
|
## Configure grafana dashboard to import
|
||||||
|
## NOTE: To use dashboards you must also enable/configure dashboardProviders
|
||||||
|
## ref: https://grafana.com/dashboards
|
||||||
|
##
|
||||||
|
## dashboards per provider, use provider name as key.
|
||||||
|
##
|
||||||
|
dashboards: {}
|
||||||
|
# default:
|
||||||
|
# some-dashboard:
|
||||||
|
# json: |
|
||||||
|
# $RAW_JSON
|
||||||
|
# custom-dashboard:
|
||||||
|
# file: dashboards/custom-dashboard.json
|
||||||
|
# prometheus-stats:
|
||||||
|
# gnetId: 2
|
||||||
|
# revision: 2
|
||||||
|
# datasource: Prometheus
|
||||||
|
# local-dashboard:
|
||||||
|
# url: https://example.com/repository/test.json
|
||||||
|
# local-dashboard-base64:
|
||||||
|
# url: https://example.com/repository/test-b64.json
|
||||||
|
# b64content: true
|
||||||
|
|
||||||
|
## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value.
|
||||||
|
## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both.
|
||||||
|
## ConfigMap data example:
|
||||||
|
##
|
||||||
|
## data:
|
||||||
|
## example-dashboard.json: |
|
||||||
|
## RAW_JSON
|
||||||
|
##
|
||||||
|
dashboardsConfigMaps: {}
|
||||||
|
# default: ""
|
||||||
|
|

## Grafana's primary configuration
## NOTE: values in map will be converted to ini format
## ref: http://docs.grafana.org/installation/configuration/
##
grafana.ini:
  paths:
    data: /var/lib/grafana/data
    logs: /var/log/grafana
    plugins: /var/lib/grafana/plugins
    provisioning: /etc/grafana/provisioning
  analytics:
    check_for_updates: true
  log:
    mode: console
  grafana_net:
    url: https://grafana.net
## Grafana authentication can be enabled with the following values on grafana.ini
  # server:
  #   # The full public facing url you use in browser, used for redirects and emails
  #   root_url:
  # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana
  # auth.github:
  #   enabled: false
  #   allow_sign_up: false
  #   scopes: user:email,read:org
  #   auth_url: https://github.com/login/oauth/authorize
  #   token_url: https://github.com/login/oauth/access_token
  #   api_url: https://api.github.com/user
  #   team_ids:
  #   allowed_organizations:
  #   client_id:
  #   client_secret:
## LDAP Authentication can be enabled with the following values on grafana.ini
## NOTE: Grafana will fail to start if the value for ldap.toml is invalid
  # auth.ldap:
  #   enabled: true
  #   allow_sign_up: true
  #   config_file: /etc/grafana/ldap.toml

## Grafana's LDAP configuration
## Templated by the template in _helpers.tpl
## NOTE: To enable, grafana.ini must be configured with auth.ldap.enabled
## ref: http://docs.grafana.org/installation/configuration/#auth-ldap
## ref: http://docs.grafana.org/installation/ldap/#configuration
ldap:
  enabled: false
  # `existingSecret` is a reference to an existing secret containing the ldap configuration
  # for Grafana in a key `ldap-toml`.
  existingSecret: ""
  # `config` is the content of `ldap.toml` that will be stored in the created secret
  config: ""
  # config: |-
  #   verbose_logging = true

  #   [[servers]]
  #   host = "my-ldap-server"
  #   port = 636
  #   use_ssl = true
  #   start_tls = false
  #   ssl_skip_verify = false
  #   bind_dn = "uid=%s,ou=users,dc=myorg,dc=com"

## Grafana's SMTP configuration
## NOTE: To enable, grafana.ini must be configured with smtp.enabled
## ref: http://docs.grafana.org/installation/configuration/#smtp
smtp:
  # `existingSecret` is a reference to an existing secret containing the smtp configuration
  # for Grafana.
  existingSecret: ""
  userKey: "user"
  passwordKey: "password"

## Sidecars that collect the ConfigMaps with the specified label and store the included files into the respective folders
## Requires at least Grafana 5 and can't be used together with the dashboardProviders, datasources and dashboards parameters
sidecar:
  image:
    repository: kiwigrid/k8s-sidecar
    tag: 0.1.209
    sha: ""
  imagePullPolicy: IfNotPresent
  resources: {}
  #  limits:
  #    cpu: 100m
  #    memory: 100Mi
  #  requests:
  #    cpu: 50m
  #    memory: 50Mi
  # skipTlsVerify: set to true to skip tls verification for kube api calls
  # skipTlsVerify: true
  enableUniqueFilenames: false
  dashboards:
    enabled: false
    SCProvider: true
    # label that the configmaps with dashboards are marked with
    label: grafana_dashboard
    # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set)
    folder: /tmp/dashboards
    # The default folder name: it will create a subfolder under the `folder` and put dashboards there instead
    defaultFolderName: null
    # If specified, the sidecar will search for dashboard config-maps inside this namespace.
    # Otherwise the namespace in which the sidecar is running will be used.
    # It's also possible to specify ALL to search in all namespaces
    searchNamespace: null
    # If specified, the sidecar will look for an annotation with this name to create the folder and put the dashboard there.
    # You can use this parameter together with `provider.foldersFromFilesStructure` to annotate configmaps and create a folder structure.
    folderAnnotation: null
    # provider configuration that lets grafana manage the dashboards
    provider:
      # name of the provider, should be unique
      name: sidecarProvider
      # orgid as configured in grafana
      orgid: 1
      # folder in which the dashboards should be imported in grafana
      folder: ''
      # type of the provider
      type: file
      # disableDelete to activate an import-only behaviour
      disableDelete: false
      # allow updating provisioned dashboards from the UI
      allowUiUpdates: false
      # allow Grafana to replicate dashboard structure from filesystem
      foldersFromFilesStructure: false
  datasources:
    enabled: false
    # label that the configmaps with datasources are marked with
    label: grafana_datasource
    # If specified, the sidecar will search for datasource config-maps inside this namespace.
    # Otherwise the namespace in which the sidecar is running will be used.
    # It's also possible to specify ALL to search in all namespaces
    searchNamespace: null
  notifiers:
    enabled: false
    # label that the configmaps with notifiers are marked with
    label: grafana_notifier
    # If specified, the sidecar will search for notifier config-maps inside this namespace.
    # Otherwise the namespace in which the sidecar is running will be used.
    # It's also possible to specify ALL to search in all namespaces
    searchNamespace: null
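Since the sidecar collects dashboards purely by label, a ConfigMap it would pick up looks roughly like this (a minimal sketch; name, namespace and JSON payload are placeholders, only the label from `sidecar.dashboards.label` above matters):

```yaml
# Sketch of a ConfigMap the dashboard sidecar would collect; the label must
# match sidecar.dashboards.label, everything else here is illustrative.
apiVersion: v1
kind: ConfigMap
metadata:
  name: example-dashboard        # hypothetical name
  namespace: monitoring
  labels:
    grafana_dashboard: "1"
data:
  example-dashboard.json: |
    { "title": "Example Dashboard", "panels": [] }
```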

## Override the deployment namespace
##
namespaceOverride: ""

## Number of old ReplicaSets to retain
##
revisionHistoryLimit: 10

## Add a separate remote image renderer deployment/service
imageRenderer:
  # Enable the image-renderer deployment & service
  enabled: false
  replicas: 1
  image:
    # image-renderer Image repository
    repository: grafana/grafana-image-renderer
    # image-renderer Image tag
    tag: latest
    # image-renderer Image sha (optional)
    sha: ""
    # image-renderer ImagePullPolicy
    pullPolicy: Always
  # extra environment variables
  env: {}
  # RENDERING_ARGS: --disable-gpu,--window-size=1280x758
  # RENDERING_MODE: clustered
  # image-renderer deployment securityContext
  securityContext: {}
  # image-renderer deployment Host Aliases
  hostAliases: []
  # image-renderer deployment priority class
  priorityClassName: ''
  service:
    # image-renderer service port name
    portName: 'http'
    # image-renderer service port used by both service and deployment
    port: 8081
  # name of the image-renderer port on the pod
  podPortName: http
  # number of image-renderer replica sets to keep
  revisionHistoryLimit: 10
  networkPolicy:
    # Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods
    limitIngress: true
    # Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods
    limitEgress: false
  resources: {}
  #  limits:
  #    cpu: 100m
  #    memory: 100Mi
  #  requests:
  #    cpu: 50m
  #    memory: 50Mi

38
yaml/monitoring/grafana-volumes.yaml
Normal file
@@ -0,0 +1,38 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: monitoring-grafana-data-pv
spec:
  capacity:
    storage: 8Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  rbd:
    monitors:
      - ${CEPH_MONITOR_1}:6789
      - ${CEPH_MONITOR_2}:6789
      - ${CEPH_MONITOR_3}:6789
    pool: kube
    user: kube
    secretRef:
      name: ceph-secret-kube
      namespace: kube-system
    fsType: ext4
    readOnly: false
    image: monitoring-grafana-data
---
apiVersion: "v1"
kind: "PersistentVolumeClaim"
metadata:
  name: monitoring-grafana-data-pvc
  namespace: monitoring
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 8Gi
  volumeName: "monitoring-grafana-data-pv"
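The claim above is presumably what the grafana release mounts for its data; a sketch of the matching persistence block in the grafana values (assumed, this part of the values file is not shown in this diff):

```yaml
# Assumed grafana-values.yaml fragment: point the chart at the PVC above
# instead of letting it provision its own volume.
persistence:
  enabled: true
  existingClaim: "monitoring-grafana-data-pvc"
```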

250
yaml/monitoring/loki-values.yaml
Normal file
@@ -0,0 +1,250 @@
image:
  repository: grafana/loki
  tag: 2.0.0
  pullPolicy: IfNotPresent

## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
#   - myRegistryKeySecretName

ingress:
  enabled: false
  annotations: {}
  # kubernetes.io/ingress.class: nginx
  # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths: []
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

## Affinity for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
# podAntiAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#   - labelSelector:
#       matchExpressions:
#       - key: app
#         operator: In
#         values:
#         - loki
#     topologyKey: "kubernetes.io/hostname"

## StatefulSet annotations
annotations: {}

# enable tracing for debugging; requires Jaeger installed and the right jaeger_agent_host set
tracing:
  jaegerAgentHost:

config:
  auth_enabled: false
  ingester:
    chunk_idle_period: 3m
    chunk_block_size: 262144
    chunk_retain_period: 1m
    max_transfer_retries: 0
    lifecycler:
      ring:
        kvstore:
          store: inmemory
        replication_factor: 1

        ## Different ring configs can be used. E.g. Consul
        # ring:
        #   store: consul
        #   replication_factor: 1
        #   consul:
        #     host: "consul:8500"
        #     prefix: ""
        #     http_client_timeout: "20s"
        #     consistent_reads: true
  limits_config:
    enforce_metric_name: false
    reject_old_samples: true
    reject_old_samples_max_age: 168h
  schema_config:
    configs:
    - from: 2020-10-24
      store: boltdb-shipper
      object_store: filesystem
      schema: v11
      index:
        prefix: index_
        period: 168h
  server:
    http_listen_port: 3100
  storage_config:
    boltdb_shipper:
      active_index_directory: /data/loki/boltdb-shipper-active
      cache_location: /data/loki/boltdb-shipper-cache
      cache_ttl: 24h # Can be increased for faster performance over longer query periods, uses more disk space
      shared_store: filesystem
    filesystem:
      directory: /data/loki/chunks
  chunk_store_config:
    max_look_back_period: 2016h
  table_manager:
    retention_deletes_enabled: true
    retention_period: 2016h
  compactor:
    working_directory: /data/loki/boltdb-shipper-compactor
    shared_store: filesystem

## Additional Loki container arguments, e.g. log level (debug, info, warn, error)
extraArgs: {}
# log.level: debug

livenessProbe:
  httpGet:
    path: /ready
    port: http-metrics
  initialDelaySeconds: 45

## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
networkPolicy:
  enabled: false

## The app name of loki clients
client: {}
# name:

## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
nodeSelector: {}

## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
## If you set enabled to "true", you need to:
## - create a PV of at least 10Gi in the same namespace as loki
## - keep storageClassName the same as in the setting below
persistence:
  enabled: true
  accessModes:
  - ReadWriteOnce
  size: 128Gi
  annotations: {}
  # selector:
  #   matchLabels:
  #     app.kubernetes.io/name: loki
  subPath: ""
  existingClaim: "monitoring-loki-data-pvc"

## Pod Labels
podLabels: {}

## Pod Annotations
podAnnotations:
  prometheus.io/scrape: "true"
  prometheus.io/port: "http-metrics"

podManagementPolicy: OrderedReady

## Assign a PriorityClassName to pods if set
# priorityClassName:

rbac:
  create: true
  pspEnabled: true

readinessProbe:
  httpGet:
    path: /ready
    port: http-metrics
  initialDelaySeconds: 45

replicas: 1

resources: {}
# limits:
#   cpu: 200m
#   memory: 256Mi
# requests:
#   cpu: 100m
#   memory: 128Mi

securityContext:
  fsGroup: 10001
  runAsGroup: 10001
  runAsNonRoot: true
  runAsUser: 10001

service:
  type: ClusterIP
  nodePort:
  port: 3100
  annotations: {}
  labels: {}

serviceAccount:
  create: true
  name:
  annotations: {}

terminationGracePeriodSeconds: 4800

## Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []

# The values to set in the PodDisruptionBudget spec
# If not set then a PodDisruptionBudget will not be created
podDisruptionBudget: {}
# minAvailable: 1
# maxUnavailable: 1

updateStrategy:
  type: RollingUpdate

serviceMonitor:
  enabled: false
  interval: ""
  additionalLabels: {}
  annotations: {}
  # scrapeTimeout: 10s

initContainers: []
## Init containers to be added to the loki pod.
# - name: my-init-container
#   image: busybox:latest
#   command: ['sh', '-c', 'echo hello']

extraContainers: []
## Additional containers to be added to the loki pod.
# - name: reverse-proxy
#   image: angelbarrera92/basic-auth-reverse-proxy:dev
#   args:
#     - "serve"
#     - "--upstream=http://localhost:3100"
#     - "--auth-config=/etc/reverse-proxy-conf/authn.yaml"
#   ports:
#     - name: http
#       containerPort: 11811
#       protocol: TCP
#   volumeMounts:
#     - name: reverse-proxy-auth-config
#       mountPath: /etc/reverse-proxy-conf

extraVolumes: []
## Additional volumes to the loki pod.
# - name: reverse-proxy-auth-config
#   secret:
#     secretName: reverse-proxy-auth-config

## Extra volume mounts that will be added to the loki container
extraVolumeMounts: []

extraPorts: []
## Additional ports to the loki services. Useful to expose extra container ports.
# - port: 11811
#   protocol: TCP
#   name: http
#   targetPort: http

# Extra env variables to pass to the loki container
env: []
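On the Grafana side, Loki would then be reachable through its in-cluster service on port 3100; a sketch of a provisioned datasource entry (service DNS name assumed from a release named `loki` in the `monitoring` namespace, not part of this diff):

```yaml
# Assumed grafana datasource provisioning fragment for the Loki above.
datasources:
  datasources.yaml:
    apiVersion: 1
    datasources:
    - name: Loki
      type: loki
      access: proxy
      url: http://loki.monitoring.svc.cluster.local:3100
```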

37
yaml/monitoring/loki-volumes.yaml
Normal file
@@ -0,0 +1,37 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: monitoring-loki-data-pv
spec:
  capacity:
    storage: 128Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  rbd:
    monitors:
      - ${CEPH_MONITOR_1}:6789
      - ${CEPH_MONITOR_2}:6789
      - ${CEPH_MONITOR_3}:6789
    pool: kube
    user: kube
    secretRef:
      name: ceph-secret-kube
      namespace: kube-system
    fsType: ext4
    readOnly: false
    image: monitoring-loki-data
---
apiVersion: "v1"
kind: "PersistentVolumeClaim"
metadata:
  name: monitoring-loki-data-pvc
  namespace: monitoring
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 128Gi
  volumeName: "monitoring-loki-data-pv"

1529
yaml/monitoring/prometheus-values.yaml
Normal file
File diff suppressed because it is too large

76
yaml/monitoring/prometheus-volumes.yaml
Normal file
@@ -0,0 +1,76 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: monitoring-prometheus-alertmanager-data-pv
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  rbd:
    monitors:
      - ${CEPH_MONITOR_1}:6789
      - ${CEPH_MONITOR_2}:6789
      - ${CEPH_MONITOR_3}:6789
    pool: kube
    user: kube
    secretRef:
      name: ceph-secret-kube
      namespace: kube-system
    fsType: ext4
    readOnly: false
    image: monitoring-prometheus-alertmanager-data
---
apiVersion: "v1"
kind: "PersistentVolumeClaim"
metadata:
  name: monitoring-prometheus-alertmanager-data-pvc
  namespace: monitoring
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi
  volumeName: "monitoring-prometheus-alertmanager-data-pv"
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: monitoring-prometheus-server-data-pv
spec:
  capacity:
    storage: 32Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  rbd:
    monitors:
      - ${CEPH_MONITOR_1}:6789
      - ${CEPH_MONITOR_2}:6789
      - ${CEPH_MONITOR_3}:6789
    pool: kube
    user: kube
    secretRef:
      name: ceph-secret-kube
      namespace: kube-system
    fsType: ext4
    readOnly: false
    image: monitoring-prometheus-server-data
---
apiVersion: "v1"
kind: "PersistentVolumeClaim"
metadata:
  name: monitoring-prometheus-server-data-pvc
  namespace: monitoring
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 32Gi
  volumeName: "monitoring-prometheus-server-data-pv"

57
yaml/monitoring/zabbix-agent-daemonset.yaml
Normal file
@@ -0,0 +1,57 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: zabbix-agent
  namespace: monitoring
spec:
  selector:
    matchLabels:
      app: zabbix-agent
  template:
    metadata:
      labels:
        app: zabbix-agent
    spec:
      hostNetwork: true
      hostPID: true
      hostIPC: false
      containers:
      - name: zabbix-agent
        image: zabbix/zabbix-agent
        imagePullPolicy: Always
        ports:
        - containerPort: 10050
          name: zabbix-agent
        env:
        - name: ZBX_SERVER_HOST
          value: k8x_zabbix_server
        - name: ZBX_STARTAGENTS
          value: "1"
        - name: ZBX_TIMEOUT
          value: "10"
        - name: ZBX_TLSCONNECT
          value: "psk"
        - name: ZBX_TLSACCEPT
          value: "psk"
        - name: ZBX_TLSPSKIDENTITY
          valueFrom:
            secretKeyRef:
              name: zabbix-psk-id-secret
              key: zabbix_psk_id
        - name: ZBX_TLSPSKFILE
          value: "zabbix_agentd.psk"
        - name: ZBX_ACTIVE_ALLOW
          value: "false"
        securityContext:
          privileged: true
        resources:
          requests:
            cpu: 0.15
        volumeMounts:
        - name: zabbix-psk-config
          mountPath: "/var/lib/zabbix/enc"
          readOnly: true
      volumes:
      - name: zabbix-psk-config
        secret:
          secretName: zabbix-psk-secret
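The DaemonSet expects two pre-created Secrets; a minimal sketch of what they could contain (names and keys taken from the manifest above, the identity and hex PSK values are placeholders):

```yaml
# Sketch of the Secrets referenced above. The zabbix-psk-secret key must be
# zabbix_agentd.psk so the mounted file matches ZBX_TLSPSKFILE; the PSK value
# is a placeholder (e.g. generated with openssl rand -hex 32).
apiVersion: v1
kind: Secret
metadata:
  name: zabbix-psk-id-secret
  namespace: monitoring
stringData:
  zabbix_psk_id: "PSK_001"   # placeholder identity
---
apiVersion: v1
kind: Secret
metadata:
  name: zabbix-psk-secret
  namespace: monitoring
stringData:
  zabbix_agentd.psk: "8101c731977dbf1e2b2dcde60e8f2cbbf6a9a4a7c1f2e93ce4b2a65c8b0d8e41"   # placeholder
```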

13
yaml/system/default-resource-limits.yaml
Normal file
@@ -0,0 +1,13 @@
apiVersion: v1
kind: LimitRange
metadata:
  name: default-resource-limits
spec:
  limits:
  - default:
      cpu: 2000m
      memory: 4096Mi
    defaultRequest:
      cpu: 10m
      memory: 128Mi
    type: Container
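Once this LimitRange is applied in a namespace, any container created there without its own resources block gets these values injected at admission time; a hypothetical pod to illustrate:

```yaml
# Hypothetical pod with no resources block: after admission the LimitRange
# above fills in requests of 10m/128Mi and limits of 2000m/4096Mi.
apiVersion: v1
kind: Pod
metadata:
  name: limits-demo
spec:
  containers:
  - name: app
    image: nginx   # illustrative image
```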

19
yaml/system/kube-apiserver-to-kubelet-clusterrole.yaml
Normal file
@@ -0,0 +1,19 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"

@@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kubernetes

5
yaml/system/namespace.yaml
Normal file
@@ -0,0 +1,5 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: ${nspace}

33
yaml/system/sa-rbac-backup-agent.yaml
Normal file
@@ -0,0 +1,33 @@
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: backup-agent-role
  namespace: ${nspace}
rules:
  - apiGroups: [""]
    resources:
      - pods
      - pods/log
    verbs:
      - get
      - list
  - apiGroups: [""]
    resources:
      - pods/exec
    verbs:
      - create
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: backup-agent-rolebinding
  namespace: ${nspace}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: backup-agent-role
subjects:
  - kind: ServiceAccount
    name: backup-agent-sa
    namespace: ${nspace}

110
yaml/system/sa-rbac.yaml
Normal file
@@ -0,0 +1,110 @@
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: admin-role
  namespace: ${nspace}
rules:
  - apiGroups: [ "", "extensions", "apps", "batch", "autoscaling" ]
    resources:
      - pods
      - pods/log
      - pods/exec
      - pods/portforward
      - daemonsets
      - deployments
      - services
      - replicasets
      - replicationcontrollers
      - statefulsets
      - horizontalpodautoscalers
      - jobs
      - cronjobs
      - events
      - ingresses
      - persistentvolumeclaims
      - certificates
      - configmaps
      - secrets
      - logs
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - delete
      - patch
  - apiGroups: [ "certmanager.k8s.io" ]
    resources:
      - issuers
    verbs:
      - get
      - list
      - watch
  - apiGroups: [ "certmanager.k8s.io" ]
    resources:
      - certificates
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - delete
      - patch
  - apiGroups: [ "networking.k8s.io" ]
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - delete
      - patch
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: admin-rolebinding
  namespace: ${nspace}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: admin-role
subjects:
  - kind: ServiceAccount
    name: admin-sa
    namespace: ${nspace}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: admin-${nspace}-clusterrole
rules:
  - apiGroups: [ "" ]
    resources:
      - persistentvolumes
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - delete
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: admin-${nspace}-clusterrolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: admin-${nspace}-clusterrole
subjects:
  - kind: ServiceAccount
    name: admin-sa
    namespace: ${nspace}
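Both bindings target ServiceAccounts (`admin-sa`, `backup-agent-sa`) that are not part of this diff; presumably they are created alongside these manifests, along the lines of this sketch (same `${nspace}` templating assumed):

```yaml
# Assumed, not in this diff: the ServiceAccounts the RoleBindings above bind.
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-sa
  namespace: ${nspace}
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: backup-agent-sa
  namespace: ${nspace}
```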