import from rtg
commit 21a59cba7f
98 changed files with 37309 additions and 0 deletions
.gitignore (vendored, new file, 4 lines)
@@ -0,0 +1,4 @@
.DS_Store
kubespray/
config
.gitlab-ci.yml (new file, 26 lines)
@@ -0,0 +1,26 @@
stages:
  - terraforming

staging:
  stage: terraforming
  variables:
    TF_STATE_NAME: staging
    TF_CACHE_KEY: staging
    TF_ROOT: terraform/staging
  trigger:
    include: terraform/staging/.gitlab-ci.yml
  rules:
    - changes:
        - terraform/staging/*

testbed:
  stage: terraforming
  variables:
    TF_STATE_NAME: testbed
    TF_CACHE_KEY: testbed
    TF_ROOT: terraform/testbed
  trigger:
    include: terraform/testbed/.gitlab-ci.yml
  rules:
    - changes:
        - terraform/testbed/*
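Each job above runs only when files under its terraform/ directory change, and then triggers the child pipeline defined in that directory's .gitlab-ci.yml. A quick local sanity check of one of those Terraform roots might look like this (a sketch assuming the standard Terraform CLI; the GitLab-managed state backend is skipped for the local run):

    cd terraform/staging
    terraform init -backend=false   # skip the GitLab-managed backend for a local check
    terraform validate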
README.md (new file, 20 lines)
@@ -0,0 +1,20 @@
# Kubernetes Cluster

Application cluster installer based on

- kubespray
- k8x

# Start from clusteradmin provisioning host

./init.sh - set up kubespray based on kube-inventory so we can manage a k8s cluster with it

# Start from master1

./setup-apps.sh - post-install system apps like ingress, registry and monitoring to make the cluster usable

./setup-scripts.sh - set up additional helper scripts

./setup-env.sh <envname> - create a new namespace and set up an additional keypair, service accounts, RBAC, limits (and an optional wildcard SSL certificate)

./attach-private-registry <regname> - set up a secret for an external private registry
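A typical bootstrap following the steps above could look like this (a hypothetical session; the environment and registry names are placeholders):

    # on the clusteradmin provisioning host
    ./init.sh

    # on master1, once the cluster is up
    ./setup-apps.sh
    ./setup-scripts.sh
    ./setup-env.sh develop
    ./attach-private-registry.sh gitlab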
attach-private-registry.sh (new executable file, 47 lines)
@@ -0,0 +1,47 @@
#!/bin/bash

echo ""
echo "... ] Attaching private Docker registry [ ..."
echo ""

if [ -z "$1" ]; then
    echo "] Usage: ./attach-private-registry.sh <registry-name>"
    exit 2
fi
REG_NAME="$1"

echo -n "] Target secret namespace: "
read -r NSPACE
if [ -z "$NSPACE" ]; then
    echo "] No namespace"
    exit 1
fi

echo -n "] Enter Docker registry user: "
read -r REGISTRY_USER

echo -n "] Enter Docker registry password (token): "
read -r REGISTRY_PASS

echo -n "] Enter Docker registry email: "
read -r REGISTRY_EMAIL

echo -n "] Enter Docker registry url (empty for Docker Hub): "
read -r REGISTRY_URL
if [ -z "$REGISTRY_URL" ]; then
    CONFIG_URL="--docker-server=https://index.docker.io/v2/"
else
    CONFIG_URL="--docker-server=https://${REGISTRY_URL}/v2/"
fi

SECRET_NAME="registry-${NSPACE}-${REG_NAME}"
SECRET_CONFIG="${CONFIG_URL} --docker-username=${REGISTRY_USER} --docker-password=${REGISTRY_PASS} --docker-email=${REGISTRY_EMAIL}"
CMD="/usr/local/bin/k -n ${NSPACE} create secret docker-registry ${SECRET_NAME} ${SECRET_CONFIG}"

echo ""
echo "Executing command: ${CMD}"
echo -n "Is that okay [y/n]? "
read -r answer
if [ "$answer" != "${answer#[Yy]}" ]; then
    ${CMD}
fi
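For reference, the interactive script above boils down to a single kubectl command. With the sample values from config.dist and a namespace of "develop" it would generate something like the following (illustrative values only; /usr/local/bin/k is assumed to be a kubectl wrapper installed by setup-scripts.sh):

    kubectl -n develop create secret docker-registry registry-develop-gitlab \
      --docker-server=https://registry.develop.example.com/v2/ \
      --docker-username=deployer \
      --docker-password='pass123' \
      --docker-email=admin@example.com

    # workloads can then pull from the registry, e.g. by attaching the secret
    # to the namespace's default service account:
    kubectl -n develop patch serviceaccount default \
      -p '{"imagePullSecrets": [{"name": "registry-develop-gitlab"}]}'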
config.dist (new file, 39 lines)
@@ -0,0 +1,39 @@
#GLOBAL
NTP_SERVER=10.15.8.80
CERT_MODE=False
CLUSTER_DOMAIN=k8test.example.com
CLUSTER_SUPP_ADDR="[10.15.0.2, kube.k8test.example.com]"
EXT_LB_DOMAIN=kube.example.com
EXT_LB_ADDRESS=10.15.0.2
EXT_LB_PORT=16444

#INGRESS-NGINX
MAXMIND_LIC="4rD1ICHnexjd6KaY"

#CERT-MANAGER
ADMIN_EMAIL=admin@example.com
CLOUDFLARE_API_KEY=000

#STORAGE
CEPH_ADMIN_KEY=""
CEPH_USER_KEY=""
CEPH_MONITOR_1="10.15.8.91"
CEPH_MONITOR_2="10.15.8.92"
CEPH_MONITOR_3="10.15.8.93"

#REGISTRY
REGISTRY_URL="registry.develop.example.com"
REGISTRY_USER=deployer
REGISTRY_PASS=pass123
REGISTRY_INTERNAL=False

#MONITORING
ZABBIX_SERVER="10.15.0.2"
ZABBIX_PSK=asdqwe123
ZABBIX_PSK_ID=PSK
GRAFANA_SMTP_HOST=email-smtp.eu-west-1.amazonaws.com
GRAFANA_SMTP_USER=user
GRAFANA_SMTP_PASSWORD="asdqwe123"
GRAFANA_SMTP_FROM_ADDRESS="no-reply@example.com"
LOKI_STORAGE_SIZE=128Gi
LOKI_RETENTION=long
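config.dist is only a template; init.sh sources a file named config, which .gitignore deliberately keeps out of version control. A minimal first step is therefore:

    cp config.dist config
    $EDITOR config   # set domains, load balancer, Ceph keys, registry and monitoring credentials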
init.sh (new executable file, 48 lines)
@@ -0,0 +1,48 @@
#!/bin/bash

# k8x v2

KUBESPRAY_TAG="release-2.16"

if [ -f config ]; then
    echo "config file FOUND :)"
    source config
else
    echo "config file is missing."
    exit 1
fi

echo "] Install packages to cluster admin node ..."
apt update
apt install git rsync python3-pip -y

echo "] Clone Kubespray ..."
git clone https://github.com/kubernetes-sigs/kubespray.git

echo "] Sync our recipes ..."
rsync -av inventory/ kubespray/inventory/kube/

echo "- setup external load balancer variables from config ..."
sed -i "s#var_lb_domain#${EXT_LB_DOMAIN}#g" "kubespray/inventory/kube/group_vars/all/all.yml"
sed -i "s#var_lb_address#${EXT_LB_ADDRESS}#g" "kubespray/inventory/kube/group_vars/all/all.yml"
sed -i "s#var_lb_port#${EXT_LB_PORT}#g" "kubespray/inventory/kube/group_vars/all/all.yml"

echo "- setup additional addresses for the kube ssl cert ..."
sed -i "s#var_cluster_supp_addr#${CLUSTER_SUPP_ADDR}#g" "kubespray/inventory/kube/group_vars/k8s_cluster/k8s-cluster.yml"

echo "- setup ceph variables from config ..."
sed -i "s#var_ceph_monitors#${CEPH_MONITOR_1}:6789,${CEPH_MONITOR_2}:6789,${CEPH_MONITOR_3}:6789#g" "kubespray/inventory/kube/group_vars/k8s_cluster/addons.yml"
sed -i "s#var_ceph_admin_key#${CEPH_ADMIN_KEY}#g" "kubespray/inventory/kube/group_vars/k8s_cluster/addons.yml"
sed -i "s#var_ceph_user_key#${CEPH_USER_KEY}#g" "kubespray/inventory/kube/group_vars/k8s_cluster/addons.yml"

echo "] Switch to $KUBESPRAY_TAG branch ..."
cd kubespray/
git checkout $KUBESPRAY_TAG
cd ..

echo "] Patch $KUBESPRAY_TAG ..."
rsync -av patches/${KUBESPRAY_TAG}/ kubespray/

echo "] Install python requirements ..."
cd kubespray/
pip3 install -r requirements.txt
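After init.sh has prepared and patched the kubespray checkout, the cluster itself is built with the standard kubespray playbook. A sketch of that next step (the exact name of the hosts file under inventory/kube/ is an assumption and may differ in this repo):

    cd kubespray/
    ansible-playbook -i inventory/kube/hosts.yaml --become --become-user=root cluster.yml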
inventory/group_vars/all/all.yml (new file, 118 lines)
@@ -0,0 +1,118 @@
---
|
||||
## Directory where etcd data stored
|
||||
etcd_data_dir: /var/lib/etcd
|
||||
|
||||
## Experimental kubeadm etcd deployment mode. Available only for new deployment
|
||||
etcd_kubeadm_enabled: false
|
||||
|
||||
## Directory where the binaries will be installed
|
||||
bin_dir: /usr/local/bin
|
||||
|
||||
## The access_ip variable is used to define how other nodes should access
|
||||
## the node. This is used in flannel to allow other flannel nodes to see
|
||||
## this node, for example. The access_ip is really useful in AWS and Google
|
||||
## environments where the nodes are accessed remotely by the "public" ip,
|
||||
## but don't know about that address themselves.
|
||||
# access_ip: 1.1.1.1
|
||||
|
||||
## External LB example config
|
||||
#apiserver_loadbalancer_domain_name: "var_lb_domain"
|
||||
#loadbalancer_apiserver:
|
||||
# address: var_lb_address
|
||||
# port: var_lb_port
|
||||
|
||||
## Internal loadbalancers for apiservers
|
||||
loadbalancer_apiserver_localhost: true
|
||||
# valid options are "nginx" or "haproxy"
|
||||
# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy"
|
||||
loadbalancer_apiserver_type: "haproxy"
|
||||
|
||||
## If the cilium is going to be used in strict mode, we can use the
|
||||
## localhost connection and not use the external LB. If this parameter is
|
||||
## not specified, the first node to connect to kubeapi will be used.
|
||||
use_localhost_as_kubeapi_loadbalancer: true
|
||||
|
||||
## Local loadbalancer should use this port
|
||||
## and must be set to port 6443
|
||||
loadbalancer_apiserver_port: 6443
|
||||
|
||||
## If loadbalancer_apiserver_healthcheck_port variable defined, enables proxy liveness check for nginx.
|
||||
loadbalancer_apiserver_healthcheck_port: 8081
|
||||
|
||||
### OTHER OPTIONAL VARIABLES
|
||||
|
||||
## Upstream dns servers
|
||||
upstream_dns_servers:
|
||||
- 8.8.8.8
|
||||
- 1.1.1.1
|
||||
|
||||
## There are some changes specific to the cloud providers
|
||||
## for instance we need to encapsulate packets with some network plugins
|
||||
## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
|
||||
## When openstack is used make sure to source in the openstack credentials
|
||||
## like you would do when using openstack-client before starting the playbook.
|
||||
# cloud_provider:
|
||||
|
||||
## When cloud_provider is set to 'external', you can set the cloud controller to deploy
|
||||
## Supported cloud controllers are: 'openstack' and 'vsphere'
|
||||
## When openstack or vsphere are used make sure to source in the required fields
|
||||
# external_cloud_provider:
|
||||
|
||||
## Set these proxy values in order to update package manager and docker daemon to use proxies
|
||||
# http_proxy: ""
|
||||
# https_proxy: ""
|
||||
|
||||
## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
|
||||
# no_proxy: ""
|
||||
|
||||
## Some problems may occur when downloading files over https proxy due to ansible bug
|
||||
## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
|
||||
## SSL validation of get_url module. Note that kubespray will still be performing checksum validation.
|
||||
# download_validate_certs: False
|
||||
|
||||
## If you need exclude all cluster nodes from proxy and other resources, add other resources here.
|
||||
# additional_no_proxy: ""
|
||||
|
||||
## If you need to disable proxying of os package repositories but are still behind an http_proxy set
|
||||
## skip_http_proxy_on_os_packages to true
|
||||
## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu
|
||||
## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt package will install from your source of wish
|
||||
# skip_http_proxy_on_os_packages: false
|
||||
|
||||
## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all
|
||||
## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the
|
||||
## no_proxy variable, set below to true:
|
||||
no_proxy_exclude_workers: false
|
||||
|
||||
## Certificate Management
|
||||
## This setting determines whether certs are generated via scripts.
|
||||
## Chose 'none' if you provide your own certificates.
|
||||
## Option is "script", "none"
|
||||
# cert_management: script
|
||||
|
||||
## Set to true to allow pre-checks to fail and continue deployment
|
||||
# ignore_assert_errors: false
|
||||
|
||||
## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
|
||||
# kube_read_only_port: 10255
|
||||
|
||||
## Set true to download and cache container
|
||||
# download_container: true
|
||||
|
||||
## Deploy container engine
|
||||
# Set false if you want to deploy container engine manually.
|
||||
# deploy_container_engine: true
|
||||
|
||||
## Red Hat Enterprise Linux subscription registration
|
||||
## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination
|
||||
## Update RHEL subscription purpose usage, role and SLA if necessary
|
||||
# rh_subscription_username: ""
|
||||
# rh_subscription_password: ""
|
||||
# rh_subscription_org_id: ""
|
||||
# rh_subscription_activation_key: ""
|
||||
# rh_subscription_usage: "Development"
|
||||
# rh_subscription_role: "Red Hat Enterprise Server"
|
||||
# rh_subscription_sla: "Self-Support"
|
||||
|
||||
## Check if access_ip responds to ping. Set false if your firewall blocks ICMP.
|
||||
# ping_access_ip: true
|
inventory/group_vars/all/aws.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
## To use AWS EBS CSI Driver to provision volumes, uncomment the first value
|
||||
## and configure the parameters below
|
||||
# aws_ebs_csi_enabled: true
|
||||
# aws_ebs_csi_enable_volume_scheduling: true
|
||||
# aws_ebs_csi_enable_volume_snapshot: false
|
||||
# aws_ebs_csi_enable_volume_resizing: false
|
||||
# aws_ebs_csi_controller_replicas: 1
|
||||
# aws_ebs_csi_plugin_image_tag: latest
|
||||
# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment"
|
inventory/group_vars/all/azure.yml (new file, 40 lines)
@@ -0,0 +1,40 @@
## When azure is used, you need to also set the following variables.
|
||||
## see docs/azure.md for details on how to get these values
|
||||
|
||||
# azure_cloud:
|
||||
# azure_tenant_id:
|
||||
# azure_subscription_id:
|
||||
# azure_aad_client_id:
|
||||
# azure_aad_client_secret:
|
||||
# azure_resource_group:
|
||||
# azure_location:
|
||||
# azure_subnet_name:
|
||||
# azure_security_group_name:
|
||||
# azure_security_group_resource_group:
|
||||
# azure_vnet_name:
|
||||
# azure_vnet_resource_group:
|
||||
# azure_route_table_name:
|
||||
# azure_route_table_resource_group:
|
||||
# supported values are 'standard' or 'vmss'
|
||||
# azure_vmtype: standard
|
||||
|
||||
## Azure Disk CSI credentials and parameters
|
||||
## see docs/azure-csi.md for details on how to get these values
|
||||
|
||||
# azure_csi_tenant_id:
|
||||
# azure_csi_subscription_id:
|
||||
# azure_csi_aad_client_id:
|
||||
# azure_csi_aad_client_secret:
|
||||
# azure_csi_location:
|
||||
# azure_csi_resource_group:
|
||||
# azure_csi_vnet_name:
|
||||
# azure_csi_vnet_resource_group:
|
||||
# azure_csi_subnet_name:
|
||||
# azure_csi_security_group_name:
|
||||
# azure_csi_use_instance_metadata:
|
||||
# azure_csi_tags: "Owner=owner,Team=team,Environment=environment"
|
||||
|
||||
## To enable Azure Disk CSI, uncomment below
|
||||
# azure_csi_enabled: true
|
||||
# azure_csi_controller_replicas: 1
|
||||
# azure_csi_plugin_image_tag: latest
|
inventory/group_vars/all/containerd.yml (new file, 34 lines)
@@ -0,0 +1,34 @@
---
|
||||
# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options
|
||||
|
||||
# containerd_storage_dir: "/var/lib/containerd"
|
||||
# containerd_state_dir: "/run/containerd"
|
||||
# containerd_oom_score: 0
|
||||
|
||||
# containerd_default_runtime: "runc"
|
||||
# containerd_snapshotter: "native"
|
||||
|
||||
# containerd_runtimes:
|
||||
# - name: runc
|
||||
# type: "io.containerd.runc.v2"
|
||||
# engine: ""
|
||||
# root: ""
|
||||
# Example for Kata Containers as additional runtime:
|
||||
# - name: kata
|
||||
# type: "io.containerd.kata.v2"
|
||||
# engine: ""
|
||||
# root: ""
|
||||
|
||||
# containerd_grpc_max_recv_message_size: 16777216
|
||||
# containerd_grpc_max_send_message_size: 16777216
|
||||
|
||||
# containerd_debug_level: "info"
|
||||
|
||||
# containerd_metrics_address: ""
|
||||
|
||||
# containerd_metrics_grpc_histogram: false
|
||||
|
||||
# containerd_registries:
|
||||
# "docker.io": "https://registry-1.docker.io"
|
||||
|
||||
# containerd_max_container_log_line_size: -1
|
inventory/group_vars/all/coreos.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
## Does coreos need auto upgrade, default is true
|
||||
# coreos_auto_upgrade: true
|
inventory/group_vars/all/docker.yml (new file, 59 lines)
@@ -0,0 +1,59 @@
---
|
||||
## Uncomment this if you want to force overlay/overlay2 as docker storage driver
|
||||
## Please note that overlay2 is only supported on newer kernels
|
||||
# docker_storage_options: -s overlay2
|
||||
|
||||
## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7.
|
||||
docker_container_storage_setup: false
|
||||
|
||||
## A disk path must be defined via docker_container_storage_setup_devs.
|
||||
## Otherwise docker-storage-setup will be executed incorrectly.
|
||||
# docker_container_storage_setup_devs: /dev/vdb
|
||||
|
||||
## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver)
|
||||
## Valid options are systemd or cgroupfs, default is systemd
|
||||
# docker_cgroup_driver: systemd
|
||||
|
||||
## Only set this if you have more than 3 nameservers:
|
||||
## If true Kubespray will only use the first 3, otherwise it will fail
|
||||
docker_dns_servers_strict: false
|
||||
|
||||
# Path used to store Docker data
|
||||
docker_daemon_graph: "/var/lib/docker"
|
||||
|
||||
## Used to set docker daemon iptables options to true
|
||||
docker_iptables_enabled: "false"
|
||||
|
||||
# Docker log options
|
||||
# Rotate container stderr/stdout logs at 50m and keep last 5
|
||||
docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"
|
||||
|
||||
# define docker bin_dir
|
||||
docker_bin_dir: "/usr/bin"
|
||||
|
||||
# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1'
|
||||
# kubespray deletes the docker package on each run, so caching the package makes sense
|
||||
docker_rpm_keepcache: 1
|
||||
|
||||
## An obvious use case is allowing insecure-registry access to self hosted registries.
|
||||
## Can be ipaddress and domain_name.
|
||||
## example define 172.19.16.11 or mirror.registry.io
|
||||
# docker_insecure_registries:
|
||||
# - mirror.registry.io
|
||||
# - 172.19.16.11
|
||||
|
||||
## Add other registry mirrors, for example a China registry mirror.
|
||||
# docker_registry_mirrors:
|
||||
# - https://registry.docker-cn.com
|
||||
# - https://mirror.aliyuncs.com
|
||||
|
||||
## If non-empty will override default system MountFlags value.
|
||||
## This option takes a mount propagation flag: shared, slave
|
||||
## or private, which control whether mounts in the file system
|
||||
## namespace set up for docker will receive or propagate mounts
|
||||
## and unmounts. Leave empty for system default
|
||||
# docker_mount_flags:
|
||||
|
||||
## A string of extra options to pass to the docker daemon.
|
||||
## This string should be exactly as you wish it to appear.
|
||||
# docker_options: ""
|
inventory/group_vars/all/gcp.yml (new file, 10 lines)
@@ -0,0 +1,10 @@
## GCP compute Persistent Disk CSI Driver credentials and parameters
|
||||
## See docs/gcp-pd-csi.md for information about the implementation
|
||||
|
||||
## Specify the path to the file containing the service account credentials
|
||||
# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json"
|
||||
|
||||
## To enable GCP Persistent Disk CSI driver, uncomment below
|
||||
# gcp_pd_csi_enabled: true
|
||||
# gcp_pd_csi_controller_replicas: 1
|
||||
# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0"
|
inventory/group_vars/all/oci.yml (new file, 28 lines)
@@ -0,0 +1,28 @@
## When Oracle Cloud Infrastructure is used, set these variables
|
||||
# oci_private_key:
|
||||
# oci_region_id:
|
||||
# oci_tenancy_id:
|
||||
# oci_user_id:
|
||||
# oci_user_fingerprint:
|
||||
# oci_compartment_id:
|
||||
# oci_vnc_id:
|
||||
# oci_subnet1_id:
|
||||
# oci_subnet2_id:
|
||||
## Override these default/optional behaviors if you wish
|
||||
# oci_security_list_management: All
|
||||
## If you would like the controller to manage specific security lists per subnet, provide a mapping of subnet OCIDs to security list OCIDs. Below are examples.
|
||||
# oci_security_lists:
|
||||
# ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
|
||||
# ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
|
||||
## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
|
||||
# oci_use_instance_principals: false
|
||||
# oci_cloud_controller_version: 0.6.0
|
||||
## If you would like to control OCI query rate limits for the controller
|
||||
# oci_rate_limit:
|
||||
# rate_limit_qps_read:
|
||||
# rate_limit_qps_write:
|
||||
# rate_limit_bucket_read:
|
||||
# rate_limit_bucket_write:
|
||||
## Other optional variables
|
||||
# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci)
|
||||
# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above)
|
inventory/group_vars/all/offline.yml (new file, 79 lines)
@@ -0,0 +1,79 @@
---
|
||||
## Global Offline settings
|
||||
### Private Container Image Registry
|
||||
# registry_host: "myprivateregisry.com"
|
||||
# files_repo: "http://myprivatehttpd"
|
||||
### If using CentOS, RedHat, AlmaLinux or Fedora
|
||||
# yum_repo: "http://myinternalyumrepo"
|
||||
### If using Debian
|
||||
# debian_repo: "http://myinternaldebianrepo"
|
||||
### If using Ubuntu
|
||||
# ubuntu_repo: "http://myinternalubunturepo"
|
||||
|
||||
## Container Registry overrides
|
||||
# kube_image_repo: "{{ registry_host }}"
|
||||
# gcr_image_repo: "{{ registry_host }}"
|
||||
# docker_image_repo: "{{ registry_host }}"
|
||||
# quay_image_repo: "{{ registry_host }}"
|
||||
|
||||
## Kubernetes components
|
||||
# kubeadm_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubeadm"
|
||||
# kubectl_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubectl"
|
||||
# kubelet_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubelet"
|
||||
|
||||
## CNI Plugins
|
||||
# cni_download_url: "{{ files_repo }}/kubernetes/cni/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
|
||||
|
||||
## cri-tools
|
||||
# crictl_download_url: "{{ files_repo }}/kubernetes/cri-tools/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
|
||||
|
||||
## [Optional] etcd: only if you **DON'T** use etcd_deployment=host
|
||||
# etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
|
||||
|
||||
# [Optional] Calico: If using Calico network plugin
|
||||
# calicoctl_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
|
||||
# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore
|
||||
# calico_crds_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_version }}.tar.gz"
|
||||
|
||||
# [Optional] helm: only if you set helm_enabled: true
|
||||
# helm_download_url: "{{ files_repo }}/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz"
|
||||
|
||||
# [Optional] crun: only if you set crun_enabled: true
|
||||
# crun_download_url: "{{ files_repo }}/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}"
|
||||
|
||||
# [Optional] kata: only if you set kata_containers_enabled: true
|
||||
# kata_containers_download_url: "{{ files_repo }}/kata-containers/runtime/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz"
|
||||
|
||||
## CentOS/Redhat/AlmaLinux
|
||||
### For EL7, base and extras repo must be available, for EL8, baseos and appstream
|
||||
### By default we enable those repo automatically
|
||||
# rhel_enable_repos: false
|
||||
### Docker / Containerd
|
||||
# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch"
|
||||
# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
|
||||
|
||||
## Fedora
|
||||
### Docker
|
||||
# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}"
|
||||
# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
|
||||
### Containerd
|
||||
# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd"
|
||||
# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
|
||||
|
||||
## Debian
|
||||
### Docker
|
||||
# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce"
|
||||
# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg"
|
||||
### Containerd
|
||||
# containerd_debian_repo_base_url: "{{ ubuntu_repo }}/containerd"
|
||||
# containerd_debian_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
|
||||
# containerd_debian_repo_repokey: 'YOURREPOKEY'
|
||||
|
||||
## Ubuntu
|
||||
### Docker
|
||||
# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce"
|
||||
# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg"
|
||||
### Containerd
|
||||
# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd"
|
||||
# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
|
||||
# containerd_ubuntu_repo_repokey: 'YOURREPOKEY'
|
inventory/group_vars/all/openstack.yml (new file, 49 lines)
@@ -0,0 +1,49 @@
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
|
||||
# openstack_blockstorage_version: "v1/v2/auto (default)"
|
||||
# openstack_blockstorage_ignore_volume_az: yes
|
||||
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
|
||||
# openstack_lbaas_enabled: True
|
||||
# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
|
||||
## To enable automatic floating ip provisioning, specify a subnet.
|
||||
# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
|
||||
## Override default LBaaS behavior
|
||||
# openstack_lbaas_use_octavia: False
|
||||
# openstack_lbaas_method: "ROUND_ROBIN"
|
||||
# openstack_lbaas_provider: "haproxy"
|
||||
# openstack_lbaas_create_monitor: "yes"
|
||||
# openstack_lbaas_monitor_delay: "1m"
|
||||
# openstack_lbaas_monitor_timeout: "30s"
|
||||
# openstack_lbaas_monitor_max_retries: "3"
|
||||
|
||||
## Values for the external OpenStack Cloud Controller
|
||||
# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP"
|
||||
# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP"
|
||||
# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from"
|
||||
# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from"
|
||||
# external_openstack_lbaas_method: "ROUND_ROBIN"
|
||||
# external_openstack_lbaas_provider: "octavia"
|
||||
# external_openstack_lbaas_create_monitor: false
|
||||
# external_openstack_lbaas_monitor_delay: "1m"
|
||||
# external_openstack_lbaas_monitor_timeout: "30s"
|
||||
# external_openstack_lbaas_monitor_max_retries: "3"
|
||||
# external_openstack_lbaas_manage_security_groups: false
|
||||
# external_openstack_lbaas_internal_lb: false
|
||||
# external_openstack_network_ipv6_disabled: false
|
||||
# external_openstack_network_internal_networks: []
|
||||
# external_openstack_network_public_networks: []
|
||||
# external_openstack_metadata_search_order: "configDrive,metadataService"
|
||||
|
||||
## Application credentials to authenticate against Keystone API
|
||||
## Those settings will take precedence over any username and password that might be set in your environment
|
||||
## All of them are required
|
||||
# external_openstack_application_credential_name:
|
||||
# external_openstack_application_credential_id:
|
||||
# external_openstack_application_credential_secret:
|
||||
|
||||
## The tag of the external OpenStack Cloud Controller image
|
||||
# external_openstack_cloud_controller_image_tag: "latest"
|
||||
|
||||
## To use Cinder CSI plugin to provision volumes set this value to true
|
||||
## Make sure to source in the openstack credentials
|
||||
# cinder_csi_enabled: true
|
||||
# cinder_csi_controller_replicas: 1
|
inventory/group_vars/all/vsphere.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
## Values for the external vSphere Cloud Provider
|
||||
# external_vsphere_vcenter_ip: "myvcenter.domain.com"
|
||||
# external_vsphere_vcenter_port: "443"
|
||||
# external_vsphere_insecure: "true"
|
||||
# external_vsphere_user: "administrator@vsphere.local"
|
||||
# external_vsphere_password: "K8s_admin"
|
||||
# external_vsphere_datacenter: "DATACENTER_name"
|
||||
# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id"
|
||||
|
||||
## vSphere version where the VMs are located
|
||||
# external_vsphere_version: "6.7u3"
|
||||
|
||||
## Tags for the external vSphere Cloud Provider images
|
||||
# external_vsphere_cloud_controller_image_tag: "latest"
|
||||
# vsphere_syncer_image_tag: "v1.0.2"
|
||||
# vsphere_csi_attacher_image_tag: "v1.1.1"
|
||||
# vsphere_csi_controller: "v1.0.2"
|
||||
# vsphere_csi_liveness_probe_image_tag: "v1.1.0"
|
||||
# vsphere_csi_provisioner_image_tag: "v1.2.2"
|
||||
# vsphere_csi_resizer_tag: "v1.0.0"
|
||||
|
||||
## To use vSphere CSI plugin to provision volumes set this value to true
|
||||
# vsphere_csi_enabled: true
|
||||
# vsphere_csi_controller_replicas: 1
|
inventory/group_vars/etcd.yml (new file, 22 lines)
@@ -0,0 +1,22 @@
---
|
||||
## Etcd auto compaction retention for mvcc key value store in hour
|
||||
# etcd_compaction_retention: 0
|
||||
|
||||
## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
|
||||
# etcd_metrics: basic
|
||||
|
||||
## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing.
|
||||
## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
|
||||
# etcd_memory_limit: "512M"
|
||||
|
||||
## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than
|
||||
## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check
|
||||
## etcd documentation for more information.
|
||||
# etcd_quota_backend_bytes: "2147483648"
|
||||
|
||||
### ETCD: disable peer client cert authentication.
|
||||
# This affects ETCD_PEER_CLIENT_CERT_AUTH variable
|
||||
# etcd_peer_client_auth: true
|
||||
|
||||
## Settings for etcd deployment type
|
||||
etcd_deployment_type: docker
|
inventory/group_vars/k8s_cluster/addons.yml (new file, 182 lines)
@@ -0,0 +1,182 @@
---
|
||||
# Kubernetes dashboard
|
||||
# RBAC required. see docs/getting-started.md for access details.
|
||||
# dashboard_enabled: false
|
||||
|
||||
# Helm deployment
|
||||
helm_enabled: true
|
||||
|
||||
# Registry deployment
|
||||
registry_enabled: false
|
||||
# registry_namespace: kube-system
|
||||
# registry_storage_class: ""
|
||||
# registry_disk_size: "10Gi"
|
||||
|
||||
# Metrics Server deployment
|
||||
metrics_server_enabled: true
|
||||
# metrics_server_kubelet_insecure_tls: true
|
||||
# metrics_server_metric_resolution: 60s
|
||||
# metrics_server_kubelet_preferred_address_types: "InternalIP"
|
||||
|
||||
# Rancher Local Path Provisioner
|
||||
local_path_provisioner_enabled: false
|
||||
# local_path_provisioner_namespace: "local-path-storage"
|
||||
# local_path_provisioner_storage_class: "local-path"
|
||||
# local_path_provisioner_reclaim_policy: Delete
|
||||
# local_path_provisioner_claim_root: /opt/local-path-provisioner/
|
||||
# local_path_provisioner_debug: false
|
||||
# local_path_provisioner_image_repo: "rancher/local-path-provisioner"
|
||||
# local_path_provisioner_image_tag: "v0.0.19"
|
||||
# local_path_provisioner_helper_image_repo: "busybox"
|
||||
# local_path_provisioner_helper_image_tag: "latest"
|
||||
|
||||
# Local volume provisioner deployment
|
||||
local_volume_provisioner_enabled: false
|
||||
# local_volume_provisioner_namespace: kube-system
|
||||
# local_volume_provisioner_nodelabels:
|
||||
# - kubernetes.io/hostname
|
||||
# - topology.kubernetes.io/region
|
||||
# - topology.kubernetes.io/zone
|
||||
# local_volume_provisioner_storage_classes:
|
||||
# local-storage:
|
||||
# host_dir: /mnt/disks
|
||||
# mount_dir: /mnt/disks
|
||||
# volume_mode: Filesystem
|
||||
# fs_type: ext4
|
||||
# fast-disks:
|
||||
# host_dir: /mnt/fast-disks
|
||||
# mount_dir: /mnt/fast-disks
|
||||
# block_cleaner_command:
|
||||
# - "/scripts/shred.sh"
|
||||
# - "2"
|
||||
# volume_mode: Filesystem
|
||||
# fs_type: ext4
|
||||
|
||||
# CephFS provisioner deployment
|
||||
cephfs_provisioner_enabled: false
|
||||
# cephfs_provisioner_namespace: "cephfs-provisioner"
|
||||
# cephfs_provisioner_cluster: ceph
|
||||
# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
|
||||
# cephfs_provisioner_admin_id: admin
|
||||
# cephfs_provisioner_secret: secret
|
||||
# cephfs_provisioner_storage_class: cephfs
|
||||
# cephfs_provisioner_reclaim_policy: Delete
|
||||
# cephfs_provisioner_claim_root: /volumes
|
||||
# cephfs_provisioner_deterministic_names: true
|
||||
|
||||
# RBD provisioner deployment
|
||||
rbd_provisioner_enabled: true
|
||||
rbd_provisioner_namespace: kube-system
|
||||
rbd_provisioner_replicas: 2
|
||||
rbd_provisioner_monitors: "var_ceph_monitors"
|
||||
rbd_provisioner_pool: kube
|
||||
rbd_provisioner_admin_id: admin
|
||||
rbd_provisioner_secret_name: ceph-secret
|
||||
rbd_provisioner_secret: var_ceph_admin_key
|
||||
rbd_provisioner_user_id: kube
|
||||
rbd_provisioner_user_secret_name: ceph-secret-kube
|
||||
rbd_provisioner_user_secret: var_ceph_user_key
|
||||
rbd_provisioner_user_secret_namespace: kube-system
|
||||
rbd_provisioner_fs_type: ext4
|
||||
rbd_provisioner_image_format: "2"
|
||||
rbd_provisioner_image_features: layering
|
||||
rbd_provisioner_storage_class: rados-block
|
||||
rbd_provisioner_reclaim_policy: Delete
|
||||
|
||||
# Nginx ingress controller deployment
|
||||
ingress_nginx_enabled: false
|
||||
# ingress_nginx_host_network: false
|
||||
ingress_publish_status_address: ""
|
||||
# ingress_nginx_nodeselector:
|
||||
# kubernetes.io/os: "linux"
|
||||
# ingress_nginx_tolerations:
|
||||
# - key: "node-role.kubernetes.io/master"
|
||||
# operator: "Equal"
|
||||
# value: ""
|
||||
# effect: "NoSchedule"
|
||||
# - key: "node-role.kubernetes.io/control-plane"
|
||||
# operator: "Equal"
|
||||
# value: ""
|
||||
# effect: "NoSchedule"
|
||||
# ingress_nginx_namespace: "ingress-nginx"
|
||||
# ingress_nginx_insecure_port: 80
|
||||
# ingress_nginx_secure_port: 443
|
||||
# ingress_nginx_configmap:
|
||||
# map-hash-bucket-size: "128"
|
||||
# ssl-protocols: "TLSv1.2 TLSv1.3"
|
||||
# ingress_nginx_configmap_tcp_services:
|
||||
# 9000: "default/example-go:8080"
|
||||
# ingress_nginx_configmap_udp_services:
|
||||
# 53: "kube-system/coredns:53"
|
||||
# ingress_nginx_extra_args:
|
||||
# - --default-ssl-certificate=default/foo-tls
|
||||
# ingress_nginx_class: nginx
|
||||
|
||||
# ambassador ingress controller deployment
|
||||
ingress_ambassador_enabled: false
|
||||
# ingress_ambassador_namespace: "ambassador"
|
||||
# ingress_ambassador_version: "*"
|
||||
# ingress_ambassador_multi_namespaces: false
|
||||
|
||||
# ALB ingress controller deployment
|
||||
ingress_alb_enabled: false
|
||||
# alb_ingress_aws_region: "us-east-1"
|
||||
# alb_ingress_restrict_scheme: "false"
|
||||
# Enables logging on all outbound requests sent to the AWS API.
|
||||
# If logging is desired, set to true.
|
||||
# alb_ingress_aws_debug: "false"
|
||||
|
||||
# Cert manager deployment
|
||||
cert_manager_enabled: false
|
||||
# cert_manager_namespace: "cert-manager"
|
||||
|
||||
# MetalLB deployment
|
||||
metallb_enabled: false
|
||||
metallb_speaker_enabled: true
|
||||
# metallb_ip_range:
|
||||
# - "10.5.0.50-10.5.0.99"
|
||||
# metallb_speaker_nodeselector:
|
||||
# kubernetes.io/os: "linux"
|
||||
# metallb_controller_nodeselector:
|
||||
# kubernetes.io/os: "linux"
|
||||
# metallb_speaker_tolerations:
|
||||
# - key: "node-role.kubernetes.io/master"
|
||||
# operator: "Equal"
|
||||
# value: ""
|
||||
# effect: "NoSchedule"
|
||||
# - key: "node-role.kubernetes.io/control-plane"
|
||||
# operator: "Equal"
|
||||
# value: ""
|
||||
# effect: "NoSchedule"
|
||||
# metallb_controller_tolerations:
|
||||
# - key: "node-role.kubernetes.io/master"
|
||||
# operator: "Equal"
|
||||
# value: ""
|
||||
# effect: "NoSchedule"
|
||||
# - key: "node-role.kubernetes.io/control-plane"
|
||||
# operator: "Equal"
|
||||
# value: ""
|
||||
# effect: "NoSchedule"
|
||||
# metallb_version: v0.9.6
|
||||
# metallb_protocol: "layer2"
|
||||
# metallb_port: "7472"
|
||||
# metallb_limits_cpu: "100m"
|
||||
# metallb_limits_mem: "100Mi"
|
||||
# metallb_additional_address_pools:
|
||||
# kube_service_pool:
|
||||
# ip_range:
|
||||
# - "10.5.1.50-10.5.1.99"
|
||||
# protocol: "layer2"
|
||||
# auto_assign: false
|
||||
# metallb_protocol: "bgp"
|
||||
# metallb_peers:
|
||||
# - peer_address: 192.0.2.1
|
||||
# peer_asn: 64512
|
||||
# my_asn: 4200000000
|
||||
# - peer_address: 192.0.2.2
|
||||
# peer_asn: 64513
|
||||
# my_asn: 4200000000
|
||||
|
||||
# The plugin manager for kubectl
|
||||
krew_enabled: false
|
||||
krew_root_dir: "/usr/local/krew"
|
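The rbd_provisioner_* values in addons.yml above are wired to the Ceph cluster by init.sh, which replaces the var_ceph_* placeholders from config. A quick way to confirm the provisioner works after deployment is to request a small volume from the rados-block storage class (an illustrative check, not part of the committed file):

    cat <<'EOF' | kubectl apply -f -
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: rbd-smoke-test
    spec:
      accessModes: ["ReadWriteOnce"]
      storageClassName: rados-block
      resources:
        requests:
          storage: 1Gi
    EOF
    kubectl get pvc rbd-smoke-test   # should reach status Bound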
inventory/group_vars/k8s_cluster/k8s-cluster.yml (new file, 312 lines)
@@ -0,0 +1,312 @@
---
|
||||
# Kubernetes configuration dirs and system namespace.
|
||||
# Those are where all the additional config stuff goes
|
||||
# the kubernetes normally puts in /srv/kubernetes.
|
||||
# This puts them in a sane location and namespace.
|
||||
# Editing those values will almost surely break something.
|
||||
kube_config_dir: /etc/kubernetes
|
||||
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
|
||||
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
|
||||
|
||||
# This is where all the cert scripts and certs will be located
|
||||
kube_cert_dir: "{{ kube_config_dir }}/ssl"
|
||||
|
||||
# This is where all of the bearer tokens will be stored
|
||||
kube_token_dir: "{{ kube_config_dir }}/tokens"
|
||||
|
||||
kube_api_anonymous_auth: true
|
||||
|
||||
## Change this to use another Kubernetes version, e.g. a current beta release
|
||||
#kube_version: v1.20.7
|
||||
kube_version: v1.19.4
|
||||
|
||||
# Where the binaries will be downloaded.
|
||||
# Note: ensure that you have enough disk space (about 1G)
|
||||
local_release_dir: "/tmp/releases"
|
||||
# Random shifts for retrying failed ops like pushing/downloading
|
||||
retry_stagger: 5
|
||||
|
||||
# This is the group that the cert creation scripts chgrp the
|
||||
# cert files to. Not really changeable...
|
||||
kube_cert_group: kube-cert
|
||||
|
||||
# Cluster Loglevel configuration
|
||||
kube_log_level: 2
|
||||
|
||||
# Directory where credentials will be stored
|
||||
credentials_dir: "{{ inventory_dir }}/credentials"
|
||||
|
||||
## It is possible to activate / deactivate selected authentication methods (oidc, static token auth)
|
||||
# kube_oidc_auth: false
|
||||
# kube_token_auth: false
|
||||
|
||||
|
||||
## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
|
||||
## To use OpenID you have to additionally deploy an OpenID Provider (e.g. Dex, Keycloak, ...)
|
||||
|
||||
# kube_oidc_url: https:// ...
|
||||
# kube_oidc_client_id: kubernetes
|
||||
## Optional settings for OIDC
|
||||
# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem"
|
||||
# kube_oidc_username_claim: sub
|
||||
# kube_oidc_username_prefix: 'oidc:'
|
||||
# kube_oidc_groups_claim: groups
|
||||
# kube_oidc_groups_prefix: 'oidc:'
|
||||
|
||||
## Variables to control webhook authn/authz
|
||||
# kube_webhook_token_auth: false
|
||||
# kube_webhook_token_auth_url: https://...
|
||||
# kube_webhook_token_auth_url_skip_tls_verify: false
|
||||
|
||||
## For webhook authorization, authorization_modes must include Webhook
|
||||
# kube_webhook_authorization: false
|
||||
# kube_webhook_authorization_url: https://...
|
||||
# kube_webhook_authorization_url_skip_tls_verify: false
|
||||
|
||||
# Choose network plugin (cilium, calico, weave or flannel. Use cni for generic cni plugin)
|
||||
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
|
||||
kube_network_plugin: calico
|
||||
|
||||
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
|
||||
kube_network_plugin_multus: false
|
||||
|
||||
# Kubernetes internal network for services, unused block of space.
|
||||
kube_service_addresses: 10.233.0.0/18
|
||||
|
||||
# internal network. When used, it will assign IP
|
||||
# addresses from this range to individual pods.
|
||||
# This network must be unused in your network infrastructure!
|
||||
kube_pods_subnet: 10.233.64.0/18
|
||||
|
||||
# internal network node size allocation (optional). This is the size allocated
|
||||
# to each node for pod IP address allocation. Note that the number of pods per node is
|
||||
# also limited by the kubelet_max_pods variable which defaults to 110.
|
||||
#
|
||||
# Example:
|
||||
# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node:
|
||||
# - kube_pods_subnet: 10.233.64.0/18
|
||||
# - kube_network_node_prefix: 24
|
||||
# - kubelet_max_pods: 110
|
||||
#
|
||||
# Example:
|
||||
# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node:
|
||||
# - kube_pods_subnet: 10.233.64.0/18
|
||||
# - kube_network_node_prefix: 25
|
||||
# - kubelet_max_pods: 110
|
||||
kube_network_node_prefix: 24
|
||||
|
||||
# Configure Dual Stack networking (i.e. both IPv4 and IPv6)
|
||||
enable_dual_stack_networks: false
|
||||
|
||||
# Kubernetes internal network for IPv6 services, unused block of space.
|
||||
# This is only used if enable_dual_stack_networks is set to true
|
||||
# This provides 4096 IPv6 IPs
|
||||
kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116
|
||||
|
||||
# Internal network. When used, it will assign IPv6 addresses from this range to individual pods.
|
||||
# This network must not already be in your network infrastructure!
|
||||
# This is only used if enable_dual_stack_networks is set to true.
|
||||
# This provides room for 256 nodes with 254 pods per node.
|
||||
kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112
|
||||
|
||||
# IPv6 subnet size allocated to each node for pods.
|
||||
# This is only used if enable_dual_stack_networks is set to true
|
||||
# This provides room for 254 pods per node.
|
||||
kube_network_node_prefix_ipv6: 120
|
||||
|
||||
# The port the API Server will be listening on.
|
||||
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
|
||||
kube_apiserver_port: 6443 # (https)
|
||||
# kube_apiserver_insecure_port: 8080 # (http)
|
||||
# Set to 0 to disable insecure port - Requires RBAC in authorization_modes and kube_api_anonymous_auth: true
|
||||
kube_apiserver_insecure_port: 0 # (disabled)
|
||||
|
||||
# Kube-proxy proxyMode configuration.
|
||||
# Can be ipvs, iptables
|
||||
kube_proxy_mode: ipvs
|
||||
|
||||
# configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface
|
||||
# must be set to true for MetalLB to work
|
||||
kube_proxy_strict_arp: false
|
||||
|
||||
# A string slice of values which specify the addresses to use for NodePorts.
|
||||
# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32).
|
||||
# The default empty string slice ([]) means to use all local addresses.
|
||||
# kube_proxy_nodeport_addresses_cidr is retained for legacy config
|
||||
kube_proxy_nodeport_addresses: >-
|
||||
{%- if kube_proxy_nodeport_addresses_cidr is defined -%}
|
||||
[{{ kube_proxy_nodeport_addresses_cidr }}]
|
||||
{%- else -%}
|
||||
[]
|
||||
{%- endif -%}
|
||||
|
||||
# If non-empty, will use this string as identification instead of the actual hostname
|
||||
# kube_override_hostname: >-
|
||||
# {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
|
||||
# {%- else -%}
|
||||
# {{ inventory_hostname }}
|
||||
# {%- endif -%}
|
||||
|
||||
## Encrypting Secret Data at Rest (experimental)
|
||||
kube_encrypt_secret_data: false
|
||||
|
||||
# DNS configuration.
|
||||
# Kubernetes cluster name, also will be used as DNS domain
|
||||
cluster_name: cluster.local
|
||||
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
|
||||
ndots: 2
|
||||
# Can be coredns, coredns_dual, manual or none
|
||||
dns_mode: coredns
|
||||
# Set manual server if using a custom cluster DNS server
|
||||
# manual_dns_server: 10.x.x.x
|
||||
# Enable nodelocal dns cache
|
||||
enable_nodelocaldns: true
|
||||
nodelocaldns_ip: 169.254.25.10
|
||||
nodelocaldns_health_port: 9254
|
||||
# nodelocaldns_external_zones:
|
||||
# - zones:
|
||||
# - example.com
|
||||
# - example.io:1053
|
||||
# nameservers:
|
||||
# - 1.1.1.1
|
||||
# - 2.2.2.2
|
||||
# cache: 5
|
||||
# - zones:
|
||||
# - https://mycompany.local:4453
|
||||
# nameservers:
|
||||
# - 192.168.0.53
|
||||
# cache: 0
|
||||
# Enable k8s_external plugin for CoreDNS
|
||||
enable_coredns_k8s_external: false
|
||||
coredns_k8s_external_zone: k8s_external.local
|
||||
# Enable endpoint_pod_names option for kubernetes plugin
|
||||
enable_coredns_k8s_endpoint_pod_names: false
|
||||
|
||||
# Can be docker_dns, host_resolvconf or none
|
||||
resolvconf_mode: docker_dns
|
||||
# Deploy netchecker app to verify DNS resolve as an HTTP service
|
||||
deploy_netchecker: false
|
||||
# Ip address of the kubernetes skydns service
|
||||
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
|
||||
skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
|
||||
dns_domain: "{{ cluster_name }}"
|
||||
|
||||
## Container runtime
|
||||
## docker for docker, crio for cri-o and containerd for containerd.
|
||||
container_manager: docker
|
||||
|
||||
# Additional container runtimes
|
||||
kata_containers_enabled: false
|
||||
|
||||
kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}"
|
||||
|
||||
# K8s image pull policy (imagePullPolicy)
|
||||
k8s_image_pull_policy: IfNotPresent
|
||||
|
||||
# audit log for kubernetes
|
||||
kubernetes_audit: false
|
||||
|
||||
# dynamic kubelet configuration
|
||||
dynamic_kubelet_configuration: false
|
||||
|
||||
# define kubelet config dir for dynamic kubelet
|
||||
# kubelet_config_dir:
|
||||
default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
|
||||
dynamic_kubelet_configuration_dir: "{{ kubelet_config_dir | default(default_kubelet_config_dir) }}"
|
||||
|
||||
# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
|
||||
podsecuritypolicy_enabled: false
|
||||
|
||||
# Custom PodSecurityPolicySpec for restricted policy
|
||||
# podsecuritypolicy_restricted_spec: {}
|
||||
|
||||
# Custom PodSecurityPolicySpec for privileged policy
|
||||
# podsecuritypolicy_privileged_spec: {}
|
||||
|
||||
# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
|
||||
# kubeconfig_localhost: false
|
||||
# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
|
||||
# kubectl_localhost: false
|
||||
|
||||
# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
|
||||
# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
|
||||
# kubelet_enforce_node_allocatable: pods
|
||||
|
||||
## Optionally reserve resources for OS system daemons.
|
||||
# system_reserved: true
|
||||
## Uncomment to override default values
|
||||
# system_memory_reserved: 512Mi
|
||||
# system_cpu_reserved: 500m
|
||||
## Reservation for master hosts
|
||||
# system_master_memory_reserved: 256Mi
|
||||
# system_master_cpu_reserved: 250m
|
||||
|
||||
# An alternative flexvolume plugin directory
|
||||
# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
|
||||
|
||||
## Supplementary addresses that can be added in kubernetes ssl keys.
|
||||
## That can be useful for example to setup a keepalived virtual IP
|
||||
# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
|
||||
supplementary_addresses_in_ssl_keys: var_cluster_supp_addr
|
||||
|
||||
## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
|
||||
## See https://github.com/kubernetes-sigs/kubespray/issues/2141
|
||||
## Set this variable to true to get rid of this issue
|
||||
volume_cross_zone_attachment: false
|
||||
## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI,
|
||||
## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI)
|
||||
persistent_volumes_enabled: false
|
||||
|
||||
## Container Engine Acceleration
|
||||
## Enable container acceleration feature, for example use gpu acceleration in containers
|
||||
# nvidia_accelerator_enabled: true
|
||||
## Nvidia GPU driver install. Install will be done by an (init) pod running as a daemonset.
|
||||
## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2'
|
||||
## Array with nvidia_gpu_nodes; leave empty or commented if you don't want to install drivers.
|
||||
## Labels and taints won't be set to nodes if they are not in the array.
|
||||
# nvidia_gpu_nodes:
|
||||
# - kube-gpu-001
|
||||
# nvidia_driver_version: "384.111"
|
||||
## flavor can be tesla or gtx
|
||||
# nvidia_gpu_flavor: gtx
|
||||
## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io.
|
||||
# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2
|
||||
# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63
|
||||
## NVIDIA GPU device plugin image.
|
||||
# nvidia_gpu_device_plugin_container: "k8s.gcr.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e"
|
||||
|
||||
## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13.
|
||||
# tls_min_version: ""
|
||||
|
||||
## Support tls cipher suites.
|
||||
# tls_cipher_suites: {}
|
||||
# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
|
||||
# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
|
||||
# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
|
||||
# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
|
||||
# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
|
||||
# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
|
||||
# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
|
||||
# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
|
||||
# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
|
||||
# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
|
||||
# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
|
||||
# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
|
||||
# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
|
||||
# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
|
||||
# - TLS_ECDHE_RSA_WITH_RC4_128_SHA
|
||||
# - TLS_RSA_WITH_3DES_EDE_CBC_SHA
|
||||
# - TLS_RSA_WITH_AES_128_CBC_SHA
|
||||
# - TLS_RSA_WITH_AES_128_CBC_SHA256
|
||||
# - TLS_RSA_WITH_AES_128_GCM_SHA256
|
||||
# - TLS_RSA_WITH_AES_256_CBC_SHA
|
||||
# - TLS_RSA_WITH_AES_256_GCM_SHA384
|
||||
# - TLS_RSA_WITH_RC4_128_SHA
|
||||
|
||||
## Amount of time to retain events. (default 1h0m0s)
|
||||
event_ttl_duration: "1h0m0s"
|
||||
|
||||
## Automatically renew K8S control plane certificates on first Monday of each month
|
||||
auto_renew_certificates: false
|
||||
# First Monday of each month
|
||||
# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00"
|
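One value in the file above, supplementary_addresses_in_ssl_keys (var_cluster_supp_addr), is a placeholder that init.sh rewrites from CLUSTER_SUPP_ADDR in config. After the cluster is deployed, the extra SANs can be verified on a master node (a sketch; the certificate path follows kube_cert_dir defined above and may differ):

    openssl x509 -noout -text -in /etc/kubernetes/ssl/apiserver.crt \
      | grep -A2 'Subject Alternative Name'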
inventory/group_vars/k8s_cluster/k8s-net-calico.yml (new file, 102 lines)
@@ -0,0 +1,102 @@
# see roles/network_plugin/calico/defaults/main.yml
|
||||
|
||||
## With calico it is possible to distribute routes with border routers of the datacenter.
|
||||
## Warning : enabling router peering will disable calico's default behavior ('node mesh').
|
||||
## The subnets of each node will be distributed by the datacenter router
|
||||
# peer_with_router: false
|
||||
|
||||
# Enables Internet connectivity from containers
|
||||
# nat_outgoing: true
|
||||
|
||||
# Enables Calico CNI "host-local" IPAM plugin
|
||||
# calico_ipam_host_local: true
|
||||
|
||||
# add default ippool name
|
||||
# calico_pool_name: "default-pool"
|
||||
|
||||
# add default ippool blockSize (defaults kube_network_node_prefix)
|
||||
# calico_pool_blocksize: 24
|
||||
|
||||
# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise)
|
||||
# calico_pool_cidr: 1.2.3.4/5
|
||||
|
||||
# Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set.
|
||||
# calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
|
||||
|
||||
# Global as_num (/calico/bgp/v1/global/as_num)
|
||||
# global_as_num: "64512"
|
||||
|
||||
# If doing peering with node-assigned ASNs where the global ASN does not match your nodes, you want this
|
||||
# to be true. All other cases, false.
|
||||
# calico_no_global_as_num: false
|
||||
|
||||
# You can set MTU value here. If left undefined or empty, it will
|
||||
# not be specified in calico CNI config, so Calico will use built-in
|
||||
# defaults. The value should be a number, not a string.
|
||||
calico_mtu: 1410
|
||||
|
||||
# Configure the MTU to use for workload interfaces and tunnels.
|
||||
# - If Wireguard is enabled, set to your network MTU - 60
|
||||
# - Otherwise, if VXLAN or BPF mode is enabled, set to your network MTU - 50
|
||||
# - Otherwise, if IPIP is enabled, set to your network MTU - 20
|
||||
# - Otherwise, if not using any encapsulation, set to your network MTU.
|
||||
# calico_veth_mtu: 1440
|
||||
|
||||
# Advertise Cluster IPs
|
||||
# calico_advertise_cluster_ips: true
|
||||
|
||||
# Advertise Service External IPs
|
||||
# calico_advertise_service_external_ips:
|
||||
# - x.x.x.x/24
|
||||
# - y.y.y.y/32
|
||||
|
||||
# Advertise Service LoadBalancer IPs
|
||||
# calico_advertise_service_loadbalancer_ips:
|
||||
# - x.x.x.x/24
|
||||
# - y.y.y.y/16
|
||||
|
||||
# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
|
||||
# calico_datastore: "kdd"
|
||||
|
||||
# Choose Calico iptables backend: "Legacy", "Auto" or "NFT"
|
||||
# calico_iptables_backend: "Legacy"
|
||||
|
||||
# Use typha (only with kdd)
|
||||
# typha_enabled: false
|
||||
|
||||
# Generate TLS certs for secure typha<->calico-node communication
|
||||
# typha_secure: false
|
||||
|
||||
# Scaling typha: 1 replica per 100 nodes is adequate
|
||||
# Number of typha replicas
|
||||
# typha_replicas: 1
|
||||
|
||||
# Set max typha connections
|
||||
# typha_max_connections_lower_limit: 300
|
||||
|
||||
# Set calico network backend: "bird", "vxlan" or "none"
|
||||
# bird enable BGP routing, required for ipip mode.
|
||||
# calico_network_backend: bird
|
||||
|
||||
# IP in IP and VXLAN are mutually exclusive modes.
|
||||
# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never"
|
||||
# calico_ipip_mode: 'Always'
|
||||
|
||||
# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never"
|
||||
# calico_vxlan_mode: 'Never'
|
||||
|
||||
# set VXLAN port and VNI
|
||||
# calico_vxlan_vni: 4096
|
||||
# calico_vxlan_port: 4789
|
||||
|
||||
# If you want to use non default IP_AUTODETECTION_METHOD for calico node set this option to one of:
|
||||
# * can-reach=DESTINATION
|
||||
# * interface=INTERFACE-REGEX
|
||||
# see https://docs.projectcalico.org/reference/node/configuration
|
||||
# calico_ip_auto_method: "interface=eth.*"
|
||||
# Choose the iptables insert mode for Calico: "Insert" or "Append".
|
||||
# calico_felix_chaininsertmode: Insert
|
||||
|
||||
# If you want to use the default route interface when you have multiple interfaces with dynamic routes (iproute2),
|
||||
# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS
|
||||
# calico_use_default_route_src_ipaddr: false
|
10
inventory/group_vars/k8s_cluster/k8s-net-canal.yml
Normal file
10
inventory/group_vars/k8s_cluster/k8s-net-canal.yml
Normal file
|
@ -0,0 +1,10 @@
|
|||
# see roles/network_plugin/canal/defaults/main.yml
|
||||
|
||||
# The interface used by canal for host <-> host communication.
|
||||
# If left blank, then the interface is chosen using the node's
|
||||
# default route.
|
||||
# canal_iface: ""
|
||||
|
||||
# Whether or not to masquerade traffic to destinations not within
|
||||
# the pod network.
|
||||
# canal_masquerade: "true"
|
1
inventory/group_vars/k8s_cluster/k8s-net-cilium.yml
Normal file
1
inventory/group_vars/k8s_cluster/k8s-net-cilium.yml
Normal file
|
@ -0,0 +1 @@
|
|||
# see roles/network_plugin/cilium/defaults/main.yml
|
18
inventory/group_vars/k8s_cluster/k8s-net-flannel.yml
Normal file
18
inventory/group_vars/k8s_cluster/k8s-net-flannel.yml
Normal file
|
@ -0,0 +1,18 @@
|
|||
# see roles/network_plugin/flannel/defaults/main.yml
|
||||
|
||||
## interface that should be used for flannel operations
|
||||
## This is actually an inventory cluster-level item
|
||||
# flannel_interface:
|
||||
|
||||
## Select interface that should be used for flannel operations by regexp on Name or IP
|
||||
## This is actually an inventory cluster-level item
|
||||
## example: select interface with ip from net 10.0.0.0/23
|
||||
## single quote and escape backslashes
|
||||
# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'
|
||||
|
||||
# You can choose what type of flannel backend to use: 'vxlan' or 'host-gw'
|
||||
# for experimental backend
|
||||
# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md
|
||||
# flannel_backend_type: "vxlan"
|
||||
# flannel_vxlan_vni: 1
|
||||
# flannel_vxlan_port: 8472
|
61
inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml
Normal file
61
inventory/group_vars/k8s_cluster/k8s-net-kube-router.yml
Normal file
|
@ -0,0 +1,61 @@
|
|||
# See roles/network_plugin/kube-router/defaults/main.yml
|
||||
|
||||
# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP
|
||||
# kube_router_run_router: true
|
||||
|
||||
# Enables Network Policy -- sets up iptables to provide ingress firewall for pods
|
||||
# kube_router_run_firewall: true
|
||||
|
||||
# Enables Service Proxy -- sets up IPVS for Kubernetes Services
|
||||
# see docs/kube-router.md "Caveats" section
|
||||
# kube_router_run_service_proxy: false
|
||||
|
||||
# Add Cluster IP of the service to the RIB so that it gets advertised to the BGP peers.
|
||||
# kube_router_advertise_cluster_ip: false
|
||||
|
||||
# Add External IP of service to the RIB so that it gets advertised to the BGP peers.
|
||||
# kube_router_advertise_external_ip: false
|
||||
|
||||
# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers.
|
||||
# kube_router_advertise_loadbalancer_ip: false
|
||||
|
||||
# Adjust manifest of kube-router daemonset template with DSR needed changes
|
||||
# kube_router_enable_dsr: false
|
||||
|
||||
# Array of arbitrary extra arguments to kube-router, see
|
||||
# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md
|
||||
# kube_router_extra_args: []
|
||||
|
||||
# ASN numbers of the BGP peer to which cluster nodes will advertise cluster ip and node's pod cidr.
|
||||
# kube_router_peer_router_asns: ~
|
||||
|
||||
# The IP address of the external router to which all nodes will peer and advertise the cluster IP and pod CIDRs.
|
||||
# kube_router_peer_router_ips: ~
|
||||
|
||||
# The remote port of the external BGP peer to which all nodes will connect. If not set, the default BGP port (179) will be used.
|
||||
# kube_router_peer_router_ports: ~
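# Illustrative peering sketch (placeholder values, not part of this inventory):
# peer every node with one upstream router at 10.0.0.254, ASN 64513, default port
# kube_router_peer_router_asns: "64513"
# kube_router_peer_router_ips: "10.0.0.254"
# kube_router_peer_router_ports: "179"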
|
||||
|
||||
# Sets up node CNI to allow hairpin mode; requires node reboots, see
|
||||
# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode
|
||||
# kube_router_support_hairpin_mode: false
|
||||
|
||||
# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc.
|
||||
# kube_router_dns_policy: ClusterFirstWithHostNet
|
||||
|
||||
# Array of annotations for master
|
||||
# kube_router_annotations_master: []
|
||||
|
||||
# Array of annotations for every node
|
||||
# kube_router_annotations_node: []
|
||||
|
||||
# Array of common annotations for every node
|
||||
# kube_router_annotations_all: []
|
||||
|
||||
# Enables scraping kube-router metrics with Prometheus
|
||||
# kube_router_enable_metrics: false
|
||||
|
||||
# Path to serve Prometheus metrics on
|
||||
# kube_router_metrics_path: /metrics
|
||||
|
||||
# Prometheus metrics port to use
|
||||
# kube_router_metrics_port: 9255
|
6
inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml
Normal file
6
inventory/group_vars/k8s_cluster/k8s-net-macvlan.yml
Normal file
|
@ -0,0 +1,6 @@
|
|||
---
|
||||
# private interface, on an l2-network
|
||||
macvlan_interface: "eth1"
|
||||
|
||||
# Enable NAT on the default gateway network interface
|
||||
enable_nat_default_gateway: true
|
61
inventory/group_vars/k8s_cluster/k8s-net-weave.yml
Normal file
61
inventory/group_vars/k8s_cluster/k8s-net-weave.yml
Normal file
|
@ -0,0 +1,61 @@
|
|||
# see roles/network_plugin/weave/defaults/main.yml
|
||||
|
||||
# Weave's network password for encryption; if null, no network encryption is used.
|
||||
# weave_password: ~
|
||||
|
||||
# If set to 1, disable checking for new Weave Net versions (default is blank,
|
||||
# i.e. check is enabled)
|
||||
# weave_checkpoint_disable: false
|
||||
|
||||
# Soft limit on the number of connections between peers. Defaults to 100.
|
||||
# weave_conn_limit: 100
|
||||
|
||||
# Weave Net defaults to enabling hairpin on the bridge side of the veth pair
|
||||
# for containers attached. If you need to disable hairpin, e.g. your kernel is
|
||||
# one of those that can panic if hairpin is enabled, then you can disable it by
|
||||
# setting `HAIRPIN_MODE=false`.
|
||||
# weave_hairpin_mode: true
|
||||
|
||||
# The range of IP addresses used by Weave Net and the subnet they are placed in
|
||||
# (CIDR format; default 10.32.0.0/12)
|
||||
# weave_ipalloc_range: "{{ kube_pods_subnet }}"
|
||||
|
||||
# Set to 0 to disable Network Policy Controller (default is on)
|
||||
# weave_expect_npc: "{{ enable_network_policy }}"
|
||||
|
||||
# List of addresses of peers in the Kubernetes cluster (default is to fetch the
|
||||
# list from the api-server)
|
||||
# weave_kube_peers: ~
|
||||
|
||||
# Set the initialization mode of the IP Address Manager (defaults to consensus
|
||||
# amongst the KUBE_PEERS)
|
||||
# weave_ipalloc_init: ~
|
||||
|
||||
# Set the IP address used as a gateway from the Weave network to the host
|
||||
# network - this is useful if you are configuring the addon as a static pod.
|
||||
# weave_expose_ip: ~
|
||||
|
||||
# Address and port that the Weave Net daemon will serve Prometheus-style
|
||||
# metrics on (defaults to 0.0.0.0:6782)
|
||||
# weave_metrics_addr: ~
|
||||
|
||||
# Address and port that the Weave Net daemon will serve status requests on
|
||||
# (defaults to disabled)
|
||||
# weave_status_addr: ~
|
||||
|
||||
# Weave Net defaults to 1376 bytes, but you can set a smaller size if your
|
||||
# underlying network has a tighter limit, or set a larger size for better
|
||||
# performance if your network supports jumbo frames (e.g. 8916)
|
||||
# weave_mtu: 1376
|
||||
|
||||
# Set to 1 to preserve the client source IP address when accessing Service
|
||||
# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works
|
||||
# only with Weave IPAM (default).
|
||||
# weave_no_masq_local: true
|
||||
|
||||
# set to nft to use nftables backend for iptables (default is iptables)
|
||||
# weave_iptables_backend: iptables
|
||||
|
||||
# Extra variables that are passed to launch.sh, useful for enabling seed mode, see
|
||||
# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/
|
||||
# weave_extra_args: ~
|
81
inventory/hosts-dev.yaml
Normal file
81
inventory/hosts-dev.yaml
Normal file
|
@ -0,0 +1,81 @@
|
|||
all:
|
||||
hosts:
|
||||
devmaster01:
|
||||
ansible_host: 10.15.8.1
|
||||
ip: 10.15.8.1
|
||||
access_ip: 10.15.8.1
|
||||
devmaster02:
|
||||
ansible_host: 10.15.8.2
|
||||
ip: 10.15.8.2
|
||||
access_ip: 10.15.8.2
|
||||
devmaster03:
|
||||
ansible_host: 10.15.8.3
|
||||
ip: 10.15.8.3
|
||||
access_ip: 10.15.8.3
|
||||
worker01:
|
||||
ansible_host: 10.15.8.21
|
||||
ip: 10.15.8.21
|
||||
access_ip: 10.15.8.21
|
||||
worker02:
|
||||
ansible_host: 10.15.8.22
|
||||
ip: 10.15.8.22
|
||||
access_ip: 10.15.8.22
|
||||
worker03:
|
||||
ansible_host: 10.15.8.23
|
||||
ip: 10.15.8.23
|
||||
access_ip: 10.15.8.23
|
||||
worker04:
|
||||
ansible_host: 10.15.8.24
|
||||
ip: 10.15.8.24
|
||||
access_ip: 10.15.8.24
|
||||
worker05:
|
||||
ansible_host: 10.15.8.25
|
||||
ip: 10.15.8.25
|
||||
access_ip: 10.15.8.25
|
||||
worker06:
|
||||
ansible_host: 10.15.8.26
|
||||
ip: 10.15.8.26
|
||||
access_ip: 10.15.8.26
|
||||
worker07:
|
||||
ansible_host: 10.15.8.27
|
||||
ip: 10.15.8.27
|
||||
access_ip: 10.15.8.27
|
||||
worker08:
|
||||
ansible_host: 10.15.8.28
|
||||
ip: 10.15.8.28
|
||||
access_ip: 10.15.8.28
|
||||
worker09:
|
||||
ansible_host: 10.15.8.29
|
||||
ip: 10.15.8.29
|
||||
access_ip: 10.15.8.29
|
||||
children:
|
||||
kube_control_plane:
|
||||
hosts:
|
||||
devmaster01:
|
||||
devmaster02:
|
||||
devmaster03:
|
||||
kube_node:
|
||||
hosts:
|
||||
devmaster01:
|
||||
devmaster02:
|
||||
devmaster03:
|
||||
worker01:
|
||||
worker02:
|
||||
worker03:
|
||||
worker04:
|
||||
worker05:
|
||||
worker06:
|
||||
worker07:
|
||||
worker08:
|
||||
worker09:
|
||||
etcd:
|
||||
hosts:
|
||||
devmaster01:
|
||||
devmaster02:
|
||||
devmaster03:
|
||||
k8s_cluster:
|
||||
children:
|
||||
kube_control_plane:
|
||||
kube_node:
|
||||
calico_rr:
|
||||
hosts: {}
|
61
inventory/hosts-dom1.yaml
Normal file
61
inventory/hosts-dom1.yaml
Normal file
|
@ -0,0 +1,61 @@
|
|||
all:
|
||||
hosts:
|
||||
dom1master01:
|
||||
ansible_host: 10.0.1.101
|
||||
ip: 10.0.1.101
|
||||
access_ip: 10.0.1.101
|
||||
dom1master02:
|
||||
ansible_host: 10.0.1.102
|
||||
ip: 10.0.1.102
|
||||
access_ip: 10.0.1.102
|
||||
dom1master03:
|
||||
ansible_host: 10.0.1.103
|
||||
ip: 10.0.1.103
|
||||
access_ip: 10.0.1.103
|
||||
kvmcu01:
|
||||
ansible_host: 10.0.1.121
|
||||
ip: 10.0.1.121
|
||||
access_ip: 10.0.1.121
|
||||
kvmcu02:
|
||||
ansible_host: 10.0.1.122
|
||||
ip: 10.0.1.122
|
||||
access_ip: 10.0.1.122
|
||||
kvmcu03:
|
||||
ansible_host: 10.0.1.123
|
||||
ip: 10.0.1.123
|
||||
access_ip: 10.0.1.123
|
||||
kvmcu04:
|
||||
ansible_host: 10.0.1.124
|
||||
ip: 10.0.1.124
|
||||
access_ip: 10.0.1.124
|
||||
kvmcu05:
|
||||
ansible_host: 10.0.1.125
|
||||
ip: 10.0.1.125
|
||||
access_ip: 10.0.1.125
|
||||
children:
|
||||
kube_control_plane:
|
||||
hosts:
|
||||
dom1master01:
|
||||
dom1master02:
|
||||
dom1master03:
|
||||
kube_node:
|
||||
hosts:
|
||||
dom1master01:
|
||||
dom1master02:
|
||||
dom1master03:
|
||||
kvmcu01:
|
||||
kvmcu02:
|
||||
kvmcu03:
|
||||
kvmcu04:
|
||||
kvmcu05:
|
||||
etcd:
|
||||
hosts:
|
||||
dom1master01:
|
||||
dom1master02:
|
||||
dom1master03:
|
||||
k8s_cluster:
|
||||
children:
|
||||
kube_control_plane:
|
||||
kube_node:
|
||||
calico_rr:
|
||||
hosts: {}
|
38
inventory/inventory.ini
Normal file
38
inventory/inventory.ini
Normal file
|
@ -0,0 +1,38 @@
|
|||
# ## Configure 'ip' variable to bind kubernetes services on a
|
||||
# ## different ip than the default iface
|
||||
# ## We should set etcd_member_name for the etcd cluster. Nodes that are not etcd members do not need to set the value, or can set it to an empty string.
|
||||
[all]
|
||||
# node1 ansible_host=95.54.0.12 # ip=10.3.0.1 etcd_member_name=etcd1
|
||||
# node2 ansible_host=95.54.0.13 # ip=10.3.0.2 etcd_member_name=etcd2
|
||||
# node3 ansible_host=95.54.0.14 # ip=10.3.0.3 etcd_member_name=etcd3
|
||||
# node4 ansible_host=95.54.0.15 # ip=10.3.0.4 etcd_member_name=etcd4
|
||||
# node5 ansible_host=95.54.0.16 # ip=10.3.0.5 etcd_member_name=etcd5
|
||||
# node6 ansible_host=95.54.0.17 # ip=10.3.0.6 etcd_member_name=etcd6
|
||||
|
||||
# ## configure a bastion host if your nodes are not directly reachable
|
||||
# [bastion]
|
||||
# bastion ansible_host=x.x.x.x ansible_user=some_user
|
||||
|
||||
[kube_control_plane]
|
||||
# node1
|
||||
# node2
|
||||
# node3
|
||||
|
||||
[etcd]
|
||||
# node1
|
||||
# node2
|
||||
# node3
|
||||
|
||||
[kube_node]
|
||||
# node2
|
||||
# node3
|
||||
# node4
|
||||
# node5
|
||||
# node6
|
||||
|
||||
[calico_rr]
|
||||
|
||||
[k8s_cluster:children]
|
||||
kube_control_plane
|
||||
kube_node
|
||||
calico_rr
|
|
@ -0,0 +1,9 @@
|
|||
---
|
||||
containerd_versioned_pkg:
|
||||
'latest': "{{ containerd_package }}"
|
||||
'1.3.7': "{{ containerd_package }}=1.3.7-1"
|
||||
'1.3.9': "{{ containerd_package }}=1.3.9-1"
|
||||
'1.4.3': "{{ containerd_package }}=1.4.3-1"
|
||||
'1.4.4': "{{ containerd_package }}=1.4.4-1"
|
||||
'stable': "{{ containerd_package }}=1.4.3-1"
|
||||
'edge': "{{ containerd_package }}=1.4.4-1"
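# Presumably this map is indexed by the containerd_version variable (assumption,
# see the kubespray containerd role); pinning a specific release would then look like:
# containerd_version: "1.4.3"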
|
92
scripts/create-sa.sh
Executable file
92
scripts/create-sa.sh
Executable file
|
@ -0,0 +1,92 @@
|
|||
#!/bin/bash
|
||||
set -e
|
||||
set -o pipefail
|
||||
|
||||
# Add user to k8s using service account, no RBAC (must create RBAC after this script)
|
||||
if [[ -z "$1" ]] || [[ -z "$2" ]]; then
|
||||
echo "usage: $0 <service_account_name> <namespace>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
SERVICE_ACCOUNT_NAME=$1
|
||||
NAMESPACE="$2"
|
||||
KUBECFG_FILE_NAME="/tmp/kube/k8s-${SERVICE_ACCOUNT_NAME}-${NAMESPACE}-conf"
|
||||
TARGET_FOLDER="/tmp/kube"
|
||||
|
||||
create_target_folder() {
|
||||
echo -n "Creating target directory to hold files in ${TARGET_FOLDER}..."
|
||||
mkdir -p "${TARGET_FOLDER}"
|
||||
printf "done"
|
||||
}
|
||||
|
||||
create_service_account() {
|
||||
echo -e "\\nCreating a service account: ${SERVICE_ACCOUNT_NAME} on namespace: ${NAMESPACE}"
|
||||
kubectl create sa "${SERVICE_ACCOUNT_NAME}" --namespace "${NAMESPACE}"
|
||||
}
|
||||
|
||||
get_secret_name_from_service_account() {
|
||||
echo -e "\\nGetting secret of service account ${SERVICE_ACCOUNT_NAME}-${NAMESPACE}"
|
||||
SECRET_NAME=$(kubectl get sa "${SERVICE_ACCOUNT_NAME}" --namespace "${NAMESPACE}" -o json | jq -r '.secrets[].name')
|
||||
echo "Secret name: ${SECRET_NAME}"
|
||||
}
|
||||
|
||||
extract_ca_crt_from_secret() {
|
||||
echo -e -n "\\nExtracting ca.crt from secret..."
|
||||
kubectl get secret "${SECRET_NAME}" --namespace "${NAMESPACE}" -o json | jq \
|
||||
-r '.data["ca.crt"]' | base64 -d > "${TARGET_FOLDER}/ca.crt"
|
||||
printf "done"
|
||||
}
|
||||
|
||||
get_user_token_from_secret() {
|
||||
echo -e -n "\\nGetting user token from secret..."
|
||||
USER_TOKEN=$(kubectl get secret "${SECRET_NAME}" \
|
||||
--namespace "${NAMESPACE}" -o json | jq -r '.data["token"]' | base64 -d)
|
||||
printf "done"
|
||||
}
|
||||
|
||||
set_kube_config_values() {
|
||||
context=$(kubectl config current-context)
|
||||
echo -e "\\nSetting current context to: $context"
|
||||
|
||||
CLUSTER_NAME=$(kubectl config get-contexts "$context" | awk '{print $3}' | tail -n 1)
|
||||
echo "Cluster name: ${CLUSTER_NAME}"
|
||||
|
||||
ENDPOINT=$(kubectl config view \
|
||||
-o jsonpath="{.clusters[?(@.name == \"${CLUSTER_NAME}\")].cluster.server}")
|
||||
echo "Endpoint: ${ENDPOINT}"
|
||||
|
||||
# Set up the config
|
||||
echo -e "\\nPreparing k8s-${SERVICE_ACCOUNT_NAME}-${NAMESPACE}-conf"
|
||||
echo -n "Setting a cluster entry in kubeconfig..."
|
||||
kubectl config set-cluster "${CLUSTER_NAME}" \
|
||||
--kubeconfig="${KUBECFG_FILE_NAME}" \
|
||||
--server="${ENDPOINT}" \
|
||||
--certificate-authority="${TARGET_FOLDER}/ca.crt" \
|
||||
--embed-certs=true
|
||||
|
||||
echo -n "Setting token credentials entry in kubeconfig..."
|
||||
kubectl config set-credentials \
|
||||
"${SERVICE_ACCOUNT_NAME}-${NAMESPACE}-${CLUSTER_NAME}" \
|
||||
--kubeconfig="${KUBECFG_FILE_NAME}" \
|
||||
--token="${USER_TOKEN}"
|
||||
|
||||
echo -n "Setting a context entry in kubeconfig..."
|
||||
kubectl config set-context \
|
||||
"${SERVICE_ACCOUNT_NAME}-${NAMESPACE}-${CLUSTER_NAME}" \
|
||||
--kubeconfig="${KUBECFG_FILE_NAME}" \
|
||||
--cluster="${CLUSTER_NAME}" \
|
||||
--user="${SERVICE_ACCOUNT_NAME}-${NAMESPACE}-${CLUSTER_NAME}" \
|
||||
--namespace="${NAMESPACE}"
|
||||
|
||||
echo -n "Setting the current-context in the kubeconfig file..."
|
||||
kubectl config use-context "${SERVICE_ACCOUNT_NAME}-${NAMESPACE}-${CLUSTER_NAME}" \
|
||||
--kubeconfig="${KUBECFG_FILE_NAME}"
|
||||
}
|
||||
|
||||
create_target_folder
|
||||
create_service_account
|
||||
sleep 10
|
||||
get_secret_name_from_service_account
|
||||
extract_ca_crt_from_secret
|
||||
get_user_token_from_secret
|
||||
set_kube_config_values
|
33
scripts/health.sh
Executable file
33
scripts/health.sh
Executable file
|
@ -0,0 +1,33 @@
|
|||
#!/bin/bash
|
||||
|
||||
kubectl cluster-info
|
||||
|
||||
#printf "\n [ etcd cluster health: ]\n"
|
||||
#/usr/local/bin/etcdctl --endpoints="https://${ETCD_1_IP}:2379,https://${ETCD_2_IP}:2379,https://${ETCD_3_IP}:2379" --cert ${CA_DIR}/etcd.crt --key ${CA_DIR}/etcd.key --cacert ${CA_DIR}/etcd-ca.crt endpoint status
|
||||
|
||||
printf "\n [ kubernetes components health: ]\n"
|
||||
/usr/local/bin/kubectl get componentstatuses
|
||||
|
||||
printf "\n [ kubernetes nodes: ]\n"
|
||||
kubectl get nodes -o wide
|
||||
|
||||
printf "\n [ helm releases: ]\n"
|
||||
helm ls --all-namespaces
|
||||
|
||||
if [ "$1" = "all" ]; then
|
||||
printf "\n [ kubernetes deployments: ]\n"
|
||||
/usr/local/bin/kubectl get deployments --all-namespaces
|
||||
printf "\n [ kubernetes services: ]\n"
|
||||
/usr/local/bin/kubectl get services --all-namespaces -o wide
|
||||
printf "\n [ kubernetes ingresses: ]\n"
|
||||
/usr/local/bin/kubectl get ingresses.v1.networking.k8s.io --all-namespaces
|
||||
printf "\n [ kubernates storage claims: ]\n"
|
||||
/usr/local/bin/kubectl get pvc --all-namespaces
|
||||
printf "\n [ kubernetes pods: ]\n"
|
||||
/usr/local/bin/kubectl get pods --all-namespaces -o wide
|
||||
fi
|
||||
|
||||
if [ ! -z "$1" ]; then
|
||||
printf "\n [ $1 status: ]\n"
|
||||
/usr/local/bin/kubectl get $1 --all-namespaces -o wide
|
||||
fi
|
5
scripts/k.sh
Executable file
5
scripts/k.sh
Executable file
|
@ -0,0 +1,5 @@
|
|||
#!/bin/bash
|
||||
|
||||
export KUBECONFIG=/etc/kubernetes/admin.conf
|
||||
|
||||
kubectl "$@"
|
50
scripts/logs-proxy.sh
Executable file
50
scripts/logs-proxy.sh
Executable file
|
@ -0,0 +1,50 @@
|
|||
#!/bin/sh
|
||||
|
||||
if [ -z "$SSH_ORIGINAL_COMMAND" ] ; then
|
||||
echo ""
|
||||
echo "Usage: ssh logs <namespace> <container_name_pattern> <lines> [grep pattern]"
|
||||
echo "Example: ssh logs shared matches-front 100"
|
||||
echo ""
|
||||
exit
|
||||
fi
|
||||
|
||||
NSPACE=`echo "$SSH_ORIGINAL_COMMAND" | awk '{print $1}'`
|
||||
|
||||
if [ "$NSPACE" = "kube-system" ] || [ "$NSPACE" = "monitoring" ]; then
|
||||
echo "Access denied."
|
||||
exit
|
||||
fi
|
||||
|
||||
SERVICE=`echo "$SSH_ORIGINAL_COMMAND" | awk '{print $2}'`
|
||||
|
||||
if [ -z $SERVICE ]; then
|
||||
KUBECONFIG=/home/logs/k8s-admin-conf /usr/local/bin/kubectl -n ${NSPACE} get pods
|
||||
exit
|
||||
fi
|
||||
|
||||
CONTAINER_NAME=`KUBECONFIG=/home/logs/k8s-admin-conf /usr/local/bin/kubectl -n ${NSPACE} get pods | grep "${SERVICE}" | awk '{print $1}'`
|
||||
num_lines=$(echo "$CONTAINER_NAME" | wc -l)
|
||||
if [ $num_lines -gt 1 ]; then
|
||||
echo "Specify exact container name from:"
|
||||
echo ""
|
||||
echo "$CONTAINER_NAME"
|
||||
echo ""
|
||||
echo "Usage: ssh logs ${NSPACE} $SERVICE-rnd123 <lines>"
|
||||
exit
|
||||
fi
|
||||
echo $CONTAINER_NAME
|
||||
|
||||
TAIL=`echo "$SSH_ORIGINAL_COMMAND" | awk '{print $3}'`
|
||||
if [ -n "$TAIL" ] && [ "$TAIL" -eq "$TAIL" ] 2>/dev/null; then
|
||||
TAIL="--tail $TAIL"
|
||||
else
|
||||
TAIL=''
|
||||
fi
|
||||
|
||||
GREP_PATTERN=`echo "$SSH_ORIGINAL_COMMAND" | awk '{print $4}'`
|
||||
|
||||
if [ -n "$GREP_PATTERN" ]; then
|
||||
KUBECONFIG=/home/logs/k8s-admin-conf /usr/local/bin/kubectl -n ${NSPACE} logs --timestamps --follow $TAIL $CONTAINER_NAME --all-containers | grep -E $GREP_PATTERN
|
||||
else
|
||||
KUBECONFIG=/home/logs/k8s-admin-conf /usr/local/bin/kubectl -n ${NSPACE} logs --timestamps --follow $TAIL $CONTAINER_NAME --all-containers
|
||||
fi;
|
17
scripts/omega-tunnel.sh
Executable file
17
scripts/omega-tunnel.sh
Executable file
|
@ -0,0 +1,17 @@
|
|||
#!/bin/sh
|
||||
|
||||
APP="$SSH_ORIGINAL_COMMAND"
|
||||
|
||||
case "$APP" in
|
||||
omega-ps|omega-core|omega-tron|mssql)
|
||||
;;
|
||||
*)
|
||||
echo "Usage: ssh omega@master01 -t <CONTAINER_NAME>\n"
|
||||
echo "Available containers:\n omega-ps\n omega-core\n omega-tron\n mssql"
|
||||
exit
|
||||
esac
|
||||
|
||||
export KUBECONFIG=k8s-admin-sa-staging-conf
|
||||
|
||||
POD=`kubectl -n staging get pods --selector="app=$APP" --output=go-template --template='{{range .items}}{{.metadata.name}}{{end}}'`
|
||||
exec kubectl -n staging exec -it "$POD" -- bash -c 'PATH="$PATH:/opt/mssql-tools/bin" bash'
|
6
scripts/podspernode.sh
Executable file
6
scripts/podspernode.sh
Executable file
|
@ -0,0 +1,6 @@
|
|||
#!/bin/bash
|
||||
|
||||
for node in `k get node --selector='!node-role.kubernetes.io/master' -o jsonpath='{.items[*].metadata.name}'`; do
|
||||
echo -n "$node: "
|
||||
k get pods --all-namespaces --no-headers --field-selector spec.nodeName=$node | wc -l
|
||||
done
|
37
scripts/tcp-proxy.sh
Executable file
37
scripts/tcp-proxy.sh
Executable file
|
@ -0,0 +1,37 @@
|
|||
#!/bin/bash
|
||||
|
||||
# For each user who needs to use this script you may create the .authorized_keys file using the following pattern:
|
||||
#command="/usr/local/bin/tcp-proxy",no-user-rc,no-x11-forwarding,no-agent-forwarding,no-pty,permitopen="127.0.0.1:23306",permitopen="127.0.0.1:21443" ssh-rsa <KEY> user@host
|
||||
|
||||
APP="$SSH_ORIGINAL_COMMAND"
|
||||
|
||||
case "$APP" in
|
||||
db)
|
||||
USERPORT=23306
|
||||
TARGETPORT=3306
|
||||
;;
|
||||
mssql)
|
||||
USERPORT=21443
|
||||
TARGETPORT=1433
|
||||
;;
|
||||
*)
|
||||
echo "Usage: ssh remotecon@master01.staging.example.com -L3306:127.0.0.1:23306 <SERVICE_NAME>"
|
||||
echo "Available services:\nmssql \ndb"
|
||||
exit
|
||||
esac
|
||||
|
||||
export KUBECONFIG=/home/remotecon/k8s-admin-sa-staging-conf
|
||||
|
||||
SVC=`kubectl get svc $APP --output=go-template --template='{{.metadata.name}}'`
|
||||
echo "Port forwarding $SVC:$TARGETPORT to 127.0.0.1:$USERPORT ..."
|
||||
|
||||
FWDPID=`ps ax | grep "svc\/$SVC" | awk '{print $1}'`
|
||||
if [ -z $FWDPID ] ; then
|
||||
/usr/sbin/daemonize /usr/local/bin/kubectl port-forward svc/$SVC $USERPORT:$TARGETPORT
|
||||
FWDPID=`ps ax | grep "svc\/$SVC" | awk '{print $1}'`
|
||||
echo "Spawning new forwarder at pid $FWDPID."
|
||||
else
|
||||
echo "Using the running forwarder at pid $FWDPID."
|
||||
fi
|
||||
echo "Press any key to end the session..."
|
||||
read X
|
169
setup-apps.sh
Executable file
169
setup-apps.sh
Executable file
|
@ -0,0 +1,169 @@
|
|||
#!/bin/bash
|
||||
|
||||
echo "... ] Setup Applications [ ..."
|
||||
|
||||
if [ -f config ]; then
|
||||
echo "config file FOUND :)"
|
||||
source config
|
||||
else
|
||||
echo "config file is missing."
|
||||
exit 1
|
||||
fi
|
||||
export CEPH_MONITOR_1
|
||||
export CEPH_MONITOR_2
|
||||
export CEPH_MONITOR_3
|
||||
export CLUSTER_DOMAIN
|
||||
export REGISTRY_URL
|
||||
export LOKI_STORAGE_SIZE
|
||||
export LOKI_RETENTION
|
||||
|
||||
allRunning() {
|
||||
podStatus=$(kubectl get pods -n $1 -o=jsonpath='{range .items[*]}{.status.conditions[?(@.type=="ContainersReady")].status}{"\n"}{end}')
|
||||
for elem in $podStatus
|
||||
do
|
||||
#echo $elem
|
||||
if [ $elem != "True" ]
|
||||
then
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
return 1
|
||||
}
|
||||
|
||||
# Setup Helm repositories
|
||||
helm repo add jetstack https://charts.jetstack.io
|
||||
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
|
||||
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
|
||||
helm repo add grafana https://grafana.github.io/helm-charts
|
||||
helm repo update
|
||||
|
||||
# Setup ACME
|
||||
if [ $CERT_MODE == "True" ] ; then
|
||||
echo "] deploying cert-manager helm chart..."
|
||||
kubectl create namespace cert-manager
|
||||
kubectl -n cert-manager create secret generic cf-api-secret --from-literal=cf-api-key=${CLOUDFLARE_API_KEY}
|
||||
kubectl apply -f yaml/cert-manager/cert-manager.crds.yaml
|
||||
helm install \
|
||||
cert-manager jetstack/cert-manager \
|
||||
--namespace cert-manager \
|
||||
--version v1.1.0 \
|
||||
-f yaml/cert-manager/values.yaml
|
||||
|
||||
echo "] Setup cert-manager issuers ... "
|
||||
while :
|
||||
do
|
||||
allRunning cert-manager
|
||||
allAreRunning=$?
|
||||
if [ $allAreRunning == 1 ]; then
|
||||
sleep 10
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
cat yaml/cert-manager/letsencrypt-staging-clusterissuer.yaml | sed "s/var_acme_email/${ADMIN_EMAIL}/" | kubectl -n kube-system apply -f -
|
||||
cat yaml/cert-manager/letsencrypt-staging-dns-clusterissuer.yaml | sed "s/var_acme_email/${ADMIN_EMAIL}/" | kubectl -n kube-system apply -f -
|
||||
cat yaml/cert-manager/letsencrypt-production-clusterissuer.yaml | sed "s/var_acme_email/${ADMIN_EMAIL}/" | kubectl -n kube-system apply -f -
|
||||
cat yaml/cert-manager/letsencrypt-production-dns-clusterissuer.yaml | sed "s/var_acme_email/${ADMIN_EMAIL}/" | kubectl -n kube-system apply -f -
|
||||
fi
|
||||
|
||||
# Setup Ingress-Nginx
|
||||
kubectl create namespace ingress-nginx
|
||||
if [ $CERT_MODE == "True" ] ; then
|
||||
echo "] Deploying ingress-nginx helm chart WITH TLS termination in TCP/PROXY mode..."
|
||||
k8x_proxy_mode="true"
|
||||
else
|
||||
echo "] Deploying ingress-nginx helm chart WITHOUT TLS termination in HTTP mode..."
|
||||
k8x_proxy_mode="false"
|
||||
fi
|
||||
helm install \
|
||||
ingress-nginx ingress-nginx/ingress-nginx \
|
||||
--namespace ingress-nginx \
|
||||
--set-string controller.config.use-proxy-protocol="${k8x_proxy_mode}" \
|
||||
--set-string controller.maxmindLicenseKey="${MAXMIND_LIC}" \
|
||||
--version 3.34.0 \
|
||||
-f yaml/ingress-nginx/values.yaml
|
||||
|
||||
# Setup Monitoring
|
||||
kubectl create namespace monitoring
|
||||
kubectl create namespace loki
|
||||
|
||||
# Setup Zabbix
|
||||
echo "] Deploying zabbix-agent for host monitoring..."
|
||||
kubectl -n monitoring create secret generic zabbix-psk-secret --from-literal=zabbix_agentd.psk=${ZABBIX_PSK}
|
||||
kubectl -n monitoring create secret generic zabbix-psk-id-secret --from-literal=zabbix_psk_id=${ZABBIX_PSK_ID}
|
||||
cat yaml/monitoring/zabbix-agent-daemonset.yaml | sed "s/k8x_zabbix_server/${ZABBIX_SERVER}/" | kubectl -n monitoring apply -f -
|
||||
|
||||
# Setup Prometheus metrics
|
||||
echo "] Deploying prometheus for metrics aggregation..."
|
||||
cat yaml/monitoring/prometheus-volumes.yaml | envsubst | kubectl apply -f -
|
||||
helm install \
|
||||
prometheus prometheus-community/prometheus \
|
||||
--namespace monitoring \
|
||||
-f yaml/monitoring/prometheus-values.yaml
|
||||
|
||||
# Setup PLG Stack
|
||||
echo "] Deploying Promtail for logs aggregation ..."
|
||||
#promtail
|
||||
helm install \
|
||||
promtail grafana/promtail \
|
||||
--namespace monitoring \
|
||||
-f yaml/monitoring/promtail-values.yaml
|
||||
|
||||
echo "] Deploying Loki for promtail aggregation ..."
|
||||
cat yaml/monitoring/loki-v12-volumes.yaml | envsubst | kubectl apply -f -
|
||||
helm install \
|
||||
loki grafana/loki \
|
||||
--namespace loki \
|
||||
-f yaml/monitoring/loki-v12-values-${LOKI_RETENTION}.yaml
|
||||
cat yaml/monitoring/loki-v12-ext-svc.yaml | kubectl apply -f -
|
||||
|
||||
echo "] Deploying Grafana for monitoring dashboard ..."
|
||||
cat yaml/monitoring/grafana-volumes.yaml | envsubst | kubectl apply -f -
|
||||
helm install \
|
||||
grafana grafana/grafana \
|
||||
--namespace monitoring \
|
||||
-f yaml/monitoring/grafana-values.yaml \
|
||||
--set env.GF_SMTP_ENABLED=true,env.GF_SMTP_HOST=${GRAFANA_SMTP_HOST},env.GF_SMTP_FROM_ADDRESS=${GRAFANA_SMTP_FROM_ADDRESS},env.GF_SMTP_USER=${GRAFANA_SMTP_USER},env.GF_SMTP_PASSWORD=${GRAFANA_SMTP_PASSWORD},env.GF_SMTP_SKIP_VERIFY=true
|
||||
printf '\ngrafana login:\nuser: admin \npass: ' ; kubectl get secret --namespace monitoring grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
|
||||
#kubectl -n monitoring create secret generic monitoring-http-secret --from-file=yaml/monitoring/auth
|
||||
if [ $CERT_MODE == "True" ] ; then
|
||||
cat yaml/monitoring/grafana-cert.yaml | envsubst | kubectl -n monitoring apply -f -
|
||||
cat yaml/monitoring/grafana-ingress-secure.yaml | envsubst | kubectl -n monitoring apply -f -
|
||||
else
|
||||
cat yaml/monitoring/grafana-ingress.yaml | envsubst | kubectl -n monitoring apply -f -
|
||||
fi
|
||||
|
||||
# Setup Registry
|
||||
echo "] Deploying docker registry ..."
|
||||
kubectl create namespace registry
|
||||
if [ $REGISTRY_INTERNAL == "True" ]; then
|
||||
apt update
|
||||
apt install daemonize apache2-utils -y
|
||||
if [ -f /tmp/regsecret ]; then
|
||||
rm /tmp/regsecret
|
||||
fi
|
||||
# Generating registry-sec for use by the registry.yaml deployment for internal webserver auth
|
||||
htpasswd -Bb -c /tmp/regsecret $REGISTRY_USER $REGISTRY_PASS
|
||||
regpassstr=`cat /tmp/regsecret | base64 -w 0`
|
||||
cat <<EOF | kubectl -n registry apply -f -
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: registry-sec
|
||||
type: Opaque
|
||||
data:
|
||||
HTPASSWD: ${regpassstr}
|
||||
EOF
|
||||
cat yaml/registry/registry-volumes.yaml | envsubst | kubectl -n registry apply -f -
|
||||
#cat yaml/registry/registry-volumes-nfs.yaml | envsubst | kubectl -n registry apply -f -
|
||||
cat yaml/registry/registry.yaml | envsubst | kubectl -n registry apply -f -
|
||||
|
||||
if [ $CERT_MODE == "True" ] ; then
|
||||
cat yaml/registry/registry-cert.yaml | envsubst | kubectl -n registry apply -f -
|
||||
cat yaml/registry/registry-ingress-secure.yaml | envsubst | kubectl -n registry apply -f -
|
||||
else
|
||||
cat yaml/registry/registry-ingress.yaml | envsubst | kubectl -n registry apply -f -
|
||||
fi
|
||||
fi
|
||||
#Create a registry secret to be used by pods
|
||||
kubectl -n registry create secret docker-registry registry-internal --docker-server=https://${REGISTRY_URL}/v2/ --docker-username=${REGISTRY_USER} --docker-password=${REGISTRY_PASS} --docker-email=${ADMIN_EMAIL}
|
63
setup-env.sh
Executable file
63
setup-env.sh
Executable file
|
@ -0,0 +1,63 @@
|
|||
#!/bin/bash
|
||||
|
||||
echo ""
|
||||
echo "... ] Setup Environment [ ..."
|
||||
|
||||
if [ -f config ]; then
|
||||
echo "config file FOUND :)"
|
||||
source config
|
||||
else
|
||||
echo "config file is missing."
|
||||
exit 1
|
||||
fi
|
||||
export CEPH_MONITOR_1
|
||||
export CEPH_MONITOR_2
|
||||
export CEPH_MONITOR_3
|
||||
export CLUSTER_DOMAIN
|
||||
|
||||
if [ -z $1 ]; then
|
||||
echo "Usage: $0 <env-name>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
apt update
|
||||
apt install -y jq
|
||||
cp -v scripts/create-sa.sh /usr/local/bin/create-sa
|
||||
|
||||
nspace=$1
|
||||
export nspace
|
||||
|
||||
# Setup namespace, Service Accounts, RBAC, Limit
|
||||
printf "\nsetting up ${nspace}... \n"
|
||||
cat yaml/system/namespace.yaml | envsubst | kubectl apply -f -
|
||||
printf "\nsetting up Service Accounts... \n"
|
||||
/usr/local/bin/create-sa admin-sa ${nspace}
|
||||
/usr/local/bin/create-sa backup-agent-sa ${nspace}
|
||||
sleep 5
|
||||
printf "\nsetting up RBAC... \n"
|
||||
cat yaml/system/sa-rbac.yaml | envsubst | kubectl apply -f -
|
||||
cat yaml/system/sa-rbac-backup-agent.yaml | envsubst | kubectl apply -f -
|
||||
sleep 5
|
||||
printf "\nsetting up resource limits... \n"
|
||||
kubectl -n $nspace apply -f yaml/system/namespace-resource-limits.yaml
|
||||
|
||||
# Create an auth-keypair unique to the new namespace, to be used by external applications
|
||||
printf "\nsetting up shared keypair secret... \n"
|
||||
mkdir -p -v /root/secrets/kube
|
||||
openssl ecparam -genkey -name prime256v1 -noout -out /root/secrets/kube/${nspace}_id_ecdsa
|
||||
openssl ec -in /root/secrets/kube/${nspace}_id_ecdsa -pubout -out /root/secrets/kube/${nspace}_id_ecdsa.pub
|
||||
kubectl -n $nspace create secret generic auth-keypair --from-file=id_ecdsa=/root/secrets/kube/${nspace}_id_ecdsa --from-file=id_ecdsa.pub=/root/secrets/kube/${nspace}_id_ecdsa.pub
|
||||
#rm /root/secrets/kube/${nspace}_id_ecdsa
|
||||
#rm /root/secrets/kube/${nspace}_id_ecdsa.pub
|
||||
|
||||
# Create wildcard certificate if we have cert-manager installed
|
||||
if [ $CERT_MODE == "True" ] ; then
|
||||
printf "\ncreate a wildcard certificate secret with letsencrypt for the defined namespace...\n"
|
||||
cat yaml/system/namespace-wild-cert.yaml | envsubst | kubectl -n ${nspace} apply -f -
|
||||
fi
|
||||
|
||||
# Copy internal registry credentials from its namespace
|
||||
kubectl -n registry get secret registry-internal -o yaml | sed "s/namespace: .*/namespace: ${nspace}/" | kubectl apply -f -
|
||||
|
||||
# Patch the default service account with registry-internal as imagePullSecret
|
||||
kubectl -n ${nspace} patch serviceaccount default -p '{"imagePullSecrets": [{"name": "registry-internal"}]}'
|
20
setup-scripts.sh
Executable file
20
setup-scripts.sh
Executable file
|
@ -0,0 +1,20 @@
|
|||
#!/bin/bash
|
||||
|
||||
echo "] Setup helper scripts ... "
|
||||
cp -v scripts/k.sh /usr/local/bin/k
|
||||
cp -v scripts/health.sh /usr/local/bin/health
|
||||
cp -v scripts/logs-proxy.sh /usr/local/bin/logs-proxy
|
||||
cp -v scripts/tcp-proxy.sh /usr/local/bin/tcp-proxy
|
||||
cp -v scripts/omega-tunnel.sh /usr/local/bin/omega-tunnel
|
||||
cp -v scripts/create-sa.sh /usr/local/bin/create-sa
|
||||
cp -v scripts/podspernode.sh /usr/local/bin/podspernode
|
||||
|
||||
echo "] Setup k autocomplete ..."
|
||||
echo "source <(kubectl completion bash)" > /root/.bashrc
|
||||
echo "complete -F __start_kubectl k" >> /root/.bashrc
|
||||
|
||||
echo "] Setup k9s ... "
|
||||
curl -L https://github.com/derailed/k9s/releases/download/v0.24.10/k9s_v0.24.10_Linux_x86_64.tar.gz -o /tmp/k9s.tar.gz
|
||||
tar xzvf /tmp/k9s.tar.gz k9s
|
||||
mv -v k9s /usr/local/bin
|
||||
rm /tmp/k9s.tar.gz
|
15
terraform/README.md
Normal file
15
terraform/README.md
Normal file
|
@ -0,0 +1,15 @@
|
|||
### Environment variables
|
||||
- `TF_VAR_vultr_apikey`
|
||||
- `TF_VAR_proxmox_node01_password`
|
||||
- `TF_VAR_proxmox_node02_password`
|
||||
- `TF_VAR_proxmox_node03_password`
|
||||
- `TF_VAR_proxmox_node04_password`
|
||||
- `TF_VAR_proxmox_node05_password`
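
These are standard Terraform input variables; for a local run they can be exported in the shell before `terraform plan`/`apply` (placeholder values shown, adjust per environment):

```
export TF_VAR_vultr_apikey="xxxxxxxxxxxx"
export TF_VAR_proxmox_node01_password="hackme123"
export TF_VAR_proxmox_node02_password="hackme123"
export TF_VAR_proxmox_node03_password="hackme123"
export TF_VAR_proxmox_node04_password="hackme123"
export TF_VAR_proxmox_node05_password="hackme123"
```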
|
||||
### Create proxmox terraform user
|
||||
```
|
||||
# Create the user
|
||||
pveum user add terraform@pve --password hackme123
|
||||
|
||||
# Assign the user the correct role
|
||||
pveum aclmod / -user terraform@pve -role Administrator
|
||||
```
|
32
terraform/staging/.gitlab-ci.yml
Normal file
32
terraform/staging/.gitlab-ci.yml
Normal file
|
@ -0,0 +1,32 @@
|
|||
# To contribute improvements to CI/CD templates, please follow the Development guide at:
|
||||
# https://docs.gitlab.com/ee/development/cicd/templates.html
|
||||
# This specific template is located at:
|
||||
# https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Terraform.latest.gitlab-ci.yml
|
||||
|
||||
include:
|
||||
- template: Terraform/Base.latest.gitlab-ci.yml # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Terraform/Base.latest.gitlab-ci.yml
|
||||
- template: Jobs/SAST-IaC.latest.gitlab-ci.yml # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/SAST-IaC.latest.gitlab-ci.yml
|
||||
|
||||
stages:
|
||||
- validate
|
||||
- test
|
||||
- build
|
||||
- deploy
|
||||
|
||||
fmt:
|
||||
extends: .terraform:fmt
|
||||
needs: []
|
||||
|
||||
validate:
|
||||
extends: .terraform:validate
|
||||
needs: []
|
||||
|
||||
build:
|
||||
extends: .terraform:build
|
||||
|
||||
deploy:
|
||||
extends: .terraform:deploy
|
||||
dependencies:
|
||||
- build
|
||||
environment:
|
||||
name: $TF_STATE_NAME
|
50
terraform/staging/main.tf
Normal file
50
terraform/staging/main.tf
Normal file
|
@ -0,0 +1,50 @@
|
|||
terraform {
|
||||
backend "http" {
|
||||
}
|
||||
required_providers {
|
||||
proxmox = {
|
||||
source = "Telmate/proxmox"
|
||||
version = "2.8.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
provider "proxmox" {
|
||||
alias = "prox01"
|
||||
pm_api_url = "https://10.15.0.101:8006/api2/json"
|
||||
pm_user = var.proxmox_user
|
||||
pm_password = var.proxmox_node01_password
|
||||
pm_tls_insecure = true
|
||||
}
|
||||
|
||||
provider "proxmox" {
|
||||
alias = "prox02"
|
||||
pm_api_url = "https://10.15.0.102:8006/api2/json"
|
||||
pm_user = var.proxmox_user
|
||||
pm_password = var.proxmox_node02_password
|
||||
pm_tls_insecure = true
|
||||
}
|
||||
|
||||
provider "proxmox" {
|
||||
alias = "prox03"
|
||||
pm_api_url = "https://10.15.0.103:8006/api2/json"
|
||||
pm_user = var.proxmox_user
|
||||
pm_password = var.proxmox_node03_password
|
||||
pm_tls_insecure = true
|
||||
}
|
||||
|
||||
provider "proxmox" {
|
||||
alias = "prox04"
|
||||
pm_api_url = "https://10.15.0.104:8006/api2/json"
|
||||
pm_user = var.proxmox_user
|
||||
pm_password = var.proxmox_node04_password
|
||||
pm_tls_insecure = true
|
||||
}
|
||||
|
||||
provider "proxmox" {
|
||||
alias = "prox05"
|
||||
pm_api_url = "https://10.15.0.105:8006/api2/json"
|
||||
pm_user = var.proxmox_user
|
||||
pm_password = var.proxmox_node05_password
|
||||
pm_tls_insecure = true
|
||||
}
|
28
terraform/staging/variables.tf
Normal file
28
terraform/staging/variables.tf
Normal file
|
@ -0,0 +1,28 @@
|
|||
variable "proxmox_user" {
|
||||
default = "terraform@pve"
|
||||
}
|
||||
|
||||
variable "proxmox_node01_password" {
|
||||
type = string
|
||||
description = "prox01 pve pass"
|
||||
}
|
||||
|
||||
variable "proxmox_node02_password" {
|
||||
type = string
|
||||
description = "prox02 pve pass"
|
||||
}
|
||||
|
||||
variable "proxmox_node03_password" {
|
||||
type = string
|
||||
description = "prox03 pve pass"
|
||||
}
|
||||
|
||||
variable "proxmox_node04_password" {
|
||||
type = string
|
||||
description = "prox04 pve pass"
|
||||
}
|
||||
|
||||
variable "proxmox_node05_password" {
|
||||
type = string
|
||||
description = "prox05 pve pass"
|
||||
}
|
32
terraform/testbed-hetzner/.gitlab-ci.yml
Normal file
32
terraform/testbed-hetzner/.gitlab-ci.yml
Normal file
|
@ -0,0 +1,32 @@
|
|||
# To contribute improvements to CI/CD templates, please follow the Development guide at:
|
||||
# https://docs.gitlab.com/ee/development/cicd/templates.html
|
||||
# This specific template is located at:
|
||||
# https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Terraform.latest.gitlab-ci.yml
|
||||
|
||||
include:
|
||||
- template: Terraform/Base.latest.gitlab-ci.yml # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Terraform/Base.latest.gitlab-ci.yml
|
||||
- template: Jobs/SAST-IaC.latest.gitlab-ci.yml # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/SAST-IaC.latest.gitlab-ci.yml
|
||||
|
||||
stages:
|
||||
- validate
|
||||
- test
|
||||
- build
|
||||
- deploy
|
||||
|
||||
fmt:
|
||||
extends: .terraform:fmt
|
||||
needs: []
|
||||
|
||||
validate:
|
||||
extends: .terraform:validate
|
||||
needs: []
|
||||
|
||||
build:
|
||||
extends: .terraform:build
|
||||
|
||||
deploy:
|
||||
extends: .terraform:deploy
|
||||
dependencies:
|
||||
- build
|
||||
environment:
|
||||
name: $TF_STATE_NAME
|
35
terraform/testbed-hetzner/k8s-masters.tf
Normal file
35
terraform/testbed-hetzner/k8s-masters.tf
Normal file
|
@ -0,0 +1,35 @@
|
|||
resource "vultr_instance" "master01test" {
|
||||
tag = "staging-testbed"
|
||||
hostname = "master01.teststaging.example.com"
|
||||
plan = "vc2-2c-4gb"
|
||||
region = "ams"
|
||||
os_id = "477"
|
||||
enable_ipv6 = false
|
||||
backups = "disabled"
|
||||
ddos_protection = false
|
||||
activation_email = true
|
||||
}
|
||||
|
||||
resource "vultr_instance" "master02test" {
|
||||
tag = "staging-testbed"
|
||||
hostname = "master02.teststaging.example.com"
|
||||
plan = "vc2-2c-4gb"
|
||||
region = "ams"
|
||||
os_id = "477"
|
||||
enable_ipv6 = false
|
||||
backups = "disabled"
|
||||
ddos_protection = false
|
||||
activation_email = true
|
||||
}
|
||||
|
||||
resource "vultr_instance" "master03test" {
|
||||
tag = "staging-testbed"
|
||||
hostname = "master03.teststaging.example.com"
|
||||
plan = "vc2-2c-4gb"
|
||||
region = "ams"
|
||||
os_id = "477"
|
||||
enable_ipv6 = false
|
||||
backups = "disabled"
|
||||
ddos_protection = false
|
||||
activation_email = true
|
||||
}
|
41
terraform/testbed-hetzner/k8s-workers.tf
Normal file
41
terraform/testbed-hetzner/k8s-workers.tf
Normal file
|
@ -0,0 +1,41 @@
|
|||
resource "proxmox_vm_qemu" "worker01test" {
|
||||
count = 1
|
||||
vmid = 211
|
||||
name = "worker01.teststaging.example.com"
|
||||
target_node = "prox05"
|
||||
clone = "debian-cloudinit"
|
||||
os_type = "cloud-init"
|
||||
cores = 4
|
||||
sockets = "1"
|
||||
cpu = "kvm64"
|
||||
memory = 4096
|
||||
scsihw = "virtio-scsi-pci"
|
||||
bootdisk = "virtio0"
|
||||
|
||||
disk {
|
||||
id = 0
|
||||
size = 32
|
||||
type = "virtio"
|
||||
storage = "local"
|
||||
iothread = true
|
||||
}
|
||||
|
||||
network {
|
||||
id = 0
|
||||
model = "virtio"
|
||||
bridge = "vmbr0"
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
ignore_changes = [
|
||||
network,
|
||||
]
|
||||
}
|
||||
|
||||
# Cloud Init Settings
|
||||
#ipconfig0 = "ip=212.83.177.200/24,gw=10.10.10.1"
|
||||
cicustom = "network=local:snippets/networkworker01teststaging.yaml"
|
||||
sshkeys = <<EOF
|
||||
${var.ansible_ssh_key}
|
||||
EOF
|
||||
}
|
28
terraform/testbed-hetzner/main.tf
Normal file
28
terraform/testbed-hetzner/main.tf
Normal file
|
@ -0,0 +1,28 @@
|
|||
terraform {
|
||||
backend "http" {
|
||||
}
|
||||
required_providers {
|
||||
proxmox = {
|
||||
source = "Telmate/proxmox"
|
||||
version = "2.8.0"
|
||||
}
|
||||
vultr = {
|
||||
source = "vultr/vultr"
|
||||
version = "2.5.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
provider "proxmox" {
|
||||
alias = "prox05"
|
||||
pm_api_url = "https://10.15.0.105:8006/api2/json"
|
||||
pm_user = var.proxmox_user
|
||||
pm_password = var.proxmox_node05_password
|
||||
pm_tls_insecure = true
|
||||
}
|
||||
|
||||
provider "vultr" {
|
||||
api_key = var.vultr_apikey
|
||||
rate_limit = 700
|
||||
retry_limit = 3
|
||||
}
|
18
terraform/testbed-hetzner/variables.tf
Normal file
18
terraform/testbed-hetzner/variables.tf
Normal file
|
@ -0,0 +1,18 @@
|
|||
variable "pm_user" {
|
||||
default = "terraform@pve"
|
||||
}
|
||||
|
||||
variable "vmid" {
|
||||
default = 400
|
||||
description = "Starting ID for the Prox VMs"
|
||||
}
|
||||
|
||||
variable "proxmox_node05_password" {
|
||||
type = string
|
||||
description = "prox05 pve pass"
|
||||
}
|
||||
|
||||
variable "vultr_apikey" {
|
||||
type = string
|
||||
description = "vultr cloud api key"
|
||||
}
|
32
terraform/testbed/.gitlab-ci.yml
Normal file
32
terraform/testbed/.gitlab-ci.yml
Normal file
|
@ -0,0 +1,32 @@
|
|||
# To contribute improvements to CI/CD templates, please follow the Development guide at:
|
||||
# https://docs.gitlab.com/ee/development/cicd/templates.html
|
||||
# This specific template is located at:
|
||||
# https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Terraform.latest.gitlab-ci.yml
|
||||
|
||||
include:
|
||||
- template: Terraform/Base.latest.gitlab-ci.yml # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Terraform/Base.latest.gitlab-ci.yml
|
||||
- template: Jobs/SAST-IaC.latest.gitlab-ci.yml # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/SAST-IaC.latest.gitlab-ci.yml
|
||||
|
||||
stages:
|
||||
- validate
|
||||
- test
|
||||
- build
|
||||
- deploy
|
||||
|
||||
fmt:
|
||||
extends: .terraform:fmt
|
||||
needs: []
|
||||
|
||||
validate:
|
||||
extends: .terraform:validate
|
||||
needs: []
|
||||
|
||||
build:
|
||||
extends: .terraform:build
|
||||
|
||||
deploy:
|
||||
extends: .terraform:deploy
|
||||
dependencies:
|
||||
- build
|
||||
environment:
|
||||
name: $TF_STATE_NAME
|
1
terraform/testbed/README.md
Normal file
1
terraform/testbed/README.md
Normal file
|
@ -0,0 +1 @@
|
|||
# eks-terraform
|
18
terraform/testbed/backend.tf
Normal file
18
terraform/testbed/backend.tf
Normal file
|
@ -0,0 +1,18 @@
|
|||
terraform {
|
||||
required_providers {
|
||||
aws = {
|
||||
source = "hashicorp/aws"
|
||||
version = ">= 4.34.0"
|
||||
}
|
||||
kubectl = {
|
||||
source = "gavinbunney/kubectl"
|
||||
version = ">= 1.14.0"
|
||||
}
|
||||
helm = {
|
||||
source = "hashicorp/helm"
|
||||
version = ">= 2.6.0"
|
||||
}
|
||||
}
|
||||
|
||||
required_version = ">= 1.2.9"
|
||||
}
|
3
terraform/testbed/main.tf
Normal file
3
terraform/testbed/main.tf
Normal file
|
@ -0,0 +1,3 @@
|
|||
module "main" {
|
||||
source = "./modules/main"
|
||||
}
|
78
terraform/testbed/modules/main/aurora.tf
Normal file
78
terraform/testbed/modules/main/aurora.tf
Normal file
|
@ -0,0 +1,78 @@
|
|||
|
||||
module "aurora" {
|
||||
source = "../aurora"
|
||||
|
||||
name = "${var.prefix}-${var.cluster_name}"
|
||||
engine = "aurora-mysql"
|
||||
engine_version = var.engine_version
|
||||
|
||||
|
||||
instances = {
|
||||
for i in range(var.num_of_instances) : tostring(i) => {
|
||||
instance_class = var.instance_type
|
||||
}
|
||||
}
|
||||
monitoring_interval = 60
|
||||
iam_role_use_name_prefix = false
|
||||
iam_role_name = "${var.prefix}-rds-monitoring-role"
|
||||
kms_key_id = module.kms.key_arn
|
||||
vpc_id = module.vpc.vpc_id
|
||||
subnets = module.vpc.database_subnets
|
||||
|
||||
database_name = var.database_name
|
||||
create_db_cluster_parameter_group = var.create_db_cluster_parameter_group
|
||||
db_cluster_parameter_group_family = var.parameter_group_family
|
||||
db_cluster_parameter_group_name = var.cluster_name
|
||||
|
||||
availability_zones = var.azs
|
||||
enabled_cloudwatch_logs_exports = var.cloud_watch_exports
|
||||
master_password = random_password.password.result
|
||||
master_username = var.db_master_username
|
||||
create_random_password = false
|
||||
allowed_security_groups = [module.eks_cluster.security_group_id]
|
||||
}
|
||||
|
||||
############### SECRET MANAGER ######################
|
||||
|
||||
|
||||
data "aws_secretsmanager_secret" "secretmasterdb" {
|
||||
arn = aws_secretsmanager_secret.secretmasterdb.arn
|
||||
}
|
||||
|
||||
data "aws_secretsmanager_secret_version" "creds" {
|
||||
secret_id = data.aws_secretsmanager_secret.secretmasterdb.arn
|
||||
depends_on = [aws_secretsmanager_secret_version.sversion]
|
||||
}
|
||||
|
||||
locals {
|
||||
db_creds = jsondecode(data.aws_secretsmanager_secret_version.creds.secret_string)
|
||||
}
|
||||
|
||||
resource "random_password" "password" {
|
||||
length = 24
|
||||
override_special = "!#$%&*()-_=+[]{}<>:?"
|
||||
}
|
||||
|
||||
resource "random_integer" "random" {
|
||||
min = 100
|
||||
max = 999
|
||||
}
|
||||
|
||||
resource "aws_secretsmanager_secret" "secretmasterdb" {
|
||||
name = "${var.prefix}-${var.db_master_username}-${random_integer.random.result}"
|
||||
kms_key_id = module.kms.key_arn
|
||||
}
|
||||
|
||||
resource "aws_secretsmanager_secret_version" "sversion" {
|
||||
secret_id = aws_secretsmanager_secret.secretmasterdb.id
|
||||
secret_string = <<EOF
|
||||
{
|
||||
"username": "${var.db_master_username}",
|
||||
"password": "${random_password.password.result}",
|
||||
"engine": "aurora-mysql",
|
||||
"host": "${module.aurora.cluster_endpoint}",
|
||||
"port": "${module.aurora.cluster_port}",
|
||||
"dbClusterIdentifier": "${var.database_name}"
|
||||
}
|
||||
EOF
|
||||
}
|
15
terraform/testbed/modules/main/ecr.tf
Normal file
15
terraform/testbed/modules/main/ecr.tf
Normal file
|
@ -0,0 +1,15 @@
|
|||
module "ecr" {
|
||||
source = "../ecr_repository"
|
||||
|
||||
repository_name = "${var.prefix}-${var.repo_name}"
|
||||
|
||||
repository_encryption_type = var.ecr_encryption
|
||||
repository_kms_key = module.kms.key_arn
|
||||
image_tag_mutability = var.is_image_mutable
|
||||
scan_on_push = var.scan_enabled
|
||||
force_delete = var.delete_enabled
|
||||
lifecycle_policy = var.ecr_policy
|
||||
|
||||
tags = var.ecr_tags
|
||||
|
||||
}
|
7
terraform/testbed/modules/main/eks_cluster.tf
Normal file
7
terraform/testbed/modules/main/eks_cluster.tf
Normal file
|
@ -0,0 +1,7 @@
|
|||
|
||||
module "eks_cluster" {
|
||||
source = "../eks_cluster"
|
||||
cluster_name = var.eks_cluster_name
|
||||
public_subnets = module.vpc.public_subnets
|
||||
private_subnets = module.vpc.private_subnets
|
||||
}
|
7
terraform/testbed/modules/main/eks_fargate.tf
Normal file
7
terraform/testbed/modules/main/eks_fargate.tf
Normal file
|
@ -0,0 +1,7 @@
|
|||
module "fargate" {
|
||||
source = "../fargate"
|
||||
eks_cluster_name = module.eks_cluster.cluster_name
|
||||
fargate_profile_name = var.fargate_profile_name
|
||||
subnet_ids = module.vpc.private_subnets
|
||||
kubernetes_namespace = var.kubernetes_namespace
|
||||
}
|
20
terraform/testbed/modules/main/eks_kubernetes.tf
Normal file
20
terraform/testbed/modules/main/eks_kubernetes.tf
Normal file
|
@ -0,0 +1,20 @@
|
|||
module "kubernetes" {
|
||||
source = "../kubernetes"
|
||||
region = var.aws_region
|
||||
vpc_id = module.vpc.vpc_id
|
||||
vpc_cidr = var.cidr
|
||||
efs_subnet_ids = module.vpc.private_subnets
|
||||
eks_cluster_name = module.eks_cluster.cluster_name
|
||||
eks_cluster_endpoint = module.eks_cluster.endpoint
|
||||
eks_oidc_url = module.eks_cluster.oidc_url
|
||||
eks_ca_certificate = module.eks_cluster.ca_certificate
|
||||
namespace = var.kubernetes_namespace
|
||||
deployment_name = var.deployment_name
|
||||
replicas = var.deployment_replicas
|
||||
labels = var.app_labels
|
||||
db_name = var.database_name
|
||||
db_address = module.aurora.cluster_endpoint
|
||||
db_user = var.db_master_username
|
||||
db_pass = random_password.password.result
|
||||
namespace_depends_on = [ module.fargate.id , module.eks_node_group.id ]
|
||||
}
|
11
terraform/testbed/modules/main/eks_node_group.tf
Normal file
11
terraform/testbed/modules/main/eks_node_group.tf
Normal file
|
@ -0,0 +1,11 @@
|
|||
module "eks_node_group" {
|
||||
source = "../eks_node_group"
|
||||
eks_cluster_name = module.eks_cluster.cluster_name
|
||||
node_group_name = var.node_group_name
|
||||
subnet_ids = [ module.vpc.private_subnets[0], module.vpc.private_subnets[1] ]
|
||||
instance_types = var.ng_instance_types
|
||||
disk_size = var.disk_size
|
||||
desired_nodes = var.desired_nodes
|
||||
max_nodes = var.max_nodes
|
||||
min_nodes = var.min_nodes
|
||||
}
|
29
terraform/testbed/modules/main/eventbridge.tf
Normal file
29
terraform/testbed/modules/main/eventbridge.tf
Normal file
|
@ -0,0 +1,29 @@
|
|||
module "eventbridge" {
|
||||
source = "../eventbridge"
|
||||
|
||||
create = var.create_eventbridge_module
|
||||
|
||||
create_role = var.event_role_enabled
|
||||
create_bus = var.event_bus_enabled
|
||||
|
||||
rules = {
|
||||
"${var.prefix}-action" = {
|
||||
description = var.event_rule_description
|
||||
event_pattern = jsonencode({
|
||||
"detail-type" : [
|
||||
"AWS API Call via CloudTrail"
|
||||
],
|
||||
"detail" : {
|
||||
"eventSource" : ["${module.kms.key_arn}"],
|
||||
"eventName" : var.event_name
|
||||
} })
|
||||
}
|
||||
}
|
||||
|
||||
targets = {
|
||||
"${var.prefix}-action" = [{
|
||||
name = var.target_name
|
||||
arn = module.sns_topic.sns_topic_arn
|
||||
}]
|
||||
}
|
||||
}
|
18
terraform/testbed/modules/main/kms.tf
Normal file
18
terraform/testbed/modules/main/kms.tf
Normal file
|
@ -0,0 +1,18 @@
|
|||
module "kms" {
|
||||
source = "../kms"
|
||||
|
||||
create = var.create_kms_module
|
||||
key_usage = var.k_usage
|
||||
deletion_window_in_days = var.days_of_deletion
|
||||
enable_key_rotation = var.enabled_rotation
|
||||
is_enabled = var.key_enabled
|
||||
customer_master_key_spec = var.key_specs
|
||||
|
||||
key_owners = var.k_owners
|
||||
key_administrators = var.key_admins
|
||||
key_users = var.k_users
|
||||
key_aws_services = var.key_service_principals
|
||||
key_service_users = var.k_service_users
|
||||
aliases = var.alias
|
||||
|
||||
}
|
44
terraform/testbed/modules/main/main.tf
Normal file
44
terraform/testbed/modules/main/main.tf
Normal file
|
@ -0,0 +1,44 @@
|
|||
data "aws_caller_identity" "current" {}
|
||||
|
||||
provider "aws" {
|
||||
region = var.aws_region
|
||||
default_tags {
|
||||
tags = var.def_tags
|
||||
}
|
||||
}
|
||||
|
||||
provider "helm" {
|
||||
kubernetes {
|
||||
host = data.aws_eks_cluster.default.endpoint
|
||||
cluster_ca_certificate = base64decode(data.aws_eks_cluster.default.certificate_authority[0].data)
|
||||
exec {
|
||||
api_version = "client.authentication.k8s.io/v1beta1"
|
||||
args = ["eks", "get-token", "--cluster-name", data.aws_eks_cluster.default.id]
|
||||
command = "aws"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# provider "kubectl" {
|
||||
# host = data.aws_eks_cluster.default.endpoint
|
||||
# cluster_ca_certificate = base64decode(data.aws_eks_cluster.default.certificate_authority[0].data)
|
||||
# load_config_file = false
|
||||
|
||||
# exec {
|
||||
# api_version = "client.authentication.k8s.io/v1beta1"
|
||||
# args = ["eks", "get-token", "--cluster-name", data.aws_eks_cluster.default.id]
|
||||
# command = "aws"
|
||||
# }
|
||||
# }
|
||||
|
||||
provider "kubernetes" {
|
||||
host = data.aws_eks_cluster.default.endpoint
|
||||
cluster_ca_certificate = base64decode(data.aws_eks_cluster.default.certificate_authority[0].data)
|
||||
# token = data.aws_eks_cluster_auth.default.token
|
||||
|
||||
exec {
|
||||
api_version = "client.authentication.k8s.io/v1beta1"
|
||||
args = ["eks", "get-token", "--cluster-name", data.aws_eks_cluster.default.id]
|
||||
command = "aws"
|
||||
}
|
||||
}
|
63
terraform/testbed/modules/main/sns.tf
Normal file
63
terraform/testbed/modules/main/sns.tf
Normal file
|
@ -0,0 +1,63 @@
|
|||
module "sns_topic" {
|
||||
source = "../sns_topic"
|
||||
create_sns_topic = var.create_sns_module
|
||||
|
||||
policy = data.aws_iam_policy_document.sns_topic_policy.json
|
||||
name = "${var.prefix}-${var.name_of_topic}"
|
||||
kms_master_key_id = module.kms.key_id
|
||||
|
||||
|
||||
}
|
||||
|
||||
data "aws_iam_policy_document" "sns_topic_policy" {
|
||||
statement {
|
||||
sid = "Policy1"
|
||||
effect = "Allow"
|
||||
principals {
|
||||
type = "Service"
|
||||
identifiers = var.principles_for_policy_1
|
||||
}
|
||||
actions = ["SNS:Publish"]
|
||||
resources = ["arn:aws:sns:${var.aws_region}:${data.aws_caller_identity.current.account_id}:${var.prefix}-${var.name_of_topic}"]
|
||||
}
|
||||
|
||||
statement {
|
||||
sid = "Policy2"
|
||||
effect = "Allow"
|
||||
principals {
|
||||
type = "Service"
|
||||
identifiers = var.principles_for_policy_2
|
||||
}
|
||||
actions = ["SNS:Publish"]
|
||||
resources = ["arn:aws:sns:${var.aws_region}:${data.aws_caller_identity.current.account_id}:${var.prefix}-${var.name_of_topic}"]
|
||||
condition {
|
||||
test = "StringEquals"
|
||||
variable = "AWS:SourceOwner"
|
||||
|
||||
values = [
|
||||
data.aws_caller_identity.current.account_id,
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
statement {
|
||||
sid = "Policy3"
|
||||
effect = "Allow"
|
||||
principals {
|
||||
type = "Service"
|
||||
identifiers = var.principles_for_policy_3
|
||||
}
|
||||
actions = ["SNS:Publish"]
|
||||
resources = ["arn:aws:sns:${var.aws_region}:${data.aws_caller_identity.current.account_id}:${var.prefix}-${var.name_of_topic}"]
|
||||
|
||||
condition {
|
||||
test = "StringEquals"
|
||||
variable = "AWS:Referer"
|
||||
|
||||
values = [
|
||||
data.aws_caller_identity.current.account_id,
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
717
terraform/testbed/modules/main/variables.tf
Normal file
717
terraform/testbed/modules/main/variables.tf
Normal file
|
@ -0,0 +1,717 @@
|
|||
########## MAIN VARIABLES ##########
|
||||
variable "aws_region" {
|
||||
description = "Set AWS region"
|
||||
type = string
|
||||
default = "eu-west-1"
|
||||
}
|
||||
|
||||
variable "prefix" {
|
||||
description = "Set prefix for environment (EXAMPLE:test/dev/prod)"
|
||||
type = string
|
||||
default = "test"
|
||||
}
|
||||
variable "def_tags" {
|
||||
description = "Tags related to all AWS resources created"
|
||||
type = map(string)
|
||||
default = {
|
||||
"Environment" = "Test"
|
||||
}
|
||||
}
|
||||
|
||||
######### ALB VARIABLES #########
|
||||
variable "alb_name" {
|
||||
description = "The name of the Application load balancer"
|
||||
type = string
|
||||
default = "fargate-alb"
|
||||
}
|
||||
|
||||
# variable "acm_cert_arn" {
|
||||
# description = "The ACM certificate arn to be used with the ALB"
|
||||
# type = string
|
||||
# }
|
||||
|
||||
######### ECS VARIABLES #########
|
||||
variable "efs_name" {
|
||||
description = "The name of the EFS"
|
||||
type = string
|
||||
default = "efs"
|
||||
}
|
||||
|
||||
variable "ecs_cluster_name" {
|
||||
description = "The name of the ecs_cluster"
|
||||
type = string
|
||||
default = "ecs-cluster-fargate"
|
||||
}
|
||||
|
||||
variable "container_insights" {
|
||||
description = "Value for container insights, accepts enabled or disabled"
|
||||
type = string
|
||||
default = "enabled"
|
||||
}
|
||||
|
||||
variable "ecs_fargate_name" {
|
||||
description = "The name of ecs_fargate"
|
||||
type = string
|
||||
default = "ecs-fargate"
|
||||
}
|
||||
|
||||
variable "tg_name" {
|
||||
description = "Name of the target group"
|
||||
type = string
|
||||
default = "ecs-fargate-tg"
|
||||
}
|
||||
|
||||
variable "logs_retention_days" {
|
||||
description = "Number of days that logs will be kept. Accepted values: (1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 2192, 2557, 2922, 3288, 3653)"
|
||||
type = number
|
||||
default = 14
|
||||
}
|
||||
|
||||
variable "task_definition_cpu" {
|
||||
description = "The CPU value of the task. Accepted values: 256 (.25 vCPU),512 (.5 vCPU),1024 (1 vCPU), 2048 (2 vCPU), 4096 (4 vCPU),8192 (8 vCPU)"
|
||||
type = number
|
||||
default = 256
|
||||
}
|
||||
|
||||
variable "task_definition_memory" {
|
||||
description = "The memory value of the task. Values depend on task_definition_cpu values. Accepted values can be found at https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html"
|
||||
type = number
|
||||
default = 512
|
||||
}
|
||||
|
||||
variable "task_container_port" {
|
||||
description = "The port number on the container that is bound to the user-specified or automatically assigned host port"
|
||||
type = number
|
||||
default = 80
|
||||
}
|
||||
|
||||
variable "task_desired_count" {
|
||||
description = "The number of instances of the task definitions to place and keep running"
|
||||
type = number
|
||||
default = 1
|
||||
}
|
||||
|
||||
########## KMS VARIABLES ##########
|
||||
variable "create_kms_module" {
|
||||
description = "Should it create the KMS module or not"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
variable "k_usage" {
|
||||
description = "Specifies the intended use of the key. Valid values: `ENCRYPT_DECRYPT` or `SIGN_VERIFY`. Defaults to `ENCRYPT_DECRYPT`"
|
||||
type = string
|
||||
default = "ENCRYPT_DECRYPT"
|
||||
}
|
||||
|
||||
variable "days_of_deletion" {
|
||||
description = "Waiting period (in days) after which the AWS KMS key is deleted. Valid values: between `7` and `30` inclusive"
|
||||
type = number
|
||||
default = 30
|
||||
}
|
||||
|
||||
variable "enabled_rotation" {
|
||||
description = "Enables key rotation. Default is `true`"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
|
||||
variable "key_enabled" {
|
||||
description = "Specifies whether the key is enabled. Defaults to `true`"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
variable "key_specs" {
|
||||
description = "Specifies whether the key contains a symmetric key or an asymmetric key pair and the encryption algorithms or signing algorithms that the key supports. Valid values: `SYMMETRIC_DEFAULT`, `RSA_2048`, `RSA_3072`, `RSA_4096`, `HMAC_256`, `ECC_NIST_P256`, `ECC_NIST_P384`, `ECC_NIST_P521`, or `ECC_SECG_P256K1`. Defaults to `SYMMETRIC_DEFAULT`"
|
||||
type = string
|
||||
default = "SYMMETRIC_DEFAULT"
|
||||
}
|
||||
variable "k_owners" {
|
||||
description = "List of IAM ARNs, which have kms:* permissions"
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
variable "key_admins" {
|
||||
description = "List of IAM ARNs, which have `kms:Create*`, `kms:Describe*`, `kms:Enable`, `kms:List*`, `kms:Put*`, `kms:Update*`, `kms:Revoke*`, `kms:Disable*`, `kms:Get*`, `kms:Delete*`, `kms:TagResource`, `kms:UntagResource`, `kms:ScheduleKeyDeletion`, `kms:CancelKeyDeletion` permissions"
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
variable "k_users" {
|
||||
description = "A list of IAM ARNs, which have `kms:Encrypt`, `kms:Decrypt`, `kms:ReEncrypt*`, `kms:GenerateDataKey*`, `kms:DescribeKey` permissions"
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
variable "key_service_principals" {
|
||||
description = "A list of AWS service principals (https://gist.github.com/shortjared/4c1e3fe52bdfa47522cfe5b41e5d6f22)"
|
||||
type = list(string)
|
||||
default = ["sns.amazonaws.com", "events.amazonaws.com"]
|
||||
}
|
||||
variable "k_service_users" {
|
||||
description = "A list of IAM ARNs, which have `kms:CreateGrant`, `kms:ListGrants`, `kms:RevokeGrant` permissions"
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
variable "alias" {
|
||||
description = "List of key aliases"
|
||||
type = list(string)
|
||||
default = ["terraform-key1"]
|
||||
}
|
||||
|
||||
variable "tags" {
|
||||
description = "Map of strings/tags to add to the key resource"
|
||||
type = map(string)
|
||||
default = {
|
||||
Created = "True"
|
||||
}
|
||||
}
|
||||
|
||||
########## EVENTBRIDGE VARIABLES ##########
|
||||
variable "create_eventbridge_module" {
|
||||
description = "Should it create the EventBridge module or not"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
variable "event_role_enabled" {
|
||||
description = "Controls whether IAM roles should be created"
|
||||
type = bool
|
||||
default = false
|
||||
}
|
||||
variable "event_bus_enabled" {
|
||||
description = "Controls whether the EventBridge Bus resource should be created. When `false`, the default bus will be used for the rules"
|
||||
type = bool
|
||||
default = false
|
||||
}
|
||||
variable "event_rule_description" {
|
||||
description = "Description for the event rule"
|
||||
type = string
|
||||
default = "Detects KMS Deletion and Key disabled state"
|
||||
}
|
||||
variable "event_sources" {
|
||||
description = "List of event sources for the Event Rule, services, etc."
|
||||
type = list(string)
|
||||
default = ["kms.amazonaws.com"]
|
||||
}
|
||||
variable "event_name" {
|
||||
description = "List of events to detect"
|
||||
type = list(string)
|
||||
default = ["DisableKey", "ScheduleKeyDeletion"]
|
||||
}
|
||||
variable "target_name" {
|
||||
description = "Name of the Target rule of the event"
|
||||
type = string
|
||||
default = "SNS target"
|
||||
|
||||
}
|
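A rough sketch of how these EventBridge variables could map onto plain resources if the EventBridge module were written by hand; resource names are illustrative and the topic ARN mirrors the construction used in sns.tf:

# Illustrative only: CloudTrail-based rule matching the events listed in var.event_name.
resource "aws_cloudwatch_event_rule" "kms_alerts" {
  name        = "${var.prefix}-kms-alerts"
  description = var.event_rule_description

  event_pattern = jsonencode({
    source        = ["aws.kms"]
    "detail-type" = ["AWS API Call via CloudTrail"]
    detail = {
      eventSource = var.event_sources
      eventName   = var.event_name
    }
  })
}

resource "aws_cloudwatch_event_target" "kms_alerts_to_sns" {
  rule = aws_cloudwatch_event_rule.kms_alerts.name
  arn  = "arn:aws:sns:${var.aws_region}:${data.aws_caller_identity.current.account_id}:${var.prefix}-${var.name_of_topic}"
}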
||||
|
||||
|
||||
########## SNS VARIABLES ##########
|
||||
variable "create_sns_module" {
|
||||
description = "Should it create the SNS module or not"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
|
||||
variable "name_of_topic" {
|
||||
description = "The name of the SNS topic to create"
|
||||
type = string
|
||||
default = "sns-topic"
|
||||
}
|
||||
|
||||
variable "encryption_key" {
|
||||
description = "Defines the key to encrypt the SNS topic"
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "principles_for_policy_1" {
|
||||
description = "Principals for Policy 1"
|
||||
type = list(string)
|
||||
default = ["events.amazonaws.com", "s3.amazonaws.com", "rds.amazonaws.com", "budgets.amazonaws.com"]
|
||||
}
|
||||
variable "principles_for_policy_2" {
|
||||
description = "Principals for Policy 2"
|
||||
type = list(string)
|
||||
default = ["cloudwatch.amazonaws.com", "elasticache.amazonaws.com", "elasticbeanstalk.amazonaws.com", "autoscaling.amazonaws.com"]
|
||||
}
|
||||
variable "principles_for_policy_3" {
|
||||
description = "Principals for Policy 3"
|
||||
type = list(string)
|
||||
default = ["ses.amazonaws.com", "events.amazonaws.com"]
|
||||
}
|
||||
|
||||
########## VPC VARIABLES ##########
|
||||
|
||||
variable "create_vpc_module" {
|
||||
description = "Should it create the VPC module or not"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
|
||||
variable "vpc_name" {
|
||||
description = "Name of the VPC"
|
||||
type = string
|
||||
default = "VPC"
|
||||
}
|
||||
|
||||
variable "cidr" {
|
||||
description = "CIDR of the VPC"
|
||||
type = string
|
||||
default = "10.0.0.0/16"
|
||||
}
|
||||
|
||||
variable "azs" {
|
||||
description = "A list of availability zone names or IDs in the region"
|
||||
type = list(string)
|
||||
default = ["eu-west-1a", "eu-west-1b", "eu-west-1c",]
|
||||
}
|
||||
|
||||
|
||||
variable "private_subnets" {
|
||||
description = "A list of private subnets inside the VPC"
|
||||
type = list(string)
|
||||
default = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
|
||||
}
|
||||
|
||||
|
||||
variable "public_subnets" {
|
||||
description = "A list of public subnets inside the VPC"
|
||||
type = list(string)
|
||||
default = ["10.0.11.0/24", "10.0.12.0/24", "10.0.13.0/24"]
|
||||
}
|
||||
|
||||
variable "database_subnets" {
|
||||
description = "A list of database subnets inside the VPC"
|
||||
type = list(string)
|
||||
default = ["10.0.21.0/24", "10.0.22.0/24", "10.0.23.0/24"]
|
||||
}
|
||||
|
||||
variable "enable_nat_gateway" {
|
||||
description = "Should be true if you want to provision NAT Gateways for each of your private networks"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
|
||||
variable "create_database_subnet_group" {
|
||||
description = "Controls if database subnet group should be created (n.b. database_subnets must also be set)"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
variable "private_acl_dedicated" {
|
||||
description = "Whether to use dedicated network ACL (not default) and custom rules for private subnets"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
variable "public_acl_dedicated" {
|
||||
description = "Whether to use dedicated network ACL (not default) and custom rules for public subnets"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
variable "pub_inbound_acl" {
|
||||
description = "Public subnets inbound network ACLs"
|
||||
type = list(map(string))
|
||||
|
||||
default = [
|
||||
{
|
||||
rule_number = 100
|
||||
rule_action = "allow"
|
||||
from_port = 0
|
||||
to_port = 0
|
||||
protocol = "-1"
|
||||
cidr_block = "0.0.0.0/0"
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
variable "pub_outbound_acl" {
|
||||
description = "Public subnets outbound network ACLs"
|
||||
type = list(map(string))
|
||||
|
||||
default = [
|
||||
{
|
||||
rule_number = 100
|
||||
rule_action = "allow"
|
||||
from_port = 0
|
||||
to_port = 0
|
||||
protocol = "-1"
|
||||
cidr_block = "0.0.0.0/0"
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
variable "private_inbound_acl" {
|
||||
description = "Private subnets inbound network ACLs"
|
||||
type = list(map(string))
|
||||
|
||||
default = [
|
||||
{
|
||||
rule_number = 100
|
||||
rule_action = "allow"
|
||||
from_port = 0
|
||||
to_port = 0
|
||||
protocol = "-1"
|
||||
cidr_block = "0.0.0.0/0"
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
variable "private_outbound_acl" {
|
||||
description = "Private subnets outbound network ACLs"
|
||||
type = list(map(string))
|
||||
|
||||
default = [
|
||||
{
|
||||
rule_number = 100
|
||||
rule_action = "allow"
|
||||
from_port = 0
|
||||
to_port = 0
|
||||
protocol = "-1"
|
||||
cidr_block = "0.0.0.0/0"
|
||||
},
|
||||
]
|
||||
}
|
||||
variable "vpc_tags" {
|
||||
description = "Map of strings/tags to add to the VPC resource"
|
||||
type = map(string)
|
||||
default = {
|
||||
Created = "True"
|
||||
}
|
||||
}
|
||||
|
||||
########## ECR VARIABLES ##########
|
||||
variable "create_ecr_module" {
|
||||
description = "Should it create the ECR module or not"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
variable "is_image_mutable" {
|
||||
description = "The tag mutability setting for the repo. Valid values: `MUTABLE` or `IMMUTABLE`"
|
||||
type = string
|
||||
default = "MUTABLE"
|
||||
}
|
||||
|
||||
variable "ecr_encryption" {
|
||||
description = "The encryption type for the repository. Must be one of: `KMS` or `AES256`. Defaults to `AES256`"
|
||||
type = string
|
||||
default = "KMS"
|
||||
}
|
||||
|
||||
variable "repository_kms_key" {
|
||||
description = "The ARN of the KMS key to use when encryption_type is `KMS`. If not specified, uses the default AWS managed key for ECR"
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "repo_name" {
|
||||
description = "Name of the repo to be created"
|
||||
type = string
|
||||
default = "ecr-repo"
|
||||
}
|
||||
|
||||
variable "ecr_tags" {
|
||||
description = "Tags to set on the ecr repo"
|
||||
type = map(string)
|
||||
default = {
|
||||
Creation = "True"
|
||||
}
|
||||
}
|
||||
|
||||
variable "scan_enabled" {
|
||||
description = "Whether images are scanned after being pushed to the repo"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
|
||||
variable "delete_enabled" {
|
||||
description = "Whether the repository can be forcefully removed while having images stored inside"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
variable "ecr_policy" {
|
||||
description = "ECR Lifecycle Policy (json string) to manage the image lifecycles in the ECR repo"
|
||||
type = string
|
||||
default = <<L_POLICY
|
||||
{
|
||||
"rules": [
|
||||
{
|
||||
"action": {
|
||||
"type": "expire"
|
||||
},
|
||||
"selection": {
|
||||
"countType": "imageCountMoreThan",
|
||||
"countNumber": 50,
|
||||
"tagStatus": "any"
|
||||
},
|
||||
"description": "Only keep 50 images",
|
||||
"rulePriority": 10
|
||||
}
|
||||
]
|
||||
}
|
||||
L_POLICY
|
||||
}
|
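For orientation, these ECR variables map naturally onto the two standard repository resources; the following is only a sketch of what the ECR module is assumed to do with them:

# Sketch only: how var.repo_name, var.is_image_mutable, var.scan_enabled and var.ecr_policy could be consumed.
resource "aws_ecr_repository" "this" {
  name                 = "${var.prefix}-${var.repo_name}"
  image_tag_mutability = var.is_image_mutable
  force_delete         = var.delete_enabled

  image_scanning_configuration {
    scan_on_push = var.scan_enabled
  }

  encryption_configuration {
    encryption_type = var.ecr_encryption
    kms_key         = var.repository_kms_key
  }
}

resource "aws_ecr_lifecycle_policy" "this" {
  repository = aws_ecr_repository.this.name
  policy     = var.ecr_policy
}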
||||
|
||||
########## RDS VARIABLES ##########
|
||||
|
||||
|
||||
variable "engine_version" {
|
||||
description = "The Version of the DB engine"
|
||||
type = string
|
||||
default = "5.7"
|
||||
}
|
||||
|
||||
|
||||
variable "cluster_name" {
|
||||
description = "Cluster Name"
|
||||
type = string
|
||||
default = "clusterdb"
|
||||
}
|
||||
|
||||
|
||||
variable "num_of_instances" {
|
||||
description = "The number of instances that you wish to be in the cluster"
|
||||
type = number
|
||||
default = 1
|
||||
}
|
||||
|
||||
variable "instance_type" {
|
||||
description = "Instance type, i.e. the CPU and RAM allocated to each instance (example: db.t3.small). Keep in mind that some types are not supported in every region or engine version"
|
||||
type = string
|
||||
default = "db.t3.small"
|
||||
}
|
||||
|
||||
variable "cloud_watch_exports" {
|
||||
description = "Which log types to export to CloudWatch"
|
||||
type = list(string)
|
||||
default = ["error", "slowquery"]
|
||||
}
|
||||
|
||||
|
||||
variable "parameter_group_name" {
|
||||
description = "Parameter group name"
|
||||
type = string
|
||||
default = "sqlvpcparamgroupname"
|
||||
}
|
||||
|
||||
|
||||
variable "parameter_group_family" {
|
||||
description = "Parameter group family"
|
||||
type = string
|
||||
default = "aurora-mysql5.7"
|
||||
}
|
||||
|
||||
variable "create_db_cluster_parameter_group" {
|
||||
description = "To create the parameter group or not, default is true"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
|
||||
variable "db_master_username" {
|
||||
description = "DB master username"
|
||||
type = string
|
||||
default = "rootuser"
|
||||
}
|
||||
|
||||
variable "database_name" {
|
||||
description = "Name of the database"
|
||||
type = string
|
||||
default = "testdb"
|
||||
}
|
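The RDS variables above are enough to sketch an Aurora MySQL cluster; the block below is illustrative only, with credentials, subnet group and security groups deliberately omitted:

# Sketch only: Aurora cluster driven by the RDS variables above.
resource "aws_rds_cluster" "this" {
  cluster_identifier              = "${var.prefix}-${var.cluster_name}"
  engine                          = "aurora-mysql"
  engine_version                  = var.engine_version
  database_name                   = var.database_name
  master_username                 = var.db_master_username
  enabled_cloudwatch_logs_exports = var.cloud_watch_exports
  # master_password / db_subnet_group_name / vpc_security_group_ids omitted on purpose
}

resource "aws_rds_cluster_instance" "this" {
  count              = var.num_of_instances
  cluster_identifier = aws_rds_cluster.this.id
  engine             = aws_rds_cluster.this.engine
  instance_class     = var.instance_type
}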
||||
|
||||
########## WAF VARIABLES ##########
|
||||
|
||||
variable "waf_scope" {
|
||||
description = "The scope of the WAF: REGIONAL for load balancer association or CLOUDFRONT for CloudFront CDN association. Accepted values: (REGIONAL, CLOUDFRONT)."
|
||||
type = string
|
||||
default = "CLOUDFRONT"
|
||||
}
|
||||
|
||||
########## CLOUDFRONT VARIABLES ##########
|
||||
|
||||
# variable "cdn_alias" {
|
||||
# description = "Extra CNAMEs (alternate domain names), if any, for this distribution."
|
||||
# type = list(string)
|
||||
# }
|
||||
|
||||
# variable "us_east_cert_arn" {
|
||||
# description = "The ACM certificate available in us-east-1 to be used with Cloudfront"
|
||||
# type = string
|
||||
# }
|
||||
|
||||
variable "cdn_comment" {
|
||||
description = "Comment for the Cloudfront distribution"
|
||||
type = string
|
||||
default = "Cloudfront CDN for Wordpress"
|
||||
}
|
||||
|
||||
variable "cdn_price_class" {
|
||||
description = "Price class for the Cloudfront distribution. Accepted values: (PriceClass_100, PriceClass_200, PriceClass_All)"
|
||||
type = string
|
||||
default = "PriceClass_100"
|
||||
}
|
||||
|
||||
variable "origin_protocol_policy" {
|
||||
description = "The origin protocol policy for Cloudfront. Accepted values are (https-only, http-only and match-viewer)"
|
||||
type = string
|
||||
default = "match-viewer"
|
||||
}
|
||||
|
||||
variable "origin_ssl_protocols" {
|
||||
description = "A list of accepted SSL origin protocols"
|
||||
type = list(string)
|
||||
default = ["TLSv1.2"]
|
||||
}
|
||||
|
||||
variable "cdn_allowed_methods" {
|
||||
description = "List of allowed methods (e.g. GET, PUT, POST, DELETE, HEAD) for AWS CloudFront"
|
||||
type = list(string)
|
||||
default = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"]
|
||||
}
|
||||
|
||||
variable "cdn_cached_methods" {
|
||||
description = "List of cached methods (e.g. GET, PUT, POST, DELETE, HEAD)"
|
||||
type = list(string)
|
||||
default = ["GET", "HEAD"]
|
||||
}
|
||||
|
||||
variable "enable_cdn" {
|
||||
description = "Enable or disable the Cloudfront modules. Allowed values: (true or false)"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
|
||||
variable "cdn_is_ipv6_enabled" {
|
||||
description = "Enable or disable ipv6 on Cloudfront"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
|
||||
variable "cdn_retain_on_delete" {
|
||||
description = "Enable or disable retention after delete of the CDN. Allowed values: (true or false)"
|
||||
type = bool
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "cdn_wait_for_deployment" {
|
||||
description = "If enabled, the resource will wait for the distribution status to change from InProgress to Deployed. Setting this to false will skip the process."
|
||||
type = bool
|
||||
default = false
|
||||
}
|
||||
|
||||
|
||||
######### AUTO SCALING VARIABLES ############
|
||||
|
||||
variable "as_max_cpu_threshold" {
|
||||
description = "Threshold for max CPU usage"
|
||||
type = string
|
||||
default = "85"
|
||||
}
|
||||
|
||||
variable "as_min_cpu_threshold" {
|
||||
description = "Threshold for min CPU usage"
|
||||
type = string
|
||||
default = "10"
|
||||
}
|
||||
|
||||
variable "as_max_cpu_evaluation_period" {
|
||||
description = "The number of periods over which data is compared to the specified threshold for max cpu metric alarm"
|
||||
type = string
|
||||
default = "3"
|
||||
}
|
||||
|
||||
variable "as_min_cpu_evaluation_period" {
|
||||
description = "The number of periods over which data is compared to the specified threshold for min cpu metric alarm"
|
||||
type = string
|
||||
default = "3"
|
||||
}
|
||||
|
||||
variable "as_max_cpu_period" {
|
||||
description = "The period in seconds over which the specified statistic is applied for max cpu metric alarm"
|
||||
type = string
|
||||
default = "60"
|
||||
}
|
||||
|
||||
variable "as_min_cpu_period" {
|
||||
description = "The period in seconds over which the specified statistic is applied for min cpu metric alarm"
|
||||
type = string
|
||||
default = "60"
|
||||
}
|
||||
|
||||
variable "as_scale_target_max_capacity" {
|
||||
description = "The max capacity of the scalable target"
|
||||
type = number
|
||||
default = 5
|
||||
}
|
||||
|
||||
variable "as_scale_target_min_capacity" {
|
||||
description = "The min capacity of the scalable target"
|
||||
type = number
|
||||
default = 1
|
||||
}
|
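These thresholds usually pair with an Application Auto Scaling target and CloudWatch alarms. A trimmed sketch, reusing the ECS names defined earlier in this file:

# Sketch only: scalable target and high-CPU alarm for the Fargate service.
resource "aws_appautoscaling_target" "ecs" {
  service_namespace  = "ecs"
  resource_id        = "service/${var.ecs_cluster_name}/${var.ecs_fargate_name}"
  scalable_dimension = "ecs:service:DesiredCount"
  min_capacity       = var.as_scale_target_min_capacity
  max_capacity       = var.as_scale_target_max_capacity
}

resource "aws_cloudwatch_metric_alarm" "cpu_high" {
  alarm_name          = "${var.prefix}-ecs-cpu-high"
  comparison_operator = "GreaterThanOrEqualToThreshold"
  evaluation_periods  = var.as_max_cpu_evaluation_period
  period              = var.as_max_cpu_period
  threshold           = var.as_max_cpu_threshold
  namespace           = "AWS/ECS"
  metric_name         = "CPUUtilization"
  statistic           = "Average"
  # alarm_actions would reference an aws_appautoscaling_policy (omitted here)
}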
||||
|
||||
#### EFS ####
|
||||
variable "app_labels" {
|
||||
type = map(string)
|
||||
description = "List of the labels for Deployment"
|
||||
default = {
|
||||
"app" = "wordpress"
|
||||
"tier" = "frontend"
|
||||
}
|
||||
}
|
||||
variable "deployment_replicas" {
|
||||
type = string
|
||||
description = "Number of replicas for the Deployment"
|
||||
default = 3
|
||||
}
|
||||
variable "deployment_name" {
|
||||
type = string
|
||||
description = "Name of the Deployment"
|
||||
default = "wordpress"
|
||||
}
|
||||
|
||||
variable "kubernetes_namespace" {
|
||||
type = string
|
||||
description = "Kubernetes namespace for selection"
|
||||
default = "wordpress-rds"
|
||||
}
|
||||
|
||||
variable "fargate_profile_name" {
|
||||
type = string
|
||||
description = "Name of the Fargate Profile"
|
||||
default = "eks_fargate"
|
||||
}
|
||||
|
||||
variable "desired_nodes" {
|
||||
description = "Desired number of worker nodes"
|
||||
default = 2
|
||||
}
|
||||
|
||||
variable "max_nodes" {
|
||||
description = "Maximum number of worker nodes"
|
||||
default = 2
|
||||
}
|
||||
|
||||
variable "min_nodes" {
|
||||
description = "Minimum number of worker nodes"
|
||||
default = 1
|
||||
}
|
||||
|
||||
variable "ng_instance_types" {
|
||||
type = list(string)
|
||||
description = "List of instance types associated with the EKS Node Group"
|
||||
default = ["t2.micro"]
|
||||
}
|
||||
|
||||
variable "disk_size" {
|
||||
description = "Disk Size for Worker Nodes in GiB"
|
||||
default = 10
|
||||
}
|
||||
|
||||
variable "eks_cluster_name" {
|
||||
description = "Name of the EKS Cluster"
|
||||
default = "k8s"
|
||||
}
|
||||
|
||||
variable "node_group_name" {
|
||||
type = string
|
||||
description = "Name of the EKS Node Group"
|
||||
default = "k8s"
|
||||
}
|
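The node-group variables correspond directly to the scaling_config of an EKS managed node group; a minimal sketch follows, with the role ARN and subnets marked as assumptions because they are not defined in this file:

# Sketch only: managed node group wired to the variables above.
resource "aws_eks_node_group" "this" {
  cluster_name    = "${var.eks_cluster_name}-${terraform.workspace}"
  node_group_name = var.node_group_name
  node_role_arn   = var.node_role_arn          # assumption: declared elsewhere
  subnet_ids      = module.vpc.private_subnets # assumption: output of the vpc module
  instance_types  = var.ng_instance_types
  disk_size       = var.disk_size

  scaling_config {
    desired_size = var.desired_nodes
    max_size     = var.max_nodes
    min_size     = var.min_nodes
  }
}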
||||
|
39
terraform/testbed/modules/main/vpc.tf
Normal file
|
@ -0,0 +1,39 @@
|
|||
module "vpc" {
|
||||
source = "../vpc"
|
||||
|
||||
name = "Kubernetes"
|
||||
cidr = "10.0.0.0/16"
|
||||
|
||||
azs = ["us-east-2a", "us-east-2b", "us-east-2c"]
|
||||
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
|
||||
public_subnets = ["10.0.11.0/24", "10.0.12.0/24", "10.0.13.0/24"]
|
||||
database_subnets = ["10.0.21.0/24", "10.0.22.0/24", "10.0.23.0/24"]
|
||||
|
||||
public_subnet_tags = {
|
||||
Name = "${terraform.workspace}-public-subnet"
|
||||
"kubernetes.io/cluster/${var.eks_cluster_name}-${terraform.workspace}" = "shared"
|
||||
"kubernetes.io/role/elb" = 1
|
||||
Environment = terraform.workspace
|
||||
}
|
||||
|
||||
private_subnet_tags = {
|
||||
Name = "${terraform.workspace}-private-subnet"
|
||||
"kubernetes.io/cluster/${var.eks_cluster_name}-${terraform.workspace}" = "shared"
|
||||
"kubernetes.io/role/internal-elb" = 1
|
||||
Environment = terraform.workspace
|
||||
}
|
||||
|
||||
|
||||
enable_nat_gateway = true
|
||||
single_nat_gateway = true
|
||||
one_nat_gateway_per_az = false
|
||||
|
||||
enable_dns_hostnames = true
|
||||
enable_dns_support = true
|
||||
|
||||
tags = {
|
||||
Name = "${var.vpc_name}-${terraform.workspace}"
|
||||
"kubernetes.io/cluster/${var.eks_cluster_name}" = "shared"
|
||||
|
||||
}
|
||||
}
|
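The kubernetes.io/* subnet tags above only take effect once an EKS cluster with the matching name is attached to these subnets; a minimal sketch of that relationship, with the role ARN as a placeholder:

# Sketch only: control plane attached to the tagged subnets created by module "vpc".
resource "aws_eks_cluster" "this" {
  name     = "${var.eks_cluster_name}-${terraform.workspace}"
  role_arn = var.cluster_role_arn # assumption: declared elsewhere

  vpc_config {
    subnet_ids = concat(module.vpc.private_subnets, module.vpc.public_subnets)
  }
}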
25358
yaml/cert-manager/cert-manager.crds.yaml
Normal file
File diff suppressed because it is too large
20
yaml/cert-manager/letsencrypt-production-clusterissuer.yaml
Normal file
|
@ -0,0 +1,20 @@
|
|||
apiVersion: cert-manager.io/v1
|
||||
kind: ClusterIssuer
|
||||
metadata:
|
||||
name: letsencrypt-production
|
||||
spec:
|
||||
acme:
|
||||
# The ACME server URL
|
||||
server: https://acme-v02.api.letsencrypt.org/directory
|
||||
# Email address used for ACME registration
|
||||
email: var_acme_email
|
||||
# Name of a secret used to store the ACME account private key
|
||||
privateKeySecretRef:
|
||||
name: letsencrypt-sec-production
|
||||
# Enable the HTTP-01 challenge provider
|
||||
solvers:
|
||||
# An empty 'selector' means that this solver matches all domains
|
||||
- selector: {}
|
||||
http01:
|
||||
ingress:
|
||||
class: nginx
|
|
@ -0,0 +1,25 @@
|
|||
apiVersion: cert-manager.io/v1
|
||||
kind: ClusterIssuer
|
||||
metadata:
|
||||
name: letsencrypt-production-dns
|
||||
spec:
|
||||
acme:
|
||||
server: https://acme-v02.api.letsencrypt.org/directory
|
||||
email: var_acme_email
|
||||
|
||||
# Name of a secret used to store the ACME account private key
|
||||
privateKeySecretRef:
|
||||
name: letsencrypt-sec-production-dns
|
||||
|
||||
# ACME DNS-01 provider configurations
|
||||
solvers:
|
||||
# An empty 'selector' means that this solver matches all domains
|
||||
- selector: {}
|
||||
dns01:
|
||||
cloudflare:
|
||||
email: var_acme_email
|
||||
# !! Remember to first create a k8s secret
|
||||
# kubectl create secret generic cf-api-secret --from-literal=cf-api-key=<cloudflare-api-key>
|
||||
apiKeySecretRef:
|
||||
name: cf-api-secret
|
||||
key: cf-api-key
|
20
yaml/cert-manager/letsencrypt-staging-clusterissuer.yaml
Normal file
|
@ -0,0 +1,20 @@
|
|||
apiVersion: cert-manager.io/v1
|
||||
kind: ClusterIssuer
|
||||
metadata:
|
||||
name: letsencrypt-staging
|
||||
spec:
|
||||
acme:
|
||||
# The ACME server URL
|
||||
server: https://acme-staging-v02.api.letsencrypt.org/directory
|
||||
# Email address used for ACME registration
|
||||
email: var_acme_email
|
||||
# Name of a secret used to store the ACME account private key
|
||||
privateKeySecretRef:
|
||||
name: letsencrypt-sec-staging
|
||||
# Enable the HTTP-01 challenge provider
|
||||
solvers:
|
||||
# An empty 'selector' means that this solver matches all domains
|
||||
- selector: {}
|
||||
http01:
|
||||
ingress:
|
||||
class: nginx
|
25
yaml/cert-manager/letsencrypt-staging-dns-clusterissuer.yaml
Normal file
|
@ -0,0 +1,25 @@
|
|||
apiVersion: cert-manager.io/v1
|
||||
kind: ClusterIssuer
|
||||
metadata:
|
||||
name: letsencrypt-staging-dns
|
||||
spec:
|
||||
acme:
|
||||
server: https://acme-staging-v02.api.letsencrypt.org/directory
|
||||
email: var_acme_email
|
||||
|
||||
# Name of a secret used to store the ACME account private key
|
||||
privateKeySecretRef:
|
||||
name: letsencrypt-sec-staging-dns
|
||||
|
||||
# ACME DNS-01 provider configurations
|
||||
solvers:
|
||||
# An empty 'selector' means that this solver matches all domains
|
||||
- selector: {}
|
||||
dns01:
|
||||
cloudflare:
|
||||
email: var_acme_email
|
||||
# !! Remember to first create a k8s secret
|
||||
# kubectl create secret generic cf-api-secret --from-literal=cf-api-key=<cloudflare-api-key>
|
||||
apiKeySecretRef:
|
||||
name: cf-api-secret
|
||||
key: cf-api-key
|
354
yaml/cert-manager/values.yaml
Normal file
|
@ -0,0 +1,354 @@
|
|||
# Default values for cert-manager.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
global:
|
||||
## Reference to one or more secrets to be used when pulling images
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
||||
##
|
||||
imagePullSecrets: []
|
||||
# - name: "image-pull-secret"
|
||||
|
||||
# Optional priority class to be used for the cert-manager pods
|
||||
priorityClassName: ""
|
||||
rbac:
|
||||
create: true
|
||||
|
||||
podSecurityPolicy:
|
||||
enabled: false
|
||||
useAppArmor: true
|
||||
|
||||
# Set the verbosity of cert-manager. Range of 0 - 6 with 6 being the most verbose.
|
||||
logLevel: 2
|
||||
|
||||
leaderElection:
|
||||
# Override the namespace used to store the ConfigMap for leader election
|
||||
namespace: "kube-system"
|
||||
|
||||
installCRDs: false
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
strategy: {}
|
||||
# type: RollingUpdate
|
||||
# rollingUpdate:
|
||||
# maxSurge: 0
|
||||
# maxUnavailable: 1
|
||||
|
||||
# Comma separated list of feature gates that should be enabled on the
|
||||
# controller pod.
|
||||
featureGates: ""
|
||||
|
||||
image:
|
||||
repository: quay.io/jetstack/cert-manager-controller
|
||||
# You can manage a registry with
|
||||
# registry: quay.io
|
||||
# repository: jetstack/cert-manager-controller
|
||||
|
||||
# Override the image tag to deploy by setting this variable.
|
||||
# If no value is set, the chart's appVersion will be used.
|
||||
# tag: canary
|
||||
|
||||
# Setting a digest will override any tag
|
||||
# digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
# Override the namespace used to store DNS provider credentials etc. for ClusterIssuer
|
||||
# resources. By default, the same namespace as cert-manager is deployed within is
|
||||
# used. This namespace will not be automatically created by the Helm chart.
|
||||
clusterResourceNamespace: ""
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
create: true
|
||||
# The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
# name: ""
|
||||
# Optional additional annotations to add to the controller's ServiceAccount
|
||||
# annotations: {}
|
||||
|
||||
# Optional additional arguments
|
||||
extraArgs: []
|
||||
# Use this flag to set a namespace that cert-manager will use to store
|
||||
# supporting resources required for each ClusterIssuer (default is kube-system)
|
||||
# - --cluster-resource-namespace=kube-system
|
||||
# When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted
|
||||
# - --enable-certificate-owner-ref=true
|
||||
|
||||
extraEnv: []
|
||||
# - name: SOME_VAR
|
||||
# value: 'some value'
|
||||
|
||||
resources: {}
|
||||
# requests:
|
||||
# cpu: 10m
|
||||
# memory: 32Mi
|
||||
|
||||
# Pod Security Context
|
||||
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
|
||||
securityContext: {}
|
||||
# legacy securityContext parameter format: if enabled is set to true, only fsGroup and runAsUser are supported
|
||||
# securityContext:
|
||||
# enabled: false
|
||||
# fsGroup: 1001
|
||||
# runAsUser: 1001
|
||||
# to support additional securityContext parameters, omit the `enabled` parameter and simply specify the parameters
|
||||
# you want to set, e.g.
|
||||
# securityContext:
|
||||
# fsGroup: 1000
|
||||
# runAsUser: 1000
|
||||
# runAsNonRoot: true
|
||||
|
||||
# Container Security Context to be set on the controller component container
|
||||
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
|
||||
containerSecurityContext: {}
|
||||
# capabilities:
|
||||
# drop:
|
||||
# - ALL
|
||||
# readOnlyRootFilesystem: true
|
||||
# runAsNonRoot: true
|
||||
|
||||
|
||||
volumes: []
|
||||
|
||||
volumeMounts: []
|
||||
|
||||
# Optional additional annotations to add to the controller Deployment
|
||||
# deploymentAnnotations: {}
|
||||
|
||||
# Optional additional annotations to add to the controller Pods
|
||||
# podAnnotations: {}
|
||||
|
||||
podLabels: {}
|
||||
|
||||
# Optional DNS settings, useful if you have a public and private DNS zone for
|
||||
# the same domain on Route 53. What follows is an example of ensuring
|
||||
# cert-manager can access an ingress or DNS TXT records at all times.
|
||||
# NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for
|
||||
# the cluster to work.
|
||||
# podDnsPolicy: "None"
|
||||
# podDnsConfig:
|
||||
# nameservers:
|
||||
# - "1.1.1.1"
|
||||
# - "8.8.8.8"
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
ingressShim: {}
|
||||
# defaultIssuerName: ""
|
||||
# defaultIssuerKind: ""
|
||||
# defaultIssuerGroup: ""
|
||||
|
||||
prometheus:
|
||||
enabled: true
|
||||
servicemonitor:
|
||||
enabled: false
|
||||
prometheusInstance: default
|
||||
targetPort: 9402
|
||||
path: /metrics
|
||||
interval: 60s
|
||||
scrapeTimeout: 30s
|
||||
labels: {}
|
||||
|
||||
# Use these variables to configure the HTTP_PROXY environment variables
|
||||
# http_proxy: "http://proxy:8080"
|
||||
# https_proxy: "http://proxy:8080"
|
||||
# no_proxy: 127.0.0.1,localhost
|
||||
|
||||
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
|
||||
# for example:
|
||||
# affinity:
|
||||
# nodeAffinity:
|
||||
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||
# nodeSelectorTerms:
|
||||
# - matchExpressions:
|
||||
# - key: foo.bar.com/role
|
||||
# operator: In
|
||||
# values:
|
||||
# - master
|
||||
affinity: {}
|
||||
|
||||
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
|
||||
# for example:
|
||||
# tolerations:
|
||||
# - key: foo.bar.com/role
|
||||
# operator: Equal
|
||||
# value: master
|
||||
# effect: NoSchedule
|
||||
tolerations: []
|
||||
|
||||
webhook:
|
||||
replicaCount: 1
|
||||
timeoutSeconds: 10
|
||||
|
||||
strategy: {}
|
||||
# type: RollingUpdate
|
||||
# rollingUpdate:
|
||||
# maxSurge: 0
|
||||
# maxUnavailable: 1
|
||||
|
||||
securityContext: {}
|
||||
|
||||
# Container Security Context to be set on the webhook component container
|
||||
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
|
||||
containerSecurityContext: {}
|
||||
# capabilities:
|
||||
# drop:
|
||||
# - ALL
|
||||
# readOnlyRootFilesystem: true
|
||||
# runAsNonRoot: true
|
||||
|
||||
# Optional additional annotations to add to the webhook Deployment
|
||||
# deploymentAnnotations: {}
|
||||
|
||||
# Optional additional annotations to add to the webhook Pods
|
||||
# podAnnotations: {}
|
||||
|
||||
# Optional additional annotations to add to the webhook MutatingWebhookConfiguration
|
||||
# mutatingWebhookConfigurationAnnotations: {}
|
||||
|
||||
# Optional additional annotations to add to the webhook ValidatingWebhookConfiguration
|
||||
# validatingWebhookConfigurationAnnotations: {}
|
||||
|
||||
# Optional additional arguments for webhook
|
||||
extraArgs: []
|
||||
|
||||
resources: {}
|
||||
# requests:
|
||||
# cpu: 10m
|
||||
# memory: 32Mi
|
||||
|
||||
## Liveness and readiness probe values
|
||||
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
|
||||
##
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
initialDelaySeconds: 60
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 1
|
||||
readinessProbe:
|
||||
failureThreshold: 3
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 5
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 1
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
affinity: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
# Optional additional labels to add to the Webhook Pods
|
||||
podLabels: {}
|
||||
|
||||
image:
|
||||
repository: quay.io/jetstack/cert-manager-webhook
|
||||
# You can manage a registry with
|
||||
# registry: quay.io
|
||||
# repository: jetstack/cert-manager-webhook
|
||||
|
||||
# Override the image tag to deploy by setting this variable.
|
||||
# If no value is set, the chart's appVersion will be used.
|
||||
# tag: canary
|
||||
|
||||
# Setting a digest will override any tag
|
||||
# digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
|
||||
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
create: true
|
||||
# The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
# name: ""
|
||||
# Optional additional annotations to add to the controller's ServiceAccount
|
||||
# annotations: {}
|
||||
|
||||
# The port that the webhook should listen on for requests.
|
||||
# In GKE private clusters, by default kubernetes apiservers are allowed to
|
||||
# talk to the cluster nodes only on 443 and 10250, so configuring
|
||||
# securePort: 10250, will work out of the box without needing to add firewall
|
||||
# rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000
|
||||
securePort: 10260
|
||||
|
||||
# Specifies if the webhook should be started in hostNetwork mode.
|
||||
#
|
||||
# Required for use in some managed kubernetes clusters (such as AWS EKS) with custom
|
||||
# CNI (such as calico), because control-plane managed by AWS cannot communicate
|
||||
# with pods' IP CIDR and admission webhooks are not working
|
||||
#
|
||||
# Since the default port for the webhook conflicts with kubelet on the host
|
||||
# network, `webhook.securePort` should be changed to an available port if
|
||||
# running in hostNetwork mode.
|
||||
hostNetwork: true
|
||||
|
||||
cainjector:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
|
||||
strategy: {}
|
||||
# type: RollingUpdate
|
||||
# rollingUpdate:
|
||||
# maxSurge: 0
|
||||
# maxUnavailable: 1
|
||||
|
||||
securityContext: {}
|
||||
|
||||
# Container Security Context to be set on the cainjector component container
|
||||
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
|
||||
containerSecurityContext: {}
|
||||
# capabilities:
|
||||
# drop:
|
||||
# - ALL
|
||||
# readOnlyRootFilesystem: true
|
||||
# runAsNonRoot: true
|
||||
|
||||
|
||||
# Optional additional annotations to add to the cainjector Deployment
|
||||
# deploymentAnnotations: {}
|
||||
|
||||
# Optional additional annotations to add to the cainjector Pods
|
||||
# podAnnotations: {}
|
||||
|
||||
# Optional additional arguments for cainjector
|
||||
extraArgs: []
|
||||
|
||||
resources: {}
|
||||
# requests:
|
||||
# cpu: 10m
|
||||
# memory: 32Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
affinity: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
# Optional additional labels to add to the CA Injector Pods
|
||||
podLabels: {}
|
||||
|
||||
image:
|
||||
repository: quay.io/jetstack/cert-manager-cainjector
|
||||
# You can manage a registry with
|
||||
# registry: quay.io
|
||||
# repository: jetstack/cert-manager-cainjector
|
||||
|
||||
# Override the image tag to deploy by setting this variable.
|
||||
# If no value is set, the chart's appVersion will be used.
|
||||
# tag: canary
|
||||
|
||||
# Setting a digest will override any tag
|
||||
# digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
|
||||
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
create: true
|
||||
# The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
# name: ""
|
||||
# Optional additional annotations to add to the controller's ServiceAccount
|
||||
# annotations: {}
|
738
yaml/ingress-nginx/old-values.yaml
Normal file
|
@ -0,0 +1,738 @@
|
|||
## nginx configuration
|
||||
## Ref: https://github.com/kubernetes/ingress-nginx/blob/master/controllers/nginx/configuration.md
|
||||
##
|
||||
|
||||
## Overrides for generated resource names
|
||||
# See templates/_helpers.tpl
|
||||
# nameOverride:
|
||||
# fullnameOverride:
|
||||
|
||||
controller:
|
||||
name: controller
|
||||
image:
|
||||
repository: k8s.gcr.io/ingress-nginx/controller
|
||||
tag: "v0.41.2"
|
||||
digest: sha256:1f4f402b9c14f3ae92b11ada1dfe9893a88f0faeb0b2f4b903e2c67a0c3bf0de
|
||||
pullPolicy: IfNotPresent
|
||||
# www-data -> uid 101
|
||||
runAsUser: 101
|
||||
allowPrivilegeEscalation: true
|
||||
|
||||
# Configures the ports the nginx-controller listens on
|
||||
containerPort:
|
||||
http: 80
|
||||
https: 443
|
||||
|
||||
# Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/
|
||||
#config: {}
|
||||
config:
|
||||
use-proxy-protocol: "false"
|
||||
client-header-buffer-size: "8k"
|
||||
large-client-header-buffers: "4 16k"
|
||||
use-forwarded-headers: "true"
|
||||
use-geoip: "true"
|
||||
use-geoip2: "true"
|
||||
|
||||
## Annotations to be added to the controller config configuration configmap
|
||||
##
|
||||
configAnnotations: {}
|
||||
|
||||
# Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers
|
||||
#proxySetHeaders: {}
|
||||
proxySetHeaders:
|
||||
X-Country-Code: $geoip_country_code
|
||||
|
||||
# Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers
|
||||
addHeaders: {}
|
||||
|
||||
# Optionally customize the pod dnsConfig.
|
||||
dnsConfig: {}
|
||||
|
||||
# Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'.
|
||||
# By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller
|
||||
# to keep resolving names inside the k8s network, use ClusterFirstWithHostNet.
|
||||
#dnsPolicy: ClusterFirst
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
|
||||
# Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network
|
||||
# Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network; the default --publish-service flag used in standard cloud setups does not apply
|
||||
reportNodeInternalIp: false
|
||||
|
||||
# Required for use with CNI based kubernetes installations (such as ones set up by kubeadm),
|
||||
# since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920
|
||||
# is merged
|
||||
#hostNetwork: false
|
||||
hostNetwork: true
|
||||
|
||||
## Use host ports 80 and 443
|
||||
## Disabled by default
|
||||
##
|
||||
hostPort:
|
||||
enabled: false
|
||||
ports:
|
||||
http: 80
|
||||
https: 443
|
||||
|
||||
## Election ID to use for status update
|
||||
##
|
||||
electionID: ingress-controller-leader
|
||||
|
||||
## Name of the ingress class to route through this controller
|
||||
##
|
||||
ingressClass: nginx
|
||||
|
||||
# labels to add to the pod container metadata
|
||||
podLabels: {}
|
||||
# key: value
|
||||
|
||||
## Security Context policies for controller pods
|
||||
##
|
||||
podSecurityContext: {}
|
||||
|
||||
## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for
|
||||
## notes on enabling and using sysctls
|
||||
###
|
||||
sysctls: {}
|
||||
# sysctls:
|
||||
# "net.core.somaxconn": "8192"
|
||||
|
||||
## Allows customization of the source of the IP address or FQDN to report
|
||||
## in the ingress status field. By default, it reads the information provided
|
||||
## by the service. If disabled, the status field reports the IP address of the
|
||||
## node or nodes where an ingress controller pod is running.
|
||||
publishService:
|
||||
enabled: true
|
||||
## Allows overriding of the publish service to bind to
|
||||
## Must be <namespace>/<service_name>
|
||||
##
|
||||
pathOverride: ""
|
||||
|
||||
## Limit the scope of the controller
|
||||
##
|
||||
scope:
|
||||
enabled: false
|
||||
namespace: "" # defaults to .Release.Namespace
|
||||
|
||||
## Allows customization of the configmap / nginx-configmap namespace
|
||||
##
|
||||
configMapNamespace: "" # defaults to .Release.Namespace
|
||||
|
||||
## Allows customization of the tcp-services-configmap
|
||||
##
|
||||
tcp:
|
||||
configMapNamespace: "" # defaults to .Release.Namespace
|
||||
## Annotations to be added to the tcp config configmap
|
||||
annotations: {}
|
||||
|
||||
## Allows customization of the udp-services-configmap
|
||||
##
|
||||
udp:
|
||||
configMapNamespace: "" # defaults to .Release.Namespace
|
||||
## Annotations to be added to the udp config configmap
|
||||
annotations: {}
|
||||
|
||||
# Maxmind license key to download GeoLite2 Databases
|
||||
# https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases
|
||||
maxmindLicenseKey: ""
|
||||
|
||||
## Additional command line arguments to pass to nginx-ingress-controller
|
||||
## E.g. to specify the default SSL certificate you can use
|
||||
## extraArgs:
|
||||
## default-ssl-certificate: "<namespace>/<secret_name>"
|
||||
extraArgs: {}
|
||||
|
||||
## Additional environment variables to set
|
||||
extraEnvs: []
|
||||
# extraEnvs:
|
||||
# - name: FOO
|
||||
# valueFrom:
|
||||
# secretKeyRef:
|
||||
# key: FOO
|
||||
# name: secret-resource
|
||||
|
||||
## DaemonSet or Deployment
|
||||
##
|
||||
#kind: Deployment
|
||||
kind: DaemonSet
|
||||
|
||||
## Annotations to be added to the controller Deployment or DaemonSet
|
||||
##
|
||||
annotations: {}
|
||||
# keel.sh/pollSchedule: "@every 60m"
|
||||
|
||||
## Labels to be added to the controller Deployment or DaemonSet
|
||||
##
|
||||
labels: {}
|
||||
# keel.sh/policy: patch
|
||||
# keel.sh/trigger: poll
|
||||
|
||||
|
||||
# The update strategy to apply to the Deployment or DaemonSet
|
||||
##
|
||||
updateStrategy: {}
|
||||
# rollingUpdate:
|
||||
# maxUnavailable: 1
|
||||
# type: RollingUpdate
|
||||
|
||||
# minReadySeconds to avoid killing pods before we are ready
|
||||
##
|
||||
minReadySeconds: 0
|
||||
|
||||
|
||||
## Node tolerations for server scheduling to nodes with taints
|
||||
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||
##
|
||||
tolerations: []
|
||||
# - key: "key"
|
||||
# operator: "Equal|Exists"
|
||||
# value: "value"
|
||||
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
|
||||
|
||||
## Affinity and anti-affinity
|
||||
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
||||
##
|
||||
affinity: {}
|
||||
# # An example of preferred pod anti-affinity, weight is in the range 1-100
|
||||
# podAntiAffinity:
|
||||
# preferredDuringSchedulingIgnoredDuringExecution:
|
||||
# - weight: 100
|
||||
# podAffinityTerm:
|
||||
# labelSelector:
|
||||
# matchExpressions:
|
||||
# - key: app.kubernetes.io/name
|
||||
# operator: In
|
||||
# values:
|
||||
# - ingress-nginx
|
||||
# - key: app.kubernetes.io/instance
|
||||
# operator: In
|
||||
# values:
|
||||
# - ingress-nginx
|
||||
# - key: app.kubernetes.io/component
|
||||
# operator: In
|
||||
# values:
|
||||
# - controller
|
||||
# topologyKey: kubernetes.io/hostname
|
||||
|
||||
# # An example of required pod anti-affinity
|
||||
# podAntiAffinity:
|
||||
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||
# - labelSelector:
|
||||
# matchExpressions:
|
||||
# - key: app.kubernetes.io/name
|
||||
# operator: In
|
||||
# values:
|
||||
# - ingress-nginx
|
||||
# - key: app.kubernetes.io/instance
|
||||
# operator: In
|
||||
# values:
|
||||
# - ingress-nginx
|
||||
# - key: app.kubernetes.io/component
|
||||
# operator: In
|
||||
# values:
|
||||
# - controller
|
||||
# topologyKey: "kubernetes.io/hostname"
|
||||
|
||||
## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in.
|
||||
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
|
||||
##
|
||||
topologySpreadConstraints: []
|
||||
# - maxSkew: 1
|
||||
# topologyKey: failure-domain.beta.kubernetes.io/zone
|
||||
# whenUnsatisfiable: DoNotSchedule
|
||||
# labelSelector:
|
||||
# matchLabels:
|
||||
# app.kubernetes.io/instance: ingress-nginx-internal
|
||||
|
||||
## terminationGracePeriodSeconds
|
||||
## wait up to five minutes for the drain of connections
|
||||
##
|
||||
terminationGracePeriodSeconds: 300
|
||||
|
||||
## Node labels for controller pod assignment
|
||||
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
|
||||
##
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
|
||||
## Liveness and readiness probe values
|
||||
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
|
||||
##
|
||||
livenessProbe:
|
||||
failureThreshold: 5
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 1
|
||||
port: 10254
|
||||
readinessProbe:
|
||||
failureThreshold: 3
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 1
|
||||
port: 10254
|
||||
|
||||
# Path of the health check endpoint. All requests received on the port defined by
|
||||
# the healthz-port parameter are forwarded internally to this path.
|
||||
healthCheckPath: "/healthz"
|
||||
|
||||
## Annotations to be added to controller pods
|
||||
##
|
||||
podAnnotations: {}
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
minAvailable: 1
|
||||
|
||||
# Define requests resources to avoid probe issues due to CPU utilization in busy nodes
|
||||
# ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903
|
||||
# Ideally, there should be no limits.
|
||||
# https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/
|
||||
resources:
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 90Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 90Mi
|
||||
|
||||
# Mutually exclusive with keda autoscaling
|
||||
autoscaling:
|
||||
enabled: false
|
||||
minReplicas: 1
|
||||
maxReplicas: 11
|
||||
targetCPUUtilizationPercentage: 50
|
||||
targetMemoryUtilizationPercentage: 50
|
||||
|
||||
autoscalingTemplate: []
|
||||
# Custom or additional autoscaling metrics
|
||||
# ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics
|
||||
# - type: Pods
|
||||
# pods:
|
||||
# metric:
|
||||
# name: nginx_ingress_controller_nginx_process_requests_total
|
||||
# target:
|
||||
# type: AverageValue
|
||||
# averageValue: 10000m
|
||||
|
||||
# Mutually exclusive with hpa autoscaling
|
||||
keda:
|
||||
apiVersion: "keda.sh/v1alpha1"
|
||||
# apiVersion changes with keda 1.x vs 2.x
|
||||
# 2.x = keda.sh/v1alpha1
|
||||
# 1.x = keda.k8s.io/v1alpha1
|
||||
enabled: false
|
||||
minReplicas: 1
|
||||
maxReplicas: 11
|
||||
pollingInterval: 30
|
||||
cooldownPeriod: 300
|
||||
restoreToOriginalReplicaCount: false
|
||||
triggers: []
|
||||
# - type: prometheus
|
||||
# metadata:
|
||||
# serverAddress: http://<prometheus-host>:9090
|
||||
# metricName: http_requests_total
|
||||
# threshold: '100'
|
||||
# query: sum(rate(http_requests_total{deployment="my-deployment"}[2m]))
|
||||
|
||||
behavior: {}
|
||||
# scaleDown:
|
||||
# stabilizationWindowSeconds: 300
|
||||
# policies:
|
||||
# - type: Pods
|
||||
# value: 1
|
||||
# periodSeconds: 180
|
||||
# scaleUp:
|
||||
# stabilizationWindowSeconds: 300
|
||||
# policies:
|
||||
# - type: Pods
|
||||
# value: 2
|
||||
# periodSeconds: 60
|
||||
|
||||
## Enable mimalloc as a drop-in replacement for malloc.
|
||||
## ref: https://github.com/microsoft/mimalloc
|
||||
##
|
||||
enableMimalloc: true
|
||||
|
||||
## Override NGINX template
|
||||
customTemplate:
|
||||
configMapName: ""
|
||||
configMapKey: ""
|
||||
|
||||
service:
|
||||
enabled: true
|
||||
|
||||
annotations: {}
|
||||
labels: {}
|
||||
# clusterIP: ""
|
||||
|
||||
## List of IP addresses at which the controller services are available
|
||||
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
|
||||
##
|
||||
externalIPs: []
|
||||
|
||||
# loadBalancerIP: ""
|
||||
loadBalancerSourceRanges: []
|
||||
|
||||
enableHttp: true
|
||||
enableHttps: true
|
||||
|
||||
## Set external traffic policy to: "Local" to preserve source IP on
|
||||
## providers supporting it
|
||||
## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
|
||||
# externalTrafficPolicy: ""
|
||||
|
||||
# Must be either "None" or "ClientIP" if set. Kubernetes will default to "None".
|
||||
# Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
|
||||
# sessionAffinity: ""
|
||||
|
||||
# specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified,
|
||||
# the service controller allocates a port from your cluster’s NodePort range.
|
||||
# Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
|
||||
# healthCheckNodePort: 0
|
||||
|
||||
ports:
|
||||
http: 80
|
||||
https: 443
|
||||
|
||||
targetPorts:
|
||||
http: http
|
||||
https: https
|
||||
|
||||
#type: LoadBalancer
|
||||
|
||||
type: NodePort
|
||||
# nodePorts:
|
||||
# http: 32080
|
||||
# https: 32443
|
||||
# tcp:
|
||||
# 8080: 32808
|
||||
nodePorts:
|
||||
http: ""
|
||||
https: ""
|
||||
tcp: {}
|
||||
udp: {}
|
||||
|
||||
## Enables an additional internal load balancer (besides the external one).
|
||||
## Annotations are mandatory for the load balancer to come up. Varies with the cloud service.
|
||||
internal:
|
||||
enabled: false
|
||||
annotations: {}
|
||||
|
||||
## Restrict access for LoadBalancer service. Defaults to 0.0.0.0/0.
|
||||
loadBalancerSourceRanges: []
|
||||
|
||||
## Set external traffic policy to: "Local" to preserve source IP on
|
||||
## providers supporting it
|
||||
## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
|
||||
# externalTrafficPolicy: ""
|
||||
|
||||
extraContainers: []
|
||||
## Additional containers to be added to the controller pod.
|
||||
## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example.
|
||||
# - name: my-sidecar
|
||||
# image: nginx:latest
|
||||
# - name: lemonldap-ng-controller
|
||||
# image: lemonldapng/lemonldap-ng-controller:0.2.0
|
||||
# args:
|
||||
# - /lemonldap-ng-controller
|
||||
# - --alsologtostderr
|
||||
# - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration
|
||||
# env:
|
||||
# - name: POD_NAME
|
||||
# valueFrom:
|
||||
# fieldRef:
|
||||
# fieldPath: metadata.name
|
||||
# - name: POD_NAMESPACE
|
||||
# valueFrom:
|
||||
# fieldRef:
|
||||
# fieldPath: metadata.namespace
|
||||
# volumeMounts:
|
||||
# - name: copy-portal-skins
|
||||
# mountPath: /srv/var/lib/lemonldap-ng/portal/skins
|
||||
|
||||
extraVolumeMounts: []
|
||||
## Additional volumeMounts to the controller main container.
|
||||
# - name: copy-portal-skins
|
||||
# mountPath: /var/lib/lemonldap-ng/portal/skins
|
||||
|
||||
extraVolumes: []
|
||||
## Additional volumes to the controller pod.
|
||||
# - name: copy-portal-skins
|
||||
# emptyDir: {}
|
||||
|
||||
extraInitContainers: []
|
||||
## Containers, which are run before the app containers are started.
|
||||
# - name: init-myservice
|
||||
# image: busybox
|
||||
# command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;']
|
||||
|
||||
admissionWebhooks:
|
||||
annotations: {}
|
||||
enabled: true
|
||||
failurePolicy: Fail
|
||||
# timeoutSeconds: 10
|
||||
port: 8443
|
||||
certificate: "/usr/local/certificates/cert"
|
||||
key: "/usr/local/certificates/key"
|
||||
namespaceSelector: {}
|
||||
objectSelector: {}
|
||||
|
||||
service:
|
||||
annotations: {}
|
||||
# clusterIP: ""
|
||||
externalIPs: []
|
||||
# loadBalancerIP: ""
|
||||
loadBalancerSourceRanges: []
|
||||
servicePort: 443
|
||||
type: ClusterIP
|
||||
|
||||
patch:
|
||||
enabled: true
|
||||
image:
|
||||
repository: docker.io/jettech/kube-webhook-certgen
|
||||
tag: v1.5.0
|
||||
pullPolicy: IfNotPresent
|
||||
## Provide a priority class name to the webhook patching job
|
||||
##
|
||||
priorityClassName: ""
|
||||
podAnnotations: {}
|
||||
nodeSelector: {}
|
||||
tolerations: []
|
||||
runAsUser: 2000
|
||||
|
||||
metrics:
|
||||
port: 10254
|
||||
# if this port is changed, change healthz-port: in extraArgs: accordingly
|
||||
enabled: false
|
||||
|
||||
service:
|
||||
annotations: {}
|
||||
# prometheus.io/scrape: "true"
|
||||
# prometheus.io/port: "10254"
|
||||
|
||||
# clusterIP: ""
|
||||
|
||||
## List of IP addresses at which the stats-exporter service is available
|
||||
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
|
||||
##
|
||||
externalIPs: []
|
||||
|
||||
# loadBalancerIP: ""
|
||||
loadBalancerSourceRanges: []
|
||||
servicePort: 9913
|
||||
type: ClusterIP
|
||||
# externalTrafficPolicy: ""
|
||||
# nodePort: ""
|
||||
|
||||
serviceMonitor:
|
||||
enabled: false
|
||||
additionalLabels: {}
|
||||
namespace: ""
|
||||
namespaceSelector: {}
|
||||
# Default: scrape .Release.Namespace only
|
||||
# To scrape all, use the following:
|
||||
# namespaceSelector:
|
||||
# any: true
|
||||
scrapeInterval: 30s
|
||||
# honorLabels: true
|
||||
targetLabels: []
|
||||
metricRelabelings: []
|
||||
|
||||
prometheusRule:
|
||||
enabled: false
|
||||
additionalLabels: {}
|
||||
# namespace: ""
|
||||
rules: []
|
||||
# # These are just example rules, please adapt them to your needs
|
||||
# - alert: NGINXConfigFailed
|
||||
# expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0
|
||||
# for: 1s
|
||||
# labels:
|
||||
# severity: critical
|
||||
# annotations:
|
||||
# description: bad ingress config - nginx config test failed
|
||||
# summary: uninstall the latest ingress changes to allow config reloads to resume
|
||||
# - alert: NGINXCertificateExpiry
|
||||
# expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800
|
||||
# for: 1s
|
||||
# labels:
|
||||
# severity: critical
|
||||
# annotations:
|
||||
# description: ssl certificate(s) will expire in less than a week
|
||||
# summary: renew expiring certificates to avoid downtime
|
||||
# - alert: NGINXTooMany500s
|
||||
# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
|
||||
# for: 1m
|
||||
# labels:
|
||||
# severity: warning
|
||||
# annotations:
|
||||
# description: Too many 5XXs
|
||||
# summary: More than 5% of all requests returned 5XX, this requires your attention
|
||||
# - alert: NGINXTooMany400s
|
||||
# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
|
||||
# for: 1m
|
||||
# labels:
|
||||
# severity: warning
|
||||
# annotations:
|
||||
# description: Too many 4XXs
|
||||
# summary: More than 5% of all requests returned 4XX, this requires your attention
|
||||
|
||||
## Improve connection draining when ingress controller pod is deleted using a lifecycle hook:
|
||||
## With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds
|
||||
## to 300, allowing the draining of connections up to five minutes.
|
||||
## If the active connections end before that, the pod will terminate gracefully at that time.
|
||||
## To effectively take advantage of this feature, the Configmap feature
|
||||
## worker-shutdown-timeout new value is 240s instead of 10s.
|
||||
##
|
||||
lifecycle:
|
||||
preStop:
|
||||
exec:
|
||||
command:
|
||||
- /wait-shutdown
|
||||
|
||||
priorityClassName: ""
|
||||
|
||||
## Rollback limit
|
||||
##
|
||||
revisionHistoryLimit: 10
|
||||
|
||||
## Default 404 backend
|
||||
##
|
||||
defaultBackend:
|
||||
##
|
||||
enabled: false
|
||||
|
||||
name: defaultbackend
|
||||
image:
|
||||
repository: k8s.gcr.io/defaultbackend-amd64
|
||||
tag: "1.5"
|
||||
pullPolicy: IfNotPresent
|
||||
# nobody user -> uid 65534
|
||||
runAsUser: 65534
|
||||
runAsNonRoot: true
|
||||
readOnlyRootFilesystem: true
|
||||
allowPrivilegeEscalation: false
|
||||
|
||||
extraArgs: {}
|
||||
|
||||
serviceAccount:
|
||||
create: true
|
||||
name:
|
||||
## Additional environment variables to set for defaultBackend pods
|
||||
extraEnvs: []
|
||||
|
||||
port: 8080
|
||||
|
||||
## Readiness and liveness probes for default backend
|
||||
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
|
||||
##
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
failureThreshold: 6
|
||||
initialDelaySeconds: 0
|
||||
periodSeconds: 5
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 5
|
||||
|
||||
## Node tolerations for server scheduling to nodes with taints
|
||||
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||
##
|
||||
tolerations: []
|
||||
# - key: "key"
|
||||
# operator: "Equal|Exists"
|
||||
# value: "value"
|
||||
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
|
||||
|
||||
affinity: {}
|
||||
|
||||
## Security Context policies for controller pods
|
||||
## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for
|
||||
## notes on enabling and using sysctls
|
||||
##
|
||||
podSecurityContext: {}
|
||||
|
||||
# labels to add to the pod container metadata
|
||||
podLabels: {}
|
||||
# key: value
|
||||
|
||||
## Node labels for default backend pod assignment
|
||||
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
|
||||
##
|
||||
nodeSelector: {}
|
||||
|
||||
## Annotations to be added to default backend pods
|
||||
##
|
||||
podAnnotations: {}
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
minAvailable: 1
|
||||
|
||||
resources: {}
|
||||
# limits:
|
||||
# cpu: 10m
|
||||
# memory: 20Mi
|
||||
# requests:
|
||||
# cpu: 10m
|
||||
# memory: 20Mi
|
||||
|
||||
autoscaling:
|
||||
enabled: false
|
||||
minReplicas: 1
|
||||
maxReplicas: 2
|
||||
targetCPUUtilizationPercentage: 50
|
||||
targetMemoryUtilizationPercentage: 50
|
||||
|
||||
service:
|
||||
annotations: {}
|
||||
|
||||
# clusterIP: ""
|
||||
|
||||
## List of IP addresses at which the default backend service is available
|
||||
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
|
||||
##
|
||||
externalIPs: []
|
||||
|
||||
# loadBalancerIP: ""
|
||||
loadBalancerSourceRanges: []
|
||||
servicePort: 80
|
||||
type: ClusterIP
|
||||
|
||||
priorityClassName: ""
|
||||
|
||||
## Enable RBAC as per https://github.com/kubernetes/ingress/tree/master/examples/rbac/nginx and https://github.com/kubernetes/ingress/issues/266
|
||||
rbac:
|
||||
create: true
|
||||
scope: false
|
||||
|
||||
# If true, create & use Pod Security Policy resources
|
||||
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
|
||||
podSecurityPolicy:
|
||||
enabled: false
|
||||
|
||||
serviceAccount:
|
||||
create: true
|
||||
name:
|
||||
|
||||
## Optional array of imagePullSecrets containing private registry credentials
|
||||
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
||||
imagePullSecrets: []
|
||||
# - name: secretName
|
||||
|
||||
# TCP service key:value pairs
|
||||
# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp
|
||||
##
|
||||
tcp: {}
|
||||
# 8080: "default/example-tcp-svc:9000"
|
||||
|
||||
# UDP service key:value pairs
|
||||
# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp
|
||||
##
|
||||
udp: {}
|
||||
# 53: "kube-system/kube-dns:53"
|
820
yaml/ingress-nginx/values.yaml
Normal file
|
@ -0,0 +1,820 @@
|
|||
## nginx configuration
|
||||
## Ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/index.md
|
||||
##
|
||||
|
||||
## Overrides for generated resource names
|
||||
# See templates/_helpers.tpl
|
||||
# nameOverride:
|
||||
# fullnameOverride:
|
||||
|
||||
controller:
|
||||
name: controller
|
||||
image:
|
||||
registry: k8s.gcr.io
|
||||
image: ingress-nginx/controller
|
||||
# for backwards compatibility consider setting the full image url via the repository value below
|
||||
# use *either* the default registry/image format above *or* the repository format below; providing both in values.yaml will make the chart install fail
|
||||
# repository:
|
||||
tag: "v0.47.0"
|
||||
digest: sha256:a1e4efc107be0bb78f32eaec37bef17d7a0c81bec8066cdf2572508d21351d0b
|
||||
pullPolicy: IfNotPresent
|
||||
# www-data -> uid 101
|
||||
runAsUser: 101
|
||||
allowPrivilegeEscalation: true
|
||||
|
||||
# Use an existing PSP instead of creating one
|
||||
existingPsp: ""
|
||||
|
||||
# Configures the controller container name
|
||||
containerName: controller
|
||||
|
||||
# Configures the ports the nginx-controller listens on
|
||||
containerPort:
|
||||
http: 80
|
||||
https: 443
|
||||
|
||||
# Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/
|
||||
#config: {}
|
||||
config:
|
||||
use-proxy-protocol: "false"
|
||||
client-header-buffer-size: "8k"
|
||||
large-client-header-buffers: "4 16k"
|
||||
use-forwarded-headers: "true"
|
||||
use-geoip: "false"
|
||||
use-geoip2: "true"
|
||||
|
||||
## Annotations to be added to the controller configuration configmap
|
||||
##
|
||||
configAnnotations: {}
|
||||
|
||||
# Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers
|
||||
#proxySetHeaders: {}
|
||||
proxySetHeaders:
|
||||
X-Country-Code: $geoip2_city_country_code
|
||||
|
||||
# Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers
|
||||
addHeaders: {}
|
||||
|
||||
# Optionally customize the pod dnsConfig.
|
||||
dnsConfig: {}
|
||||
|
||||
# Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'.
|
||||
# By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller
|
||||
# to keep resolving names inside the k8s network, use ClusterFirstWithHostNet.
|
||||
dnsPolicy: ClusterFirst
|
||||
#dnsPolicy: ClusterFirstWithHostNet
|
||||
|
||||
# Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network
|
||||
# When using the host network there is no Service exposing the NGINX Ingress controller, so the ingress status would otherwise be blank; the default --publish-service flag used in standard cloud setups does not apply
|
||||
reportNodeInternalIp: false
|
||||
|
||||
# Required for use with CNI based kubernetes installations (such as ones set up by kubeadm),
|
||||
# since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920
|
||||
# is merged
|
||||
hostNetwork: false
|
||||
#hostNetwork: true
|
||||
|
||||
## Use host ports 80 and 443
|
||||
## Disabled by default
|
||||
##
|
||||
hostPort:
|
||||
enabled: false
|
||||
ports:
|
||||
http: 80
|
||||
https: 443
|
||||
|
||||
## Election ID to use for status update
|
||||
##
|
||||
electionID: ingress-controller-leader
|
||||
|
||||
## Name of the ingress class to route through this controller
|
||||
##
|
||||
ingressClass: nginx
|
||||
|
||||
# This section refers to the creation of the IngressClass resource
|
||||
# IngressClass resources are supported since k8s >= 1.18
|
||||
ingressClassResource:
|
||||
enabled: false
|
||||
default: false
|
||||
|
||||
# Parameters is a link to a custom resource containing additional
|
||||
# configuration for the controller. This is optional if the controller
|
||||
# does not require extra parameters.
|
||||
parameters: {}
|
||||
|
||||
# labels to add to the pod container metadata
|
||||
podLabels: {}
|
||||
# key: value
|
||||
|
||||
## Security Context policies for controller pods
|
||||
##
|
||||
podSecurityContext: {}
|
||||
|
||||
## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for
|
||||
## notes on enabling and using sysctls
|
||||
###
|
||||
sysctls: {}
|
||||
# sysctls:
|
||||
# "net.core.somaxconn": "8192"
|
||||
|
||||
## Allows customization of the source of the IP address or FQDN to report
|
||||
## in the ingress status field. By default, it reads the information provided
|
||||
## by the service. If disabled, the status field reports the IP address of the
|
||||
## node or nodes where an ingress controller pod is running.
|
||||
publishService:
|
||||
enabled: true
|
||||
## Allows overriding of the publish service to bind to
|
||||
## Must be <namespace>/<service_name>
|
||||
##
|
||||
pathOverride: ""
|
||||
|
||||
## Limit the scope of the controller
|
||||
##
|
||||
scope:
|
||||
enabled: false
|
||||
namespace: "" # defaults to .Release.Namespace
|
||||
|
||||
## Allows customization of the configmap / nginx-configmap namespace
|
||||
##
|
||||
configMapNamespace: "" # defaults to .Release.Namespace
|
||||
|
||||
## Allows customization of the tcp-services-configmap
|
||||
##
|
||||
tcp:
|
||||
configMapNamespace: "" # defaults to .Release.Namespace
|
||||
## Annotations to be added to the tcp config configmap
|
||||
annotations: {}
|
||||
|
||||
## Allows customization of the udp-services-configmap
|
||||
##
|
||||
udp:
|
||||
configMapNamespace: "" # defaults to .Release.Namespace
|
||||
## Annotations to be added to the udp config configmap
|
||||
annotations: {}
|
||||
|
||||
# Maxmind license key to download GeoLite2 Databases
|
||||
# https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases
|
||||
maxmindLicenseKey: ""
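# The geoip2 options set above (use-geoip2: "true" and the $geoip2_city_country_code
# header) only take effect once the controller can download the GeoLite2 databases,
# which requires a key here. A minimal sketch (the key value is a placeholder):
# maxmindLicenseKey: "0123456789abcdef"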
|
||||
|
||||
## Additional command line arguments to pass to nginx-ingress-controller
|
||||
## E.g. to specify the default SSL certificate you can use
|
||||
## extraArgs:
|
||||
## default-ssl-certificate: "<namespace>/<secret_name>"
|
||||
extraArgs: {}
|
||||
|
||||
## Additional environment variables to set
|
||||
extraEnvs: []
|
||||
# extraEnvs:
|
||||
# - name: FOO
|
||||
# valueFrom:
|
||||
# secretKeyRef:
|
||||
# key: FOO
|
||||
# name: secret-resource
|
||||
|
||||
## DaemonSet or Deployment
|
||||
##
|
||||
#kind: Deployment
|
||||
kind: DaemonSet
|
||||
|
||||
## Annotations to be added to the controller Deployment or DaemonSet
|
||||
##
|
||||
annotations: {}
|
||||
# keel.sh/pollSchedule: "@every 60m"
|
||||
|
||||
## Labels to be added to the controller Deployment or DaemonSet
|
||||
##
|
||||
labels: {}
|
||||
# keel.sh/policy: patch
|
||||
# keel.sh/trigger: poll
|
||||
|
||||
|
||||
# The update strategy to apply to the Deployment or DaemonSet
|
||||
##
|
||||
updateStrategy: {}
|
||||
# rollingUpdate:
|
||||
# maxUnavailable: 1
|
||||
# type: RollingUpdate
|
||||
|
||||
# minReadySeconds to avoid killing pods before we are ready
|
||||
##
|
||||
minReadySeconds: 0
|
||||
|
||||
|
||||
## Node tolerations for server scheduling to nodes with taints
|
||||
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||
##
|
||||
tolerations: []
|
||||
# - key: "key"
|
||||
# operator: "Equal|Exists"
|
||||
# value: "value"
|
||||
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
|
||||
|
||||
## Affinity and anti-affinity
|
||||
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
||||
##
|
||||
affinity: {}
|
||||
# # An example of preferred pod anti-affinity, weight is in the range 1-100
|
||||
# podAntiAffinity:
|
||||
# preferredDuringSchedulingIgnoredDuringExecution:
|
||||
# - weight: 100
|
||||
# podAffinityTerm:
|
||||
# labelSelector:
|
||||
# matchExpressions:
|
||||
# - key: app.kubernetes.io/name
|
||||
# operator: In
|
||||
# values:
|
||||
# - ingress-nginx
|
||||
# - key: app.kubernetes.io/instance
|
||||
# operator: In
|
||||
# values:
|
||||
# - ingress-nginx
|
||||
# - key: app.kubernetes.io/component
|
||||
# operator: In
|
||||
# values:
|
||||
# - controller
|
||||
# topologyKey: kubernetes.io/hostname
|
||||
|
||||
# # An example of required pod anti-affinity
|
||||
# podAntiAffinity:
|
||||
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||
# - labelSelector:
|
||||
# matchExpressions:
|
||||
# - key: app.kubernetes.io/name
|
||||
# operator: In
|
||||
# values:
|
||||
# - ingress-nginx
|
||||
# - key: app.kubernetes.io/instance
|
||||
# operator: In
|
||||
# values:
|
||||
# - ingress-nginx
|
||||
# - key: app.kubernetes.io/component
|
||||
# operator: In
|
||||
# values:
|
||||
# - controller
|
||||
# topologyKey: "kubernetes.io/hostname"
|
||||
|
||||
## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in.
|
||||
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
|
||||
##
|
||||
topologySpreadConstraints: []
|
||||
# - maxSkew: 1
|
||||
# topologyKey: failure-domain.beta.kubernetes.io/zone
|
||||
# whenUnsatisfiable: DoNotSchedule
|
||||
# labelSelector:
|
||||
# matchLabels:
|
||||
# app.kubernetes.io/instance: ingress-nginx-internal
|
||||
|
||||
## terminationGracePeriodSeconds
|
||||
## wait up to five minutes for the drain of connections
|
||||
##
|
||||
terminationGracePeriodSeconds: 300
|
||||
|
||||
## Node labels for controller pod assignment
|
||||
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
|
||||
##
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
|
||||
## Liveness and readiness probe values
|
||||
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
|
||||
##
|
||||
# startupProbe:
|
||||
# httpGet:
|
||||
# # should match container.healthCheckPath
|
||||
# path: "/healthz"
|
||||
# port: 10254
|
||||
# scheme: HTTP
|
||||
# initialDelaySeconds: 5
|
||||
# periodSeconds: 5
|
||||
# timeoutSeconds: 2
|
||||
# successThreshold: 1
|
||||
# failureThreshold: 5
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
# should match container.healthCheckPath
|
||||
path: "/healthz"
|
||||
port: 10254
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 1
|
||||
successThreshold: 1
|
||||
failureThreshold: 5
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
# should match container.healthCheckPath
|
||||
path: "/healthz"
|
||||
port: 10254
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 1
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
|
||||
|
||||
# Path of the health check endpoint. All requests received on the port defined by
|
||||
# the healthz-port parameter are forwarded internally to this path.
|
||||
healthCheckPath: "/healthz"
|
||||
|
||||
## Annotations to be added to controller pods
|
||||
##
|
||||
podAnnotations: {}
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
minAvailable: 1
|
||||
|
||||
# Define requests resources to avoid probe issues due to CPU utilization in busy nodes
|
||||
# ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903
|
||||
# Ideally, there should be no limits.
|
||||
# https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/
|
||||
resources:
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 90Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 90Mi
|
||||
|
||||
# Mutually exclusive with keda autoscaling
|
||||
autoscaling:
|
||||
enabled: false
|
||||
minReplicas: 1
|
||||
maxReplicas: 11
|
||||
targetCPUUtilizationPercentage: 50
|
||||
targetMemoryUtilizationPercentage: 50
|
||||
|
||||
autoscalingTemplate: []
|
||||
# Custom or additional autoscaling metrics
|
||||
# ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics
|
||||
# - type: Pods
|
||||
# pods:
|
||||
# metric:
|
||||
# name: nginx_ingress_controller_nginx_process_requests_total
|
||||
# target:
|
||||
# type: AverageValue
|
||||
# averageValue: 10000m
|
||||
|
||||
# Mutually exclusive with hpa autoscaling
|
||||
keda:
|
||||
apiVersion: "keda.sh/v1alpha1"
|
||||
# apiVersion changes with keda 1.x vs 2.x
|
||||
# 2.x = keda.sh/v1alpha1
|
||||
# 1.x = keda.k8s.io/v1alpha1
|
||||
enabled: false
|
||||
minReplicas: 1
|
||||
maxReplicas: 11
|
||||
pollingInterval: 30
|
||||
cooldownPeriod: 300
|
||||
restoreToOriginalReplicaCount: false
|
||||
scaledObject:
|
||||
annotations: {}
|
||||
# Custom annotations for ScaledObject resource
|
||||
# annotations:
|
||||
# key: value
|
||||
triggers: []
|
||||
# - type: prometheus
|
||||
# metadata:
|
||||
# serverAddress: http://<prometheus-host>:9090
|
||||
# metricName: http_requests_total
|
||||
# threshold: '100'
|
||||
# query: sum(rate(http_requests_total{deployment="my-deployment"}[2m]))
|
||||
|
||||
behavior: {}
|
||||
# scaleDown:
|
||||
# stabilizationWindowSeconds: 300
|
||||
# policies:
|
||||
# - type: Pods
|
||||
# value: 1
|
||||
# periodSeconds: 180
|
||||
# scaleUp:
|
||||
# stabilizationWindowSeconds: 300
|
||||
# policies:
|
||||
# - type: Pods
|
||||
# value: 2
|
||||
# periodSeconds: 60
|
||||
|
||||
## Enable mimalloc as a drop-in replacement for malloc.
|
||||
## ref: https://github.com/microsoft/mimalloc
|
||||
##
|
||||
enableMimalloc: true
|
||||
|
||||
## Override NGINX template
|
||||
customTemplate:
|
||||
configMapName: ""
|
||||
configMapKey: ""
|
||||
|
||||
service:
|
||||
enabled: true
|
||||
|
||||
annotations: {}
|
||||
labels: {}
|
||||
# clusterIP: ""
|
||||
|
||||
## List of IP addresses at which the controller services are available
|
||||
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
|
||||
##
|
||||
externalIPs: []
|
||||
|
||||
# loadBalancerIP: ""
|
||||
loadBalancerSourceRanges: []
|
||||
|
||||
enableHttp: true
|
||||
enableHttps: true
|
||||
|
||||
## Set external traffic policy to: "Local" to preserve source IP on
|
||||
## providers supporting it
|
||||
## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
|
||||
# externalTrafficPolicy: ""
|
||||
|
||||
# Must be either "None" or "ClientIP" if set. Kubernetes will default to "None".
|
||||
# Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
|
||||
# sessionAffinity: ""
|
||||
|
||||
# specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified,
|
||||
# the service controller allocates a port from your cluster’s NodePort range.
|
||||
# Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
|
||||
# healthCheckNodePort: 0
|
||||
|
||||
ports:
|
||||
http: 80
|
||||
https: 443
|
||||
|
||||
targetPorts:
|
||||
http: http
|
||||
https: https
|
||||
|
||||
#type: LoadBalancer
|
||||
|
||||
type: NodePort
|
||||
# nodePorts:
|
||||
# http: 32080
|
||||
# https: 32443
|
||||
# tcp:
|
||||
# 8080: 32808
|
||||
nodePorts:
|
||||
http: ""
|
||||
https: ""
|
||||
tcp: {}
|
||||
udp: {}
|
||||
|
||||
## Enables an additional internal load balancer (besides the external one).
|
||||
## Annotations are mandatory for the load balancer to come up. Varies with the cloud service.
|
||||
internal:
|
||||
enabled: false
|
||||
annotations: {}
|
||||
|
||||
# loadBalancerIP: ""
|
||||
|
||||
## Restrict access for LoadBalancer service. Defaults to 0.0.0.0/0.
|
||||
loadBalancerSourceRanges: []
|
||||
|
||||
## Set external traffic policy to: "Local" to preserve source IP on
|
||||
## providers supporting it
|
||||
## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
|
||||
# externalTrafficPolicy: ""
|
||||
|
||||
extraContainers: []
|
||||
## Additional containers to be added to the controller pod.
|
||||
## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example.
|
||||
# - name: my-sidecar
|
||||
# image: nginx:latest
|
||||
# - name: lemonldap-ng-controller
|
||||
# image: lemonldapng/lemonldap-ng-controller:0.2.0
|
||||
# args:
|
||||
# - /lemonldap-ng-controller
|
||||
# - --alsologtostderr
|
||||
# - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration
|
||||
# env:
|
||||
# - name: POD_NAME
|
||||
# valueFrom:
|
||||
# fieldRef:
|
||||
# fieldPath: metadata.name
|
||||
# - name: POD_NAMESPACE
|
||||
# valueFrom:
|
||||
# fieldRef:
|
||||
# fieldPath: metadata.namespace
|
||||
# volumeMounts:
|
||||
# - name: copy-portal-skins
|
||||
# mountPath: /srv/var/lib/lemonldap-ng/portal/skins
|
||||
|
||||
extraVolumeMounts: []
|
||||
## Additional volumeMounts to the controller main container.
|
||||
# - name: copy-portal-skins
|
||||
# mountPath: /var/lib/lemonldap-ng/portal/skins
|
||||
|
||||
extraVolumes: []
|
||||
## Additional volumes to the controller pod.
|
||||
# - name: copy-portal-skins
|
||||
# emptyDir: {}
|
||||
|
||||
extraInitContainers: []
|
||||
## Containers, which are run before the app containers are started.
|
||||
# - name: init-myservice
|
||||
# image: busybox
|
||||
# command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;']
|
||||
|
||||
admissionWebhooks:
|
||||
annotations: {}
|
||||
enabled: true
|
||||
failurePolicy: Fail
|
||||
timeoutSeconds: 30
|
||||
port: 8443
|
||||
certificate: "/usr/local/certificates/cert"
|
||||
key: "/usr/local/certificates/key"
|
||||
namespaceSelector: {}
|
||||
objectSelector: {}
|
||||
|
||||
# Use an existing PSP instead of creating one
|
||||
existingPsp: ""
|
||||
|
||||
service:
|
||||
annotations: {}
|
||||
# clusterIP: ""
|
||||
externalIPs: []
|
||||
# loadBalancerIP: ""
|
||||
loadBalancerSourceRanges: []
|
||||
servicePort: 443
|
||||
type: ClusterIP
|
||||
|
||||
patch:
|
||||
enabled: true
|
||||
image:
|
||||
registry: docker.io
|
||||
image: jettech/kube-webhook-certgen
|
||||
# for backwards compatibility consider setting the full image url via the repository value below
|
||||
# use *either* the default registry/image format above *or* the repository format below; providing both in values.yaml will make the chart install fail
|
||||
# repository:
|
||||
tag: v1.5.1
|
||||
pullPolicy: IfNotPresent
|
||||
## Provide a priority class name to the webhook patching job
|
||||
##
|
||||
priorityClassName: ""
|
||||
podAnnotations: {}
|
||||
nodeSelector: {}
|
||||
tolerations: []
|
||||
runAsUser: 2000
|
||||
|
||||
metrics:
|
||||
port: 10254
|
||||
# if this port is changed, change healthz-port: in extraArgs: accordingly
|
||||
enabled: false
|
||||
|
||||
service:
|
||||
annotations: {}
|
||||
# prometheus.io/scrape: "true"
|
||||
# prometheus.io/port: "10254"
|
||||
|
||||
# clusterIP: ""
|
||||
|
||||
## List of IP addresses at which the stats-exporter service is available
|
||||
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
|
||||
##
|
||||
externalIPs: []
|
||||
|
||||
# loadBalancerIP: ""
|
||||
loadBalancerSourceRanges: []
|
||||
servicePort: 10254
|
||||
type: ClusterIP
|
||||
# externalTrafficPolicy: ""
|
||||
# nodePort: ""
|
||||
|
||||
serviceMonitor:
|
||||
enabled: false
|
||||
additionalLabels: {}
|
||||
# The label to use to retrieve the job name from.
|
||||
# jobLabel: "app.kubernetes.io/name"
|
||||
namespace: ""
|
||||
namespaceSelector: {}
|
||||
# Default: scrape .Release.Namespace only
|
||||
# To scrape all, use the following:
|
||||
# namespaceSelector:
|
||||
# any: true
|
||||
scrapeInterval: 30s
|
||||
# honorLabels: true
|
||||
targetLabels: []
|
||||
metricRelabelings: []
|
||||
|
||||
prometheusRule:
|
||||
enabled: false
|
||||
additionalLabels: {}
|
||||
# namespace: ""
|
||||
rules: []
|
||||
# # These are just example rules, please adapt them to your needs
|
||||
# - alert: NGINXConfigFailed
|
||||
# expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0
|
||||
# for: 1s
|
||||
# labels:
|
||||
# severity: critical
|
||||
# annotations:
|
||||
# description: bad ingress config - nginx config test failed
|
||||
# summary: uninstall the latest ingress changes to allow config reloads to resume
|
||||
# - alert: NGINXCertificateExpiry
|
||||
# expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800
|
||||
# for: 1s
|
||||
# labels:
|
||||
# severity: critical
|
||||
# annotations:
|
||||
# description: ssl certificate(s) will expire in less than a week
|
||||
# summary: renew expiring certificates to avoid downtime
|
||||
# - alert: NGINXTooMany500s
|
||||
# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
|
||||
# for: 1m
|
||||
# labels:
|
||||
# severity: warning
|
||||
# annotations:
|
||||
# description: Too many 5XXs
|
||||
# summary: More than 5% of all requests returned 5XX, this requires your attention
|
||||
# - alert: NGINXTooMany400s
|
||||
# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
|
||||
# for: 1m
|
||||
# labels:
|
||||
# severity: warning
|
||||
# annotations:
|
||||
# description: Too many 4XXs
|
||||
# summary: More than 5% of all requests returned 4XX, this requires your attention
|
||||
|
||||
## Improve connection draining when ingress controller pod is deleted using a lifecycle hook:
|
||||
## With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds
|
||||
## to 300, allowing the draining of connections up to five minutes.
|
||||
## If the active connections end before that, the pod will terminate gracefully at that time.
|
||||
## To effectively take advantage of this feature, the Configmap feature
|
||||
## worker-shutdown-timeout new value is 240s instead of 10s.
|
||||
##
|
||||
lifecycle:
|
||||
preStop:
|
||||
exec:
|
||||
command:
|
||||
- /wait-shutdown
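# A minimal sketch of the matching ConfigMap option referenced in the comment above,
# set under controller.config (the 240s value mirrors the comment; adjust as needed):
# config:
#   worker-shutdown-timeout: "240s"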
|
||||
|
||||
priorityClassName: ""
|
||||
|
||||
## Rollback limit
|
||||
##
|
||||
revisionHistoryLimit: 10
|
||||
|
||||
## Default 404 backend
|
||||
##
|
||||
defaultBackend:
|
||||
##
|
||||
enabled: false
|
||||
|
||||
name: defaultbackend
|
||||
image:
|
||||
registry: k8s.gcr.io
|
||||
image: defaultbackend-amd64
|
||||
# for backwards compatibility consider setting the full image url via the repository value below
|
||||
# use *either* the default registry/image format above *or* the repository format below; providing both in values.yaml will make the chart install fail
|
||||
# repository:
|
||||
tag: "1.5"
|
||||
pullPolicy: IfNotPresent
|
||||
# nobody user -> uid 65534
|
||||
runAsUser: 65534
|
||||
runAsNonRoot: true
|
||||
readOnlyRootFilesystem: true
|
||||
allowPrivilegeEscalation: false
|
||||
|
||||
# Use an existing PSP instead of creating one
|
||||
existingPsp: ""
|
||||
|
||||
extraArgs: {}
|
||||
|
||||
serviceAccount:
|
||||
create: true
|
||||
name: ""
|
||||
automountServiceAccountToken: true
|
||||
## Additional environment variables to set for defaultBackend pods
|
||||
extraEnvs: []
|
||||
|
||||
port: 8080
|
||||
|
||||
## Readiness and liveness probes for default backend
|
||||
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
|
||||
##
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
failureThreshold: 6
|
||||
initialDelaySeconds: 0
|
||||
periodSeconds: 5
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 5
|
||||
|
||||
## Node tolerations for server scheduling to nodes with taints
|
||||
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||
##
|
||||
tolerations: []
|
||||
# - key: "key"
|
||||
# operator: "Equal|Exists"
|
||||
# value: "value"
|
||||
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
|
||||
|
||||
affinity: {}
|
||||
|
||||
## Security Context policies for controller pods
|
||||
## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for
|
||||
## notes on enabling and using sysctls
|
||||
##
|
||||
podSecurityContext: {}
|
||||
|
||||
# labels to add to the pod container metadata
|
||||
podLabels: {}
|
||||
# key: value
|
||||
|
||||
## Node labels for default backend pod assignment
|
||||
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
|
||||
##
|
||||
nodeSelector: {}
|
||||
|
||||
## Annotations to be added to default backend pods
|
||||
##
|
||||
podAnnotations: {}
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
minAvailable: 1
|
||||
|
||||
resources: {}
|
||||
# limits:
|
||||
# cpu: 10m
|
||||
# memory: 20Mi
|
||||
# requests:
|
||||
# cpu: 10m
|
||||
# memory: 20Mi
|
||||
|
||||
extraVolumeMounts: []
|
||||
## Additional volumeMounts to the default backend container.
|
||||
# - name: copy-portal-skins
|
||||
# mountPath: /var/lib/lemonldap-ng/portal/skins
|
||||
|
||||
extraVolumes: []
|
||||
## Additional volumes to the default backend pod.
|
||||
# - name: copy-portal-skins
|
||||
# emptyDir: {}
|
||||
|
||||
autoscaling:
|
||||
annotations: {}
|
||||
enabled: false
|
||||
minReplicas: 1
|
||||
maxReplicas: 2
|
||||
targetCPUUtilizationPercentage: 50
|
||||
targetMemoryUtilizationPercentage: 50
|
||||
|
||||
service:
|
||||
annotations: {}
|
||||
|
||||
# clusterIP: ""
|
||||
|
||||
## List of IP addresses at which the default backend service is available
|
||||
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
|
||||
##
|
||||
externalIPs: []
|
||||
|
||||
# loadBalancerIP: ""
|
||||
loadBalancerSourceRanges: []
|
||||
servicePort: 80
|
||||
type: ClusterIP
|
||||
|
||||
priorityClassName: ""
|
||||
|
||||
## Enable RBAC as per https://github.com/kubernetes/ingress/tree/master/examples/rbac/nginx and https://github.com/kubernetes/ingress/issues/266
|
||||
rbac:
|
||||
create: true
|
||||
scope: false
|
||||
|
||||
# If true, create & use Pod Security Policy resources
|
||||
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
|
||||
podSecurityPolicy:
|
||||
enabled: false
|
||||
|
||||
serviceAccount:
|
||||
create: true
|
||||
name: ""
|
||||
automountServiceAccountToken: true
|
||||
|
||||
## Optional array of imagePullSecrets containing private registry credentials
|
||||
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
||||
imagePullSecrets: []
|
||||
# - name: secretName
|
||||
|
||||
# TCP service key:value pairs
|
||||
# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp
|
||||
##
|
||||
tcp: {}
|
||||
# 8080: "default/example-tcp-svc:9000"
|
||||
|
||||
# UDP service key:value pairs
|
||||
# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp
|
||||
##
|
||||
udp: {}
|
||||
# 53: "kube-system/kube-dns:53"
|
||||
|
||||
# A base64ed Diffie-Hellman parameter
|
||||
# This can be generated with: openssl dhparam 4096 2> /dev/null | base64
|
||||
# Ref: https://github.com/krmichel/ingress-nginx/blob/master/docs/examples/customization/ssl-dh-param
|
||||
dhParam:
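# A minimal sketch of how the generated value is supplied (the base64 string below is
# a placeholder, not a real parameter; use the output of the openssl command above):
# dhParam: "LS0tLS1CRUdJTiBESCBQQVJBTUVURVJTLS0tLS0uLi4="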
|
16
yaml/monitoring/grafana-cert.yaml
Normal file
|
@ -0,0 +1,16 @@
|
|||
---
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: grafana-cert
|
||||
namespace: monitoring
|
||||
spec:
|
||||
secretName: grafana-tls
|
||||
issuerRef:
|
||||
name: letsencrypt-production-dns
|
||||
kind: ClusterIssuer
|
||||
commonName: 'grafana.${CLUSTER_DOMAIN}'
|
||||
dnsNames:
|
||||
- 'grafana.${CLUSTER_DOMAIN}'
|
||||
privateKey:
|
||||
algorithm: ECDSA
|
23
yaml/monitoring/grafana-ingress-secure.yaml
Normal file
|
@ -0,0 +1,23 @@
|
|||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: grafana
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: nginx
|
||||
spec:
|
||||
rules:
|
||||
- host: grafana.${CLUSTER_DOMAIN}
|
||||
http:
|
||||
paths:
|
||||
- pathType: Prefix
|
||||
path: /
|
||||
backend:
|
||||
service:
|
||||
name: grafana
|
||||
port:
|
||||
number: 80
|
||||
tls:
|
||||
- secretName: grafana-tls
|
||||
hosts:
|
||||
- grafana.${CLUSTER_DOMAIN}
|
19
yaml/monitoring/grafana-ingress.yaml
Normal file
|
@ -0,0 +1,19 @@
|
|||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: grafana
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: nginx
|
||||
spec:
|
||||
rules:
|
||||
- host: grafana.${CLUSTER_DOMAIN}
|
||||
http:
|
||||
paths:
|
||||
- pathType: Prefix
|
||||
path: /
|
||||
backend:
|
||||
service:
|
||||
name: grafana
|
||||
port:
|
||||
number: 80
|
752
yaml/monitoring/grafana-values.yaml
Normal file
|
@ -0,0 +1,752 @@
|
|||
rbac:
|
||||
create: true
|
||||
## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true)
|
||||
# useExistingRole: name-of-some-(cluster)role
|
||||
pspEnabled: true
|
||||
pspUseAppArmor: true
|
||||
namespaced: false
|
||||
extraRoleRules: []
|
||||
# - apiGroups: []
|
||||
# resources: []
|
||||
# verbs: []
|
||||
extraClusterRoleRules: []
|
||||
# - apiGroups: []
|
||||
# resources: []
|
||||
# verbs: []
|
||||
serviceAccount:
|
||||
create: true
|
||||
name:
|
||||
nameTest:
|
||||
# annotations:
|
||||
# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here
|
||||
|
||||
replicas: 1
|
||||
|
||||
## Create HorizontalPodAutoscaler object for deployment type
|
||||
#
|
||||
autoscaling:
|
||||
enabled: false
|
||||
# minReplicas: 1
|
||||
# maxReplicas: 10
|
||||
# metrics:
|
||||
# - type: Resource
|
||||
# resource:
|
||||
# name: cpu
|
||||
# targetAverageUtilization: 60
|
||||
# - type: Resource
|
||||
# resource:
|
||||
# name: memory
|
||||
# targetAverageUtilization: 60
|
||||
|
||||
## See `kubectl explain poddisruptionbudget.spec` for more
|
||||
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
|
||||
podDisruptionBudget: {}
|
||||
# minAvailable: 1
|
||||
# maxUnavailable: 1
|
||||
|
||||
## See `kubectl explain deployment.spec.strategy` for more
|
||||
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
|
||||
deploymentStrategy:
|
||||
type: RollingUpdate
|
||||
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /api/health
|
||||
port: 3000
|
||||
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /api/health
|
||||
port: 3000
|
||||
initialDelaySeconds: 60
|
||||
timeoutSeconds: 30
|
||||
failureThreshold: 10
|
||||
|
||||
## Use an alternate scheduler, e.g. "stork".
|
||||
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
|
||||
##
|
||||
# schedulerName: "default-scheduler"
|
||||
|
||||
image:
|
||||
repository: grafana/grafana
|
||||
tag: 8.0.0
|
||||
sha: ""
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
## Optionally specify an array of imagePullSecrets.
|
||||
## Secrets must be manually created in the namespace.
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
||||
##
|
||||
# pullSecrets:
|
||||
# - myRegistryKeySecretName
|
||||
|
||||
testFramework:
|
||||
enabled: true
|
||||
image: "bats/bats"
|
||||
tag: "v1.1.0"
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext: {}
|
||||
|
||||
securityContext:
|
||||
runAsUser: 472
|
||||
runAsGroup: 472
|
||||
fsGroup: 472
|
||||
|
||||
containerSecurityContext:
|
||||
{}
|
||||
|
||||
extraConfigmapMounts: []
|
||||
# - name: certs-configmap
|
||||
# mountPath: /etc/grafana/ssl/
|
||||
# subPath: certificates.crt # (optional)
|
||||
# configMap: certs-configmap
|
||||
# readOnly: true
|
||||
|
||||
|
||||
extraEmptyDirMounts: []
|
||||
# - name: provisioning-notifiers
|
||||
# mountPath: /etc/grafana/provisioning/notifiers
|
||||
|
||||
|
||||
# Apply extra labels to common labels.
|
||||
extraLabels: {}
|
||||
|
||||
## Assign a PriorityClassName to pods if set
|
||||
# priorityClassName:
|
||||
|
||||
downloadDashboardsImage:
|
||||
repository: curlimages/curl
|
||||
tag: 7.73.0
|
||||
sha: ""
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
downloadDashboards:
|
||||
env: {}
|
||||
envFromSecret: ""
|
||||
resources: {}
|
||||
|
||||
## Pod Annotations
|
||||
# podAnnotations: {}
|
||||
|
||||
## Pod Labels
|
||||
# podLabels: {}
|
||||
|
||||
podPortName: grafana
|
||||
|
||||
## Deployment annotations
|
||||
# annotations: {}
|
||||
|
||||
## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service).
|
||||
## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
|
||||
## ref: http://kubernetes.io/docs/user-guide/services/
|
||||
##
|
||||
service:
|
||||
enabled: true
|
||||
type: ClusterIP
|
||||
port: 80
|
||||
targetPort: 3000
|
||||
# targetPort: 4181 To be used with a proxy extraContainer
|
||||
annotations: {}
|
||||
labels: {}
|
||||
portName: service
|
||||
|
||||
serviceMonitor:
|
||||
## If true, a ServiceMonitor CRD is created for a prometheus operator
|
||||
## https://github.com/coreos/prometheus-operator
|
||||
##
|
||||
enabled: false
|
||||
path: /metrics
|
||||
# namespace: monitoring (defaults to use the namespace this chart is deployed to)
|
||||
labels: {}
|
||||
interval: 1m
|
||||
scheme: http
|
||||
tlsConfig: {}
|
||||
scrapeTimeout: 30s
|
||||
relabelings: []
|
||||
|
||||
extraExposePorts: []
|
||||
# - name: keycloak
|
||||
# port: 8080
|
||||
# targetPort: 8080
|
||||
# type: ClusterIP
|
||||
|
||||
# overrides pod.spec.hostAliases in the grafana deployment's pods
|
||||
hostAliases: []
|
||||
# - ip: "1.2.3.4"
|
||||
# hostnames:
|
||||
# - "my.host.com"
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
# For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
|
||||
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
|
||||
# ingressClassName: nginx
|
||||
# Values can be templated
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
labels: {}
|
||||
path: /
|
||||
|
||||
# pathType is only for k8s > 1.19
|
||||
pathType: Prefix
|
||||
|
||||
hosts:
|
||||
- chart-example.local
|
||||
## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
|
||||
extraPaths: []
|
||||
# - path: /*
|
||||
# backend:
|
||||
# serviceName: ssl-redirect
|
||||
# servicePort: use-annotation
|
||||
## Or for k8s > 1.19
|
||||
# - path: /*
|
||||
# pathType: Prefix
|
||||
# backend:
|
||||
# service:
|
||||
# name: ssl-redirect
|
||||
# port:
|
||||
# name: service
|
||||
|
||||
|
||||
tls: []
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
resources: {}
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
## Node labels for pod assignment
|
||||
## ref: https://kubernetes.io/docs/user-guide/node-selection/
|
||||
#
|
||||
nodeSelector: {}
|
||||
|
||||
## Tolerations for pod assignment
|
||||
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
||||
##
|
||||
tolerations: []
|
||||
|
||||
## Affinity for pod assignment
|
||||
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
||||
##
|
||||
affinity: {}
|
||||
|
||||
extraInitContainers: []
|
||||
|
||||
## Enable and specify a container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod
|
||||
extraContainers: |
|
||||
# - name: proxy
|
||||
# image: quay.io/gambol99/keycloak-proxy:latest
|
||||
# args:
|
||||
# - -provider=github
|
||||
# - -client-id=
|
||||
# - -client-secret=
|
||||
# - -github-org=<ORG_NAME>
|
||||
# - -email-domain=*
|
||||
# - -cookie-secret=
|
||||
# - -http-address=http://0.0.0.0:4181
|
||||
# - -upstream-url=http://127.0.0.1:3000
|
||||
# ports:
|
||||
# - name: proxy-web
|
||||
# containerPort: 4181
|
||||
|
||||
## Volumes that can be used in init containers that will not be mounted to deployment pods
|
||||
extraContainerVolumes: []
|
||||
# - name: volume-from-secret
|
||||
# secret:
|
||||
# secretName: secret-to-mount
|
||||
# - name: empty-dir-volume
|
||||
# emptyDir: {}
|
||||
|
||||
## Enable persistence using Persistent Volume Claims
|
||||
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
|
||||
##
|
||||
persistence:
|
||||
type: pvc
|
||||
enabled: true
|
||||
# storageClassName: default
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
size: 8Gi
|
||||
# annotations: {}
|
||||
finalizers:
|
||||
- kubernetes.io/pvc-protection
|
||||
# selectorLabels: {}
|
||||
subPath: ""
|
||||
existingClaim: "monitoring-grafana-data"
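# The claim named above must already exist; a minimal sketch of such a PVC
# (the namespace and omitted storageClassName are assumptions):
# apiVersion: v1
# kind: PersistentVolumeClaim
# metadata:
#   name: monitoring-grafana-data
#   namespace: monitoring
# spec:
#   accessModes: ["ReadWriteOnce"]
#   resources:
#     requests:
#       storage: 8Gi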
|
||||
|
||||
## If persistence is not enabled, this allows mounting the
|
||||
## local storage in-memory to improve performance
|
||||
##
|
||||
inMemory:
|
||||
enabled: false
|
||||
## The maximum usage on memory medium EmptyDir would be
|
||||
## the minimum value between the SizeLimit specified
|
||||
## here and the sum of memory limits of all containers in a pod
|
||||
##
|
||||
# sizeLimit: 300Mi
|
||||
|
||||
initChownData:
|
||||
## If false, data ownership will not be reset at startup
|
||||
## This allows the grafana server to be run with an arbitrary user
|
||||
##
|
||||
enabled: true
|
||||
|
||||
## initChownData container image
|
||||
##
|
||||
image:
|
||||
repository: busybox
|
||||
tag: "1.31.1"
|
||||
sha: ""
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
## initChownData resource requests and limits
|
||||
## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
|
||||
##
|
||||
resources: {}
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
|
||||
# Administrator credentials when not using an existing secret (see below)
|
||||
adminUser: admin
|
||||
# adminPassword: strongpassword
|
||||
|
||||
# Use an existing secret for the admin user.
|
||||
admin:
|
||||
existingSecret: ""
|
||||
userKey: admin-user
|
||||
passwordKey: admin-password
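# A hedged example of creating such a secret (the name "grafana-admin" and the
# namespace "monitoring" are assumptions, not chart defaults):
#   kubectl -n monitoring create secret generic grafana-admin \
#     --from-literal=admin-user=admin \
#     --from-literal=admin-password='<strong password>'
# and then set: existingSecret: "grafana-admin"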
|
||||
|
||||
## Define command to be executed at startup by grafana container
|
||||
## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/)
|
||||
## Default is "run.sh" as defined in grafana's Dockerfile
|
||||
# command:
|
||||
# - "sh"
|
||||
# - "/run.sh"
|
||||
|
||||
## Use an alternate scheduler, e.g. "stork".
|
||||
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
|
||||
##
|
||||
# schedulerName:
|
||||
|
||||
## Extra environment variables that will be pass onto deployment pods
|
||||
##
|
||||
## to provide grafana with access to CloudWatch on AWS EKS:
|
||||
## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later)
|
||||
## 2. edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the
|
||||
## same oidc eks provider as noted before (same as the existing line)
|
||||
## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name
|
||||
##
|
||||
## "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana",
|
||||
##
|
||||
## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess
|
||||
## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name)
|
||||
##
|
||||
## env:
|
||||
## AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here
|
||||
## AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
|
||||
## AWS_REGION: us-east-1
|
||||
##
|
||||
## 5. uncomment the EKS section in extraSecretMounts: below
|
||||
## 6. uncomment the annotation section in the serviceAccount: above
|
||||
## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn
|
||||
|
||||
env: {}
|
||||
|
||||
## "valueFrom" environment variable references that will be added to deployment pods
|
||||
## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core
|
||||
## Renders in container spec as:
|
||||
## env:
|
||||
## ...
|
||||
## - name: <key>
|
||||
## valueFrom:
|
||||
## <value rendered as YAML>
|
||||
envValueFrom: {}
|
||||
|
||||
## The name of a secret in the same kubernetes namespace which contains values to be added to the environment
|
||||
## This can be useful for auth tokens, etc. Value is templated.
|
||||
envFromSecret: ""
|
||||
|
||||
## Sensitive environment variables that will be rendered as a new secret object
|
||||
## This can be useful for auth tokens, etc
|
||||
envRenderSecret: {}
|
||||
|
||||
## Additional grafana server secret mounts
|
||||
# Defines additional mounts with secrets. Secrets must be manually created in the namespace.
|
||||
extraSecretMounts: []
|
||||
# - name: secret-files
|
||||
# mountPath: /etc/secrets
|
||||
# secretName: grafana-secret-files
|
||||
# readOnly: true
|
||||
# subPath: ""
|
||||
#
|
||||
# for AWS EKS (cloudwatch) use the following (see also instruction in env: above)
|
||||
# - name: aws-iam-token
|
||||
# mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount
|
||||
# readOnly: true
|
||||
# projected:
|
||||
# defaultMode: 420
|
||||
# sources:
|
||||
# - serviceAccountToken:
|
||||
# audience: sts.amazonaws.com
|
||||
# expirationSeconds: 86400
|
||||
# path: token
|
||||
#
|
||||
# for CSI e.g. Azure Key Vault use the following
|
||||
# - name: secrets-store-inline
|
||||
# mountPath: /run/secrets
|
||||
# readOnly: true
|
||||
# csi:
|
||||
# driver: secrets-store.csi.k8s.io
|
||||
# readOnly: true
|
||||
# volumeAttributes:
|
||||
# secretProviderClass: "akv-grafana-spc"
|
||||
# nodePublishSecretRef: # Only required when using service principal mode
|
||||
# name: grafana-akv-creds # Only required when using service principal mode
|
||||
|
||||
## Additional grafana server volume mounts
|
||||
# Defines additional volume mounts.
|
||||
extraVolumeMounts: []
|
||||
# - name: extra-volume-0
|
||||
# mountPath: /mnt/volume0
|
||||
# readOnly: true
|
||||
# existingClaim: volume-claim
|
||||
# - name: extra-volume-1
|
||||
# mountPath: /mnt/volume1
|
||||
# readOnly: true
|
||||
# hostPath: /usr/shared/
|
||||
|
||||
## Pass the plugins you want installed as a list.
|
||||
##
|
||||
plugins: []
|
||||
# - digrich-bubblechart-panel
|
||||
# - grafana-clock-panel
|
||||
|
||||
## Configure grafana datasources
|
||||
## ref: http://docs.grafana.org/administration/provisioning/#datasources
|
||||
##
|
||||
#datasources: {}
|
||||
# datasources.yaml:
|
||||
# apiVersion: 1
|
||||
# datasources:
|
||||
# - name: Prometheus
|
||||
# type: prometheus
|
||||
# url: http://prometheus-prometheus-server
|
||||
# access: proxy
|
||||
# isDefault: true
|
||||
# - name: CloudWatch
|
||||
# type: cloudwatch
|
||||
# access: proxy
|
||||
# uid: cloudwatch
|
||||
# editable: false
|
||||
# jsonData:
|
||||
# authType: credentials
|
||||
# defaultRegion: us-east-1
|
||||
datasources:
|
||||
datasources.yaml:
|
||||
apiVersion: 1
|
||||
datasources:
|
||||
- name: Prometheus
|
||||
type: prometheus
|
||||
url: http://prometheus-server.monitoring.svc.cluster.local
|
||||
access: proxy
|
||||
isDefault: true
|
||||
- name: Loki
|
||||
type: loki
|
||||
url: http://loki.monitoring.svc.cluster.local:3100
|
||||
access: proxy
|
||||
isDefault: false
|
||||
|
||||
## Configure notifiers
|
||||
## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels
|
||||
##
|
||||
notifiers: {}
|
||||
# notifiers.yaml:
|
||||
# notifiers:
|
||||
# - name: email-notifier
|
||||
# type: email
|
||||
# uid: email1
|
||||
# # either:
|
||||
# org_id: 1
|
||||
# # or
|
||||
# org_name: Main Org.
|
||||
# is_default: true
|
||||
# settings:
|
||||
# addresses: an_email_address@example.com
|
||||
# delete_notifiers:
|
||||
|
||||
## Configure grafana dashboard providers
|
||||
## ref: http://docs.grafana.org/administration/provisioning/#dashboards
|
||||
##
|
||||
## `path` must be /var/lib/grafana/dashboards/<provider_name>
|
||||
##
|
||||
dashboardProviders: {}
|
||||
# dashboardproviders.yaml:
|
||||
# apiVersion: 1
|
||||
# providers:
|
||||
# - name: 'default'
|
||||
# orgId: 1
|
||||
# folder: ''
|
||||
# type: file
|
||||
# disableDeletion: false
|
||||
# editable: true
|
||||
# options:
|
||||
# path: /var/lib/grafana/dashboards/default
|
||||
|
||||
## Configure grafana dashboard to import
|
||||
## NOTE: To use dashboards you must also enable/configure dashboardProviders
|
||||
## ref: https://grafana.com/dashboards
|
||||
##
|
||||
## dashboards per provider, use provider name as key.
|
||||
##
|
||||
dashboards: {}
|
||||
# default:
|
||||
# some-dashboard:
|
||||
# json: |
|
||||
# $RAW_JSON
|
||||
# custom-dashboard:
|
||||
# file: dashboards/custom-dashboard.json
|
||||
# prometheus-stats:
|
||||
# gnetId: 2
|
||||
# revision: 2
|
||||
# datasource: Prometheus
|
||||
# local-dashboard:
|
||||
# url: https://example.com/repository/test.json
|
||||
# token: ''
|
||||
# local-dashboard-base64:
|
||||
# url: https://example.com/repository/test-b64.json
|
||||
# token: ''
|
||||
# b64content: true
|
||||
|
||||
## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value.
|
||||
## A provider's dashboards must be defined either by external ConfigMaps or in values.yaml, not in both.
|
||||
## ConfigMap data example:
|
||||
##
|
||||
## data:
|
||||
## example-dashboard.json: |
|
||||
## RAW_JSON
|
||||
##
|
||||
dashboardsConfigMaps: {}
|
||||
# default: ""
|
||||
|
||||
## Grafana's primary configuration
|
||||
## NOTE: values in map will be converted to ini format
|
||||
## ref: http://docs.grafana.org/installation/configuration/
|
||||
##
|
||||
grafana.ini:
|
||||
paths:
|
||||
data: /var/lib/grafana/
|
||||
logs: /var/log/grafana
|
||||
plugins: /var/lib/grafana/plugins
|
||||
provisioning: /etc/grafana/provisioning
|
||||
analytics:
|
||||
check_for_updates: true
|
||||
log:
|
||||
mode: console
|
||||
grafana_net:
|
||||
url: https://grafana.net
|
||||
## grafana Authentication can be enabled with the following values on grafana.ini
|
||||
# server:
|
||||
# The full public facing url you use in browser, used for redirects and emails
|
||||
# root_url:
|
||||
# https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana
|
||||
# auth.github:
|
||||
# enabled: false
|
||||
# allow_sign_up: false
|
||||
# scopes: user:email,read:org
|
||||
# auth_url: https://github.com/login/oauth/authorize
|
||||
# token_url: https://github.com/login/oauth/access_token
|
||||
# api_url: https://api.github.com/user
|
||||
# team_ids:
|
||||
# allowed_organizations:
|
||||
# client_id:
|
||||
# client_secret:
|
||||
## LDAP Authentication can be enabled with the following values on grafana.ini
|
||||
## NOTE: Grafana will fail to start if the value for ldap.toml is invalid
|
||||
# auth.ldap:
|
||||
# enabled: true
|
||||
# allow_sign_up: true
|
||||
# config_file: /etc/grafana/ldap.toml
|
||||
|
||||
## Grafana's LDAP configuration
|
||||
## Templated by the template in _helpers.tpl
|
||||
## NOTE: To enable, grafana.ini must be configured with auth.ldap.enabled
|
||||
## ref: http://docs.grafana.org/installation/configuration/#auth-ldap
|
||||
## ref: http://docs.grafana.org/installation/ldap/#configuration
|
||||
ldap:
|
||||
enabled: false
|
||||
# `existingSecret` is a reference to an existing secret containing the ldap configuration
|
||||
# for Grafana in a key `ldap-toml`.
|
||||
existingSecret: ""
|
||||
# `config` is the content of `ldap.toml` that will be stored in the created secret
|
||||
config: ""
|
||||
# config: |-
|
||||
# verbose_logging = true
|
||||
|
||||
# [[servers]]
|
||||
# host = "my-ldap-server"
|
||||
# port = 636
|
||||
# use_ssl = true
|
||||
# start_tls = false
|
||||
# ssl_skip_verify = false
|
||||
# bind_dn = "uid=%s,ou=users,dc=myorg,dc=com"
|
||||
|
||||
## Grafana's SMTP configuration
|
||||
## NOTE: To enable, grafana.ini must be configured with smtp.enabled
|
||||
## ref: http://docs.grafana.org/installation/configuration/#smtp
|
||||
smtp:
|
||||
# `existingSecret` is a reference to an existing secret containing the smtp configuration
|
||||
# for Grafana.
|
||||
existingSecret: ""
|
||||
userKey: "user"
|
||||
passwordKey: "password"
|
||||
|
||||
## Sidecars that collect the configmaps with the specified label and store the included files into the respective folders
|
||||
## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards
|
||||
sidecar:
|
||||
image:
|
||||
repository: quay.io/kiwigrid/k8s-sidecar
|
||||
tag: 1.10.7
|
||||
sha: ""
|
||||
imagePullPolicy: IfNotPresent
|
||||
resources: {}
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 100Mi
|
||||
# requests:
|
||||
# cpu: 50m
|
||||
# memory: 50Mi
|
||||
# skipTlsVerify Set to true to skip tls verification for kube api calls
|
||||
# skipTlsVerify: true
|
||||
enableUniqueFilenames: false
|
||||
dashboards:
|
||||
enabled: false
|
||||
SCProvider: true
|
||||
# label that the configmaps with dashboards are marked with
|
||||
label: grafana_dashboard
|
||||
# value of label that the configmaps with dashboards are set to
|
||||
labelValue: null
|
||||
# folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set)
|
||||
folder: /tmp/dashboards
|
||||
# The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead
|
||||
defaultFolderName: null
|
||||
# If specified, the sidecar will search for dashboard config-maps inside this namespace.
|
||||
# Otherwise the namespace in which the sidecar is running will be used.
|
||||
# It's also possible to specify ALL to search in all namespaces
|
||||
searchNamespace: null
|
||||
# search in configmap, secret or both
|
||||
resource: both
|
||||
# If specified, the sidecar will look for annotation with this name to create folder and put graph here.
|
||||
# You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure.
|
||||
folderAnnotation: null
|
||||
# provider configuration that lets grafana manage the dashboards
|
||||
provider:
|
||||
# name of the provider, should be unique
|
||||
name: sidecarProvider
|
||||
# orgid as configured in grafana
|
||||
orgid: 1
|
||||
# folder in which the dashboards should be imported in grafana
|
||||
folder: ''
|
||||
# type of the provider
|
||||
type: file
|
||||
# disableDelete to activate a import-only behaviour
|
||||
disableDelete: false
|
||||
# allow updating provisioned dashboards from the UI
|
||||
allowUiUpdates: false
|
||||
# allow Grafana to replicate dashboard structure from filesystem
|
||||
foldersFromFilesStructure: false
|
||||
datasources:
|
||||
enabled: false
|
||||
# label that the configmaps with datasources are marked with
|
||||
label: grafana_datasource
|
||||
# value of label that the configmaps with datasources are set to
|
||||
labelValue: null
|
||||
# If specified, the sidecar will search for datasource config-maps inside this namespace.
|
||||
# Otherwise the namespace in which the sidecar is running will be used.
|
||||
# It's also possible to specify ALL to search in all namespaces
|
||||
searchNamespace: null
|
||||
# search in configmap, secret or both
|
||||
resource: both
|
||||
notifiers:
|
||||
enabled: false
|
||||
# label that the configmaps with notifiers are marked with
|
||||
label: grafana_notifier
|
||||
# If specified, the sidecar will search for notifier config-maps inside this namespace.
|
||||
# Otherwise the namespace in which the sidecar is running will be used.
|
||||
# It's also possible to specify ALL to search in all namespaces
|
||||
searchNamespace: null
|
||||
# search in configmap, secret or both
|
||||
resource: both
|
||||
|
||||
## Override the deployment namespace
|
||||
##
|
||||
namespaceOverride: ""
|
||||
|
||||
## Number of old ReplicaSets to retain
|
||||
##
|
||||
revisionHistoryLimit: 10
|
||||
|
||||
## Add a seperate remote image renderer deployment/service
|
||||
imageRenderer:
|
||||
# Enable the image-renderer deployment & service
|
||||
enabled: false
|
||||
replicas: 1
|
||||
image:
|
||||
# image-renderer Image repository
|
||||
repository: grafana/grafana-image-renderer
|
||||
# image-renderer Image tag
|
||||
tag: latest
|
||||
# image-renderer Image sha (optional)
|
||||
sha: ""
|
||||
# image-renderer ImagePullPolicy
|
||||
pullPolicy: Always
|
||||
# extra environment variables
|
||||
env:
|
||||
HTTP_HOST: "0.0.0.0"
|
||||
# RENDERING_ARGS: --disable-gpu,--window-size=1280x758
|
||||
# RENDERING_MODE: clustered
|
||||
# image-renderer deployment serviceAccount
|
||||
serviceAccountName: ""
|
||||
# image-renderer deployment securityContext
|
||||
securityContext: {}
|
||||
# image-renderer deployment Host Aliases
|
||||
hostAliases: []
|
||||
# image-renderer deployment priority class
|
||||
priorityClassName: ''
|
||||
service:
|
||||
# Enable the image-renderer service
|
||||
enabled: true
|
||||
# image-renderer service port name
|
||||
portName: 'http'
|
||||
# image-renderer service port used by both service and deployment
|
||||
port: 8081
|
||||
targetPort: 8081
|
||||
# In case a sub_path is used this needs to be added to the image renderer callback
|
||||
grafanaSubPath: ""
|
||||
# name of the image-renderer port on the pod
|
||||
podPortName: http
|
||||
# number of image-renderer replica sets to keep
|
||||
revisionHistoryLimit: 10
|
||||
networkPolicy:
|
||||
# Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods
|
||||
limitIngress: true
|
||||
# Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods
|
||||
limitEgress: false
|
||||
resources: {}
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 100Mi
|
||||
# requests:
|
||||
# cpu: 50m
|
||||
# memory: 50Mi
|
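The values above are the tail of a Grafana Helm values file. A minimal sketch of how such a file might be applied with the upstream grafana/grafana chart follows; the chart repo alias, release name, namespace and values path are assumptions, not taken from the import:

helm repo add grafana https://grafana.github.io/helm-charts
helm repo update
helm upgrade --install grafana grafana/grafana \
  --namespace monitoring \
  -f yaml/monitoring/grafana-values.yaml   # assumed path of the values file shown above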
36
yaml/monitoring/grafana-volumes.yaml
Normal file
@ -0,0 +1,36 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: monitoring-grafana-data
spec:
  capacity:
    storage: 8Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  rbd:
    monitors:
      - ${CEPH_MONITOR_1}:6789
      - ${CEPH_MONITOR_2}:6789
      - ${CEPH_MONITOR_3}:6789
    pool: kube
    user: kube
    secretRef:
      name: ceph-secret-kube
      namespace: kube-system
    fsType: ext4
    readOnly: false
    image: grafana-data
---
apiVersion: "v1"
kind: "PersistentVolumeClaim"
metadata:
  name: monitoring-grafana-data
  namespace: monitoring
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 8Gi
  volumeName: "monitoring-grafana-data"
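The PersistentVolume above references ${CEPH_MONITOR_*} placeholders, so the manifest is meant to be rendered before it is applied. A minimal sketch, assuming envsubst is used for the substitution; the monitor addresses are examples only:

export CEPH_MONITOR_1=10.0.0.1 CEPH_MONITOR_2=10.0.0.2 CEPH_MONITOR_3=10.0.0.3   # example addresses
envsubst < yaml/monitoring/grafana-volumes.yaml | kubectl apply -f -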
13
yaml/monitoring/loki-v12-ext-svc.yaml
Normal file
@ -0,0 +1,13 @@
---
apiVersion: v1
kind: Service
metadata:
  name: loki-v12
  namespace: monitoring
spec:
  type: ExternalName
  externalName: loki.loki.svc.cluster.local
  ports:
    - port: 3100
      name: http-metrics
      targetPort: http-metrics
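The ExternalName service above makes Loki, which runs in the loki namespace, resolvable as loki-v12 from the monitoring namespace; the Promtail values further down point at this name. A quick check that the alias resolves (the throwaway busybox test pod is an assumption):

kubectl -n monitoring run dns-test --rm -it --restart=Never --image=busybox:1.33 \
  -- nslookup loki-v12.monitoring.svc.cluster.local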
1468
yaml/monitoring/loki-v12-values-long.yaml
Normal file
File diff suppressed because it is too large
1468
yaml/monitoring/loki-v12-values-short.yaml
Normal file
File diff suppressed because it is too large
36
yaml/monitoring/loki-v12-volumes.yaml
Normal file
@ -0,0 +1,36 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: storage-loki-0
  namespace: loki
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: ${LOKI_STORAGE_SIZE}
  volumeName: "loki-v12-data"
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: loki-v12-data
spec:
  capacity:
    storage: ${LOKI_STORAGE_SIZE}
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  rbd:
    monitors:
      - ${CEPH_MONITOR_1}:6789
      - ${CEPH_MONITOR_2}:6789
      - ${CEPH_MONITOR_3}:6789
    pool: kube
    user: kube
    secretRef:
      name: ceph-secret-kube
      namespace: kube-system
    fsType: ext4
    readOnly: false
    image: loki-v12-data
1694
yaml/monitoring/prometheus-values.yaml
Normal file
File diff suppressed because it is too large
26
yaml/monitoring/prometheus-volumes.yaml
Normal file
@ -0,0 +1,26 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: monitoring-prometheus-alertmanager-data
  namespace: monitoring
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: rados-block
  resources:
    requests:
      storage: 2Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: monitoring-prometheus-server-data
  namespace: monitoring
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: rados-block
  resources:
    requests:
      storage: 32Gi
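Unlike the statically defined RBD volumes above, these claims rely on the rados-block StorageClass to provision PersistentVolumes dynamically, so no PersistentVolume objects are defined here. A minimal sketch of checking the class before applying:

kubectl get storageclass rados-block
kubectl apply -f yaml/monitoring/prometheus-volumes.yaml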
485
yaml/monitoring/promtail-values.yaml
Normal file
@ -0,0 +1,485 @@
# -- Overrides the chart's name
nameOverride: null

# -- Overrides the chart's computed fullname
fullnameOverride: null

initContainer:
  # -- Specifies whether the init container for setting inotify max user instances is to be enabled
  enabled: false
  image:
    # -- The Docker registry for the init container
    registry: docker.io
    # -- Docker image repository for the init container
    repository: busybox
    # -- Docker tag for the init container
    tag: 1.33
    # -- Docker image pull policy for the init container image
    pullPolicy: IfNotPresent
  # -- The inotify max user instances to configure
  fsInotifyMaxUserInstances: 128

image:
  # -- The Docker registry
  registry: docker.io
  # -- Docker image repository
  repository: grafana/promtail
  # -- Overrides the image tag whose default is the chart's appVersion
  tag: null
  # -- Docker image pull policy
  pullPolicy: IfNotPresent

# -- Image pull secrets for Docker images
imagePullSecrets: []

# -- Annotations for the DaemonSet
annotations: {}

# -- The update strategy for the DaemonSet
updateStrategy: {}

# -- Pod labels
podLabels: {}

# -- Pod annotations
podAnnotations: {}
#  prometheus.io/scrape: "true"
#  prometheus.io/port: "http-metrics"

# -- The name of the PriorityClass
priorityClassName: null

# -- Liveness probe
livenessProbe: {}

# -- Readiness probe
# @default -- See `values.yaml`
readinessProbe:
  failureThreshold: 5
  httpGet:
    path: /ready
    port: http-metrics
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 1
  timeoutSeconds: 1

# -- Resource requests and limits
resources: {}
#  limits:
#    cpu: 200m
#    memory: 128Mi
#  requests:
#    cpu: 100m
#    memory: 128Mi

# -- The security context for pods
podSecurityContext:
  runAsUser: 0
  runAsGroup: 0

# -- The security context for containers
containerSecurityContext:
  readOnlyRootFilesystem: true
  capabilities:
    drop:
      - ALL
  allowPrivilegeEscalation: false

rbac:
  # -- Specifies whether RBAC resources are to be created
  create: true
  # -- Specifies whether a PodSecurityPolicy is to be created
  pspEnabled: false

serviceAccount:
  # -- Specifies whether a ServiceAccount should be created
  create: true
  # -- The name of the ServiceAccount to use.
  # If not set and `create` is true, a name is generated using the fullname template
  name: null
  # -- Image pull secrets for the service account
  imagePullSecrets: []
  # -- Annotations for the service account
  annotations: {}

# -- Node selector for pods
nodeSelector: {}

# -- Affinity configuration for pods
affinity: {}

# -- Tolerations for pods. By default, pods will be scheduled on master/control-plane nodes.
tolerations:
  - key: node-role.kubernetes.io/master
    operator: Exists
    effect: NoSchedule
  - key: node-role.kubernetes.io/control-plane
    operator: Exists
    effect: NoSchedule

# -- Default volumes that are mounted into pods. In most cases, these should not be changed.
# Use `extraVolumes`/`extraVolumeMounts` for additional custom volumes.
# @default -- See `values.yaml`
defaultVolumes:
  - name: containers
    hostPath:
      path: /var/lib/docker/containers
  - name: pods
    hostPath:
      path: /var/log/pods

# -- Default volume mounts. Corresponds to `volumes`.
# @default -- See `values.yaml`
defaultVolumeMounts:
  - name: containers
    mountPath: /var/lib/docker/containers
    readOnly: true
  - name: pods
    mountPath: /var/log/pods
    readOnly: true

# Extra volumes to be added in addition to those specified under `defaultVolumes`.
extraVolumes: []

# Extra volume mounts together. Corresponds to `extraVolumes`.
extraVolumeMounts: []

# Extra args for the Promtail container.
extraArgs: []
# -- Example:
# -- extraArgs:
# --   - -client.external-labels=hostname=$(HOSTNAME)

# -- Extra environment variables
extraEnv: []

# -- Extra environment variables from secrets or configmaps
extraEnvFrom: []

# ServiceMonitor configuration
serviceMonitor:
  # -- If enabled, ServiceMonitor resources for Prometheus Operator are created
  enabled: false
  # -- Alternative namespace for ServiceMonitor resources
  namespace: null
  # -- Namespace selector for ServiceMonitor resources
  namespaceSelector: {}
  # -- ServiceMonitor annotations
  annotations: {}
  # -- Additional ServiceMonitor labels
  labels: {}
  # -- ServiceMonitor scrape interval
  interval: null
  # -- ServiceMonitor scrape timeout in Go duration format (e.g. 15s)
  scrapeTimeout: null

# -- Configure additional ports and services. For each configured port, a corresponding service is created.
# See values.yaml for details
extraPorts: {}
#  syslog:
#    name: tcp-syslog
#    containerPort: 1514
#    protocol: TCP
#    service:
#      type: ClusterIP
#      clusterIP: null
#      port: 1514
#      externalIPs: []
#      nodePort: null
#      annotations: {}
#      labels: {}
#      loadBalancerIP: null
#      loadBalancerSourceRanges: []
#      externalTrafficPolicy: null

# -- PodSecurityPolicy configuration.
# @default -- See `values.yaml`
podSecurityPolicy:
  privileged: true
  allowPrivilegeEscalation: true
  volumes:
    - 'secret'
    - 'hostPath'
    - 'downwardAPI'
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: true
  requiredDropCapabilities:
    - ALL

# -- Section for crafting Promtail's config file. The only directly relevant value is `config.file`
# which is a templated string that references the other values and snippets below this key.
# @default -- See `values.yaml`
config:
  # -- The log level of the Promtail server
  # Must be referenced in `config.file` to configure `server.log_level`
  # See default config in `values.yaml`
  logLevel: info
  # -- The port of the Promtail server
  # Must be referenced in `config.file` to configure `server.http_listen_port`
  # See default config in `values.yaml`
  serverPort: 3101
  # -- The Loki address to post logs to.
  # Must be referenced in `config.file` to configure `client.url`.
  # See default config in `values.yaml`
  #lokiAddress: http://loki:3100/loki/api/v1/push
  lokiAddress: http://loki-v12:3100/loki/api/v1/push
  # -- A section of reusable snippets that can be referenced in `config.file`.
  # Custom snippets may be added in order to reduce redundancy.
  # This is especially helpful when multiple `kubernetes_sd_configs` are used which usually have large parts in common.
  # @default -- See `values.yaml`
  snippets:
    pipelineStages:
      - cri: {}
    common:
      - action: replace
        source_labels:
          - __meta_kubernetes_pod_node_name
        target_label: node_name
      - action: replace
        source_labels:
          - __meta_kubernetes_namespace
        target_label: namespace
      - action: replace
        replacement: $1
        separator: /
        source_labels:
          - namespace
          - app
        target_label: job
      - action: replace
        source_labels:
          - __meta_kubernetes_pod_name
        target_label: pod
      - action: replace
        source_labels:
          - __meta_kubernetes_pod_container_name
        target_label: container
      - action: replace
        replacement: /var/log/pods/*$1/*.log
        separator: /
        source_labels:
          - __meta_kubernetes_pod_uid
          - __meta_kubernetes_pod_container_name
        target_label: __path__
      - action: replace
        replacement: /var/log/pods/*$1/*.log
        regex: true/(.*)
        separator: /
        source_labels:
          - __meta_kubernetes_pod_annotationpresent_kubernetes_io_config_hash
          - __meta_kubernetes_pod_annotation_kubernetes_io_config_hash
          - __meta_kubernetes_pod_container_name
        target_label: __path__

    # If set to true, adds an additional label for the scrape job.
    # This helps debug the Promtail config.
    addScrapeJobLabel: false

    # -- You can put here any keys that will be directly added to the config file's 'client' block.
    # @default -- empty
    extraClientConfigs: ""

    # -- You can put here any additional scrape configs you want to add to the config file.
    # @default -- empty
    extraScrapeConfigs: ""

    scrapeConfigs: |
      # See also https://github.com/grafana/loki/blob/master/production/ksonnet/promtail/scrape_config.libsonnet for reference

      # Pods with a label 'app.kubernetes.io/name'
      - job_name: kubernetes-pods-app-kubernetes-io-name
        pipeline_stages:
          {{- toYaml .Values.config.snippets.pipelineStages | nindent 4 }}
        kubernetes_sd_configs:
          - role: pod
        relabel_configs:
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_label_app_kubernetes_io_name
            target_label: app
          - action: drop
            regex: ''
            source_labels:
              - app
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_label_app_kubernetes_io_component
            target_label: component
          {{- if .Values.config.snippets.addScrapeJobLabel }}
          - action: replace
            replacement: kubernetes-pods-app-kubernetes-io-name
            target_label: scrape_job
          {{- end }}
          {{- toYaml .Values.config.snippets.common | nindent 4 }}

      # Pods with a label 'app'
      - job_name: kubernetes-pods-app
        pipeline_stages:
          {{- toYaml .Values.config.snippets.pipelineStages | nindent 4 }}
        kubernetes_sd_configs:
          - role: pod
        relabel_configs:
          # Drop pods with label 'app.kubernetes.io/name'. They are already considered above
          - action: drop
            regex: .+
            source_labels:
              - __meta_kubernetes_pod_label_app_kubernetes_io_name
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_label_app
            target_label: app
          - action: drop
            regex: ''
            source_labels:
              - app
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_label_component
            target_label: component
          {{- if .Values.config.snippets.addScrapeJobLabel }}
          - action: replace
            replacement: kubernetes-pods-app
            target_label: scrape_job
          {{- end }}
          {{- toYaml .Values.config.snippets.common | nindent 4 }}

      # Pods with direct controllers, such as StatefulSet
      - job_name: kubernetes-pods-direct-controllers
        pipeline_stages:
          {{- toYaml .Values.config.snippets.pipelineStages | nindent 4 }}
        kubernetes_sd_configs:
          - role: pod
        relabel_configs:
          # Drop pods with label 'app.kubernetes.io/name' or 'app'. They are already considered above
          - action: drop
            regex: .+
            separator: ''
            source_labels:
              - __meta_kubernetes_pod_label_app_kubernetes_io_name
              - __meta_kubernetes_pod_label_app
          - action: drop
            regex: '[0-9a-z-.]+-[0-9a-f]{8,10}'
            source_labels:
              - __meta_kubernetes_pod_controller_name
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_controller_name
            target_label: app
          {{- if .Values.config.snippets.addScrapeJobLabel }}
          - action: replace
            replacement: kubernetes-pods-direct-controllers
            target_label: scrape_job
          {{- end }}
          {{- toYaml .Values.config.snippets.common | nindent 4 }}

      # Pods with indirect controllers, such as Deployment
      - job_name: kubernetes-pods-indirect-controller
        pipeline_stages:
          {{- toYaml .Values.config.snippets.pipelineStages | nindent 4 }}
        kubernetes_sd_configs:
          - role: pod
        relabel_configs:
          # Drop pods with label 'app.kubernetes.io/name' or 'app'. They are already considered above
          - action: drop
            regex: .+
            separator: ''
            source_labels:
              - __meta_kubernetes_pod_label_app_kubernetes_io_name
              - __meta_kubernetes_pod_label_app
          - action: keep
            regex: '[0-9a-z-.]+-[0-9a-f]{8,10}'
            source_labels:
              - __meta_kubernetes_pod_controller_name
          - action: replace
            regex: '([0-9a-z-.]+)-[0-9a-f]{8,10}'
            source_labels:
              - __meta_kubernetes_pod_controller_name
            target_label: app
          {{- if .Values.config.snippets.addScrapeJobLabel }}
          - action: replace
            replacement: kubernetes-pods-indirect-controller
            target_label: scrape_job
          {{- end }}
          {{- toYaml .Values.config.snippets.common | nindent 4 }}

      # All remaining pods not yet covered
      - job_name: kubernetes-other
        pipeline_stages:
          {{- toYaml .Values.config.snippets.pipelineStages | nindent 4 }}
        kubernetes_sd_configs:
          - role: pod
        relabel_configs:
          # Drop what has already been covered
          - action: drop
            regex: .+
            separator: ''
            source_labels:
              - __meta_kubernetes_pod_label_app_kubernetes_io_name
              - __meta_kubernetes_pod_label_app
          - action: drop
            regex: .+
            source_labels:
              - __meta_kubernetes_pod_controller_name
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_name
            target_label: app
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_label_component
            target_label: component
          {{- if .Values.config.snippets.addScrapeJobLabel }}
          - action: replace
            replacement: kubernetes-other
            target_label: scrape_job
          {{- end }}
          {{- toYaml .Values.config.snippets.common | nindent 4 }}

  # -- Config file contents for Promtail.
  # Must be configured as string.
  # It is templated so it can be assembled from reusable snippets in order to avoid redundancy.
  # @default -- See `values.yaml`
  file: |
    server:
      log_level: {{ .Values.config.logLevel }}
      http_listen_port: {{ .Values.config.serverPort }}

    client:
      url: {{ tpl .Values.config.lokiAddress . }}
      {{- tpl .Values.config.snippets.extraClientConfigs . | nindent 2 }}

    positions:
      filename: /run/promtail/positions.yaml

    scrape_configs:
      {{- tpl .Values.config.snippets.scrapeConfigs . | nindent 2 }}
      {{- tpl .Values.config.snippets.extraScrapeConfigs . | nindent 2 }}

networkPolicy:
  # -- Specifies whether Network Policies should be created
  enabled: false
  metrics:
    # -- Specifies the Pods which are allowed to access the metrics port.
    # As this is cross-namespace communication, you also need the namespaceSelector.
    podSelector: {}
    # -- Specifies the namespaces which are allowed to access the metrics port
    namespaceSelector: {}
    # -- Specifies specific network CIDRs which are allowed to access the metrics port.
    # In case you use namespaceSelector, you also have to specify your kubelet networks here.
    # The metrics ports are also used for probes.
    cidrs: []
  k8sApi:
    # -- Specify the k8s API endpoint port
    port: 8443
    # -- Specifies specific network CIDRs you want to limit access to
    cidrs: []
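A minimal sketch of installing Promtail with the values above via the upstream grafana/promtail chart; the release name and namespace are assumptions. The lokiAddress in the values expects the loki-v12 ExternalName service defined earlier:

helm upgrade --install promtail grafana/promtail \
  --namespace monitoring \
  -f yaml/monitoring/promtail-values.yaml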
57
yaml/monitoring/zabbix-agent-daemonset.yaml
Normal file
@ -0,0 +1,57 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: zabbix-agent
  namespace: monitoring
spec:
  selector:
    matchLabels:
      app: zabbix-agent
  template:
    metadata:
      labels:
        app: zabbix-agent
    spec:
      hostNetwork: true
      hostPID: true
      hostIPC: false
      containers:
        - name: zabbix-agent
          image: zabbix/zabbix-agent:alpine-5.0-latest
          imagePullPolicy: Always
          ports:
            - containerPort: 10050
              name: zabbix-agent
          env:
            - name: ZBX_SERVER_HOST
              value: k8x_zabbix_server
            - name: ZBX_STARTAGENTS
              value: "1"
            - name: ZBX_TIMEOUT
              value: "10"
            - name: ZBX_TLSCONNECT
              value: "psk"
            - name: ZBX_TLSACCEPT
              value: "psk"
            - name: ZBX_TLSPSKIDENTITY
              valueFrom:
                secretKeyRef:
                  name: zabbix-psk-id-secret
                  key: zabbix_psk_id
            - name: ZBX_TLSPSKFILE
              value: "zabbix_agentd.psk"
            - name: ZBX_ACTIVE_ALLOW
              value: "false"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 0.15
          volumeMounts:
            - name: zabbix-psk-config
              mountPath: "/var/lib/zabbix/enc"
              readOnly: true
      volumes:
        - name: zabbix-psk-config
          secret:
            secretName: zabbix-psk-secret
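The DaemonSet above expects two secrets in the monitoring namespace: zabbix-psk-id-secret (key zabbix_psk_id) and zabbix-psk-secret (whose key must match ZBX_TLSPSKFILE, i.e. zabbix_agentd.psk). A minimal sketch of creating them; the identity string and the generated PSK are placeholders:

openssl rand -hex 32 > zabbix_agentd.psk
kubectl -n monitoring create secret generic zabbix-psk-id-secret \
  --from-literal=zabbix_psk_id=PSK_EXAMPLE_ID
kubectl -n monitoring create secret generic zabbix-psk-secret \
  --from-file=zabbix_agentd.psk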
16
yaml/registry/registry-cert.yaml
Normal file
@ -0,0 +1,16 @@
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: registry-cert
  namespace: registry
spec:
  secretName: registry-tls
  issuerRef:
    name: letsencrypt-production-dns
    kind: ClusterIssuer
  commonName: '${REGISTRY_URL}'
  dnsNames:
    - '${REGISTRY_URL}'
  privateKey:
    algorithm: ECDSA
30
yaml/registry/registry-ingress-secure.yaml
Normal file
@ -0,0 +1,30 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: registry-ingress
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/proxy-body-size: "2048m"
    nginx.ingress.kubernetes.io/proxy-connect-timeout: "600"
    nginx.ingress.kubernetes.io/proxy-next-upstream-timeout: "600"
    nginx.ingress.kubernetes.io/proxy-next-upstream-tries: "10"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
    nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
spec:
  rules:
    - host: ${REGISTRY_URL}
      http:
        paths:
          - pathType: Prefix
            path: /
            backend:
              service:
                name: registry
                port:
                  number: 5000
  tls:
    - secretName: registry-tls
      hosts:
        - ${REGISTRY_URL}
26
yaml/registry/registry-ingress.yaml
Normal file
@ -0,0 +1,26 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: registry-ingress
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/proxy-body-size: "2048m"
    nginx.ingress.kubernetes.io/proxy-connect-timeout: "600"
    nginx.ingress.kubernetes.io/proxy-next-upstream-timeout: "600"
    nginx.ingress.kubernetes.io/proxy-next-upstream-tries: "10"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
    nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
spec:
  rules:
    - host: ${REGISTRY_URL}
      http:
        paths:
          - pathType: Prefix
            path: /
            backend:
              service:
                name: registry
                port:
                  number: 5000
28
yaml/registry/registry-volumes-nfs.yaml
Normal file
@ -0,0 +1,28 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: registry-data
  namespace: registry
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 420Gi
  volumeName: "registry-data"
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: registry-data
spec:
  capacity:
    storage: 420Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    path: "/srv/registry/data"
    server: 10.15.8.10
    readOnly: false
13
yaml/registry/registry-volumes.yaml
Normal file
@ -0,0 +1,13 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: registry-data
  namespace: registry
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: rados-block
  resources:
    requests:
      storage: 128Gi
93
yaml/registry/registry.yaml
Normal file
@ -0,0 +1,93 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: registry-config
data:
  registry-config.yml: |
    version: 0.1
    log:
      fields:
        service: registry
    storage:
      cache:
        blobdescriptor: inmemory
      filesystem:
        rootdirectory: /var/lib/registry
    http:
      addr: :5000
      headers:
        X-Content-Type-Options: [nosniff]
    auth:
      htpasswd:
        realm: basic-realm
        path: /auth/htpasswd
    health:
      storagedriver:
        enabled: true
        interval: 10s
        threshold: 3
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: registry
  labels:
    app: registry
spec:
  replicas: 1
  selector:
    matchLabels:
      app: registry
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: registry
    spec:
      containers:
        - name: registry
          image: "registry:latest"
          imagePullPolicy: IfNotPresent
          securityContext:
            privileged: false
          ports:
            - name: http
              containerPort: 5000
              protocol: TCP
          volumeMounts:
            - name: registry-data
              mountPath: "/var/lib/registry"
            - name: registry-config
              mountPath: "/etc/docker/registry"
              readOnly: true
            - name: registry-htpasswd
              mountPath: "/auth"
              readOnly: true
      volumes:
        - name: registry-data
          persistentVolumeClaim:
            claimName: registry-data
        - name: registry-config
          configMap:
            name: registry-config
            items:
              - key: registry-config.yml
                path: config.yml
        - name: registry-htpasswd
          secret:
            secretName: registry-sec
            items:
              - key: HTPASSWD
                path: htpasswd
---
apiVersion: v1
kind: Service
metadata:
  name: registry
spec:
  ports:
    - port: 5000
  selector:
    app: registry
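The Deployment above mounts a secret named registry-sec whose HTPASSWD key ends up as /auth/htpasswd inside the pod. One possible way to create it, using the htpasswd tool from the httpd image; the user name and password are placeholders:

docker run --rm --entrypoint htpasswd httpd:2.4 -Bbn admin 'CHANGE_ME' > htpasswd
kubectl -n registry create secret generic registry-sec --from-file=HTPASSWD=./htpasswd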
12
yaml/system/namespace-resource-limits.yaml
Normal file
@ -0,0 +1,12 @@
apiVersion: v1
kind: LimitRange
metadata:
  name: default-resource-limits
spec:
  limits:
    - default:
        memory: 4096Mi
      defaultRequest:
        cpu: 10m
        memory: 128Mi
      type: Container
17
yaml/system/namespace-wild-cert.yaml
Normal file
@ -0,0 +1,17 @@
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: wildcard-${nspace}-cert
  namespace: ${nspace}
spec:
  secretName: wildcard-${nspace}-tls
  issuerRef:
    name: letsencrypt-production-dns
    kind: ClusterIssuer
  commonName: '*.${nspace}.${CLUSTER_DOMAIN}'
  dnsNames:
    - '${nspace}.${CLUSTER_DOMAIN}'
    - '*.${nspace}.${CLUSTER_DOMAIN}'
  privateKey:
    algorithm: ECDSA
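Like the other templated manifests, this certificate is rendered per environment: ${nspace} and ${CLUSTER_DOMAIN} are substituted by the surrounding scripts. A minimal sketch with example values:

export nspace=demo CLUSTER_DOMAIN=k8s.example.org   # example values
envsubst < yaml/system/namespace-wild-cert.yaml | kubectl apply -f -
kubectl -n demo get certificate wildcard-demo-cert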
5
yaml/system/namespace.yaml
Normal file
@ -0,0 +1,5 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: ${nspace}
33
yaml/system/sa-rbac-backup-agent.yaml
Normal file
@ -0,0 +1,33 @@
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: backup-agent-role
  namespace: ${nspace}
rules:
  - apiGroups: [""]
    resources:
      - pods
      - pods/log
    verbs:
      - get
      - list
  - apiGroups: [""]
    resources:
      - pods/exec
    verbs:
      - create
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: backup-agent-rolebinding
  namespace: ${nspace}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: backup-agent-role
subjects:
  - kind: ServiceAccount
    name: backup-agent-sa
    namespace: ${nspace}
110
yaml/system/sa-rbac.yaml
Normal file
@ -0,0 +1,110 @@
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: admin-role
  namespace: ${nspace}
rules:
  - apiGroups: [ "", "extensions", "apps", "batch", "autoscaling" ]
    resources:
      - pods
      - pods/log
      - pods/exec
      - pods/portforward
      - daemonsets
      - deployments
      - services
      - replicasets
      - replicationcontrollers
      - statefulsets
      - horizontalpodautoscalers
      - jobs
      - cronjobs
      - events
      - ingresses
      - persistentvolumeclaims
      - certificates
      - configmaps
      - secrets
      - logs
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - delete
      - patch
  - apiGroups: [ "certmanager.k8s.io" ]
    resources:
      - issuers
    verbs:
      - get
      - list
      - watch
  - apiGroups: [ "certmanager.k8s.io" ]
    resources:
      - certificates
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - delete
      - patch
  - apiGroups: [ "networking.k8s.io" ]
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - delete
      - patch
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: admin-rolebinding
  namespace: ${nspace}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: admin-role
subjects:
  - kind: ServiceAccount
    name: admin-sa
    namespace: ${nspace}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: admin-${nspace}-clusterrole
rules:
  - apiGroups: [ "" ]
    resources:
      - persistentvolumes
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - delete
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: admin-${nspace}-clusterrolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: admin-${nspace}-clusterrole
subjects:
  - kind: ServiceAccount
    name: admin-sa
    namespace: ${nspace}
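The Role, RoleBinding and ClusterRole/ClusterRoleBinding above are bound to a ServiceAccount named admin-sa in the target namespace. A quick way to verify the resulting permissions; the namespace demo is a placeholder:

kubectl -n demo auth can-i create deployments \
  --as=system:serviceaccount:demo:admin-sa
kubectl auth can-i delete persistentvolumes \
  --as=system:serviceaccount:demo:admin-sa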