Commit 23704f6f authored by Robotka Adrián

initial commit

.venv
.idea
.vscode
remote
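# Cluster provisioning flow (a descriptive summary of the targets below):
#   make all  - runs pre, k8s and post in order
#   pre   - bootstraps a local venv and runs the router/mgmt setup playbooks (pre-kubespray/)
#   k8s   - runs Kubespray's cluster.yml on the mgmt host over SSH (-A forwards the local ssh-agent)
#   post  - runs the post-cluster playbook (post-kubespray/)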
all: pre k8s post
cluster: all
pre:
	echo Running pre-k8s tasks
	bash -c "cd pre-kubespray; ./bootstrap.sh -c 'ansible-playbook setup_router.yaml; ansible-playbook setup_mgmt.yaml'"
k8s:
	echo Creating cluster
	ssh -A -t mgmt 'bash -c "cd kubespray/kubespray-repo; ansible-playbook -i ../inventory/vhc-1/hosts.yaml cluster.yml"'
post:
	echo Running post-k8s tasks
	bash -c "cd post-kubespray; ./bootstrap.sh -c 'ansible-playbook post_cluster.yaml'"
# Kubernetes cluster docs and playbooks
!!! WORK IN PROGRESS
Scripts and playbooks for the KSZK-managed Kubernetes clusters
## Repo content
Clusters:
- KSZK cluster: global and internal services
- SCH cluster: Public-life services
- Dev cluster: KSZK experiments
## VMs
Check out: [git.sch && vmware && terraform](https://git.sch.bme.hu/vmware/terraform/sch-cluster/-/blob/master/vm_legacy/kszk-k8s.tf)
[defaults]
inventory = inventory.yaml
interpreter_python = /usr/bin/python3
forks = 30
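# The settings above point ansible-playbook at the local inventory.yaml, pin Python 3
# on the managed hosts, and raise parallelism to 30 forks.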
#!/usr/bin/env bash
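# Usage (a sketch of how this script is meant to be invoked; see also the Makefile):
#   ./bootstrap.sh                                  # set up the venv, then drop into a shell inside it
#   ./bootstrap.sh -c 'ansible-playbook x.yaml'     # extra arguments are passed to the exec'd shell,
#                                                   # so -c runs a one-off command inside the venv
#   (x.yaml is a placeholder playbook name, not a file in this repo)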
########################### Virtualenv setup ####################################
# create virtualenv if not present
[[ ! -d .venv ]] && python3 -m venv .venv
source .venv/bin/activate
pip3 install ansible ansible-lint
########################### Ansible setup ####################################
ansible-galaxy install --force -r requirements.galaxy.yml
########################### Help ####################################
echo
echo "########################################"
echo
echo "Your playbooks:"
find . -maxdepth 1 -type f -name "*.yaml" | grep -v inventory
echo
echo "Recommendation: Set up your OpenSSH config based on inventory.yml"
echo
echo "You can run playbook with:"
printf "\tansible-playbook your-playbook.yaml"
echo
# to stay in our comfy virtualenv
exec "${SHELL:-bash}" "$@"
venv
---
## Directory where etcd data stored
etcd_data_dir: /var/lib/etcd
## Experimental kubeadm etcd deployment mode. Available only for new deployment
etcd_kubeadm_enabled: true
## Directory where the binaries will be installed
bin_dir: /usr/local/bin
## The access_ip variable is used to define how other nodes should access
## the node. This is used in flannel to allow other flannel nodes to see
## this node for example. The access_ip is really useful in AWS and Google
## environments where the nodes are accessed remotely by the "public" ip,
## but don't know about that address themselves.
# access_ip: 1.1.1.1
## External LB example config
# apiserver_loadbalancer_domain_name: "grapefruit.sch.bme.hu"
# loadbalancer_apiserver:
#   address: 1.2.3.4
#   port: 1234
## Internal loadbalancers for apiservers
# loadbalancer_apiserver_localhost: true
# valid options are "nginx" or "haproxy"
# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy"
## If Cilium is going to be used in strict mode, we can use the
## localhost connection and not use the external LB. If this parameter is
## not specified, the first node to connect to kubeapi will be used.
# use_localhost_as_kubeapi_loadbalancer: true
## The local load balancer should use this port,
## and it must be set to port 6443.
loadbalancer_apiserver_port: 6443
## If the loadbalancer_apiserver_healthcheck_port variable is defined, it enables the proxy liveness check for nginx.
loadbalancer_apiserver_healthcheck_port: 8081
### OTHER OPTIONAL VARIABLES
## Upstream dns servers
# upstream_dns_servers:
# - 8.8.8.8
# - 8.8.4.4
## There are some changes specific to the cloud providers
## for instance we need to encapsulate packets with some network plugins
## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
## When openstack is used make sure to source in the openstack credentials
## like you would do when using openstack-client before starting the playbook.
# cloud_provider:
## When cloud_provider is set to 'external', you can set the cloud controller to deploy
## Supported cloud controllers are: 'openstack' and 'vsphere'
## When openstack or vsphere are used make sure to source in the required fields
# external_cloud_provider:
## Set these proxy values in order to update package manager and docker daemon to use proxies
# http_proxy: ""
# https_proxy: ""
## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
# no_proxy: ""
## Some problems may occur when downloading files over https proxy due to ansible bug
## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
## SSL validation of get_url module. Note that kubespray will still be performing checksum validation.
# download_validate_certs: False
## Cluster nodes are already excluded from the proxy; if you need to exclude other resources as well, add them here.
# additional_no_proxy: ""
## If you need to disable proxying of OS package repositories but are still behind an http_proxy, set
## skip_http_proxy_on_os_packages to true.
## This will cause kubespray not to set the proxy environment in /etc/yum.conf for CentOS and in /etc/apt/apt.conf for Debian/Ubuntu.
## Special information for Debian/Ubuntu: you have to set the no_proxy variable, then apt packages will install from your desired source.
# skip_http_proxy_on_os_packages: false
## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all
## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the
## no_proxy variable, set below to true:
no_proxy_exclude_workers: false
## Certificate Management
## This setting determines whether certs are generated via scripts.
## Choose 'none' if you provide your own certificates.
## Valid options are "script" and "none".
# cert_management: script
## Set to true to allow pre-checks to fail and continue deployment
# ignore_assert_errors: false
## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
# kube_read_only_port: 10255
## Set to true to download and cache container images
download_container: false
skip_downloads: false
## Deploy container engine
# Set false if you want to deploy container engine manually.
# deploy_container_engine: true
## Red Hat Enterprise Linux subscription registration
## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination
## Update RHEL subscription purpose usage, role and SLA if necessary
# rh_subscription_username: ""
# rh_subscription_password: ""
# rh_subscription_org_id: ""
# rh_subscription_activation_key: ""
# rh_subscription_usage: "Development"
# rh_subscription_role: "Red Hat Enterprise Server"
# rh_subscription_sla: "Self-Support"
## Check if access_ip responds to ping. Set false if your firewall blocks ICMP.
# ping_access_ip: true
---
# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options
# containerd_storage_dir: "/var/lib/containerd"
# containerd_state_dir: "/run/containerd"
# containerd_oom_score: 0
# containerd_default_runtime: "runc"
# containerd_snapshotter: "native"
# containerd_runtimes:
#   - name: runc
#     type: "io.containerd.runc.v2"
#     engine: ""
#     root: ""
# Example for Kata Containers as additional runtime:
#   - name: kata
#     type: "io.containerd.kata.v2"
#     engine: ""
#     root: ""
# containerd_grpc_max_recv_message_size: 16777216
# containerd_grpc_max_send_message_size: 16777216
# containerd_debug_level: "info"
# containerd_metrics_address: ""
# containerd_metrics_grpc_histogram: false
# containerd_registries:
#   "docker.io": "https://registry-1.docker.io"
# containerd_max_container_log_line_size: -1
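# CRI-O registry mirrors: with the block below, docker.io image pulls are attempted
# via mirror.gcr.io first and fall back to registry-1.docker.io.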
crio_registries_mirrors:
  - prefix: docker.io
    insecure: false
    blocked: false
    location: registry-1.docker.io
    mirrors:
      - location: mirror.gcr.io
        insecure: false
---
## Uncomment this if you want to force overlay/overlay2 as docker storage driver
## Please note that overlay2 is only supported on newer kernels
# docker_storage_options: -s overlay2
## Enable docker_container_storage_setup; it will configure the devicemapper driver on CentOS 7 or RHEL 7.
docker_container_storage_setup: false
## A disk path must be defined in docker_container_storage_setup_devs,
## otherwise docker-storage-setup will be executed incorrectly.
# docker_container_storage_setup_devs: /dev/vdb
## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver)
## Valid options are systemd or cgroupfs, default is systemd
# docker_cgroup_driver: systemd
## Only set this if you have more than 3 nameservers:
## If true, Kubespray will only use the first 3; otherwise it will fail
docker_dns_servers_strict: false
# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"
## Whether the docker daemon should manage iptables rules (the daemon's iptables option)
docker_iptables_enabled: "false"
# Docker log options
# Rotate container stderr/stdout logs at 50m and keep last 5
docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"
# define docker bin_dir
docker_bin_dir: "/usr/bin"
# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1'
# kubespray deletes the docker package on each run, so caching the package makes sense
docker_rpm_keepcache: 1
## An obvious use case is allowing insecure-registry access to self-hosted registries.
## Entries can be an IP address or a domain name,
## for example 172.19.16.11 or mirror.registry.io.
# docker_insecure_registries:
# - mirror.registry.io
# - 172.19.16.11
## Add other registries, for example a China registry mirror.
# docker_registry_mirrors:
# - https://registry.docker-cn.com
# - https://mirror.aliyuncs.com
## If non-empty will override default system MountFlags value.
## This option takes a mount propagation flag: shared, slave
## or private, which control whether mounts in the file system
## namespace set up for docker will receive or propagate mounts
## and unmounts. Leave empty for system default
# docker_mount_flags:
## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
# docker_options: ""
---
## Global Offline settings
### Private Container Image Registry
# registry_host: "myprivateregisry.com"
# files_repo: "http://myprivatehttpd"
### If using CentOS, RedHat, AlmaLinux or Fedora
# yum_repo: "http://myinternalyumrepo"
### If using Debian
# debian_repo: "http://myinternaldebianrepo"
### If using Ubuntu
# ubuntu_repo: "http://myinternalubunturepo"
## Container Registry overrides
# kube_image_repo: "{{ registry_host }}"
# gcr_image_repo: "{{ registry_host }}"
# docker_image_repo: "{{ registry_host }}"
# quay_image_repo: "{{ registry_host }}"
## Kubernetes components
# kubeadm_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubeadm"
# kubectl_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubectl"
# kubelet_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubelet"
## CNI Plugins
# cni_download_url: "{{ files_repo }}/kubernetes/cni/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
## cri-tools
# crictl_download_url: "{{ files_repo }}/kubernetes/cri-tools/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
## [Optional] etcd: only if you **DON'T** use etcd_deployment=host
# etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
# [Optional] Calico: If using Calico network plugin
# calicoctl_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore
# calico_crds_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_version }}.tar.gz"
# [Optional] helm: only if you set helm_enabled: true
# helm_download_url: "{{ files_repo }}/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz"
# [Optional] crun: only if you set crun_enabled: true
# crun_download_url: "{{ files_repo }}/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}"
# [Optional] kata: only if you set kata_containers_enabled: true
# kata_containers_download_url: "{{ files_repo }}/kata-containers/runtime/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz"
## CentOS/Redhat/AlmaLinux
### For EL7, the base and extras repos must be available; for EL8, baseos and appstream.
### By default we enable those repos automatically.
# rhel_enable_repos: false
### Docker / Containerd
# docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch"
# docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
## Fedora
### Docker
# docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}"
# docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
### Containerd
# containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd"
# containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
## Debian
### Docker
# docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce"
# docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg"
### Containerd
# containerd_debian_repo_base_url: "{{ ubuntu_repo }}/containerd"
# containerd_debian_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
# containerd_debian_repo_repokey: 'YOURREPOKEY'
## Ubuntu
### Docker
# docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce"
# docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg"
### Containerd
# containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd"
# containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
# containerd_ubuntu_repo_repokey: 'YOURREPOKEY'
---
## Etcd auto compaction retention for the mvcc key value store, in hours
# etcd_compaction_retention: 0
## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
# etcd_metrics: basic
## Etcd is restricted by default to 512M on systems under 4GB RAM; 512MB is not enough for much more than testing.
## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
# etcd_memory_limit: "512M"
## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than
## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check
## etcd documentation for more information.
# etcd_quota_backend_bytes: "2147483648"
### ETCD: disable peer client cert authentication.
# This affects ETCD_PEER_CLIENT_CERT_AUTH variable
# etcd_peer_client_auth: true
## Settings for etcd deployment type
etcd_deployment_type: host
---
# Kubernetes dashboard
# RBAC required. see docs/getting-started.md for access details.
# dashboard_enabled: false
# Helm deployment
helm_enabled: false
# Registry deployment
registry_enabled: false
# registry_namespace: kube-system
# registry_storage_class: ""
# registry_disk_size: "10Gi"
# Metrics Server deployment
metrics_server_enabled: false
# metrics_server_kubelet_insecure_tls: true
# metrics_server_metric_resolution: 60s
# metrics_server_kubelet_preferred_address_types: "InternalIP"
# Rancher Local Path Provisioner
local_path_provisioner_enabled: false
# local_path_provisioner_namespace: "local-path-storage"
# local_path_provisioner_storage_class: "local-path"
# local_path_provisioner_reclaim_policy: Delete
# local_path_provisioner_claim_root: /opt/local-path-provisioner/
# local_path_provisioner_debug: false
# local_path_provisioner_image_repo: "rancher/local-path-provisioner"
# local_path_provisioner_image_tag: "v0.0.19"
# local_path_provisioner_helper_image_repo: "busybox"
# local_path_provisioner_helper_image_tag: "latest"
# Local volume provisioner deployment
local_volume_provisioner_enabled: false
# local_volume_provisioner_namespace: kube-system
# local_volume_provisioner_nodelabels:
#   - kubernetes.io/hostname
#   - topology.kubernetes.io/region
#   - topology.kubernetes.io/zone
# local_volume_provisioner_storage_classes:
#   local-storage:
#     host_dir: /mnt/disks
#     mount_dir: /mnt/disks
#     volume_mode: Filesystem
#     fs_type: ext4
#   fast-disks:
#     host_dir: /mnt/fast-disks
#     mount_dir: /mnt/fast-disks
#     block_cleaner_command:
#       - "/scripts/shred.sh"
#       - "2"
#     volume_mode: Filesystem
#     fs_type: ext4
# CephFS provisioner deployment
cephfs_provisioner_enabled: false
# cephfs_provisioner_namespace: "cephfs-provisioner"
# cephfs_provisioner_cluster: ceph
# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
# cephfs_provisioner_admin_id: admin
# cephfs_provisioner_secret: secret
# cephfs_provisioner_storage_class: cephfs
# cephfs_provisioner_reclaim_policy: Delete
# cephfs_provisioner_claim_root: /volumes
# cephfs_provisioner_deterministic_names: true
# RBD provisioner deployment
rbd_provisioner_enabled: false
# rbd_provisioner_namespace: rbd-provisioner
# rbd_provisioner_replicas: 2
# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
# rbd_provisioner_pool: kube
# rbd_provisioner_admin_id: admin
# rbd_provisioner_secret_name: ceph-secret-admin
# rbd_provisioner_secret: ceph-key-admin
# rbd_provisioner_user_id: kube
# rbd_provisioner_user_secret_name: ceph-secret-user
# rbd_provisioner_user_secret: ceph-key-user
# rbd_provisioner_user_secret_namespace: rbd-provisioner
# rbd_provisioner_fs_type: ext4
# rbd_provisioner_image_format: "2"
# rbd_provisioner_image_features: layering
# rbd_provisioner_storage_class: rbd
# rbd_provisioner_reclaim_policy: Delete
# Nginx ingress controller deployment
ingress_nginx_enabled: false
# ingress_nginx_host_network: false
ingress_publish_status_address: ""
# ingress_nginx_nodeselector:
#   kubernetes.io/os: "linux"
# ingress_nginx_tolerations:
#   - key: "node-role.kubernetes.io/master"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
#   - key: "node-role.kubernetes.io/control-plane"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
# ingress_nginx_namespace: "ingress-nginx"
# ingress_nginx_insecure_port: 80
# ingress_nginx_secure_port: 443
# ingress_nginx_configmap:
#   map-hash-bucket-size: "128"
#   ssl-protocols: "TLSv1.2 TLSv1.3"
# ingress_nginx_configmap_tcp_services:
#   9000: "default/example-go:8080"
# ingress_nginx_configmap_udp_services:
#   53: "kube-system/coredns:53"
# ingress_nginx_extra_args:
#   - --default-ssl-certificate=default/foo-tls
# ingress_nginx_class: nginx
# ambassador ingress controller deployment
ingress_ambassador_enabled: false
# ingress_ambassador_namespace: "ambassador"
# ingress_ambassador_version: "*"
# ingress_ambassador_multi_namespaces: false
# ALB ingress controller deployment
ingress_alb_enabled: false
# alb_ingress_aws_region: "us-east-1"
# alb_ingress_restrict_scheme: "false"
# Enables logging on all outbound requests sent to the AWS API.
# If logging is desired, set to true.
# alb_ingress_aws_debug: "false"
# Cert manager deployment
cert_manager_enabled: false # sounds good; doesn't work - Mike, 2021
cert_manager_namespace: "cert-manager"
# MetalLB deployment
metallb_enabled: true
metallb_speaker_enabled: true
metallb_ip_range:
- "172.31.3.1-172.31.3.254"
# metallb_speaker_nodeselector:
#   kubernetes.io/os: "linux"
# metallb_controller_nodeselector:
#   kubernetes.io/os: "linux"
# metallb_speaker_tolerations:
#   - key: "node-role.kubernetes.io/master"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
#   - key: "node-role.kubernetes.io/control-plane"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
# metallb_controller_tolerations:
#   - key: "node-role.kubernetes.io/master"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
#   - key: "node-role.kubernetes.io/control-plane"
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
# metallb_version: v0.9.6
# metallb_protocol: "layer2"
# metallb_port: "7472"
# metallb_limits_cpu: "100m"
# metallb_limits_mem: "100Mi"
# metallb_additional_address_pools:
#   kube_service_pool:
#     ip_range:
#       - "10.5.1.50-10.5.1.99"
#     protocol: "layer2"
#     auto_assign: false
# metallb_protocol: "bgp"
# metallb_peers:
#   - peer_address: 192.0.2.1
#     peer_asn: 64512
#     my_asn: 4200000000
#   - peer_address: 192.0.2.2
#     peer_asn: 64513
#     my_asn: 4200000000
# The plugin manager for kubectl
krew_enabled: false
krew_root_dir: "/usr/local/krew"
---
# Kubernetes configuration dirs and system namespace.
# These are where all the additional config stuff goes
# that Kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location and namespace.
# Editing those values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"
# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"