setup hcloud

# install pkg
> pacman -S hcloud

# add (rw) api token
> hcloud context create k8s-test

do not forget shell completion
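
# e.g. for bash (hcloud ships a completion subcommand; adjust for your shell)
> echo 'source <(hcloud completion bash)' >> ~/.bashrc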

add control plane

# add network
> hcloud network create \
  --ip-range 10.58.0.0/16 \
  --name k8s-network
# ...
# Network 000000 created

# add subnet
> hcloud network add-subnet k8s-network \
  --network-zone eu-central \
  --type server \
  --ip-range 10.58.0.0/16
# ...
# Subnet added to network 000000

# add master - adjust network
> hcloud server create \
  --name master-1 \
  --image debian-11 \
  --location hel1 \
  --type cpx11 \
  --ssh-key my-ssh-key \
  --network 000000 \
  --start-after-create
# cpx11=2vcpu,2gb-ram,40gb-ssd

# add worker - adjust network
> hcloud server create \
  --name worker-1 \
  --image debian-11 \
  --location hel1 \
  --type cpx21 \
  --ssh-key my-ssh-key \
  --network 000000 \
  --start-after-create
# cpx21=3vcpu,4gb-ram,80gb-ssd

# add worker - adjust network
> hcloud server create \
  --name worker-2 \
  --image debian-11 \
  --location hel1 \
  --type cpx21 \
  --ssh-key my-ssh-key \
  --network 000000 \
  --start-after-create
# cpx21=3vcpu,4gb-ram,80gb-ssd

run on all nodes

# swap off
> swapoff -a; sed -i '/swap/d' /etc/fstab

# set timezone
> ln -sf \
  /usr/share/zoneinfo/Europe/Berlin \
  /etc/localtime

# create sysconfig file
> cat >>/etc/sysctl.d/kubernetes.conf<<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
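
# these bridge sysctls only take effect once the br_netfilter module is
# loaded - a minimal sketch to load it now and persist it for reboots
> modprobe br_netfilter
> echo br_netfilter > /etc/modules-load.d/k8s.conf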

# apply sysconfig
> sysctl --system

# update cache
> apt update

# install requirements
> apt install -y \
  ca-certificates \
  curl \
  gnupg \
  lsb-release

# install docker
> curl -fsSL \
  https://download.docker.com/linux/debian/gpg \
  | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg

> echo \
  "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian \
  $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null

> apt update

> apt install -y \
  docker-ce \
  docker-ce-cli \
  containerd.io
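
# kubeadm expects the systemd cgroup driver; a common (hedged) tweak is to
# set it in /etc/docker/daemon.json and restart docker
> cat >/etc/docker/daemon.json<<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
> systemctl restart docker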

# install kubeadm etc
> curl -fsSLo \
  /usr/share/keyrings/kubernetes-archive-keyring.gpg \
  https://packages.cloud.google.com/apt/doc/apt-key.gpg


> echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" \
  | sudo tee /etc/apt/sources.list.d/kubernetes.list

> apt update

> apt install -y \
  kubelet \
  kubeadm \
  kubectl

> apt-mark hold \
  kubelet \
  kubeadm \
  kubectl

only on master

# init kubeadm - set the pod cidr to match the network created above
> kubeadm init \
  --apiserver-advertise-address=<public_ip> \
  --pod-network-cidr=10.58.0.0/16  \
  --ignore-preflight-errors=all

## == CNI > please choose only one provider
# a) calico
> kubectl \
  --kubeconfig=/etc/kubernetes/admin.conf \
  create -f https://docs.projectcalico.org/v3.20/manifests/calico.yaml
# b) weave - not encrypted by default
> kubectl \
  --kubeconfig=/etc/kubernetes/admin.conf \
  apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"

> kubeadm token create \
  --print-join-command

on all workers

# run the output from the command above
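# it looks roughly like this (token and hash are placeholders)
> kubeadm join <master_ip>:6443 \
  --token <token> \
  --discovery-token-ca-cert-hash sha256:<hash>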

on master - copy kube config

> mkdir -p ~/.kube
> cp /etc/kubernetes/admin.conf ~/.kube/config
# or copy the file to ~/.kube/config on your local machine
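# e.g. from your workstation (user/path assumed)
> scp root@<master_public_ip>:/etc/kubernetes/admin.conf ~/.kube/config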

test kubectl

> kubectl get nodes -o wide

install helm

> curl -fsSL -o \
  get_helm.sh \
  https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3

> chmod 700 get_helm.sh

> ./get_helm.sh

> helm list

persistent storage using nfs and helm

# on master
> apt install -y nfs-kernel-server git-core

> mkdir /nfs-export

# edit /etc/exports
/nfs-export 10.0.0.0/8(rw,sync,no_root_squash,no_subtree_check)

> exportfs -a
> exportfs
  ...
  /nfs-export 10.0.0.0/8

# on worker
> apt install -y nfs-common

# on master
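# the chart path below assumes the upstream provisioner repo was cloned
# first (repo url assumed - adjust if you vendor the chart elsewhere)
> git clone https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner.git
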
> edit nfs-subdir-external-provisioner/charts/nfs-subdir-external-provisioner/values.yaml
  ...
  nfs:
    server: 10.58.0.2
    path: /nfs-export

> helm install nfs-subdir-external-provisioner \
  --namespace default \
  ./nfs-subdir-external-provisioner/charts/nfs-subdir-external-provisioner
  ...
  NAME: nfs-subdir-external-provisioner
  LAST DEPLOYED: Sat Dec  4 19:13:04 2021
  NAMESPACE: default
  STATUS: deployed
  REVISION: 1
  TEST SUITE: None

> kubectl get storageclass
  ...
  NAME         PROVISIONER                                     RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
  nfs-client   cluster.local/nfs-subdir-external-provisioner   Delete          Immediate           true                   19s

# on deployment set:
---
...
persistentVolume:
  storageClass: nfs-client
...
# or use '--set persistence.storageClass="nfs-client"' as extra arg in helm cmd
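
# a quick (hedged) check that dynamic provisioning works - name and size
# are examples only
> cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-test-pvc
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
EOF
> kubectl get pvc nfs-test-pvc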

add a helm repo

# gitea repo
> helm repo add \
  gitea-charts https://dl.gitea.io/charts/

# install gitea with persistent storage
> helm install \
  gitea gitea-charts/gitea \
  --set persistence.storageClass="nfs-client" \
  --set postgresql.persistence.storageClass="nfs-client" \
  --set ingress.enabled="true" \
  --set ingress.hosts[0].host="test.x33u.org"

# check nfs directory
> ls -lha /nfs-export/
  ...
  drwxrwxrwx  2 root root 4.0K Dec  4 19:14 default-data-gitea-0-pvc-XXXXX-01b9-489b-98bb-a133cf3d7bf1

bare metal loadbalancer - metalLB

# enable strictARP in kube-proxy (required by metalLB when kube-proxy runs in ipvs mode)
> kubectl get configmap kube-proxy \
  -n kube-system -o yaml  | \
  sed -e "s/strictARP: false/strictARP: true/" | \
  kubectl apply -f - -n kube-system

# apply metalLB namespace
> kubectl apply -f \
  https://raw.githubusercontent.com/metallb/metallb/v0.11.0/manifests/namespace.yaml

# apply metalLB manifest
> kubectl apply -f \
  https://raw.githubusercontent.com/metallb/metallb/v0.11.0/manifests/metallb.yaml

# create secret
> kubectl create secret generic \
  -n metallb-system memberlist \
  --from-literal=secretkey="$(openssl rand -base64 128)"

# show metalLB pods
> kubectl get pods -n metallb-system
  ...
  NAME                          READY   STATUS    RESTARTS   AGE
  controller-7dcc8764f4-jrwkg   1/1     Running   0          3m16s
  speaker-kfj7s                 1/1     Running   0          3m16s
  speaker-v2szw                 1/1     Running   0          3m16s
  speaker-v48j2                 1/1     Running   0          3m16s

# define target network
> edit metallb-config.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - 10.58.0.2-10.58.0.4

> kubectl apply -f metallb-config.yaml

# create http loadbalancer
> edit loadbalancer.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-balancer-test
spec:
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: nginx
  type: LoadBalancer

> kubectl apply -f loadbalancer.yaml
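
# the selector above expects pods labeled app=nginx; a quick test
# deployment (image and name assumed)
> kubectl create deployment nginx --image=nginx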

# check services
> kubectl get services
  ...
  NAME                  TYPE           CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
  ...
  nginx-balancer-test   LoadBalancer   10.97.212.48   10.58.0.2     80:30284/TCP   11s

# add helm repo for nginx
> helm repo add \
  nginx-stable https://helm.nginx.com/stable

# install nginx ingress using helm
> helm install \
  ingress-nginx nginx-stable/nginx-ingress

## helm install ingress-nginx nginx-stable/nginx-ingress --set rbac.create=true

# apply cloud deployment
> kubectl apply -f \
  https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.0.0/deploy/static/provider/cloud/deploy.yaml
# or, latest from the main branch:
# https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/cloud/deploy.yaml

# apply bare metal deployment
> kubectl apply -f \
  https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.0.0/deploy/static/provider/baremetal/deploy.yaml
# or, latest from the main branch:
# https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/baremetal/deploy.yaml
# note: if another ingress-nginx was installed before, you may see:
# (The IngressClass "nginx" is invalid: spec.controller: Invalid value: "k8s.io/ingress-nginx": field is immutable)
# see the troubleshoot section at the bottom

# check ingress pods
> kubectl get pods \
  --all-namespaces \
  -l app.kubernetes.io/name=ingress-nginx
  ...
  NAMESPACE       NAME                                        READY   STATUS      RESTARTS   AGE
  ingress-nginx   ingress-nginx-admission-create--1-h99df     0/1     Completed   0          75s
  ingress-nginx   ingress-nginx-admission-patch--1-cmjn7      0/1     Completed   1          75s
  ingress-nginx   ingress-nginx-controller-5fd866c9b6-9qwxn   1/1     Running     0          34s


> edit ingress-svc.yml
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: hello-world
  annotations:
spec:
  ingressClassName: nginx
  rules:
  - host:  test.x33u.org
    http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: hello-world
            port:
              number: 80


> kubectl apply -f ingress-svc.yml
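
# the ingress points at a 'hello-world' service; a hedged sketch using a
# demo image (image name assumed)
> kubectl create deployment hello-world --image=nginxdemos/hello --port=80
> kubectl expose deployment hello-world --port=80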

add loadbalancer
solution a > use hetzner’s builtin LB

# create label for all servers
> hcloud server add-label master-1  k8s-role=master
> hcloud server add-label worker-1  k8s-role=worker
> hcloud server add-label worker-2  k8s-role=worker

# create LB service
> hcloud load-balancer create \
  --name k8s-lb \
  --type lb11 \
  --location hel1 \
  --label k8s-role=master
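
# the --label flag above only labels the LB itself; to actually attach the
# labeled nodes as targets something like this should work (flags assumed,
# check 'hcloud load-balancer add-target --help')
> hcloud load-balancer add-target k8s-lb \
  --label-selector k8s-role=worker \
  --use-private-ip
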
# add bitnami repo and install the nginx ingress controller
> helm repo add bitnami https://charts.bitnami.com/bitnami
> helm install ingress bitnami/nginx-ingress-controller

# get the external ip assigned to the ingress controller service
> kubectl get svc ingress-nginx-ingress-controller \
  -o jsonpath="{.status.loadBalancer.ingress[0].ip}"

# install joomla with ingress enabled
> helm install joomla bitnami/joomla \
  --set joomlaPassword=secretpassword \
  --set mariadb.root.password=secretpassword \
  --set service.type=ClusterIP \
  --set ingress.enabled=true \
  --set ingress.hosts[0].name=DOMAIN

# add jetstack repo and prepare namespace + crds for cert-manager
> helm repo add jetstack https://charts.jetstack.io
> kubectl create namespace cert-manager
> kubectl apply --validate=false -f \
  https://github.com/jetstack/cert-manager/releases/download/v0.14.1/cert-manager.crds.yaml

> edit letsencrypt-prod.yaml
---
apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
  labels:
    name: letsencrypt-prod
spec:
  acme:
    email: EMAIL-ADDRESS
    privateKeySecretRef:
      name: letsencrypt-prod
    server: https://acme-v02.api.letsencrypt.org/directory
    solvers:
    - http01:
        ingress:
          class: nginx

> kubectl apply -f letsencrypt-prod.yaml

> helm install cert-manager \
  --namespace cert-manager \
  jetstack/cert-manager \
  --version v0.14.1

> helm upgrade joomla bitnami/joomla \
  --set joomlaPassword=secretpassword \
  --set mariadb.root.password=secretpassword \
  --set service.type=ClusterIP \
  --set ingress.enabled=true \
  --set ingress.certManager=true \
  --set ingress.tls[0].secretName=joomla.local-tls \
  --set ingress.annotations."kubernetes\.io/ingress\.class"=nginx \
  --set ingress.annotations."cert-manager\.io/cluster-issuer"=letsencrypt-prod \
  --set ingress.tls[0].hosts[0]=DOMAIN \
  --set ingress.hosts[0].name=DOMAIN

haproxy + keepalived setup

> hcloud server create \
  --name haproxy-1 \
  --image debian-11 \
  --location hel1 \
  --type cx11 \
  --ssh-key my-ssh-key \
  --network 000000 \
  --start-after-create

> hcloud server create \
  --name haproxy-2 \
  --image debian-11 \
  --location hel1 \
  --type cx11 \
  --ssh-key my-ssh-key \
  --network 000000 \
  --start-after-create

# on both nodes run
> apt update
> apt install -y \
  keepalived \
  haproxy \
  psmisc

create two additional master nodes

> hcloud server create \
  --name master-2 \
  --image debian-11 \
  --location hel1 \
  --type cpx11 \
  --ssh-key my-ssh-key \
  --network 000000 \
  --start-after-create

> hcloud server create \
  --name master-3 \
  --image debian-11 \
  --location hel1 \
  --type cpx11 \
  --ssh-key my-ssh-key \
  --network 000000 \
  --start-after-create
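
# note: joining extra control planes needs a stable api endpoint (e.g. the
# keepalived vip configured below) set via --control-plane-endpoint at
# kubeadm init time; a hedged sketch of the join, placeholders in <>
> kubeadm init phase upload-certs --upload-certs   # on master-1, prints a cert key
> kubeadm join 10.58.0.10:6443 \
  --token <token> \
  --discovery-token-ca-cert-hash sha256:<hash> \
  --control-plane \
  --certificate-key <certificate-key>              # on master-2 and master-3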

on both haproxy-1 and haproxy-2

> mv /etc/haproxy/haproxy.cfg \
  /etc/haproxy/haproxy.cfg.BK

> edit /etc/haproxy/haproxy.cfg

global
    log /dev/log local0 warning
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

    stats socket /var/lib/haproxy/stats

defaults
    log     global
    option  httplog
    option  dontlognull
    timeout connect 5000
    timeout client  50000
    timeout server  50000

frontend kube-apiserver
    bind *:6443
    mode tcp
    option tcplog
    default_backend kube-apiserver

backend kube-apiserver
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    server kube-apiserver-1 10.58.0.2:6443 check # master-1
    server kube-apiserver-2 10.58.0.7:6443 check # master-2
    server kube-apiserver-3 10.58.0.8:6443 check # master-3
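
# validate the config before restarting (standard haproxy check mode)
> haproxy -c -f /etc/haproxy/haproxy.cfg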


> systemctl restart haproxy \
  && systemctl enable haproxy

keepalived haproxy-1 - edit: /etc/keepalived/keepalived.conf

global_defs {
  notification_email {
  }
  router_id LVS_DEVEL
  vrrp_skip_check_adv_addr
  vrrp_garp_interval 0
  vrrp_gna_interval 0
}

vrrp_script chk_haproxy {
  script "killall -0 haproxy"
  interval 2
  weight 2
}

vrrp_instance haproxy-vip {
  state MASTER
  priority 100
  interface ens10 # private network
  virtual_router_id 60
  advert_int 1
  authentication {
    auth_type PASS
    auth_pass 1111
  }
  unicast_src_ip 10.58.0.5 # haproxy-1
  unicast_peer {
    10.58.0.6 # haproxy-2
  }

  virtual_ipaddress {
    10.58.0.10/24 # virtual ip
  }

  track_script {
    chk_haproxy
  }
}


> systemctl restart keepalived \
  && systemctl enable keepalived
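
# verify the vip is up on the active node (interface name as in the config)
> ip addr show ens10 | grep 10.58.0.10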

on haproxy-2 swap the unicast_src_ip and unicast_peer addresses and change state MASTER to state BACKUP (with a lower priority, e.g. 99)

keepalived haproxy-2 - edit: /etc/keepalived/keepalived.conf

troubleshoot

## == ingress error: IngressClass "nginx" is invalid / field is immutable
> kubectl get ingressclass
> kubectl delete ingressclass nginx

nice to have open-lens