aim:

  • local kubernetes cluster
  • alerting included
  • metric and log file analysis
  • versioned upgrades

host software:

  • kind
  • calico
  • helm
  • kubectl
  • wireguard
  • open-lens

cluster software:

  • grafana
  • prometheus
  • loki
  • promtail
  • argocd

get host software

# install packages
> yay -S \
  calico-bin \
  helm \
  kind \
  wireguard-tools \
  kubectl \
  lens

run open-lens to follow cluster operations in a gui

define kind cluster settings cluster.yaml

kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
networking:
  apiServerAddress: 127.0.0.1
  apiServerPort: 6443 #default is "random"
  podSubnet: "10.58.0.0/16"
  serviceSubnet: "10.62.0.0/16" #default is "10.96.0.0/12"; must not overlap podSubnet
  kubeProxyMode: "ipvs" #default is "iptables"
  disableDefaultCNI: true #default is "false"
nodes:
- role: control-plane
  image: kindest/node:v1.23.3 #default is "v1.21"
  kubeadmConfigPatches:
  - |
    kind: InitConfiguration
    nodeRegistration:
      kubeletExtraArgs:
        node-labels: "ingress-ready=true"    
  extraPortMappings:
  - containerPort: 80
    hostPort: 80
    protocol: TCP
  - containerPort: 443
    hostPort: 443
    protocol: TCP
- role: worker
  image: kindest/node:v1.23.3 #default is "v1.21"
  extraMounts:
  - hostPath: ./data-volume
    containerPath: /data-volume
    propagation: Bidirectional #default is "none"
    selinuxRelabel: false
- role: worker
  image: kindest/node:v1.23.3 #default is "v1.21"
  extraMounts:
  - hostPath: ./data-volume
    containerPath: /data-volume
    propagation: Bidirectional #default is "none"
    selinuxRelabel: false

apply cluster

# create storage path
> mkdir ./data-volume

# create cluster
> kind create cluster \
  --name my-cluster \
  --config cluster.yaml
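
since the default cni is disabled in cluster.yaml, the nodes report NotReady until calico is applied; watch with:

# check node status
> kubectl get nodes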

# apply cni
> kubectl apply -f \
  https://docs.projectcalico.org/v3.22/manifests/calico.yaml

# get calico status
> kubectl -n kube-system get pods | grep calico-node

# enable calico wireguard feature
> calicoctl \
  patch felixconfiguration \
  default --type='merge' \
  -p '{"spec":{"wireguardEnabled":true}}' \
  --allow-version-mismatch

# verify if wireguard is working
> calicoctl \
  get node my-cluster-worker \
  -o yaml \
  --allow-version-mismatch | grep -i wireguard
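
if wireguard is active, the grep returns a line like "wireguardPublicKey: <base64-encoded-key>"; an empty result means wireguard is not enabled on that node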

# install nginx ingress controller
> kubectl apply -f \
  https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml
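
optionally wait until the controller pod is ready before creating ingresses (selector as in the upstream kind ingress guide):

# wait for ingress-nginx
> kubectl wait --namespace ingress-nginx \
  --for=condition=ready pod \
  --selector=app.kubernetes.io/component=controller \
  --timeout=90s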

create storageclass.yaml

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner
reclaimPolicy: Retain
volumeBindingMode: Immediate

apply storageclass

# apply storageclass
> kubectl apply -f storageclass.yaml
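
a quick check that the class was registered:

# verify the storageclass
> kubectl get storageclass local-storage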

create monitoring volumes grafana-volume.yaml

apiVersion: v1
kind: PersistentVolume
metadata:
  name: grafana-volume
  labels:
    type: local
spec:
  storageClassName: local-storage
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/data-volume/grafana"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: grafana-volume
  namespace: monitoring
spec:
  storageClassName: local-storage
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: '10Gi'

apply monitoring volumes

# create missing folder
> mkdir -p ./data-volume/grafana

# create namespace
> kubectl create namespace monitoring

# apply config
> kubectl apply -f grafana-volume.yaml
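
with volumeBindingMode "Immediate" the claim binds right away; check that pv and pvc report Bound:

# verify binding
> kubectl -n monitoring get pv,pvc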

create monitoring persistence loki-volume.yaml

apiVersion: v1
kind: PersistentVolume
metadata:
  name: loki-volume
  labels:
    type: local
spec:
  storageClassName: local-storage
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/data-volume"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: loki-volume
  namespace: monitoring
spec:
  storageClassName: local-storage
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: '10Gi'

apply monitoring volumes

# create missing folder
> mkdir -p ./data-volume/loki

# apply config
> kubectl apply -f loki-volume.yaml
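
both claims should be Bound before the charts below are installed:

# verify both claims
> kubectl -n monitoring get pvc grafana-volume loki-volume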

create monitoring stack

# get "prometheus-community" helm chart repo
> helm repo \
  add prometheus-community \
  https://prometheus-community.github.io/helm-charts

# install "kube-prometheus-stack" helm chart
> helm install \
  grafana-stack \
  -n monitoring --create-namespace \
  prometheus-community/kube-prometheus-stack \
  --set grafana.persistence.enabled=true \
  --set grafana.persistence.storageClassName=local-storage \
  --set grafana.persistence.existingClaim=grafana-volume
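
the stack brings up prometheus, alertmanager, grafana and the operator; watch the rollout:

# watch monitoring pods
> kubectl -n monitoring get pods -w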

# get "grafana"  helm chart repo
> helm repo \
  add grafana \
  https://grafana.github.io/helm-charts

# install "loki-stack" helm chart
> helm install \
  loki-stack \
  grafana/loki-stack \
  -n monitoring \
  --set grafana.enabled=false \
  --set loki.persistence.enabled=true \
  --set loki.persistence.storageClassName=local-storage \
  --set loki.persistence.existingClaim=loki-volume
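
because grafana.enabled=false, the loki-stack chart does not register loki as a datasource in the external grafana. assuming the kube-prometheus-stack grafana keeps its default datasource sidecar, a labeled configmap adds it declaratively (service name "loki-stack" and port 3100 follow the release above); save as e.g. loki-datasource.yaml:

apiVersion: v1
kind: ConfigMap
metadata:
  name: loki-datasource
  namespace: monitoring
  labels:
    grafana_datasource: "1" #label watched by the grafana sidecar
data:
  loki-datasource.yaml: |-
    apiVersion: 1
    datasources:
    - name: Loki
      type: loki
      url: http://loki-stack:3100
      access: proxy

# apply datasource config
> kubectl apply -f loki-datasource.yaml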

# get admin password
> kubectl -n monitoring \
  get secrets grafana-stack \
  -o jsonpath="{.data.admin-password}" | base64 -d; echo
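
until the ingress below exists, grafana is reachable via a port-forward (service name follows the release name, as in the ingress below):

# forward grafana to localhost:3000
> kubectl -n monitoring port-forward svc/grafana-stack 3000:80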

for more helm options see the prometheus-community and grafana chart documentation

create monitoring ingress grafana-fanout.yaml

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: grafana-fanout #ingress name
spec:
  rules:
  - host: grafana.local #url
    http:
      paths:
      - backend:
          service:
            name: grafana-stack #service
            port:
              number: 80 #service port
        path: /
        pathType: Prefix

apply monitoring ingress

# apply "grafana-fanout.yaml"
> kubectl apply -f grafana-fanout.yaml \
  --namespace monitoring

# add entry to hostfile
> edit /etc/hosts
  ...
  127.0.0.1 grafana.local

open https://grafana.local

create argocd deployment

# create namespace
> kubectl create namespace argocd

# get argocd manifest
> curl -OL \
  https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml

# jump to the "argocd-server" deployment at line ~3383
> edit install.yaml
  ...
      containers:
      - command:
        - argocd-server
        - --insecure #add this line to disable shipped tls

# apply modified manifest
> kubectl apply -f install.yaml \
  --namespace argocd
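
the initial admin secret only appears once the server has started; wait for the pods first:

# wait for argocd pods
> kubectl -n argocd wait --for=condition=Ready pods --all --timeout=180s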

# get login password
> kubectl -n argocd \
  get secret argocd-initial-admin-secret \
  -o jsonpath="{.data.password}" | base64 -d; echo
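
optionally reach the ui before the ingress exists; with --insecure set, port 80 serves plain http:

# forward argocd to localhost:8080
> kubectl -n argocd port-forward svc/argocd-server 8080:80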

create argocd ingress argocd-fanout.yaml

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: argocd-fanout #ingress name
spec:
  rules:
  - host: argo.local #url
    http:
      paths:
      - backend:
          service:
            name: argocd-server #service
            port:
              number: 80 #service port
        path: /
        pathType: Prefix

apply argocd ingress

# apply "argocd-fanout.yaml"
> kubectl apply -f argocd-fanout.yaml \
  --namespace argocd

# add entry to hostfile
> edit /etc/hosts
  ...
  127.0.0.1 grafana.local argo.local

open https://argo.local
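
to exercise the versioned-upgrades aim, point argocd at a git repo; a minimal application sketch using the public argoproj example repo (name and target namespace are placeholders), saved as e.g. guestbook-app.yaml:

apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: guestbook
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://github.com/argoproj/argocd-example-apps
    targetRevision: HEAD
    path: guestbook
  destination:
    server: https://kubernetes.default.svc
    namespace: default
  syncPolicy:
    automated:
      prune: true #delete resources removed from git
      selfHeal: true #revert manual drift

# apply application
> kubectl apply -f guestbook-app.yaml

argocd now syncs the manifests from git; upgrades happen by committing a new revision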