k8s stuff - hetzner loadbalancer
setup hcloud
# install pkg
> pacman -S hcloud
# add (rw) api token
> hcloud context create k8s-test
do not forget to enable shell completion
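a quick sketch for bash, assuming hcloud's usual cobra-style completion subcommand:
# load completion in the current shell (add to ~/.bashrc to persist)
> source <(hcloud completion bash)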
add control plane
> hcloud server create \
--name master-1 \
--image debian-11 \
--location hel1 \
--type cpx11 \
--ssh-key my-ssh-key \
--start-after-create
# cpx11=2vcpu,2gb-ram,40gb-ssd
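the public ip of master-1 is needed later as <public_master_ip> for kubeadm init; hcloud shows it:
> hcloud server list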
# add worker - adjust network
> hcloud server create \
--name worker-1 \
--image debian-11 \
--location hel1 \
--type cpx21 \
--ssh-key my-ssh-key \
--start-after-create
# cpx21=3vcpu,4gb-ram,80gb-ssd
run on all nodes
# swap off
> swapoff -a; sed -i '/swap/d' /etc/fstab
# set timezone
> ln -sf \
/usr/share/zoneinfo/Europe/Berlin \
/etc/localtime
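the bridge sysctls below only take effect with the br_netfilter module loaded - a minimal sketch along the usual kubeadm prerequisites:
# load br_netfilter now and on every boot
> cat >/etc/modules-load.d/k8s.conf<<EOF
br_netfilter
EOF
> modprobe br_netfilter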
# create sysctl config
> cat >>/etc/sysctl.d/kubernetes.conf<<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# apply sysctl settings
> sysctl --system
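optional sanity check that the settings took effect (both values should be 1):
> sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables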
# update cache
> apt update
# install requirements
> apt install -y \
ca-certificates \
curl \
gnupg \
lsb-release
# install docker
> curl -fsSL \
https://download.docker.com/linux/debian/gpg \
| sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
> echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian \
$(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
> apt update
> apt install -y \
docker-ce \
docker-ce-cli \
containerd.io
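kubelet and docker should agree on the cgroup driver; newer kubeadm defaults to systemd while docker defaults to cgroupfs - a minimal sketch, assuming /etc/docker/daemon.json does not exist yet:
> cat >/etc/docker/daemon.json<<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
> systemctl restart docker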
# install kubeadm etc
> curl -fsSLo \
/usr/share/keyrings/kubernetes-archive-keyring.gpg \
https://packages.cloud.google.com/apt/doc/apt-key.gpg
> echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" \
| sudo tee /etc/apt/sources.list.d/kubernetes.list
> apt update
> apt install -y \
kubelet \
kubeadm \
kubectl
> apt-mark hold \
kubelet \
kubeadm \
kubectl
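optional check that the packages are installed and pinned:
> kubeadm version -o short
> apt-mark showhold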
only on master
# init kubeadm - add --pod-network-cidr if your CNI requires it
> kubeadm init \
--control-plane-endpoint="<public_master_ip>:6443" \
--upload-certs
## CNI - choose exactly one provider
# a) calico
> kubectl \
--kubeconfig=/etc/kubernetes/admin.conf \
create -f https://docs.projectcalico.org/v3.20/manifests/calico.yaml
# b) weave - not encrypted by default
> kubectl \
--kubeconfig=/etc/kubernetes/admin.conf \
apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
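whichever CNI you picked, wait until its pods and coredns are ready before joining workers:
> kubectl \
--kubeconfig=/etc/kubernetes/admin.conf \
get pods -n kube-system -w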
> kubeadm token create \
--print-join-command
on all workers
run the join command printed above
on master - copy kube config
> mkdir -p ~/.kube
> cp /etc/kubernetes/admin.conf ~/.kube/config
# or copy the file to ~/.kube/config on your local machine
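a possible way to fetch it from a workstation, assuming root ssh access to the master:
> scp root@<public_master_ip>:/etc/kubernetes/admin.conf ~/.kube/config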
test kubectl
> kubectl get nodes -o wide
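for the LoadBalancer service further down to actually get a hetzner load balancer, the hcloud cloud controller manager must run in the cluster - a minimal sketch, assuming the upstream ccm.yaml release manifest and a read/write api token (the ccm docs also expect the kubelets to run with --cloud-provider=external):
# api token secret the ccm reads from kube-system
> kubectl -n kube-system create secret generic hcloud \
--from-literal=token=<hcloud_api_token>
# deploy the ccm
> kubectl apply -f https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/latest/download/ccm.yaml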
on master - create namespace.yml
apiVersion: v1
kind: Namespace
metadata:
  name: ingress-space
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-configuration
  namespace: ingress-space
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: ingress-serviceaccount
  namespace: ingress-space
create ingress.yml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
  name: ingress-clusterrole
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - endpoints
  - namespaces
  - nodes
  - pods
  - secrets
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
- apiGroups:
  - extensions
  resources:
  - ingresses/status
  verbs:
  - update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
  name: ingress-clusterrole-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-clusterrole
subjects:
- kind: ServiceAccount
  name: ingress-serviceaccount
  namespace: ingress-space
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
  name: ingress-role
  namespace: ingress-space
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - pods
  - secrets
  - namespaces
  verbs:
  - get
- apiGroups:
  - ""
  resourceNames:
  - ingress-controller-leader-nginx
  resources:
  - configmaps
  verbs:
  - get
  - update
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - create
- apiGroups:
  - ""
  resources:
  - endpoints
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
  name: ingress-role-binding
  namespace: ingress-space
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-role
subjects:
- kind: ServiceAccount
  name: ingress-serviceaccount
  namespace: ingress-space
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ingress-controller
  namespace: ingress-space
spec:
  replicas: 1
  selector:
    matchLabels:
      name: nginx-ingress
  template:
    metadata:
      labels:
        name: nginx-ingress
    spec:
      serviceAccountName: ingress-serviceaccount
      containers:
      - name: nginx-ingress-controller
        image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.21.0
        args:
        - /nginx-ingress-controller
        - --configmap=$(POD_NAMESPACE)/nginx-configuration
        - --default-backend-service=kube-system/default-http-backend
        - --publish-service=ingress-space/ingress
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        ports:
        - name: http
          containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  annotations:
    # handled by the hcloud cloud controller manager
    load-balancer.hetzner.cloud/location: hel1
    # proxy protocol also needs use-proxy-protocol: "true" in the nginx-configuration configmap
    load-balancer.hetzner.cloud/uses-proxyprotocol: "true"
    load-balancer.hetzner.cloud/hostname: jamamiko.de
  name: ingress
  namespace: ingress-space
spec:
  ports:
  - name: http
    nodePort: 30080
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    name: nginx-ingress
  type: LoadBalancer
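once applied, the hcloud ccm should provision a load balancer and fill in the external ip - watch for it with:
> kubectl -n ingress-space get svc ingress -w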
create deployment.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: default-backend
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: default-backend
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: default-backend
    spec:
      containers:
      - image: srcmkr/csharpdevopsdemo
        name: simple-webapp
        env:
        - name: COLOR
          value: "#243554"
        ports:
        - containerPort: 80
          protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
  name: default-http-backend
  namespace: kube-system
spec:
  ports:
  - nodePort: 31984
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: default-backend
  type: NodePort
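quick check after applying that the default backend is running:
> kubectl -n kube-system get pods -l app=default-backend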
create app1.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: csharpdevops1-deployment
  labels:
    app: csharpdevops1
spec:
  replicas: 1
  selector:
    matchLabels:
      app: csharpdevops1
  template:
    metadata:
      labels:
        app: csharpdevops1
    spec:
      containers:
      - name: csharpdevops1
        image: srcmkr/csharpdevopsdemo
        env:
        - name: COLOR
          value: "#6601b2"
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: csharpdevops1
  name: csharpdevops1-service
spec:
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: csharpdevops1
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: ingress-csharpdevops1
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - host: jamamiko.de
    http:
      paths:
      - path: /servicea
        backend:
          serviceName: csharpdevops1-service
          servicePort: 80
create app2.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: csharpdevops2-deployment
  labels:
    app: csharpdevops2
spec:
  replicas: 1
  selector:
    matchLabels:
      app: csharpdevops2
  template:
    metadata:
      labels:
        app: csharpdevops2
    spec:
      containers:
      - name: csharpdevops2
        image: srcmkr/csharpdevopsdemo
        env:
        - name: COLOR
          value: "#00c7ff"
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: csharpdevops2
  name: csharpdevops2-service
spec:
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: csharpdevops2
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: ingress-csharpdevops2
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - host: jamamiko.de
    http:
      paths:
      - path: /serviceb
        backend:
          serviceName: csharpdevops2-service
          servicePort: 80
> kubectl apply \
-f namespace.yml \
-f ingress.yml \
-f deployment.yml \
-f app1.yml \
-f app2.yml
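verify everything came up and the two paths answer (host and paths from the manifests above, assuming jamamiko.de already points at the load balancer):
> kubectl get pods -A
> kubectl get ingress -A
> curl http://jamamiko.de/servicea
> curl http://jamamiko.de/serviceb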
04-12-2021