kubeadm reset

root@master-3:~# kubeadm reset
[reset] WARNING: Changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted.
[reset] Are you sure you want to proceed? [y/N]: y
[preflight] Running pre-flight checks
W1205 15:56:08.026216   11892 removeetcdmember.go:80] [reset] No kubeadm config, using etcd pod spec to get data directory
[reset] No etcd config found. Assuming external etcd
[reset] Please, manually reset etcd to prevent further issues
[reset] Stopping the kubelet service
[reset] Unmounting mounted directories in "/var/lib/kubelet"
[reset] Deleting contents of config directories: [/etc/kubernetes/manifests /etc/kubernetes/pki]
[reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]
[reset] Deleting contents of stateful directories: [/var/lib/kubelet /var/lib/dockershim /var/run/kubernetes /var/lib/cni]

The reset process does not clean CNI configuration. To do so, you must remove /etc/cni/net.d

The reset process does not reset or clean up iptables rules or IPVS tables.
If you wish to reset iptables, you must do so manually by using the "iptables" command.

If your cluster was setup to utilize IPVS, run ipvsadm --clear (or similar)
to reset your system's IPVS tables.

The reset process does not clean your kubeconfig files and you must remove them manually.
Please, check the contents of the $HOME/.kube/config file.
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: new-snapshot-test
spec:
  volumeSnapshotClassName: csi-hostpath-snapclass
  source:
    persistentVolumeClaimName: data-my-netbox-postgresql-0
# add label "node-type=master"
> kubectl label nodes kind-control-plane node-type=master
# check label
> kubectl get nodes --show-labels | grep node-type
# delete label (the trailing "-" is intentional removal syntax, not a typo)
> kubectl label nodes kind-control-plane node-type-


# get cluster info
> kubectl cluster-info
> kubectl cluster-info --context kind-kind


# show which pod runs on which node
> kubectl get pod -o wide
# or
> kubectl get pod --all-namespaces \
  -o json | jq '.items[] | .spec.nodeName + " " + .metadata.name'
# like above
> kubectl get pod \
  -o=custom-columns=NODE:.spec.nodeName,NAME:.metadata.name


# port forwarding
# only for debugging, because no load balancing is possible - traffic goes to a single pod
> kubectl port-forward mywebserver 5000:6000
# forward deployment
> kubectl port-forward deployment/gitea-deployment 8080:3000 --address 0.0.0.0
# useful to expose a service which normally is not

# Create a service that directs requests on port 80 to container port 8000
> kubectl expose deployment nginx --port=80 --target-port=8000 --type=LoadBalancer
> kubectl expose deployment gitea-deployment --type=LoadBalancer --name=gitea-service


# Dump resource logs to stdout
> kubectl logs myotherwebserver-8458cdb575-s6cp4

# Stream logs for a specific container within a pod
> kubectl logs -f mywebserver -c mynginx

Viewing resources

# View the cluster and client configuration
> kubectl config view

# List all resources in the default namespace
> kubectl get services

# List all resources in a specific namespace
> kubectl get pods -n my-app

# List all resources in all namespaces in wide format
> kubectl get pods -o wide --all-namespaces

# List all resources in json (or yaml) format
> kubectl get pods -o json
> kubectl get pods -o yaml

# Describe resource details
> kubectl describe pods
> kubectl describe pod mywebserver

# Get documentation for a resource type or field path
# (explain takes a resource/field path, not an instance name)
> kubectl explain pods
> kubectl explain pod.spec.containers

# List of resources sorted by name
> kubectl get services --sort-by=.metadata.name

# List resources sorted by restart count
> kubectl get pods --sort-by='.status.containerStatuses[0].restartCount'

#  Rolling update pods for a resource
#  (note: "kubectl rolling-update" was removed in v1.18; apply the new manifest
#  and watch the rollout instead)
> kubectl apply -f my-manifest.yaml
> kubectl rollout status deployment/echoserver

# Check control plane component health (deprecated since v1.19)
> kubectl get componentstatuses

Managing Kubernetes resources

# Start a single instance of a pod
> kubectl run mywebserver --image=nginx

# Create a resource from the command line:
> kubectl create deployment myotherwebserver --image=nginx

# Create resource(s) such as pods, services or daemonsets from a YAML definition file:
> kubectl create -f ./my-manifest.yaml

# Create or apply changes to a resource
> kubectl apply -f ./my-manifest.yaml

# Delete resource(s) defined in a YAML definition file
> kubectl delete -f ./my-manifest.yaml

# Scale a resource
> kubectl scale --replicas=3 deployment.apps/myotherwebserver
> kubectl scale --replicas=3 -f my-manifest.yaml

# Connect to a running container
> kubectl attach mywebserver -c mynginx -i

# Run a command in a single container pod
> kubectl exec mywebserver -- /home/user/myscript.sh

# Delete a resource
> kubectl delete pod/mywebserver
> kubectl delete -f ./my-manifest.yaml

kubectl get pvc