Skip to content

Instantly share code, notes, and snippets.

@chadmcrowell
Last active February 20, 2026 19:31
Show Gist options
  • Select an option

  • Save chadmcrowell/cc935c2956deba3eb0f40b3067a77e0e to your computer and use it in GitHub Desktop.

Select an option

Save chadmcrowell/cc935c2956deba3eb0f40b3067a77e0e to your computer and use it in GitHub Desktop.
Kubernetes workshop
# Bootstrap the control plane. --pod-network-cidr must match the CNI's
# expected range (Calico's default manifest below uses 192.168.0.0/16).
sudo kubeadm init \
--pod-network-cidr=192.168.0.0/16 \
--kubernetes-version=stable
# Install the Calico CNI plugin so nodes transition to Ready
kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.28.0/manifests/calico.yaml
# Install bash-completion and wire up kubectl tab-completion plus the 'k' alias
apt update && apt install -y bash-completion
echo 'source <(kubectl completion bash)' >> ~/.bashrc
echo 'source /usr/share/bash-completion/bash_completion' >> ~/.bashrc
echo 'alias k=kubectl' >> ~/.bashrc
# Make completion work for the 'k' alias too
echo 'complete -o default -F __start_kubectl k' >> ~/.bashrc
# Reload the shell config into the current session
source ~/.bashrc
kubectl run nginx --image nginx --dry-run=client -o yaml > pod.yaml
kubectl apply -f pod.yaml
kubectl run nginx --image nginx
kubectl create deploy nginx --image nginx
kubectl create deploy nginx --image nginx --dry-run=client -o yaml > deploy-nginx.yaml
kubectl get po -A
kubectl edit po nginx
kubectl config view
kubectl config get-contexts
kubectl config use-contexts
# Query etcd cluster membership from inside the etcd static pod; the client
# certificates live under /etc/kubernetes/pki/etcd on the control plane
kubectl -n kube-system exec etcd-controlplane -- etcdctl \
--endpoints=https://127.0.0.1:2379 \
--cacert=/etc/kubernetes/pki/etcd/ca.crt \
--cert=/etc/kubernetes/pki/etcd/server.crt \
--key=/etc/kubernetes/pki/etcd/server.key \
member list
# Download a standalone etcdctl binary onto the host; extract only the
# etcdctl file from the release tarball straight into /usr/local/bin
ETCD_VERSION="v3.5.21"
curl -fsSL https://github.com/etcd-io/etcd/releases/download/${ETCD_VERSION}/etcd-${ETCD_VERSION}-linux-amd64.tar.gz \
| sudo tar -xz --strip-components=1 -C /usr/local/bin/ etcd-${ETCD_VERSION}-linux-amd64/etcdctl
# etcdctl defaults to API v2; Kubernetes uses v3, so we must set this to interact with the cluster's data
export ETCDCTL_API=3
# Take an etcd snapshot (cluster state backup) to /tmp/etcd-backup.db
sudo etcdctl snapshot save /tmp/etcd-backup.db \
--endpoints=https://127.0.0.1:2379 \
--cacert=/etc/kubernetes/pki/etcd/ca.crt \
--cert=/etc/kubernetes/pki/etcd/server.crt \
--key=/etc/kubernetes/pki/etcd/server.key
# Verify connectivity by listing etcd cluster members with the same certs
sudo etcdctl member list \
--endpoints=https://127.0.0.1:2379 \
--cacert=/etc/kubernetes/pki/etcd/ca.crt \
--cert=/etc/kubernetes/pki/etcd/server.crt \
--key=/etc/kubernetes/pki/etcd/server.key
# create namespace
kubectl create namespace dev
# create namespace YAML (declarative equivalent of the command above)
apiVersion: v1
kind: Namespace
metadata:
  name: dev  # FIX: name must be nested under metadata
# list pods in the dev namespace
kubectl get pods -n dev
# list pods across ALL namespaces
kubectl get pods -A
# set context scope to dev namespace
kubectl config set-context --current --namespace=dev
# Generate a mysql deployment manifest without creating it
kubectl create deploy mysql --image mysql:8 --dry-run=client -o yaml > deploy.yaml
# Roll the nginx deployment to image nginx:1.28
kubectl set image deployment/nginx nginx=nginx:1.28
kubectl create deploy nginx --image nginx --dry-run=client -o yaml > deploy-nginx.yaml
# NOTE(review): 'nginx3' does not match any deployment created above, and the
# change-cause says 1.18 while the set-image above used 1.28 — confirm intent
kubectl annotate deployment/nginx3 \
kubernetes.io/change-cause="Updated nginx image to 1.18"
# add metrics server (in order to run kubectl top nodes & kubectl top pods)
kubectl apply -f https://raw.githubusercontent.com/chadmcrowell/acing-the-ckad-exam/main/ch_02/metrics-server-components.yaml
# HorizontalPodAutoscaler: scale the nginx deployment between 2 and 10
# replicas, targeting 50% average CPU utilization (requires metrics-server)
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: nginx
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: nginx
  minReplicas: 2
  maxReplicas: 10
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 50
# instead of creating the above YAML file, you can use the imperative command on an existing deployment
kubectl autoscale deployment/nginx --min=2 --max=10 --cpu-percent=50
# Pod spec fragment: container resource requests (scheduling guarantee) and
# limits (enforcement ceiling)
spec:
  containers:
    - name: nginx
      image: nginx:1.27
      resources:
        requests: # guaranteed minimum
          cpu: "100m" # 100 millicores = 0.1 CPU
          memory: "128Mi"
        limits: # maximum allowed
          cpu: "500m"
          memory: "256Mi"
# ConfigMap with literal keys plus an embedded properties file (block scalar)
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-config
data:
  APP_ENV: production
  LOG_LEVEL: info
  app.properties: |
    server.port=8080
    server.host=0.0.0.0
# imperative equivalent using literals
kubectl create configmap app-config \
--from-literal=APP_ENV=production \
--from-literal=LOG_LEVEL=info
# create a config file
echo "server.port=8080" > app.properties
kubectl create configmap app-config --from-file=app.properties
# Container fragment: project a single ConfigMap key into an env var
env:
  - name: APP_ENVIRONMENT
    valueFrom:
      configMapKeyRef:
        name: app-config
        key: APP_ENV
# Opaque Secret; values under data: must be base64-encoded
---
apiVersion: v1
kind: Secret
metadata:
  name: db-credentials
type: Opaque
data:
  DB_USER: YWRtaW4= # echo -n "admin" | base64
  DB_PASS: czNjcmV0UEBzcw== # echo -n "s3cretP@ss" | base64
# Example 1: HTTP liveness probe on a container
spec:
  containers:
    - name: app
      image: myapp:latest
      livenessProbe:
        httpGet:
          path: /healthz
          port: 8080
        initialDelaySeconds: 10 # wait 10s before first check
        periodSeconds: 5 # check every 5s
        failureThreshold: 3 # restart after 3 consecutive failures
# Example 2: TCP liveness probe — shown as a SEPARATE fragment; a container
# can only carry one livenessProbe key (duplicate keys are invalid YAML)
livenessProbe:
  tcpSocket:
    port: 3306 # just checks if the port is open
  periodSeconds: 10
# liveness and readiness (official docs)
# https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command
# pod with two containers: httpd serving traffic, busybox logging sidecar
---
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: sidecar
  name: sidecar
spec:
  containers:
    - image: httpd
      name: apache
    - image: busybox
      name: sidecar
      command: ['sh', '-c', 'while true; do echo "$(date)": "I am a sidecar"; sleep 5; done']
# Demo pod that becomes unhealthy after 30s: the probes 'cat' a marker file
# that the container deletes, so both probes start failing
---
apiVersion: v1
kind: Pod
metadata:
  name: probe-demo
spec:
  containers:
    - name: app
      image: busybox
      command:
        - sh
        - -c
        - |
          touch /tmp/healthy
          echo "App started"
          sleep 30
          rm /tmp/healthy
          echo "App became unhealthy"
          sleep 600
      livenessProbe:
        exec:
          command: ["cat", "/tmp/healthy"]
        initialDelaySeconds: 5
        periodSeconds: 5
      readinessProbe:
        exec:
          command: ["cat", "/tmp/healthy"]
        initialDelaySeconds: 5
        periodSeconds: 5
##################################################
# Install Helm
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
chmod 700 get_helm.sh
./get_helm.sh
# Install CCM (Linode Cloud Controller Manager)
helm repo add ccm-linode https://linode.github.io/linode-cloud-controller-manager/
helm repo update ccm-linode
# Install with your API token and region.
# SECURITY FIX: multiple live Linode API tokens were hard-coded here in a
# public gist. They have been redacted — revoke every previously committed
# token and supply your own via the environment; never commit secrets.
export LINODE_API_TOKEN="<your-linode-api-token>"
export REGION=us-ord
helm install ccm-linode \
--set apiToken=$LINODE_API_TOKEN,region=$REGION \
ccm-linode/ccm-linode
################################
# Verify in-cluster DNS: resolve the 'web' service's cluster-internal FQDN
# from a throwaway busybox pod (deleted on exit via --rm)
kubectl run dns-test --image=busybox --rm -it --restart=Never -- \
nslookup web.default.svc.cluster.local
# NetworkPolicy: only frontend pods may reach backend pods on TCP 8080
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-frontend-to-backend
spec:
  podSelector:
    matchLabels:
      app: backend # this policy applies to backend pods
  policyTypes:
    - Ingress
  ingress:
    - from:
        - podSelector:
            matchLabels:
              app: frontend # only allow traffic from frontend pods
      ports:
        - protocol: TCP
          port: 8080
# linode CSI drivers
helm repo add linode-csi https://linode.github.io/linode-blockstorage-csi-driver/
helm repo update linode-csi
# deploy CSI driver — fill in your token and region before running
export LINODE_API_TOKEN=""
export REGION=""
helm install linode-csi-driver \
--set apiToken="${LINODE_API_TOKEN}" \
--set region="${REGION}" \
linode-csi/linode-blockstorage-csi-driver
# PVC backed by the Linode block-storage StorageClass installed above
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: app-storage
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  storageClassName: linode-block-storage
# Install the NGINX ingress controller (cloud-provider variant manifest)
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.12.0/deploy/static/provider/cloud/deploy.yaml
# verify the controller is running
kubectl get pods -n ingress-nginx
kubectl get svc -n ingress-nginx
# ingress-nginx-controller LoadBalancer 10.96.x.x <external-ip> 80:30080/TCP,443:30443/TCP
##########################################
# PROMETHEUS
# get the grafana svc
kubectl -n monitoring get svc monitoring-grafana
# port-forward grafana on the control plane
kubectl port-forward -n monitoring svc/monitoring-grafana 3000:80
# forwards localhost:3000 on your laptop → localhost:3000 on the control-plane node.
# FIX: typo — placeholder was '<control-plan-ip>'
ssh -L 3000:localhost:3000 admin@<control-plane-ip>
# generate traffic load (need the web service running)
kubectl run http-load2 \
--image=busybox \
--restart=Never \
-- sh -c "while true; do wget -q -O- http://web.default.svc.cluster.local > /dev/null; done"
# generate CPU load
kubectl run cpu-burn \
--image=alpine \
--restart=Never \
-- sh -c "apk add --no-cache stress-ng && stress-ng --cpu 4 --vm 2 --vm-bytes 256M --timeout 300s"
#
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment