Node spec: 4 cores / 16 GB RAM / 50 GB OS disk / 50 GB ZFS data disk
Run the following as the unprivileged "ubuntu" user, then escalate (or prefix each command with sudo)
# Prepare the kubeconfig location for the normal user before escalating.
# -p makes the mkdir idempotent; 600 because kubeconfig is a credentials file
# (kubectl warns when it is group/world readable) and needs no execute bit.
mkdir -p ~/.kube && touch ~/.kube/config && chmod 600 ~/.kube/config
# Absolute path so this works no matter which directory we are in.
echo 'export KUBECONFIG=~/.kube/config' >> ~/.bashrc
# Remember the unprivileged user name so root can find the home dir later.
echo "$(whoami)" > /tmp/normaluser
sudo -i
apt-get update -y && apt-get upgrade -y
reboot
On the admin node (as root), load the variables
NU=$(cat /tmp/normaluser)
Install k3s & helm & kubectx
# Install k3s (single-node Kubernetes); its kubeconfig lands in /etc/rancher/k3s.
curl -sfL https://get.k3s.io | sh -
# Copy the kubeconfig to the normal user. The file was pre-created by that user,
# so the truncating redirect preserves its ownership; keep it private.
cat /etc/rancher/k3s/k3s.yaml > "/home/$NU/.kube/config"
chmod 600 "/home/$NU/.kube/config"
# Install helm 3 via the official installer; -f makes curl fail loudly on HTTP errors.
curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
# kubectx/kubens for quick context & namespace switching; -f makes reruns idempotent.
git clone https://github.com/ahmetb/kubectx /opt/kubectx
ln -sf /opt/kubectx/kubectx /usr/local/bin/kubectx
ln -sf /opt/kubectx/kubens /usr/local/bin/kubens
Install openebs with zfs for persistent volumes with snapshot support on a single node
# Disk that will back the ZFS pool (adjust to your environment).
ZFSDISK=/dev/xvdb
# apt-get rather than apt: apt's CLI is not stable for scripted use.
apt-get install -y zfsutils-linux jq
# Create the pool the OpenEBS ZFS-LocalPV provisioner will carve volumes from.
zpool create zfspv-pool "$ZFSDISK"
zpool status
# Label the (single) node so storage topology can refer to it.
KUBENODE=$(kubectl get node -o=jsonpath="{.items[0].metadata.name}")
kubectl label node "$KUBENODE" openebs.io/rack=rack1
kubectl apply -f https://openebs.github.io/charts/zfs-operator.yaml
Make the storage class that will refer to the zfspv-pool
# Default StorageClass backed by the zfspv-pool zpool (indentation restored:
# the manifest is invalid YAML without it).
cat <<EOF | kubectl apply -f -
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: openebs-zfspv
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
parameters:
  recordsize: "4k"
  compression: "off"
  dedup: "off"
  fstype: "zfs"
  poolname: "zfspv-pool"
provisioner: zfs.csi.openebs.io
EOF
# Snapshot class used by K10 for CSI snapshots (indentation restored).
# snapshot.storage.k8s.io/v1 replaces v1beta1, which is removed on modern clusters.
cat <<EOF | kubectl apply -f -
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: zfspv-snapclass
  annotations:
    snapshot.storage.kubernetes.io/is-default-class: "true"
    k10.kasten.io/is-snapshot-class: "true"
driver: zfs.csi.openebs.io
deletionPolicy: Delete
EOF
kubectl annotate storageclass/local-path storageclass.kubernetes.io/is-default-class-
Verify that local-path is not the default but openebs-zfspv is
kubectl get sc
Wait around 5 min for everything to be set up
# The ZFS-LocalPV pods must be Running before provisioning any volumes.
kubectl get pod -n kube-system -l role=openebs-zfs
kubectl create ns stock-demo
helm repo add bitnami https://charts.bitnami.com/bitnami
# NOTE(review): demo-only credentials on argv; for real deployments pass
# secrets via a values file, since argv leaks through 'ps' and shell history.
helm install -n stock-demo --set global.postgresql.auth.username=root --set global.postgresql.auth.password=notsecure --set global.postgresql.auth.database=stock stockdb bitnami/postgresql
# Pin the demo image to the vnext tag before applying (sed expr quoted, curl fails fast).
curl -fsSL https://raw.githubusercontent.com/tdewin/stock-demo/main/kubernetes/deployment.yaml | sed 's#tdewin/stock-demo#tdewin/stock-demo:vnext#' | kubectl apply -n stock-demo -f -
# Drop externalTrafficPolicy and switch the service to ClusterIP (Traefik fronts it).
curl -fsSL https://raw.githubusercontent.com/tdewin/stock-demo/main/kubernetes/svc.yaml | sed 's/^.*externalTrafficPolicy.*$//g' | sed 's/LoadBalancer/ClusterIP/g' | kubectl apply -n stock-demo -f -
kubectl get svc -n stock-demo
Traefik ingress
# Ingress routing /stock/ to the demo service, plus a Traefik middleware that
# strips the /stock prefix before forwarding (indentation restored; the
# middleware annotation value format is <namespace>-<name>@kubernetescrd).
cat <<EOF | kubectl apply -f -
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    traefik.ingress.kubernetes.io/router.middlewares: stock-demo-strip-prefix@kubernetescrd
  name: stock-demo-ingress
  namespace: stock-demo
spec:
  rules:
  - http:
      paths:
      - backend:
          service:
            name: stock-demo-svc
            port:
              number: 80
        path: /stock/
        pathType: ImplementationSpecific
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
  name: strip-prefix
  namespace: stock-demo
spec:
  stripPrefix:
    prefixes:
      - /stock
EOF
# Deploy Kasten K10 with ingress and token-based dashboard authentication.
helm repo add kasten https://charts.kasten.io/
kubectl create namespace kasten-io
helm install k10 kasten/k10 --namespace=kasten-io \
  --set ingress.create=true \
  --set auth.tokenAuth.enabled=true
It might take some time for all components to be installed; once done, you should be able to connect to the ingress
# Watch the K10 deployments come up, then find the ingress endpoint.
kubectl -n kasten-io get deploy
kubectl -n kasten-io get ingress
e.g. connect to http(s)://&lt;node-address&gt;/k10/
Get a token
export NS=kasten-io && export SA=k10-k10 && export TOKENNAME=$(kubectl -n $NS get sa $SA -o=jsonpath="{.secrets[0].name}") && echo "" && kubectl -n $NS get secret $TOKENNAME -o=jsonpath="{.data.token}" | base64 -d && echo ""
apt-get install -y nfs-common
# Mount options per the AWS EFS mount documentation.
# The mountpoint must exist before mount(8) will use it.
mkdir -p /mnt/k10
mount -t nfs4 -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport fs-share.efs.eu-central-1.amazonaws.com:/ /mnt/k10
# Static PV pointing at the EFS share; Retain so backup data survives PVC
# deletion (indentation restored: the manifest is invalid YAML without it).
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolume
metadata:
  name: backup-k10-pv
spec:
  capacity:
    storage: 10Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs
  mountOptions:
    - hard
    - nfsvers=4.1
    - rsize=1048576
    - wsize=1048576
    - timeo=600
    - retrans=2
    - noresvport
  nfs:
    path: /
    server: fs-share.efs.eu-central-1.amazonaws.com
EOF
# PVC in kasten-io that binds the static NFS PV above (matching
# storageClassName/accessModes/size); indentation restored.
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: backup-k10-pv
  namespace: kasten-io
spec:
  storageClassName: nfs
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
EOF
Now you can add the pvc inside your kasten installation
Basic Authentication
# apache2-utils provides htpasswd; -y keeps the runbook non-interactive,
# consistent with the other apt-get invocations.
sudo apt-get install -y apache2-utils
# Generate an htpasswd entry and base64-encode it for the Secret's data field.
htpasswd -nb timothy notsecure | openssl base64
Copy paste pw in secret below (replace dGltb...=)
# Basic-auth Secret (paste the base64 htpasswd output over dGltb...=) and the
# Traefik middleware that consumes it (indentation restored; '|2' is the YAML
# block-scalar indentation indicator expected by Traefik's example).
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Secret
metadata:
  name: authsecret
  namespace: stock-demo
data:
  users: |2
    dGltb...=
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
  name: authenticationmiddleware
  namespace: stock-demo
spec:
  basicAuth:
    secret: authsecret
EOF
Patch annotation to add security
# Read the current router.middlewares annotation (dots in the key are escaped
# with backslashes for kubectl's jsonpath) so the auth middleware can be
# appended without losing the existing strip-prefix entry.
MIDW=$(kubectl -n stock-demo get ingress stock-demo-ingress -o=jsonpath='{.metadata.annotations.traefik\.ingress\.kubernetes\.io\/router\.middlewares}')
# Middleware reference format is <namespace>-<name>@kubernetescrd;
# --overwrite replaces the whole annotation with old-list + auth middleware.
kubectl -n stock-demo annotate ingress/stock-demo-ingress --overwrite traefik.ingress.kubernetes.io/router.middlewares="$MIDW,stock-demo-authenticationmiddleware@kubernetescrd"
You need the private and public key in text files. You might generate these with Let's Encrypt, e.g. using certbot (the certbot/certbot container image)
Create cert in kubernetes
k create secret tls -n default --cert=cert.pem --key=key.pem traefik-cert
For traefik make the tlsstore
# Traefik default TLSStore pointing at the cert secret (indentation restored:
# the manifest is invalid YAML without it).
cat <<EOF | kubectl apply -f -
apiVersion: traefik.containo.us/v1alpha1
kind: TLSStore
metadata:
  name: default
  namespace: default
spec:
  defaultCertificate:
    secretName: traefik-cert
EOF
You might need to kill the traefik pod in the kube-system namespace if it does not load the certificate