Skip to content

Instantly share code, notes, and snippets.

@bouroo
Last active January 11, 2026 06:28
Show Gist options
  • Select an option

  • Save bouroo/021ca2b5da25c4c3fb2abe7215679f4c to your computer and use it in GitHub Desktop.

Select an option

Save bouroo/021ca2b5da25c4c3fb2abe7215679f4c to your computer and use it in GitHub Desktop.
RKE2 on Debian

RKE2 Production Setup Guide (nftables)

Base System Installation

Install Debian with openssh-server enabled during installation.

System Preparation (as root)

Install Required Packages

# Refresh package lists, then install everything the cluster nodes need:
# networking (nftables/vlan/ifupdown2), hardening (fail2ban, auto-updates),
# storage prerequisites (iSCSI/NFS/LUKS for CSI drivers), and ops tooling.
apt update && apt -y install \
  nftables vlan ifupdown2 \
  fail2ban sudo unattended-upgrades \
  open-iscsi nfs-common cryptsetup \
  wget curl net-tools tmux \
  glances btop htop

Disable and Remove iptables

# Stop and disable any legacy iptables services. On Debian, persisted
# iptables rules are applied by the netfilter-persistent unit, so disable
# that as well; '|| true' keeps this idempotent on hosts where the units
# were never installed.
systemctl disable --now iptables 2>/dev/null || true
systemctl disable --now netfilter-persistent 2>/dev/null || true

# Remove the iptables packages if present — rules are managed by nftables
# from here on, and kube-proxy will program nftables directly
apt purge -y iptables iptables-persistent 2>/dev/null || true

# Enable nftables at boot and start it immediately
systemctl enable --now nftables

Kernel Tuning

# The net.bridge.* and net.netfilter.* keys below only exist once their
# kernel modules are loaded. The persistent modules-load config is written
# in a later step, so load them now — otherwise 'sysctl --system' fails on
# the bridge/conntrack keys on first run.
modprobe br_netfilter
modprobe nf_conntrack

cat > /etc/sysctl.d/99-rke2.conf << 'EOF'
# Network
net.ipv4.ip_forward = 1
net.ipv6.conf.all.forwarding = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1

# nftables
net.netfilter.nf_conntrack_max = 1000000
net.netfilter.nf_conntrack_buckets = 250000
net.netfilter.nf_conntrack_tcp_timeout_established = 86400
net.netfilter.nf_conntrack_tcp_timeout_close_wait = 3600

# Performance
fs.inotify.max_user_instances = 8192
fs.inotify.max_user_watches = 524288
vm.max_map_count = 262144
fs.file-max = 2097152

# Network tuning
net.core.somaxconn = 32768
net.ipv4.tcp_max_syn_backlog = 8192
net.core.netdev_max_backlog = 16384
EOF

# Apply every sysctl drop-in now
sysctl --system

Configure User Permissions

usermod -aG sudo debian

Load Required Kernel Modules

# Modules RKE2/Canal need at boot: overlayfs for containerd snapshots,
# br_netfilter so bridged pod traffic traverses netfilter, and the
# nftables/conntrack/NAT stack used by kube-proxy in nftables mode.
cat > /etc/modules-load.d/rke2.conf << 'EOF'
overlay
br_netfilter
nf_conntrack
nf_nat
nf_tables
EOF

# Load the modules now instead of waiting for a reboot
systemctl restart systemd-modules-load

Verify Modules Loaded

# Expect one line per module; a missing name means the load failed
lsmod | grep -E 'overlay|br_netfilter|nf_conntrack|nf_nat|nf_tables'

Network Configuration

Configure Interfaces

# Edit the classic ifupdown config (ifupdown2 reads the same file);
# replace every ${...} placeholder with real values before saving
nano /etc/network/interfaces
source /etc/network/interfaces.d/*

auto lo
iface lo inet loopback
    # Resolver settings — applied via resolvconf when installed
    dns-nameservers 1.1.1.2 8.8.8.8
    dns-search ${DOMAIN}

# Public interface
allow-hotplug eno1
iface eno1 inet static
    address ${PUBLIC_IP}/${CIDR}
    gateway ${PUBLIC_GW}

# Private interface base
# MTU 9000 assumes jumbo frames are enabled end-to-end on the private
# switch fabric — confirm before deploying
allow-hotplug enp4s0f1
iface enp4s0f1 inet manual
    mtu 9000

# Private VLAN
auto vlan255
iface vlan255 inet static
    mtu 9000
    address ${PRIVATE_IP}/${PRIVATE_CIDR}
    vlan-raw-device enp4s0f1
    # '|| true' keeps ifup/ifdown from failing when the route already
    # exists (or is already gone)
    post-up ip route add ${PRIVATE_NET}/${PRIVATE_NET_CIDR} via ${PRIVATE_GW} || true
    pre-down ip route del ${PRIVATE_NET}/${PRIVATE_NET_CIDR} via ${PRIVATE_GW} || true

Apply network changes:

# Restarting networking over SSH can drop your session — prefer console
# access, or ifdown/ifup per interface, when working remotely
systemctl restart networking

RKE2 Installation

Create Configuration Directory

# The config directory must exist before the rke2 service first starts
mkdir -p /etc/rancher/rke2

Configure RKE2 (Server Node)

nano /etc/rancher/rke2/config.yaml
# High Availability setup - first server
# token is the shared cluster-join secret — use a strong random value
# (32+ characters) and keep it out of version control
token: ${SECURE_TOKEN}
# node-ip: address used for intra-cluster traffic (private VLAN)
node-ip: ${PRIVATE_IP}
# node-external-ip: address advertised for access from outside the cluster
node-external-ip: ${PUBLIC_IP}
advertise-address: ${PRIVATE_IP}

# TLS SANs
# Every name/IP clients will use to reach the API server must be listed,
# or certificate validation will fail
tls-san:
  - "rke2.${DOMAIN}"
  - "${LOAD_BALANCER_IP}"
  - "${PRIVATE_IP}"
  - "${PUBLIC_IP}"

# Networking
# Pod and service CIDRs must not overlap each other or any routed network
cluster-cidr: "10.42.0.0/16"
service-cidr: "10.43.0.0/16"
cluster-dns: "10.43.0.10"

# Use nftables mode for kube-proxy
kube-proxy-arg:
  - "proxy-mode=nftables"
  - "conntrack-max-per-core=250000"
  - "conntrack-tcp-timeout-established=86400"

# Kubelet configuration
kubelet-arg:
  - "max-pods=250"
  - "serialize-image-pulls=false"
  - "kube-api-qps=50"
  - "kube-api-burst=100"

# CNI plugin (Canal = Flannel overlay + Calico network policy)
cni:
  - canal

# Disable components (optional)
# disable:
#   - rke2-ingress-nginx

# etcd configuration
etcd-arg:
  - "quota-backend-bytes=8589934592"  # 8GB
  - "auto-compaction-retention=1h"

# Enable required features
# selinux: false  # Enable on RHEL-based systems

Configure RKE2 (Additional Server Nodes)

# server: join URL of the first server's supervisor port (9345, not the
# Kubernetes API port 6443); token must match the first server's token
server: https://${FIRST_SERVER_IP}:9345
token: ${SECURE_TOKEN}
node-ip: ${PRIVATE_IP}
node-external-ip: ${PUBLIC_IP}
advertise-address: ${PRIVATE_IP}

# Must include every name clients use to reach this API server
tls-san:
  - "rke2.${DOMAIN}"
  - "${LOAD_BALANCER_IP}"

# nftables kube-proxy
# Keep these identical on every node so proxying behaves uniformly
kube-proxy-arg:
  - "proxy-mode=nftables"
  - "conntrack-max-per-core=250000"
  - "conntrack-tcp-timeout-established=86400"

kubelet-arg:
  - "max-pods=250"
  - "serialize-image-pulls=false"
  - "kube-api-qps=50"
  - "kube-api-burst=100"

cni:
  - canal

Configure RKE2 (Agent Nodes)

# Agents join through the load balancer so any healthy server can answer;
# no tls-san or proxy/etcd settings are needed on agent nodes
server: https://${LOAD_BALANCER_IP}:9345
token: ${SECURE_TOKEN}
node-ip: ${PRIVATE_IP}
node-external-ip: ${PUBLIC_IP}

kubelet-arg:
  - "max-pods=250"
  - "serialize-image-pulls=false"

Configure Private Registry

nano /etc/rancher/rke2/registries.yaml
# docker.io pulls are redirected to the private registry.
# NOTE(review): this assumes registry.${DOMAIN} is configured as a
# pull-through mirror for Docker Hub — confirm before relying on it.
mirrors:
  docker.io:
    endpoint:
      - "https://registry.${DOMAIN}"
  registry.${DOMAIN}:
    endpoint:
      - "https://registry.${DOMAIN}"

configs:
  "registry.${DOMAIN}":
    auth:
      # Credentials are stored in plain text — restrict file permissions
      username: ${REGISTRY_USER}
      password: ${REGISTRY_PASSWD}
    tls:
      insecure_skip_verify: false  # Use true only for testing

Install RKE2

# Set version (optional, omit for latest stable)
# export INSTALL_RKE2_VERSION=v1.31.4+rke2r1

# NOTE: piping a remote script into sh executes it as root — download and
# review the script first if your security policy requires it
curl -sfL https://get.rke2.io | sh -

# Server node
# The first start can take several minutes while images are pulled
systemctl enable rke2-server.service
systemctl start rke2-server.service

# Agent node (use rke2-agent instead)
# systemctl enable rke2-agent.service
# systemctl start rke2-agent.service

Setup kubectl Access

mkdir -p ~/.kube
# Copy rather than symlink: 'chmod 600' on a symlink follows it and would
# change the permissions of /etc/rancher/rke2/rke2.yaml itself, and a
# symlink is unusable for non-root users who cannot read the root-owned
# original. (Re-copy after certificate rotation.)
cp /etc/rancher/rke2/rke2.yaml ~/.kube/config
chmod 600 ~/.kube/config

# Add the RKE2 binaries (kubectl, crictl, ...) to PATH
echo 'export PATH=$PATH:/var/lib/rancher/rke2/bin' >> ~/.bashrc
source ~/.bashrc

Post-Installation Configuration

Verify Cluster Status

# All nodes should report Ready; pods in kube-system should be Running
kubectl get nodes
kubectl get pods -A

Verify nftables Mode

# Check kube-proxy mode
# NOTE(review): RKE2 runs kube-proxy as a static pod — confirm the
# k8s-app=kube-proxy label matches the pods on your RKE2 version
kubectl logs -n kube-system -l k8s-app=kube-proxy | grep "Using nftables"

# View nftables rules created by kube-proxy
nft list ruleset | grep -A 20 "chain KUBE"

Configure CoreDNS with autopath

# This edits the live ConfigMap; the change is lost if the rke2-coredns
# Helm chart is redeployed — use a HelmChartConfig override for a
# persistent customization
kubectl edit configmap -n kube-system rke2-coredns-rke2-coredns

Add autopath plugin to the Corefile:

# autopath short-circuits the ndots search-path lookups for pods; it
# requires "pods verified" in the kubernetes block (set below)
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        autopath @kubernetes
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods verified
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        prometheus :9153
        forward . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }

Restart CoreDNS:

kubectl rollout restart deployment -n kube-system rke2-coredns-rke2-coredns

Optional: Install Metrics Server

# Enables 'kubectl top' and HPA resource metrics
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml

Optional: Configure Storage Class

If using Longhorn or other CSI:

# Longhorn example
# NOTE(review): 'master' tracks unreleased code — pin a tagged release
# for production deployments
kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/longhorn.yaml

nftables Management

View Current Rules

# List all rules
nft list ruleset

# List specific table
nft list table ip filter

# List kube-proxy chains
nft list ruleset | grep -A 50 KUBE

Basic nftables Commands

# Flush all rules — DANGER on a cluster node: this also deletes the
# chains kube-proxy programmed, breaking Service traffic until it resyncs
nft flush ruleset

# Save current ruleset
# NOTE: the snapshot includes kube-proxy's dynamic rules — review before
# treating it as a boot-time baseline
nft list ruleset > /etc/nftables.conf

# Reload rules
nft -f /etc/nftables.conf

Monitor Connections

# View connection tracking
# NOTE(review): needs the 'conntrack' CLI, which is not in the package
# list installed earlier — 'apt install conntrack' if missing
conntrack -L

# Count connections
conntrack -C

# Monitor new connections (streams events until interrupted)
conntrack -E

Maintenance

Upgrade RKE2

# Upgrading re-runs the installer pinned to an explicit version; upgrade
# server nodes one at a time before agents
curl -sfL https://get.rke2.io | INSTALL_RKE2_VERSION=v1.31.4+rke2r1 sh -
systemctl restart rke2-server  # or rke2-agent

Backup etcd

# Snapshot name gets a timestamp so repeated backups don't collide
rke2 etcd-snapshot save --name backup-$(date +%Y%m%d-%H%M%S)

Monitor Logs

# Follow service logs live (Ctrl-C to stop)
journalctl -u rke2-server -f  # or rke2-agent

# Check kube-proxy logs
kubectl logs -n kube-system -l k8s-app=kube-proxy --tail=100

Troubleshooting nftables

Check nftables Service

# nftables.service only applies the static /etc/nftables.conf ruleset;
# kube-proxy manages its own tables independently of this unit's state
systemctl status nftables

Verify Kernel Modules

lsmod | grep nf_tables

Debug kube-proxy

# Get kube-proxy pod (replace <kube-proxy-pod> below with its name)
kubectl get pods -n kube-system -l k8s-app=kube-proxy

# Check logs
kubectl logs -n kube-system <kube-proxy-pod> | grep -i error

# Verify nftables mode is active
kubectl logs -n kube-system <kube-proxy-pod> | grep "nftables"

Common Issues

# If services are not reachable, check:
# 1. nftables rules
nft list ruleset | grep -i kube

# 2. Connection tracking
conntrack -L | grep <service-ip>

# 3. CNI status
kubectl get pods -n kube-system -l k8s-app=canal

Security Hardening

  • Use strong tokens (minimum 32 characters)
  • Enable Pod Security Standards
  • Configure Network Policies
  • Enable audit logging
  • Regular security updates via unattended-upgrades
  • Restrict API access with RBAC
  • Use private registry with TLS
  • Monitor nftables rules regularly

Key Changes from IPVS Version

  1. Replaced IPVS with nftables - modern packet filtering
  2. Removed ipvsadm and ipset packages
  3. Updated kube-proxy configuration to use proxy-mode=nftables
  4. Removed IPVS kernel modules from modules-load
  5. Added nftables-specific sysctl parameters
  6. Included nftables monitoring and troubleshooting
  7. Updated connection tracking parameters for nftables
  8. Added nftables verification steps

Benefits of nftables over IPVS

  • Unified framework: Single interface for packet filtering and NAT
  • Better performance than iptables mode: more efficient rule evaluation (IPVS can still be faster at very large service counts)
  • Modern architecture: Native replacement for iptables
  • Simplified rules: Cleaner syntax and management
  • Future-proof: Actively developed and positioned to succeed the iptables mode in Kubernetes (not yet the default proxy mode)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment