Last active
May 30, 2025 14:54
-
-
Save jjtroberts/bfa1566db73a449515d5f9d0fc4ae9f8 to your computer and use it in GitHub Desktop.
Bash script that analyzes Kubernetes cluster node pools to identify empty candidates for cost optimization.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| #!/bin/bash | |
| # | |
| # k8s-nodepool-analyzer.sh | |
| # | |
| # Description: Analyzes Kubernetes cluster node pools to identify empty ones for cost optimization. | |
| # Creates a hierarchical tree view showing NodePool -> Node -> Namespace -> Pod relationships. | |
| # Highlights empty nodepools that are candidates for removal to reduce cloud spending. | |
| # Filters out system namespaces (datadog, gmp-system, istio-system, kube-system, opentelemetry). | |
| # | |
| # Usage: ./k8s-nodepool-analyzer.sh [KUBE_CONTEXT] | |
| # | |
| # Arguments: | |
| # KUBE_CONTEXT (optional) - Kubernetes context name to analyze. If not provided, uses current context. | |
| # | |
| # Examples: | |
| # ./k8s-nodepool-analyzer.sh # Use current context | |
| # ./k8s-nodepool-analyzer.sh gke_prod_cluster # Use specific context | |
| # ./k8s-nodepool-analyzer.sh staging-cluster # Use staging context | |
| # | |
| # Output: | |
| # - Empty nodepools section (for cost optimization) | |
| # - Full cluster tree view | |
| # - Cost optimization summary with recommendations | |
| # | |
| # Requirements: | |
| # - kubectl configured with appropriate cluster access | |
| # - jq (JSON processor) | |
| # - Standard Unix tools (cut, grep, sort, awk) | |
| # | |
# Resolve which nodepool/nodegroup a node belongs to, across cloud providers.
# Arguments: $1 - node name
# Outputs:   pool name on stdout; "unknown-pool" when no known label exists
# Returns:   0 always (lookup failures degrade to "unknown-pool")
get_nodepool_name() {
    local node=$1
    local labels nodepool
    # Single API round-trip instead of up to three: emit the GKE, AKS and
    # EKS pool labels on separate lines (missing labels yield empty lines).
    # Declaration and assignment are split so the kubectl exit status is
    # not masked by `local` (SC2155).
    labels=$(kubectl get node "$node" -o jsonpath='{.metadata.labels.cloud\.google\.com/gke-nodepool}{"\n"}{.metadata.labels.agentpool}{"\n"}{.metadata.labels.eks\.amazonaws\.com/nodegroup}' 2>/dev/null)
    # First non-empty line wins, preserving the original GKE > AKS > EKS priority.
    nodepool=$(printf '%s\n' "$labels" | awk 'NF { print; exit }')
    echo "${nodepool:-unknown-pool}"
}
| echo "Fetching cluster information..." | |
| # Create temporary files to store our data | |
| temp_dir=$(mktemp -d) | |
| nodepool_file="$temp_dir/nodepools" | |
| pods_file="$temp_dir/pods" | |
| # Get all nodes and their nodepools | |
| kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | while read -r node; do | |
| if [ -n "$node" ]; then | |
| nodepool=$(get_nodepool_name "$node") | |
| echo "$nodepool|$node" >> "$nodepool_file" | |
| fi | |
| done | |
| # Define namespaces to ignore | |
| ignored_namespaces="datadog gmp-system istio-system kube-system opentelemetry" | |
| # Function to check if namespace should be ignored | |
| is_ignored_namespace() { | |
| local ns=$1 | |
| for ignored in $ignored_namespaces; do | |
| if [ "$ns" = "$ignored" ]; then | |
| return 0 # true - should be ignored | |
| fi | |
| done | |
| return 1 # false - should not be ignored | |
| } | |
| # Get all pods with their node assignments and organize the data | |
| kubectl get pods --all-namespaces -o json | jq -r '.items[] | "\(.spec.nodeName // "unscheduled")|\(.metadata.namespace)|\(.metadata.name)"' | \ | |
| while IFS='|' read -r node_name namespace pod_name; do | |
| if [ -n "$node_name" ] && [ "$node_name" != "unscheduled" ] && ! is_ignored_namespace "$namespace"; then | |
| echo "$node_name|$namespace|$pod_name" >> "$pods_file" | |
| fi | |
| done | |
| # First, identify empty nodepools for highlighting | |
| empty_nodepools="" | |
| temp_empty_file="$temp_dir/empty_nodepools" | |
| if [ -f "$nodepool_file" ]; then | |
| cut -d'|' -f1 "$nodepool_file" | sort -u | while read -r nodepool; do | |
| has_pods=false | |
| # Check if any node in this nodepool has pods | |
| grep "^$nodepool|" "$nodepool_file" | cut -d'|' -f2 | while read -r node; do | |
| if [ -f "$pods_file" ] && grep -q "^$node|" "$pods_file"; then | |
| echo "has_pods" > "$temp_dir/nodepool_$nodepool" | |
| exit 0 | |
| fi | |
| done | |
| # If no pods found, this nodepool is empty | |
| if [ ! -f "$temp_dir/nodepool_$nodepool" ]; then | |
| echo "$nodepool" >> "$temp_empty_file" | |
| fi | |
| done | |
| fi | |
| # Display empty nodepools first (highlighted) | |
| echo | |
| echo "π¨ EMPTY NODEPOOLS - CANDIDATES FOR REMOVAL π¨" | |
| echo "===============================================" | |
| if [ -f "$temp_empty_file" ] && [ -s "$temp_empty_file" ]; then | |
| while read -r empty_nodepool; do | |
| echo "π΄ NodePool: $empty_nodepool (NO PODS - SAFE TO REMOVE)" | |
| # Show the nodes in this empty nodepool | |
| grep "^$empty_nodepool|" "$nodepool_file" | cut -d'|' -f2 | sort | while read -r node; do | |
| echo " βββ π₯οΈ Node: $node (empty)" | |
| done | |
| echo | |
| done < "$temp_empty_file" | |
| else | |
| echo "β No empty nodepools found - all nodepools have scheduled pods" | |
| echo | |
| fi | |
| echo "================================================" | |
| echo | |
| # Display the full tree structure | |
| echo "π FULL CLUSTER NODE-POD TREE:" | |
| echo "===============================" | |
| # Process and display the data | |
| if [ -f "$nodepool_file" ]; then | |
| # Get unique nodepools and sort them | |
| cut -d'|' -f1 "$nodepool_file" | sort -u | while read -r nodepool; do | |
| # Check if this is an empty nodepool | |
| is_empty="" | |
| if [ -f "$temp_empty_file" ] && grep -q "^$nodepool$" "$temp_empty_file"; then | |
| is_empty=" π΄ (EMPTY - CANDIDATE FOR REMOVAL)" | |
| fi | |
| echo "π¦ NodePool: $nodepool$is_empty" | |
| # Get nodes in this nodepool | |
| grep "^$nodepool|" "$nodepool_file" | cut -d'|' -f2 | sort | while read -r node; do | |
| echo " βββ π₯οΈ Node: $node" | |
| if [ -f "$pods_file" ]; then | |
| # Get namespaces for this node | |
| grep "^$node|" "$pods_file" | cut -d'|' -f2 | sort -u | while read -r namespace; do | |
| echo " βββ π Namespace: $namespace" | |
| # Get pods in this namespace on this node | |
| grep "^$node|$namespace|" "$pods_file" | cut -d'|' -f3 | sort | while read -r pod; do | |
| echo " βββ π³ Pod: $pod" | |
| done | |
| done | |
| # Check if node has no pods | |
| if ! grep -q "^$node|" "$pods_file"; then | |
| echo " βββ (no pods scheduled)" | |
| fi | |
| else | |
| echo " βββ (no pods scheduled)" | |
| fi | |
| echo | |
| done | |
| done | |
| fi | |
| # Summary | |
| if [ -n "$KUBE_CONTEXT" ]; then | |
| total_nodes=$(kubectl --context="$KUBE_CONTEXT" get nodes --no-headers | wc -l) | |
| total_pods=$(kubectl --context="$KUBE_CONTEXT" get pods --all-namespaces --no-headers | wc -l) | |
| else | |
| total_nodes=$(kubectl get nodes --no-headers | wc -l) | |
| total_pods=$(kubectl get pods --all-namespaces --no-headers | wc -l) | |
| fi | |
| total_nodepools=0 | |
| empty_nodepool_count=0 | |
| if [ -f "$nodepool_file" ]; then | |
| total_nodepools=$(cut -d'|' -f1 "$nodepool_file" | sort -u | wc -l) | |
| fi | |
| if [ -f "$temp_empty_file" ]; then | |
| empty_nodepool_count=$(wc -l < "$temp_empty_file" 2>/dev/null || echo 0) | |
| fi | |
| echo "π° COST OPTIMIZATION SUMMARY:" | |
| echo "=============================" | |
| echo "Cluster Context: ${KUBE_CONTEXT:-$(kubectl config current-context 2>/dev/null || echo 'unknown')}" | |
| echo "Total NodePools: $total_nodepools" | |
| echo "Empty NodePools: $empty_nodepool_count" | |
| echo "NodePools with Pods: $((total_nodepools - empty_nodepool_count))" | |
| echo "Total Nodes: $total_nodes" | |
| echo "Total Pods (filtered): $total_pods" | |
| echo | |
| if [ "$empty_nodepool_count" -gt 0 ]; then | |
| echo "π‘ RECOMMENDATION: Consider removing the $empty_nodepool_count empty nodepool(s) above to reduce cloud costs!" | |
| else | |
| echo "β All nodepools are actively being used." | |
| fi | |
| # Cleanup | |
| rm -rf "$temp_dir" |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment