First we need to wrap the aws CLI with an STS AssumeRole call. Put this file anywhere in your $PATH, e.g.:

~/.local/bin/sts-aws
#!/usr/bin/env bash
set -euo pipefail
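# sts-aws: drop-in replacement for the aws CLI used by kubectl's exec credential plugin.
# It assumes an IAM role via STS, caches the temporary credentials per account/role,
# and then execs the real aws command with those credentials exported.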
aws-sts() {
  # Check the argument count before touching $1/$2, otherwise `set -u` aborts first
  if [ "$#" -ne 2 ]; then
    echo "usage: aws-sts <account_id> <role_name>" >&2
    return 1
  fi
  local account=$1
  local role_name=$2
  local sts_token
  # Clear any inherited temporary credentials so the base profile is used for AssumeRole
  sts_token=$(AWS_ACCESS_KEY_ID= AWS_SECRET_ACCESS_KEY= AWS_SESSION_TOKEN= \
    aws sts assume-role \
      --role-arn "arn:aws:iam::${account}:role/${role_name}" \
      --role-session-name "${role_name}-${account}" \
      --output json)
  export AWS_ACCESS_KEY_ID=$(echo "$sts_token" | jq -r .Credentials.AccessKeyId)
  export AWS_SECRET_ACCESS_KEY=$(echo "$sts_token" | jq -r .Credentials.SecretAccessKey)
  export AWS_SESSION_TOKEN=$(echo "$sts_token" | jq -r .Credentials.SessionToken)
  export AWS_DEFAULT_REGION=eu-central-1
}
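# Main flow: validate inputs, reuse cached credentials while still valid,
# otherwise assume the role and cache the result, then run the real aws CLI.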
CACHE_DIR="$HOME/.cache/aws-sts"
mkdir -p "$CACHE_DIR"

# Use ${VAR:-} so the check works under `set -u` when the variables are unset
if [ -z "${AWS_STS_ACCOUNT:-}" ] || [ -z "${AWS_STS_ROLE:-}" ]; then
  echo "AWS_STS_ACCOUNT and AWS_STS_ROLE environment variables must be set" >&2
  exit 2
fi
# Create a unique cache file name based on account and role
CACHE_FILE="$CACHE_DIR/${AWS_STS_ACCOUNT}_${AWS_STS_ROLE}.env"

# Check if we have valid cached credentials
CACHE_VALID=0
if [ -f "$CACHE_FILE" ]; then
  # Get the expiration timestamp from the cache file (empty if the line is missing;
  # `|| true` keeps a failed grep from aborting the script under `set -e`/pipefail)
  EXPIRATION=$(grep "^EXPIRATION=" "$CACHE_FILE" | cut -d= -f2 || true)
  NOW=$(date +%s)
  # Check if credentials are still valid (with a 15-minute buffer)
  if [ -n "$EXPIRATION" ] && [ "$EXPIRATION" -gt $((NOW + 900)) ]; then
    CACHE_VALID=1
  fi
fi
if [ "$CACHE_VALID" -eq 1 ]; then
# Source the cached environment variables
source "$CACHE_FILE"
else
# Get fresh credentials
aws-sts "$AWS_STS_ACCOUNT" "$AWS_STS_ROLE"
# Save the credentials to cache
{
echo "export AWS_ACCESS_KEY_ID=\"$AWS_ACCESS_KEY_ID\""
echo "export AWS_SECRET_ACCESS_KEY=\"$AWS_SECRET_ACCESS_KEY\""
echo "export AWS_SESSION_TOKEN=\"$AWS_SESSION_TOKEN\""
# Add expiration timestamp (1 hour validity)
echo "EXPIRATION=$(($(date +%s) + 3600))"
} > "$CACHE_FILE"
fi
# Finally run the real aws CLI with whatever arguments kubectl passed us
exec aws "$@"
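Make the script executable (assuming the ~/.local/bin/sts-aws path from above):

chmod +x ~/.local/bin/sts-aws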
Then assume the role into the destination account and run:

aws eks update-kubeconfig --region eu-central-1 --name cluster-name --alias local-kubectl-cluster-alias

Then edit your ~/.kube/config.
Replace:

command: aws

with:

command: sts-aws

and add the environment variables to the same exec block:
env:
  - name: AWS_STS_ACCOUNT
    value: "123456789012"
  - name: AWS_STS_ROLE
    value: AwsRoleNameThatYouAssume
  - name: AWS_STS_REGIONAL_ENDPOINTS
    value: regional
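For orientation, the edited user entry in ~/.kube/config ends up looking roughly like the sketch below. The user name, region, cluster name and the exact args list are whatever aws eks update-kubeconfig generated for you (the user name is typically the cluster ARN, and the apiVersion can differ between aws CLI versions); only command is changed and env added by hand:

users:
- name: arn:aws:eks:eu-central-1:123456789012:cluster/cluster-name
  user:
    exec:
      apiVersion: client.authentication.k8s.io/v1beta1
      command: sts-aws
      args:
        - --region
        - eu-central-1
        - eks
        - get-token
        - --cluster-name
        - cluster-name
      env:
        - name: AWS_STS_ACCOUNT
          value: "123456789012"
        - name: AWS_STS_ROLE
          value: AwsRoleNameThatYouAssume
        - name: AWS_STS_REGIONAL_ENDPOINTS
          value: regional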
Now you can switch clusters with kubectx (or any other way) without having to export AWS STS credentials into your environment first.
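A quick way to verify the setup, using the context alias created above:

kubectl config use-context local-kubectl-cluster-alias
kubectl get nodes

The first call goes through sts-aws, assumes the role and caches the temporary credentials under ~/.cache/aws-sts; subsequent calls reuse the cache until it is within 15 minutes of expiry.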