Skip to content

Instantly share code, notes, and snippets.

@r3xakead0
Created October 9, 2025 20:34
Show Gist options
  • Select an option

  • Save r3xakead0/d348c6ae29082c6e540fefbd92e401da to your computer and use it in GitHub Desktop.

Select an option

Save r3xakead0/d348c6ae29082c6e540fefbd92e401da to your computer and use it in GitHub Desktop.
Create a low-cost EC2 Spot instance that runs your stress tests and is automatically terminated when it finishes. The stress test generates multiple requests against a URL, monitors the results, and saves them to files that are uploaded to S3.
#!/usr/bin/env bash
set -euo pipefail
# Launches a cheap EC2 Spot instance that runs an ApacheBench HTTP stress
# test against TARGET_URL, uploads the results to S3, and terminates itself.
# Every parameter below can be overridden via environment variables thanks
# to the ${VAR:-default} pattern (e.g. TARGET_URL=... REGION=... ./script).
############# EDITABLE PARAMETERS #############
REGION="${REGION:-us-east-1}" # change if you want another region
INSTANCE_TYPE="${INSTANCE_TYPE:-t3.small}" # cheap and sufficient for HTTP load generation
SPOT_MAX_PRICE="${SPOT_MAX_PRICE:-}" # empty = capped at the on-demand price (recommended)
SECURITY_GROUP_NAME="${SECURITY_GROUP_NAME:-spot-http-stress-sg}"
IAM_ROLE_NAME="${IAM_ROLE_NAME:-EC2SpotStressS3Role}"
INSTANCE_PROFILE_NAME="${INSTANCE_PROFILE_NAME:-EC2SpotStressS3Profile}"
POLICY_NAME="${POLICY_NAME:-EC2SpotStressS3Policy}"
S3_BUCKET="${S3_BUCKET:-}" # if empty, one is created: spot-http-stress-<account>-<region>
KEY_NAME="${KEY_NAME:-}" # optional (for SSH access). Empty = no key pair
# Test parameters
TARGET_URL="${TARGET_URL:-https://example.com/}" # <-- pass via env var or edit here
DURATION_SECONDS="${DURATION_SECONDS:-60}" # approximate total duration in seconds
TOTAL_REQUESTS="${TOTAL_REQUESTS:-10000}" # total number of requests to send
CONCURRENCY="${CONCURRENCY:-200}" # concurrency level for 'ab'
RESULTS_PREFIX="${RESULTS_PREFIX:-results}" # key prefix inside the S3 bucket
#################################################
echo "[*] Región: $REGION"
aws configure set region "$REGION" >/dev/null
# 1) Resolve the latest Amazon Linux 2023 AMI via the public SSM parameter.
echo "[*] Obteniendo AMI Amazon Linux 2023..."
AMI_ID="$(aws ssm get-parameters \
  --names /aws/service/ami-amazon-linux-latest/al2023-ami-kernel-6.1-x86_64 \
  --query 'Parameters[0].Value' --output text)"
echo " AMI: $AMI_ID"
# 2) Current AWS account id (used for bucket and policy naming).
ACCOUNT_ID="$(aws sts get-caller-identity --query Account --output text)"
# 3) Results bucket: derive a default name when none was supplied, then
#    create it only if it does not exist yet.
[[ -n "$S3_BUCKET" ]] || S3_BUCKET="spot-http-stress-${ACCOUNT_ID}-${REGION}"
if ! aws s3 ls "s3://${S3_BUCKET}" >/dev/null 2>&1; then
  echo "[*] Creando bucket S3: s3://${S3_BUCKET}"
  # us-east-1 rejects an explicit LocationConstraint, so it gets none.
  if [[ "$REGION" == "us-east-1" ]]; then
    aws s3api create-bucket --bucket "$S3_BUCKET" >/dev/null
  else
    aws s3api create-bucket --bucket "$S3_BUCKET" \
      --create-bucket-configuration LocationConstraint="$REGION" >/dev/null
  fi
fi
# 4) IAM role + instance profile (each resource is created only if missing).
# Trust policy: let EC2 instances assume the role.
TRUST_DOC="$(cat <<'JSON'
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": { "Service": "ec2.amazonaws.com" },
      "Action": "sts:AssumeRole"
    }
  ]
}
JSON
)"
echo "[*] Asegurando IAM Role ${IAM_ROLE_NAME}..."
if ! aws iam get-role --role-name "$IAM_ROLE_NAME" >/dev/null 2>&1; then
  aws iam create-role --role-name "$IAM_ROLE_NAME" --assume-role-policy-document "$TRUST_DOC" >/dev/null
fi
# Least-privilege policy: the instance may only upload results under
# s3://bucket/prefix/* (plus the multipart/list actions 'aws s3 cp' needs).
POLICY_DOC="$(cat <<JSON
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "AllowPutResults",
      "Effect": "Allow",
      "Action": [
        "s3:PutObject",
        "s3:AbortMultipartUpload",
        "s3:ListBucketMultipartUploads",
        "s3:ListBucket"
      ],
      "Resource": [
        "arn:aws:s3:::${S3_BUCKET}",
        "arn:aws:s3:::${S3_BUCKET}/${RESULTS_PREFIX}/*"
      ]
    }
  ]
}
JSON
)"
echo "[*] Asegurando IAM Policy ${POLICY_NAME}..."
# Create the managed policy, or refresh it by publishing a new default version.
POLICY_ARN=""
if aws iam get-policy --policy-arn "arn:aws:iam::${ACCOUNT_ID}:policy/${POLICY_NAME}" >/dev/null 2>&1; then
  POLICY_ARN="arn:aws:iam::${ACCOUNT_ID}:policy/${POLICY_NAME}"
  # A managed policy holds at most 5 versions; prune the non-default ones
  # first, otherwise the 6th run dies with LimitExceeded under 'set -e'.
  for VERSION_ID in $(aws iam list-policy-versions --policy-arn "$POLICY_ARN" \
      --query 'Versions[?IsDefaultVersion==`false`].VersionId' --output text); do
    aws iam delete-policy-version --policy-arn "$POLICY_ARN" --version-id "$VERSION_ID" >/dev/null
  done
  aws iam create-policy-version --policy-arn "$POLICY_ARN" --policy-document "$POLICY_DOC" --set-as-default >/dev/null
else
  POLICY_ARN="$(aws iam create-policy --policy-name "$POLICY_NAME" --policy-document "$POLICY_DOC" --query Policy.Arn --output text)"
fi
# Attach the policy to the role only if it is not attached yet.
if ! aws iam list-attached-role-policies --role-name "$IAM_ROLE_NAME" \
    --query "AttachedPolicies[?PolicyArn=='${POLICY_ARN}'] | length(@)" \
    --output text | grep -q "^1$"; then
  aws iam attach-role-policy --role-name "$IAM_ROLE_NAME" --policy-arn "$POLICY_ARN" >/dev/null
fi
echo "[*] Asegurando Instance Profile ${INSTANCE_PROFILE_NAME}..."
if ! aws iam get-instance-profile --instance-profile-name "$INSTANCE_PROFILE_NAME" >/dev/null 2>&1; then
  aws iam create-instance-profile --instance-profile-name "$INSTANCE_PROFILE_NAME" >/dev/null
fi
# Add the role to the instance profile only if it is not in it yet.
if ! aws iam get-instance-profile --instance-profile-name "$INSTANCE_PROFILE_NAME" \
    --query "InstanceProfile.Roles[?RoleName=='${IAM_ROLE_NAME}'] | length(@)" \
    --output text | grep -q "^1$"; then
  aws iam add-role-to-instance-profile --instance-profile-name "$INSTANCE_PROFILE_NAME" --role-name "$IAM_ROLE_NAME" >/dev/null
  # IAM is eventually consistent; give the profile a moment to propagate
  # before run-instances references it by name.
  echo "[*] Esperando propagación de Instance Profile..."
  sleep 10
fi
# 5) Security Group con salida a Internet
VPC_ID="$(aws ec2 describe-vpcs --query 'Vpcs[0].VpcId' --output text)"
echo "[*] Asegurando Security Group ${SECURITY_GROUP_NAME} en VPC ${VPC_ID}..."
SG_ID="$(aws ec2 describe-security-groups --filters "Name=group-name,Values=${SECURITY_GROUP_NAME}" "Name=vpc-id,Values=${VPC_ID}" --query 'SecurityGroups[0].GroupId' --output text 2>/dev/null || true)"
if [[ -z "$SG_ID" || "$SG_ID" == "None" ]]; then
SG_ID="$(aws ec2 create-security-group --group-name "$SECURITY_GROUP_NAME" --description "SG for Spot HTTP stress" --vpc-id "$VPC_ID" --query GroupId --output text)"
# egress all
aws ec2 authorize-security-group-egress --group-id "$SG_ID" --ip-permissions IpProtocol=-1,IpRanges="[{CidrIp=0.0.0.0/0}]" >/dev/null
fi
# 6) User data: installs ab, runs the test, uploads results to S3, then shuts
#    down (termination happens via --instance-initiated-shutdown-behavior).
# NOTE: the here-doc delimiter is deliberately unquoted: ${TARGET_URL},
# ${S3_BUCKET}, etc. are expanded NOW by the outer shell, while \$VAR
# references are left for the instance-side script.
# 'base64 | tr -d "\n"' is a portable stand-in for GNU-only 'base64 -w0'
# (macOS/BSD base64 has no -w flag).
USER_DATA="$(base64 <<EOF | tr -d '\n'
#!/bin/bash
set -euo pipefail
# Always power off, even if the test fails: with shutdown-behavior=terminate
# this guarantees the instance is destroyed and the launcher's wait returns.
trap 'shutdown -h now' EXIT
dnf -y update
dnf -y install httpd-tools jq
mkdir -p /var/log/http-stress
cd /var/log/http-stress
# AL2023 enforces IMDSv2: fetch a session token first; a plain (IMDSv1) GET
# of the metadata endpoint returns 401 and would poison INSTANCE_ID.
IMDS_TOKEN=\$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 300")
INSTANCE_ID=\$(curl -s -H "X-aws-ec2-metadata-token: \$IMDS_TOKEN" http://169.254.169.254/latest/meta-data/instance-id)
TS=\$(date -u +%Y%m%dT%H%M%SZ)
TARGET_URL="${TARGET_URL}"
TOTAL_REQUESTS="${TOTAL_REQUESTS}"
CONCURRENCY="${CONCURRENCY}"
DURATION_SECONDS="${DURATION_SECONDS}"
OUT_TXT="ab_\${TS}_\${INSTANCE_ID}.txt"
OUT_CSV="ab_\${TS}_\${INSTANCE_ID}.csv"
META_JSON="meta_\${TS}_\${INSTANCE_ID}.json"
echo "== Meta ==" | tee -a "\$OUT_TXT"
echo "instance_id=\$INSTANCE_ID" | tee -a "\$OUT_TXT"
echo "target_url=\$TARGET_URL" | tee -a "\$OUT_TXT"
echo "total_requests=\$TOTAL_REQUESTS" | tee -a "\$OUT_TXT"
echo "concurrency=\$CONCURRENCY" | tee -a "\$OUT_TXT"
echo "duration_seconds=\$DURATION_SECONDS" | tee -a "\$OUT_TXT"
# ApacheBench takes -n (total requests) and -c (concurrency); it is not
# time-based, so 'timeout' caps the wall-clock duration instead.
echo "== ab run ==" | tee -a "\$OUT_TXT"
timeout "\${DURATION_SECONDS}s" ab -n "\$TOTAL_REQUESTS" -c "\$CONCURRENCY" "\$TARGET_URL" | tee -a "\$OUT_TXT" || true
# Extract key metrics into a one-row CSV. '|| true' keeps the script alive
# (and the upload running) when ab was killed before printing its summary,
# which would otherwise make grep fail under 'set -e'.
REQS_PER_SEC=\$(grep -E 'Requests per second' "\$OUT_TXT" | awk '{print \$4}' || true)
TIME_PER_REQ_MS=\$(grep -E 'Time per request:' "\$OUT_TXT" | head -n1 | awk '{print \$4}' || true)
TRANSFER_RATE_KB=\$(grep -E 'Transfer rate:' "\$OUT_TXT" | awk '{print \$3}' || true)
echo "timestamp,instance_id,target_url,total_requests,concurrency,duration_seconds,requests_per_sec,time_per_request_ms,transfer_rate_kb" > "\$OUT_CSV"
echo "\$TS,\$INSTANCE_ID,\$TARGET_URL,\$TOTAL_REQUESTS,\$CONCURRENCY,\$DURATION_SECONDS,\$REQS_PER_SEC,\$TIME_PER_REQ_MS,\$TRANSFER_RATE_KB" >> "\$OUT_CSV"
cat > "\$META_JSON" <<META
{
"timestamp": "\$TS",
"instance_id": "\$INSTANCE_ID",
"target_url": "\$TARGET_URL",
"total_requests": "\$TOTAL_REQUESTS",
"concurrency": "\$CONCURRENCY",
"duration_seconds": "\$DURATION_SECONDS"
}
META
# Upload results (the instance role only allows PutObject under this prefix).
aws s3 cp "\$OUT_TXT" "s3://${S3_BUCKET}/${RESULTS_PREFIX}/\$INSTANCE_ID/\$OUT_TXT"
aws s3 cp "\$OUT_CSV" "s3://${S3_BUCKET}/${RESULTS_PREFIX}/\$INSTANCE_ID/\$OUT_CSV"
aws s3 cp "\$META_JSON" "s3://${S3_BUCKET}/${RESULTS_PREFIX}/\$INSTANCE_ID/\$META_JSON"
# Normal exit: the EXIT trap powers the instance off, terminating it.
EOF
)"
# 7) Launch the Spot instance.
echo "[*] Lanzando instancia Spot..."
SPOT_ARGS="MarketType=spot,SpotOptions={SpotInstanceType=one-time"
if [[ -n "$SPOT_MAX_PRICE" ]]; then
  SPOT_ARGS="${SPOT_ARGS},MaxPrice=${SPOT_MAX_PRICE}"
fi
SPOT_ARGS="${SPOT_ARGS},InstanceInterruptionBehavior=terminate}"
RUN_ARGS=(
  --image-id "$AMI_ID"
  --instance-type "$INSTANCE_TYPE"
  --iam-instance-profile "Name=${INSTANCE_PROFILE_NAME}"
  --instance-initiated-shutdown-behavior terminate
  --security-group-ids "$SG_ID"
  --tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=spot-http-stress},{Key=Purpose,Value=http-benchmark}]"
  --user-data "$USER_DATA"
  --instance-market-options "$SPOT_ARGS"
)
# Optional SSH key pair.
if [[ -n "$KEY_NAME" ]]; then
  RUN_ARGS+=( --key-name "$KEY_NAME" )
fi
INSTANCE_ID="$(aws ec2 run-instances "${RUN_ARGS[@]}" --query 'Instances[0].InstanceId' --output text)"
echo "[*] Instancia iniciada: $INSTANCE_ID"
# 8) Wait for self-termination. A plain 'aws ec2 wait instance-terminated'
# gives up after ~10 minutes (40 polls x 15 s) and would abort the script
# under 'set -e' for longer tests, so poll the instance state ourselves.
echo "[*] Esperando a que la instancia se termine sola al concluir la prueba..."
while true; do
  STATE="$(aws ec2 describe-instances --instance-ids "$INSTANCE_ID" \
    --query 'Reservations[0].Instances[0].State.Name' --output text 2>/dev/null || echo "unknown")"
  if [[ "$STATE" == "terminated" ]]; then
    break
  fi
  sleep 15
done
echo "[*] Instancia terminada."
# 9) List the uploaded results.
echo "[*] Objetos en s3://${S3_BUCKET}/${RESULTS_PREFIX}/${INSTANCE_ID}/"
aws s3 ls "s3://${S3_BUCKET}/${RESULTS_PREFIX}/${INSTANCE_ID}/"
# 10) Optionally download results locally (set DOWNLOAD_RESULTS=true).
if [[ "${DOWNLOAD_RESULTS:-false}" == "true" ]]; then
  mkdir -p ./results/"${INSTANCE_ID}"
  aws s3 sync "s3://${S3_BUCKET}/${RESULTS_PREFIX}/${INSTANCE_ID}/" "./results/${INSTANCE_ID}/"
  echo "[*] Resultados descargados en ./results/${INSTANCE_ID}/"
fi
echo "[✓] Prueba completada. Bucket: s3://${S3_BUCKET}/${RESULTS_PREFIX}/${INSTANCE_ID}/"
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment