Yes! You can build middleware that intercepts large responses and handles them automatically. Here's a clean approach:
// ResponseSizeMiddleware.cs
//
// Buffers every response in memory and, when the payload exceeds
// MaxResponseSize, either uploads it to S3 and answers with a presigned URL
// (303 See Other) or answers 413 asking the client to paginate — selected by
// the ?useS3 / ?paginate query flags. Note: buffering the full body in a
// MemoryStream trades memory for simplicity; keep MaxResponseSize modest.
public class ResponseSizeMiddleware
{
    private readonly RequestDelegate _next;
    private readonly IAmazonS3 _s3Client;
    private readonly ILogger<ResponseSizeMiddleware> _logger;

    private const int MaxResponseSize = 1_000_000; // 1MB in bytes
    private const string UseS3QueryParam = "useS3";
    private const string PaginateQueryParam = "paginate";
    // Single source of truth for the bucket (was duplicated inline).
    // TODO: move to configuration (see "ResponseHandling" in appsettings.json).
    private const string BucketName = "my-response-bucket";
    private const int PresignedUrlExpiryMinutes = 5;

    public ResponseSizeMiddleware(
        RequestDelegate next,
        IAmazonS3 s3Client,
        ILogger<ResponseSizeMiddleware> logger)
    {
        _next = next;
        _s3Client = s3Client;
        _logger = logger;
    }

    /// <summary>
    /// Swaps the response body for a buffer, runs the rest of the pipeline,
    /// then forwards, redirects, or rejects based on the buffered size.
    /// </summary>
    public async Task InvokeAsync(HttpContext context)
    {
        var originalBodyStream = context.Response.Body;
        using var responseBody = new MemoryStream();
        context.Response.Body = responseBody;

        try
        {
            await _next(context);

            responseBody.Seek(0, SeekOrigin.Begin);
            var responseSize = responseBody.Length;

            if (responseSize > MaxResponseSize)
            {
                _logger.LogWarning("Response size {Size} exceeds limit", responseSize);

                // ?useS3 forces the S3 path; ?paginate (without useS3) asks for
                // the 413 hint; with neither flag we default to the S3 redirect.
                var useS3 = context.Request.Query.ContainsKey(UseS3QueryParam);
                var paginate = context.Request.Query.ContainsKey(PaginateQueryParam);

                if (useS3 || !paginate)
                {
                    await HandleS3Redirect(context, responseBody, originalBodyStream);
                }
                else
                {
                    await HandlePaginationWarning(context, originalBodyStream);
                }
            }
            else
            {
                // Response within the limit: forward the buffered bytes unchanged.
                await responseBody.CopyToAsync(originalBodyStream);
            }
        }
        finally
        {
            // Always restore the real stream — even when _next (or one of the
            // handlers above) throws — so downstream middleware and the server
            // see a usable response body instead of our disposed buffer.
            context.Response.Body = originalBodyStream;
        }
    }

    /// <summary>
    /// Uploads the buffered response to S3 and replies 303 with a Location
    /// header pointing at a short-lived presigned URL. The body also carries
    /// the URL as JSON, so non-browser clients can read it without following
    /// the redirect.
    /// </summary>
    private async Task HandleS3Redirect(
        HttpContext context,
        MemoryStream responseBody,
        Stream originalBodyStream)
    {
        var key = $"responses/{Guid.NewGuid()}.json";

        responseBody.Seek(0, SeekOrigin.Begin);
        await _s3Client.PutObjectAsync(new PutObjectRequest
        {
            BucketName = BucketName,
            Key = key,
            InputStream = responseBody,
            ContentType = context.Response.ContentType ?? "application/json"
        });

        // Short expiry: the client is expected to fetch immediately.
        var presignedUrl = _s3Client.GetPreSignedURL(new GetPreSignedUrlRequest
        {
            BucketName = BucketName,
            Key = key,
            Expires = DateTime.UtcNow.AddMinutes(PresignedUrlExpiryMinutes)
        });

        context.Response.Clear();
        context.Response.StatusCode = 303; // See Other
        context.Response.Headers["Location"] = presignedUrl;
        context.Response.ContentType = "application/json";

        var redirectResponse = new
        {
            message = "Response too large, redirecting to S3",
            url = presignedUrl,
            expiresIn = $"{PresignedUrlExpiryMinutes} minutes"
        };
        await JsonSerializer.SerializeAsync(originalBodyStream, redirectResponse);
    }

    /// <summary>
    /// Replies 413 (Payload Too Large) with an RFC 7807 problem document that
    /// tells the caller how to paginate or opt into the S3 path.
    /// </summary>
    private async Task HandlePaginationWarning(
        HttpContext context,
        Stream originalBodyStream)
    {
        context.Response.Clear();
        context.Response.StatusCode = 413; // Payload Too Large

        var errorResponse = new ProblemDetails
        {
            Status = 413,
            Title = "Response Too Large",
            Detail = "The response exceeds 1MB. Please add pagination parameters (page, pageSize) or use ?useS3=true to receive a presigned URL.",
            Extensions =
            {
                ["suggestedActions"] = new[]
                {
                    "Add ?page=1&pageSize=100 to paginate results",
                    "Add ?useS3=true to receive response via S3 presigned URL"
                }
            }
        };

        context.Response.ContentType = "application/problem+json";
        await JsonSerializer.SerializeAsync(originalBodyStream, errorResponse);
    }
}
// Program.cs
// Register the AWS SDK's IAmazonS3 so the middleware's constructor can resolve it.
builder.Services.AddAWSService<IAmazonS3>();
var app = builder.Build();
// Add before routing
app.UseMiddleware<ResponseSizeMiddleware>();
app.MapGet("/users", async (UserService svc) =>
{
// Your normal endpoint logic - middleware handles size automatically
var users = await svc.GetAllUsersAsync();
return Results.Ok(users);
});# Normal request - if < 1MB, returns data
GET /users

# If > 1MB, returns 413 with a pagination suggestion

# Request with S3 redirect
GET /users?useS3=true
# Returns 303 redirect to an S3 presigned URL

# Request with pagination (you implement this in the endpoint)
GET /users?page=1&pageSize=100

// More granular control per endpoint
/// <summary>
/// Endpoint metadata declaring which strategy the response-size middleware
/// should apply when this endpoint's payload exceeds the configured limit.
/// </summary>
[AttributeUsage(AttributeTargets.Method)]
public class LargeResponseAttribute : Attribute
{
    /// <summary>Strategy to apply; defaults to the S3 presigned-URL redirect.</summary>
    public LargeResponseStrategy Strategy { get; set; } = LargeResponseStrategy.S3Redirect;
}
/// <summary>How the middleware should react to an oversized response.</summary>
public enum LargeResponseStrategy
{
    /// <summary>Upload the payload to S3 and return a presigned URL.</summary>
    S3Redirect = 0,

    /// <summary>Reject with 413 and ask the caller to paginate.</summary>
    RequirePagination = 1,

    /// <summary>Skip the middleware's size check entirely.</summary>
    AllowLarge = 2
}
// Endpoint that opts into the S3 redirect strategy via endpoint metadata.
app.MapGet("/users", async (UserService service) =>
    {
        var users = await service.GetAllUsersAsync();
        return Results.Ok(users);
    })
    .WithMetadata(new LargeResponseAttribute { Strategy = LargeResponseStrategy.S3Redirect });

Add compression before the size check:
// Program.cs
// Compress first; many "too large" payloads shrink under the 1MB limit.
builder.Services.AddResponseCompression(compression =>
{
    // Allow compression over HTTPS as well (off unless explicitly enabled).
    compression.EnableForHttps = true;
    compression.Providers.Add<GzipCompressionProvider>();
});

var app = builder.Build();

app.UseResponseCompression();                // Try compression first
app.UseMiddleware<ResponseSizeMiddleware>(); // Then check size

// appsettings.json
{
  "ResponseHandling": {
    "MaxSizeBytes": 1000000,
    "S3Bucket": "my-response-bucket",
    "PresignedUrlExpiryMinutes": 5
  }
}

This gives you a clean, reusable solution where:
- Endpoints don't need to know about size limits
- Clients can opt into S3 delivery with ?useS3=true
- Oversized responses automatically return a 413 error with a helpful pagination message
- Behavior is easy to configure per endpoint with attributes

Would you like me to add automatic pagination support to the middleware as well?