Skip to content

Instantly share code, notes, and snippets.

@kfrancis
Created March 2, 2026 14:49
Show Gist options
  • Select an option

  • Save kfrancis/0064c960832458254dcca9591948a103 to your computer and use it in GitHub Desktop.

Select an option

Save kfrancis/0064c960832458254dcca9591948a103 to your computer and use it in GitHub Desktop.
Set of classes used to work around the disappearing-permissions issue.
using System.Runtime.Caching;
using System.Threading;
using Microsoft.Extensions.Options;
namespace CabMD.Caching;
/// <summary>
/// Configuration for the L1 in-memory cache layer that sits in front of
/// the distributed (Redis) cache. Bind to <c>Caching:L1</c> in appsettings.
/// </summary>
public sealed class L1CacheOptions
{
    /// <summary>
    /// Master switch. When <c>false</c>, the decorator is bypassed and all
    /// calls pass straight through to the underlying distributed cache.
    /// Can be toggled at runtime via <see cref="IOptionsMonitor{TOptions}" />.
    /// NOTE(review): the toggle is only observed where the decorator factory
    /// reads <c>CurrentValue</c> — for singleton lifetimes that happens once,
    /// at first resolution; confirm whether true hot-toggling is required.
    /// </summary>
    public bool Enabled { get; set; } = true;

    /// <summary>
    /// Default max time (seconds) an item may live in L1 when the distributed
    /// entry's TTL is longer or unknown (e.g., populated on a cache-miss Get).
    /// </summary>
    public int DefaultExpirationSeconds { get; set; } = 30;

    /// <summary>
    /// When <c>true</c>, Redis failures are swallowed and logged; the caller
    /// receives <c>null</c> (for Gets) or a silent no-op (for Sets/Removes).
    /// When <c>false</c>, exceptions propagate to the caller.
    /// NOTE(review): the name reads inverted — <c>true</c> is fail-SOFT
    /// (swallow), not fail-fast. Renaming would break the config-binding key
    /// and existing appsettings, so flagging instead of fixing; consider a
    /// rename (e.g. <c>SwallowRedisErrors</c>) in the next breaking release.
    /// </summary>
    public bool FailFastOnRedisError { get; set; } = true;

    /// <summary>
    /// Size limit (bytes) for the dedicated L1 <see cref="MemoryCache" />.
    /// This cache is private to the decorator — it does not share the
    /// application's <see cref="Microsoft.Extensions.Caching.Memory.IMemoryCache" /> registration.
    /// The byte unit holds because every entry is stored with Size = value.Length.
    /// </summary>
    public long MemorySizeLimitBytes { get; set; } = 100L * 1024 * 1024; // 100 MB

    /// <summary>
    /// When <c>true</c>, concurrent L1 misses for the same key are collapsed
    /// into a single Redis round-trip (per-key <see cref="SemaphoreSlim" />).
    /// </summary>
    public bool EnableSingleFlight { get; set; } = true;

    /// <summary>
    /// Upper bound of per-key semaphores kept for single-flight gating.
    /// Each semaphore is ~100 bytes; 10,000 entries ≈ 1 MB.
    /// Once the cap is reached, new keys share a fallback gate.
    /// </summary>
    public int MaxKeyLocks { get; set; } = 10_000;

    /// <summary>
    /// When <c>true</c>, guards against caching suspicious ABP application-configuration
    /// payloads where an authenticated user has too few granted policies.
    /// This prevents poisoned permission snapshots from persisting in Redis/L1.
    /// </summary>
    public bool ProtectAgainstPoisonedAppConfigPayloads { get; set; } = true;

    /// <summary>
    /// Minimum expected <c>GrantedPolicies</c> count for authenticated users in
    /// application-configuration payloads. Values below this threshold are treated as suspicious.
    /// Setting this to 0 or a negative value disables the check entirely.
    /// </summary>
    public int MinGrantedPoliciesForAuthenticatedUser { get; set; } = 1;
}
using System;
using System.Linq;
using Microsoft.Extensions.Caching.Distributed;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
namespace CabMD.Caching;
public static class L1CacheServiceCollectionExtensions
{
    /// <summary>
    /// Wraps the existing <see cref="IDistributedCache" /> registration with an in-memory L1 layer.
    /// Call AFTER Redis (or any distributed cache) is registered.
    /// </summary>
    /// <param name="services">The service collection whose last <see cref="IDistributedCache" /> registration is decorated. Cannot be null.</param>
    /// <param name="configuration">Application configuration; options are bound from <c>Caching:L1</c>. Cannot be null.</param>
    /// <returns>The same <paramref name="services" /> instance, for chaining.</returns>
    public static IServiceCollection AddL1CacheDecorator(
        this IServiceCollection services, IConfiguration configuration)
    {
        ArgumentNullException.ThrowIfNull(services);
        ArgumentNullException.ThrowIfNull(configuration);

        // Look the section up once and reuse it for both the options binding
        // and the startup "Enabled" probe (previously fetched twice).
        var section = configuration.GetSection("Caching:L1");
        services.Configure<L1CacheOptions>(section);

        // Early exit if explicitly disabled at startup.
        if (section.Exists() && section.GetValue<bool?>("Enabled") == false)
        {
            return services;
        }

        // Find the last IDistributedCache registration — the one the container
        // would actually resolve.
        var descriptor = services.LastOrDefault(d => d.ServiceType == typeof(IDistributedCache));
        if (descriptor is null)
        {
            return services; // nothing to decorate
        }

        // Remove the original and re-home it under InnerDistributedCacheHolder
        // so the decorator can resolve it without circular resolution.
        services.Remove(descriptor);
        services.Add(CreateInnerHolderDescriptor(descriptor));

        // Register the decorator with the SAME lifetime as the original.
        // NOTE(review): the Enabled check below runs at resolution time only;
        // for a singleton lifetime a runtime toggle after first resolution has
        // no effect — confirm whether hot-toggling is actually needed.
        services.Add(new ServiceDescriptor(
            typeof(IDistributedCache),
            static sp =>
            {
                var holder = sp.GetRequiredService<InnerDistributedCacheHolder>();
                var options = sp.GetRequiredService<IOptionsMonitor<L1CacheOptions>>();
                if (!options.CurrentValue.Enabled)
                {
                    // Bypass: hand back the undecorated inner cache.
                    return holder.Inner;
                }

                var logger = sp.GetRequiredService<ILogger<L1DistributedCacheDecorator>>();
                return new L1DistributedCacheDecorator(holder.Inner, options, logger);
            },
            descriptor.Lifetime));

        return services;
    }

    /// <summary>
    /// Creates a <see cref="ServiceDescriptor" /> for <see cref="InnerDistributedCacheHolder" />
    /// that wraps whatever the original registration was (factory, instance, or type).
    /// </summary>
    /// <param name="original">The removed <see cref="IDistributedCache" /> descriptor being re-homed.</param>
    /// <returns>A descriptor that produces the holder with the original lifetime (or singleton for a pre-built instance).</returns>
    /// <exception cref="InvalidOperationException">Thrown when the descriptor carries none of factory, instance, or implementation type.</exception>
    private static ServiceDescriptor CreateInnerHolderDescriptor(ServiceDescriptor original)
    {
        if (original.ImplementationFactory is not null)
        {
            var factory = original.ImplementationFactory;
            return new ServiceDescriptor(
                typeof(InnerDistributedCacheHolder),
                sp => new InnerDistributedCacheHolder((IDistributedCache)factory(sp)),
                original.Lifetime);
        }

        if (original.ImplementationInstance is not null)
        {
            // A pre-built instance is inherently singleton; the instance-based
            // ServiceDescriptor constructor takes no lifetime argument.
            return new ServiceDescriptor(
                typeof(InnerDistributedCacheHolder),
                new InnerDistributedCacheHolder((IDistributedCache)original.ImplementationInstance));
        }

        if (original.ImplementationType is not null)
        {
            var implType = original.ImplementationType;
            return new ServiceDescriptor(
                typeof(InnerDistributedCacheHolder),
                sp =>
                {
                    var inner = (IDistributedCache)ActivatorUtilities.CreateInstance(sp, implType);
                    return new InnerDistributedCacheHolder(inner);
                },
                original.Lifetime);
        }

        throw new InvalidOperationException(
            $"Cannot decorate IDistributedCache: unsupported ServiceDescriptor for {original}.");
    }
}
/// <summary>
/// Holds a reference to the "real" (inner) distributed cache so the decorator
/// can resolve it without circular DI resolution.
/// <para>
/// Implements <see cref="IDisposable" /> so the DI container disposes the inner
/// cache (e.g., <c>RedisCache</c>) during shutdown. Without this, the inner
/// instance — created via <see cref="ActivatorUtilities" /> or a captured factory —
/// is invisible to the container's disposal tracking and would leak.
/// </para>
/// <para>
/// When <see cref="L1CacheOptions.Enabled" /> is <c>false</c>, the factory returns
/// <see cref="Inner" /> directly as <c>IDistributedCache</c>, so the container may
/// also track it independently — resulting in a double-dispose. This is safe:
/// <c>RedisCache</c> (and any well-written <c>IDisposable</c>) guards against it
/// with a <c>_disposed</c> flag.
/// </para>
/// </summary>
internal sealed class InnerDistributedCacheHolder : IDisposable
{
    /// <summary>
    /// Initializes the holder around the real distributed cache.
    /// </summary>
    /// <param name="inner">The undecorated distributed cache. Cannot be null.</param>
    /// <exception cref="ArgumentNullException">Thrown when <paramref name="inner" /> is null.</exception>
    public InnerDistributedCacheHolder(IDistributedCache inner)
    {
        // Throw helper (.NET 6+) — same ArgumentNullException, same parameter
        // name via CallerArgumentExpression, less ceremony than the ?? throw form.
        ArgumentNullException.ThrowIfNull(inner);
        Inner = inner;
    }

    /// <summary>The wrapped "real" distributed cache instance.</summary>
    public IDistributedCache Inner { get; }

    /// <summary>Disposes the inner cache when it is disposable (e.g., <c>RedisCache</c>).</summary>
    public void Dispose()
    {
        (Inner as IDisposable)?.Dispose();
    }
}
using System;
using System.Collections.Concurrent;
using System.Diagnostics.CodeAnalysis;
using System.Text.Json;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Caching.Distributed;
using Microsoft.Extensions.Caching.Memory;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
namespace CabMD.Caching
{
/// <summary>
/// Decorates <see cref="IDistributedCache" /> with a dedicated in-memory L1 layer.
/// <para>
/// ABP's <c>DistributedCache&lt;T&gt;</c> wraps <c>IDistributedCache</c> and holds a
/// global <c>SemaphoreSlim(1,1)</c> during Redis I/O. When Redis is slow or down,
/// this creates a convoy that starves all permission/settings/feature cache lookups.
/// By serving hits from L1, the semaphore is held for microseconds instead of seconds.
/// </para>
/// <para>
/// Key design decisions:
/// <list type="bullet">
/// <item>
/// Private <see cref="MemoryCache" /> instance — isolated from ABP's IMemoryCache, enforces own
/// SizeLimit.
/// </item>
/// <item>Defensive byte[] cloning — callers cannot corrupt cached buffers.</item>
/// <item>Per-key single-flight — prevents Redis stampedes on cold keys.</item>
/// <item>Separate sync/async paths — sync callers never block on async primitives.</item>
/// <item>Best-effort L1 Refresh — re-inserts with original metadata to restart sliding window.</item>
/// </list>
/// </para>
/// </summary>
// 'partial' is required: the [LoggerMessage] methods at the bottom of the class
// are implemented by the compile-time logging source generator.
public sealed partial class L1DistributedCacheDecorator : IDistributedCache, IDisposable
{
    private readonly IDistributedCache _inner;

    /// <summary>
    /// Per-key gates for single-flight stampede prevention.
    /// Entries are long-lived (bounded by <see cref="L1CacheOptions.MaxKeyLocks" />).
    /// Each SemaphoreSlim is ~100 bytes, so 10k entries ≈ 1 MB.
    /// </summary>
    private readonly ConcurrentDictionary<string, SemaphoreSlim> _keyLocks = new(StringComparer.Ordinal);

    private readonly MemoryCache _l1; // Dedicated instance — NOT shared via DI
    private readonly ILogger<L1DistributedCacheDecorator> _logger;
    private readonly IOptionsMonitor<L1CacheOptions> _options;
    private bool _disposed;
    // Approximate count of entries in _keyLocks; see GetKeyGate for why it may overcount.
    private int _keyLocksCount;

    /// <summary>
    /// Initializes a new instance of the L1DistributedCacheDecorator class, which wraps an existing distributed cache
    /// with an additional in-memory level 1 (L1) cache for improved performance.
    /// </summary>
    /// <remarks>
    /// The L1 cache uses a private MemoryCache instance that is completely isolated from ABP's
    /// IMemoryCache. The memory size limit for the L1 cache is specified in bytes, and each cache entry's size is
    /// determined by the length of its value.
    /// </remarks>
    /// <param name="inner">The distributed cache instance to be decorated. This parameter cannot be null.</param>
    /// <param name="options">
    /// The options monitor that provides configuration settings for the L1 cache. This parameter cannot
    /// be null.
    /// </param>
    /// <param name="logger">
    /// The logger used to record operational events and errors within the decorator. This parameter
    /// cannot be null.
    /// </param>
    /// <exception cref="ArgumentNullException">Thrown if the inner, options, or logger parameter is null.</exception>
    public L1DistributedCacheDecorator(
        IDistributedCache inner,
        IOptionsMonitor<L1CacheOptions> options,
        ILogger<L1DistributedCacheDecorator> logger)
    {
        _inner = inner ?? throw new ArgumentNullException(nameof(inner));
        _options = options ?? throw new ArgumentNullException(nameof(options));
        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
        // Private MemoryCache — completely isolated from ABP's IMemoryCache.
        // SizeLimit is in bytes because every entry sets Size = value.Length.
        // NOTE(review): the limit is read from CurrentValue once here; later
        // changes to MemorySizeLimitBytes via IOptionsMonitor are not applied.
        _l1 = new MemoryCache(new MemoryCacheOptions { SizeLimit = _options.CurrentValue.MemorySizeLimitBytes });
    }

    /// <summary>
    /// Disposes the private L1 cache and all per-key single-flight semaphores.
    /// </summary>
    /// <remarks>
    /// NOTE(review): this disposes semaphores that in-flight Get/GetAsync callers
    /// may still hold or be waiting on; disposing a SemaphoreSlim with active
    /// waiters is not safe. Confirm the decorator is only disposed after request
    /// traffic has stopped (container shutdown), where this is a non-issue.
    /// The _disposed flag is a plain bool, so this guard is not thread-safe either —
    /// acceptable for container-driven single disposal.
    /// </remarks>
    public void Dispose()
    {
        if (_disposed)
        {
            return;
        }

        _disposed = true;
        _l1.Dispose();
        // Dispose all semaphores
        foreach (var kvp in _keyLocks)
        {
            kvp.Value.Dispose();
        }

        _keyLocks.Clear();
    }

    /// <summary>
    /// Retrieves the cached byte array associated with the specified key, or populates the cache if the key is not
    /// present.
    /// </summary>
    /// <remarks>
    /// If single-flight is enabled, this method ensures that only one thread populates the cache for
    /// a given key at a time, providing thread safety for concurrent access. Otherwise, the cache is accessed directly
    /// without synchronization.
    /// </remarks>
    /// <param name="key">The key used to identify the cached byte array. Cannot be null or empty.</param>
    /// <returns>
    /// A byte array containing the cached data associated with the specified key, or null if the key does not exist in
    /// the cache.
    /// </returns>
    public byte[]? Get(string key)
    {
        if (TryGetL1(key, out var cached))
        {
            LogL1Hit(key);
            return cached;
        }

        var opt = _options.CurrentValue;
        if (!opt.EnableSingleFlight)
        {
            return GetFromInnerAndPopulateL1Sync(key);
        }

        var gate = GetKeyGate(key, opt.MaxKeyLocks);
        gate.Wait(); // Synchronous wait — no thread pool starvation risk
        try
        {
            // Double-check after acquiring the gate
            if (!TryGetL1(key, out cached))
            {
                return GetFromInnerAndPopulateL1Sync(key);
            }

            LogL1Hit(key);
            return cached;
        }
        finally
        {
            gate.Release();
        }
    }

    /// <summary>
    /// Asynchronously retrieves the value associated with the specified key, using a local cache to improve
    /// performance.
    /// </summary>
    /// <remarks>
    /// If the value is present in the local (L1) cache, it is returned immediately. Otherwise, the
    /// method retrieves the value from the underlying source and updates the cache. When single flight is enabled,
    /// concurrent requests for the same key are synchronized to prevent redundant fetches.
    /// </remarks>
    /// <param name="key">The key that identifies the value to retrieve. This parameter cannot be null or empty.</param>
    /// <param name="token">A cancellation token that can be used to cancel the asynchronous operation.</param>
    /// <returns>A byte array containing the value associated with the specified key, or null if the key does not exist.</returns>
    public async Task<byte[]?> GetAsync(string key, CancellationToken token = default)
    {
        if (TryGetL1(key, out var cached))
        {
            LogL1Hit(key);
            return cached;
        }

        var opt = _options.CurrentValue;
        if (!opt.EnableSingleFlight)
        {
            return await GetFromInnerAndPopulateL1Async(key, token).ConfigureAwait(false);
        }

        var gate = GetKeyGate(key, opt.MaxKeyLocks);
        await gate.WaitAsync(token).ConfigureAwait(false);
        try
        {
            if (!TryGetL1(key, out cached))
            {
                return await GetFromInnerAndPopulateL1Async(key, token).ConfigureAwait(false);
            }

            LogL1Hit(key);
            return cached;
        }
        finally
        {
            gate.Release();
        }
    }

    /// <summary>
    /// Sets a value in the distributed cache for the specified key, using the provided cache entry options.
    /// </summary>
    /// <remarks>
    /// If the specified key and value are determined to be suspicious, the method will evict the
    /// entry and log the rejection. If a Redis error occurs and the FailFastOnRedisError option is enabled, the error
    /// is logged.
    /// <para>
    /// NOTE(review): L1 is written before Redis. When FailFastOnRedisError is
    /// <c>false</c> and the Redis write throws, the exception propagates AFTER
    /// L1 already holds the new value — this node serves the new value until the
    /// L1 entry expires while Redis (and other nodes) still have the old one.
    /// </para>
    /// </remarks>
    /// <param name="key">The unique key that identifies the cached entry. This value cannot be null or empty.</param>
    /// <param name="value">The byte array representing the value to store in the cache. This value must not be null.</param>
    /// <param name="options">
    /// The options that configure expiration and other settings for the cache entry. This parameter must
    /// not be null.
    /// </param>
    public void Set(string key, byte[] value, DistributedCacheEntryOptions options)
    {
        if (IsSuspiciousApplicationConfigurationPayload(key, value, out var userId, out var userName))
        {
            EvictPoisonedPayloadSync(key);
            LogPoisonedPayloadRejected(key, userId, userName);
            return;
        }

        SetL1(key, value, options);
        try
        {
            _inner.Set(key, value, options);
        }
        // NB: despite the name, a TRUE FailFastOnRedisError swallows the failure
        // (fail-soft); false lets it propagate. Same pattern throughout this class.
        catch (Exception ex) when (_options.CurrentValue.FailFastOnRedisError)
        {
            LogRedisSetFailure(key, ex);
        }
    }

    /// <summary>
    /// Asynchronously sets a value in the distributed cache using the specified key and cache entry options.
    /// </summary>
    /// <remarks>
    /// If the provided key and value are determined to be suspicious, the method will evict the
    /// payload and log the rejection. If a Redis error occurs and the fail-fast option is enabled, the failure will be
    /// logged. See <see cref="Set" /> for the L1-before-Redis write-ordering caveat.
    /// </remarks>
    /// <param name="key">The unique identifier for the cache entry. This value cannot be null or empty.</param>
    /// <param name="value">The value to be stored in the cache as a byte array. This value cannot be null.</param>
    /// <param name="options">The options that control the behavior of the cache entry, such as expiration settings.</param>
    /// <param name="token">A cancellation token that can be used to cancel the asynchronous operation.</param>
    /// <returns>A task that represents the asynchronous set operation.</returns>
    public async Task SetAsync(
        string key, byte[] value, DistributedCacheEntryOptions options,
        CancellationToken token = default)
    {
        if (IsSuspiciousApplicationConfigurationPayload(key, value, out var userId, out var userName))
        {
            await EvictPoisonedPayloadAsync(key, token).ConfigureAwait(false);
            LogPoisonedPayloadRejected(key, userId, userName);
            return;
        }

        SetL1(key, value, options);
        try
        {
            await _inner.SetAsync(key, value, options, token).ConfigureAwait(false);
        }
        catch (Exception ex) when (_options.CurrentValue.FailFastOnRedisError)
        {
            LogRedisSetFailure(key, ex);
        }
    }

    /// <summary>
    /// Removes the value with the specified key from both the primary and distributed cache layers.
    /// </summary>
    /// <remarks>
    /// If removal from the distributed cache fails and the FailFastOnRedisError option is enabled, a
    /// failure is logged. The method does not throw in this case.
    /// </remarks>
    /// <param name="key">The key of the element to remove from the cache. This parameter cannot be null.</param>
    public void Remove(string key)
    {
        _l1.Remove(key);
        try
        {
            _inner.Remove(key);
        }
        catch (Exception ex) when (_options.CurrentValue.FailFastOnRedisError)
        {
            LogRedisRemoveFailure(key, ex);
        }
    }

    /// <summary>
    /// Asynchronously removes the item with the specified key from both the local and distributed caches.
    /// </summary>
    /// <remarks>
    /// If the distributed cache removal fails and the FailFastOnRedisError option is enabled, the
    /// failure is logged but not rethrown. The item is always removed from the local cache regardless of distributed
    /// cache errors.
    /// </remarks>
    /// <param name="key">The unique key that identifies the item to remove from the cache.</param>
    /// <param name="token">A cancellation token that can be used to cancel the remove operation.</param>
    /// <returns>A task that represents the asynchronous remove operation.</returns>
    public async Task RemoveAsync(string key, CancellationToken token = default)
    {
        _l1.Remove(key);
        try
        {
            await _inner.RemoveAsync(key, token).ConfigureAwait(false);
        }
        catch (Exception ex) when (_options.CurrentValue.FailFastOnRedisError)
        {
            LogRedisRemoveFailure(key, ex);
        }
    }

    /// <summary>
    /// Refreshes the cache entry associated with the specified key, attempting to update it from the underlying data
    /// source.
    /// </summary>
    /// <remarks>
    /// If the refresh operation on the underlying data source fails and the FailFastOnRedisError
    /// option is enabled, the failure is logged for further analysis.
    /// </remarks>
    /// <param name="key">The key that identifies the cache entry to refresh. This value must not be null or empty.</param>
    public void Refresh(string key)
    {
        // REFRESH — best-effort L1 sliding reset
        RefreshL1(key);
        try
        {
            _inner.Refresh(key);
        }
        catch (Exception ex) when (_options.CurrentValue.FailFastOnRedisError)
        {
            LogRedisRefreshFailure(key, ex);
        }
    }

    /// <summary>
    /// Asynchronously refreshes the cached data associated with the specified key in both the local and distributed
    /// cache layers.
    /// </summary>
    /// <remarks>
    /// If the distributed cache refresh fails and the FailFastOnRedisError option is enabled, the
    /// failure is logged but not rethrown. The local cache is always refreshed regardless of distributed cache
    /// errors.
    /// </remarks>
    /// <param name="key">The cache key identifying the data to refresh. This value cannot be null or empty.</param>
    /// <param name="token">
    /// A cancellation token that can be used to cancel the refresh operation. The default value is
    /// <see
    /// cref="CancellationToken.None" />
    /// .
    /// </param>
    /// <returns>A task that represents the asynchronous refresh operation.</returns>
    public async Task RefreshAsync(string key, CancellationToken token = default)
    {
        RefreshL1(key);
        try
        {
            await _inner.RefreshAsync(key, token).ConfigureAwait(false);
        }
        catch (Exception ex) when (_options.CurrentValue.FailFastOnRedisError)
        {
            LogRedisRefreshFailure(key, ex);
        }
    }

    /// <summary>
    /// Retrieves a value associated with the specified key from the inner cache store and populates the Level 1 (L1)
    /// cache with the retrieved value.
    /// </summary>
    /// <remarks>
    /// If the retrieved value is identified as suspicious, it is evicted from the cache and a log
    /// entry is created. If the FailFastOnRedisError option is enabled in the configuration, exceptions encountered
    /// during retrieval are logged and the method returns null.
    /// </remarks>
    /// <param name="key">The key used to locate the value in the inner cache store. This parameter must not be null or empty.</param>
    /// <returns>
    /// A byte array containing a clone of the value associated with the specified key, or null if the key does not
    /// exist or if the value is considered suspicious.
    /// </returns>
    private byte[]? GetFromInnerAndPopulateL1Sync(string key)
    {
        try
        {
            var value = _inner.Get(key);
            if (value is null)
            {
                return null;
            }

            if (IsSuspiciousApplicationConfigurationPayload(key, value, out var userId, out var userName))
            {
                EvictPoisonedPayloadSync(key);
                LogPoisonedPayloadDetected(key, userId, userName);
                return null;
            }

            SetL1WithDefaults(key, value);
            // Clone so the caller cannot mutate the buffer just stored in L1.
            return Clone(value);
        }
        catch (Exception ex) when (_options.CurrentValue.FailFastOnRedisError)
        {
            LogRedisGetFailure(key, ex);
            return null;
        }
    }

    /// <summary>
    /// Retrieves the value associated with the specified key from the inner distributed cache and populates the L1
    /// cache with the result.
    /// </summary>
    /// <remarks>
    /// If the retrieved value is determined to be suspicious, it is evicted from the cache and a log
    /// entry is created. Any failures encountered while retrieving the value from the inner store are logged if the
    /// FailFastOnRedisError option is enabled.
    /// </remarks>
    /// <param name="key">The key used to locate the value in the inner cache. Cannot be null or empty.</param>
    /// <param name="token">A cancellation token that can be used to cancel the asynchronous operation.</param>
    /// <returns>
    /// A byte array containing a clone of the value associated with the specified key, or null if the key does not
    /// exist or if the value is considered suspicious.
    /// </returns>
    private async Task<byte[]?> GetFromInnerAndPopulateL1Async(string key, CancellationToken token)
    {
        try
        {
            var value = await _inner.GetAsync(key, token).ConfigureAwait(false);
            if (value is null)
            {
                return null;
            }

            if (IsSuspiciousApplicationConfigurationPayload(key, value, out var userId, out var userName))
            {
                await EvictPoisonedPayloadAsync(key, token).ConfigureAwait(false);
                LogPoisonedPayloadDetected(key, userId, userName);
                return null;
            }

            SetL1WithDefaults(key, value);
            return Clone(value);
        }
        catch (Exception ex) when (_options.CurrentValue.FailFastOnRedisError)
        {
            LogRedisGetFailure(key, ex);
            return null;
        }
    }

    /// <summary>
    /// Attempts to retrieve the value associated with the specified key from the L1 cache.
    /// </summary>
    /// <remarks>
    /// If the key is not found or the associated value is null, the output parameter 'value' will be
    /// set to null. A <c>true</c> return implies <paramref name="value" /> is non-null;
    /// annotating the out parameter with <c>[NotNullWhen(true)]</c> would let
    /// callers drop their null checks (System.Diagnostics.CodeAnalysis is already imported).
    /// </remarks>
    /// <param name="key">The key used to identify the value in the L1 cache. This parameter cannot be null.</param>
    /// <param name="value">
    /// When this method returns, contains the value associated with the specified key if the key is found; otherwise,
    /// null.
    /// </param>
    /// <returns>true if the value was found and successfully retrieved; otherwise, false.</returns>
    private bool TryGetL1(string key, out byte[]? value)
    {
        if (_l1.TryGetValue(key, out L1Entry entry) && entry.Value is not null)
        {
            // Hand out a copy — the cached buffer must stay immutable.
            value = Clone(entry.Value);
            return true;
        }

        value = null;
        return false;
    }

    /// <summary>
    /// Write to L1 with expiration derived from the distributed cache options.
    /// Clones the value so the caller's buffer is independent.
    /// </summary>
    private void SetL1(string key, byte[] value, DistributedCacheEntryOptions distributed)
    {
        var entry = BuildEntry(value, distributed);
        WriteL1Entry(key, entry);
    }

    /// <summary>
    /// Write to L1 with default expiration (used on Get path where we don't
    /// know the original distributed TTL).
    /// </summary>
    private void SetL1WithDefaults(string key, byte[] value)
    {
        var opt = _options.CurrentValue;
        var entry = new L1Entry(
            Clone(value),
            TimeSpan.FromSeconds(opt.DefaultExpirationSeconds),
            null);
        WriteL1Entry(key, entry);
    }

    /// <summary>
    /// Best-effort Refresh: if the entry is in L1 and was stored with sliding
    /// expiration, evict and re-insert so the sliding window restarts.
    /// <para>
    /// Note: this also resets <c>AbsoluteExpirationRelativeToNow</c> relative to
    /// "now", which is slightly more generous than Redis REFRESH (which only
    /// resets sliding). For short L1 TTLs this is acceptable.
    /// </para>
    /// </summary>
    private void RefreshL1(string key)
    {
        if (_l1.TryGetValue(key, out L1Entry entry) && entry.Value is not null)
        {
            WriteL1Entry(key, entry);
        }
    }

    /// <summary>
    /// Writes an L1 cache entry associated with the specified key.
    /// </summary>
    /// <remarks>
    /// If the entry has specified expiration settings, they will be applied to the cache entry. The
    /// size of the cache entry is determined by the length of the entry's value, keeping the
    /// cache-wide SizeLimit denominated in bytes.
    /// </remarks>
    /// <param name="key">The unique identifier for the cache entry. This key is used to retrieve the entry later.</param>
    /// <param name="entry">The L1Entry object containing the data to be cached, including its value and expiration settings.</param>
    private void WriteL1Entry(string key, L1Entry entry)
    {
        var memOptions = new MemoryCacheEntryOptions { Size = entry.Value.Length };
        if (entry.AbsoluteExpirationRelativeToNow.HasValue)
        {
            memOptions.AbsoluteExpirationRelativeToNow = entry.AbsoluteExpirationRelativeToNow;
        }

        if (entry.SlidingExpiration.HasValue)
        {
            memOptions.SlidingExpiration = entry.SlidingExpiration;
        }

        _l1.Set(key, entry, memOptions);
    }

    /// <summary>
    /// Creates a new L1Entry instance that encapsulates the specified value and applies expiration settings based on
    /// the provided distributed cache entry options.
    /// </summary>
    /// <remarks>
    /// The method determines the effective absolute expiration by selecting the shorter duration
    /// between the default expiration cap and the values specified in the distributed cache entry options. If sliding
    /// expiration is specified, it is also capped to ensure the entry's lifetime does not exceed the default
    /// expiration. This ensures that cache entries remain bounded in duration, even if the underlying distributed cache
    /// allows for longer or indefinite sliding expiration.
    /// </remarks>
    /// <param name="value">The byte array containing the value to be stored in the cache entry.</param>
    /// <param name="distributed">
    /// The distributed cache entry options that define absolute and sliding expiration policies for
    /// the cache entry.
    /// </param>
    /// <returns>An L1Entry object that contains the provided value and the calculated expiration settings.</returns>
    private L1Entry BuildEntry(byte[] value, DistributedCacheEntryOptions distributed)
    {
        var opt = _options.CurrentValue;
        var defaultCap = TimeSpan.FromSeconds(opt.DefaultExpirationSeconds);
        // Resolve the effective absolute expiration — use the tighter of default cap
        // vs whatever the distributed entry specifies.
        TimeSpan absRel;
        if (distributed.AbsoluteExpirationRelativeToNow.HasValue)
        {
            absRel = Min(defaultCap, distributed.AbsoluteExpirationRelativeToNow.Value);
        }
        else if (distributed.AbsoluteExpiration.HasValue)
        {
            // Fixed-point expiration: convert to a relative span and clamp to
            // zero if the entry is already past due.
            var rel = distributed.AbsoluteExpiration.Value - DateTimeOffset.UtcNow;
            if (rel < TimeSpan.Zero)
            {
                rel = TimeSpan.Zero;
            }

            absRel = Min(defaultCap, rel);
        }
        else
        {
            absRel = defaultCap;
        }

        // Sliding expiration: cap it so L1 stays bounded even if Redis slides indefinitely.
        TimeSpan? sliding = null;
        if (distributed.SlidingExpiration.HasValue)
        {
            sliding = Min(defaultCap, distributed.SlidingExpiration.Value);
        }

        return new L1Entry(Clone(value), absRel, sliding);
    }

    /// <summary>
    /// Returns a per-key <see cref="SemaphoreSlim" /> for stampede prevention.
    /// Gates are bounded by <paramref name="maxKeyLocks" />; once the cap is
    /// reached, new keys share a fallback gate (false-sharing, but bounded).
    /// Gates are intentionally never evicted — each is ~100 bytes, so 10k ≈ 1 MB.
    /// </summary>
    private SemaphoreSlim GetKeyGate(string key, int maxKeyLocks)
    {
        if (_keyLocks.TryGetValue(key, out var existing))
        {
            return existing;
        }

        // At capacity — funnel unknown keys through a shared fallback gate.
        // This introduces false-sharing but keeps memory bounded.
        if (Volatile.Read(ref _keyLocksCount) >= maxKeyLocks)
        {
            return _keyLocks.GetOrAdd("__L1_FALLBACK__", static _ => new SemaphoreSlim(1, 1));
        }

        var gate = _keyLocks.GetOrAdd(key, static _ => new SemaphoreSlim(1, 1));
        // Increment only if we actually added (approximate — GetOrAdd factory
        // may be called but lose the race, causing slight overcount. This is
        // acceptable since MaxKeyLocks is an advisory cap, not a hard limit.)
        // NOTE(review): a GetOrAdd that returns an EXISTING gate (race with
        // another thread between TryGetValue and here) also increments, so the
        // count can drift upward over time — still bounded-advisory, but worth
        // knowing when reading metrics off _keyLocksCount.
        Interlocked.Increment(ref _keyLocksCount);
        return gate;
    }

    // ──────────────────────────────────────────────
    // HELPERS
    // ──────────────────────────────────────────────

    // Returns the smaller of two TimeSpans (ties return the first).
    private static TimeSpan Min(TimeSpan a, TimeSpan b)
    {
        return a <= b ? a : b;
    }

    // Defensive copy so cached buffers and caller buffers never alias.
    private static byte[] Clone(byte[] value)
    {
        var copy = new byte[value.Length];
        Buffer.BlockCopy(value, 0, copy, 0, value.Length);
        return copy;
    }

    /// <summary>
    /// Removes the specified key from the local cache and attempts to remove it from the inner storage.
    /// </summary>
    /// <remarks>
    /// If the removal from the inner storage fails and the FailFastOnRedisError option is enabled, a
    /// log entry is created to capture the failure details.
    /// </remarks>
    /// <param name="key">The key of the payload to be evicted from the cache. This key must not be null or empty.</param>
    private void EvictPoisonedPayloadSync(string key)
    {
        _l1.Remove(key);
        try
        {
            _inner.Remove(key);
        }
        catch (Exception ex) when (_options.CurrentValue.FailFastOnRedisError)
        {
            LogRedisRemoveFailure(key, ex);
        }
    }

    /// <summary>
    /// Removes the specified payload from both the local cache and the underlying distributed cache asynchronously.
    /// </summary>
    /// <remarks>
    /// If removal from the underlying distributed cache fails and the FailFastOnRedisError option is
    /// enabled, the failure is logged for diagnostic purposes.
    /// </remarks>
    /// <param name="key">The unique identifier of the payload to remove from the caches.</param>
    /// <param name="token">A cancellation token that can be used to cancel the operation.</param>
    /// <returns>A task that represents the asynchronous eviction.</returns>
    private async Task EvictPoisonedPayloadAsync(string key, CancellationToken token)
    {
        _l1.Remove(key);
        try
        {
            await _inner.RemoveAsync(key, token).ConfigureAwait(false);
        }
        catch (Exception ex) when (_options.CurrentValue.FailFastOnRedisError)
        {
            LogRedisRemoveFailure(key, ex);
        }
    }

    /// <summary>
    /// Determines whether an application-configuration payload looks poisoned: an
    /// authenticated user whose <c>auth.grantedPolicies</c> object has fewer entries
    /// than <see cref="L1CacheOptions.MinGrantedPoliciesForAuthenticatedUser" />.
    /// </summary>
    /// <remarks>
    /// The check is skipped (returns <c>false</c>) when protection is disabled, the
    /// threshold is non-positive, the payload is empty, or the key does not look like
    /// an application-configuration key. Malformed JSON or an unexpected shape also
    /// returns <c>false</c> — only a well-formed payload with an authenticated user
    /// and too few policies is flagged.
    /// </remarks>
    /// <param name="key">
    /// The cache key associated with the payload; used to restrict the check to
    /// application-configuration entries.
    /// </param>
    /// <param name="payload">A byte array containing the cached payload, expected to be JSON.</param>
    /// <param name="userId">
    /// When the method returns <c>true</c>, contains the user ID extracted from the payload;
    /// otherwise, set to "unknown".
    /// </param>
    /// <param name="userName">
    /// When the method returns <c>true</c>, contains the username extracted from the payload;
    /// otherwise, set to "unknown".
    /// </param>
    /// <returns>true if the payload is suspicious (authenticated user with too few granted policies); otherwise, false.</returns>
    private bool IsSuspiciousApplicationConfigurationPayload(
        string key,
        byte[] payload,
        out string userId,
        out string userName)
    {
        userId = "unknown";
        userName = "unknown";
        var options = _options.CurrentValue;
        if (!options.ProtectAgainstPoisonedAppConfigPayloads ||
            options.MinGrantedPoliciesForAuthenticatedUser <= 0 ||
            payload.Length == 0 ||
            !LooksLikeApplicationConfigurationKey(key))
        {
            return false;
        }

        try
        {
            using var document = JsonDocument.Parse(payload);
            var root = document.RootElement;
            if (!TryGetPropertyIgnoreCase(root, "auth", out var auth) ||
                auth.ValueKind != JsonValueKind.Object ||
                !TryGetPropertyIgnoreCase(auth, "grantedPolicies", out var grantedPolicies) ||
                grantedPolicies.ValueKind != JsonValueKind.Object)
            {
                return false;
            }

            var policyCount = 0;
            foreach (var _ in grantedPolicies.EnumerateObject())
            {
                policyCount++;
                if (policyCount >= options.MinGrantedPoliciesForAuthenticatedUser)
                {
                    // Threshold reached — payload looks healthy; stop counting early.
                    return false;
                }
            }

            if (!TryGetPropertyIgnoreCase(root, "currentUser", out var currentUser) ||
                currentUser.ValueKind != JsonValueKind.Object)
            {
                return false;
            }

            userId = GetStringPropertyIgnoreCase(currentUser, "id");
            userName = GetStringPropertyIgnoreCase(currentUser, "userName");
            // Prefer an explicit isAuthenticated boolean; fall back to treating a
            // non-blank user id as "authenticated".
            var isAuthenticated =
                TryGetPropertyIgnoreCase(currentUser, "isAuthenticated", out var isAuthenticatedElement) &&
                isAuthenticatedElement.ValueKind is JsonValueKind.True or JsonValueKind.False
                    ? isAuthenticatedElement.GetBoolean()
                    : !string.IsNullOrWhiteSpace(userId);
            return isAuthenticated;
        }
        catch (JsonException)
        {
            // Not JSON or incompatible payload shape. Ignore.
            return false;
        }
    }

    // NB: the second clause is redundant — any key containing
    // "abpapplicationconfiguration" necessarily contains "applicationconfiguration".
    // Kept for explicitness; harmless.
    [SuppressMessage("ReSharper", "StringLiteralTypo")]
    private static bool LooksLikeApplicationConfigurationKey(string key)
    {
        return key.Contains("applicationconfiguration", StringComparison.OrdinalIgnoreCase) ||
               key.Contains("abpapplicationconfiguration", StringComparison.OrdinalIgnoreCase);
    }

    // Linear, case-insensitive property lookup (JsonElement.TryGetProperty is
    // case-sensitive; payload casing may vary by serializer settings).
    private static bool TryGetPropertyIgnoreCase(JsonElement element, string propertyName, out JsonElement value)
    {
        foreach (var property in element.EnumerateObject())
        {
            if (string.Equals(property.Name, propertyName, StringComparison.OrdinalIgnoreCase))
            {
                value = property.Value;
                return true;
            }
        }

        value = default;
        return false;
    }

    // Returns the string value of a property (case-insensitive), or "unknown"
    // when absent or not a JSON string.
    private static string GetStringPropertyIgnoreCase(JsonElement element, string propertyName)
    {
        if (!TryGetPropertyIgnoreCase(element, propertyName, out var value) ||
            value.ValueKind != JsonValueKind.String)
        {
            return "unknown";
        }

        return value.GetString() ?? "unknown";
    }

    // Source-generated, allocation-free log methods ([LoggerMessage] fills in
    // the bodies; requires the class to be 'partial' and the _logger field).

    [LoggerMessage(Level = LogLevel.Trace, Message = "L1 cache hit for key: {Key}")]
    private partial void LogL1Hit(string key);

    [LoggerMessage(Level = LogLevel.Warning, Message = "Redis GET failed for key: {Key}, serving null (fail-fast)")]
    private partial void LogRedisGetFailure(string key, Exception ex);

    [LoggerMessage(Level = LogLevel.Warning, Message = "Redis SET failed for key: {Key}, L1 write succeeded")]
    private partial void LogRedisSetFailure(string key, Exception ex);

    [LoggerMessage(Level = LogLevel.Warning, Message = "Redis REMOVE failed for key: {Key}")]
    private partial void LogRedisRemoveFailure(string key, Exception ex);

    [LoggerMessage(Level = LogLevel.Warning, Message = "Redis REFRESH failed for key: {Key}")]
    private partial void LogRedisRefreshFailure(string key, Exception ex);

    [LoggerMessage(Level = LogLevel.Warning,
        Message = "Rejected poisoned app-config payload for key: {Key}. UserId={UserId}, UserName={UserName}")]
    private partial void LogPoisonedPayloadRejected(string key, string userId, string userName);

    [LoggerMessage(Level = LogLevel.Warning,
        Message =
            "Detected poisoned app-config payload in cache for key: {Key}. Evicted and forcing regeneration. UserId={UserId}, UserName={UserName}")]
    private partial void LogPoisonedPayloadDetected(string key, string userId, string userName);

    /// <summary>
    /// Wraps a cached value with the expiration metadata used to store it,
    /// enabling <see cref="RefreshL1" /> to re-insert with the same settings.
    /// </summary>
    private readonly record struct L1Entry(
        byte[] Value,
        TimeSpan? AbsoluteExpirationRelativeToNow,
        TimeSpan? SlidingExpiration);
}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment