Just a perfectly normal delighter.
# Done 0-shot with Gemini 2.5 based on this: https://editor.p5js.org/yanko.oliveira/full/0RsR9Z8Ix
import torch
import torch.nn.functional as F
import numpy as np
# --- Constants ---
# Using the standard Rec.709 / sRGB luminance weights
LUMA_WEIGHTS_NP = np.array([0.2126, 0.7152, 0.0722], dtype=np.float32)
MIN_DIVISOR = 0.02 # Minimum divisor to prevent division by zero/extreme values


class DelightNormalMask:
    """
    ComfyUI node to remove lighting from a texture using its normal map,
    based on a specified light direction, with brightness-based masking.
    """

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "color_image": ("IMAGE",),
                "normal_map": ("IMAGE",),
                "light_direction_x": ("FLOAT", {"default": -1.0, "min": -1.0, "max": 1.0, "step": 0.01}),
                "light_direction_y": ("FLOAT", {"default": 1.0, "min": -1.0, "max": 1.0, "step": 0.01}),
                "light_direction_z": ("FLOAT", {"default": 1.0, "min": -1.0, "max": 1.0, "step": 0.01}),
                "ambient_amount": ("FLOAT", {"default": 0.20, "min": 0.0, "max": 1.0, "step": 0.01}),
                "gamma": ("FLOAT", {"default": 2.2, "min": 1.0, "max": 3.0, "step": 0.01}),
                "blend_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                "mask_power": ("FLOAT", {"default": 2.0, "min": 0.1, "max": 10.0, "step": 0.05}),
            }
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "process"
    CATEGORY = "Image/Processing"

    def process(self, color_image, normal_map,
                light_direction_x, light_direction_y, light_direction_z,
                ambient_amount, gamma, blend_factor, mask_power):
        if color_image.shape[1:] != normal_map.shape[1:]:
            raise ValueError(f"Color image and normal map must have the same dimensions (H, W, C). Got {color_image.shape} and {normal_map.shape}")
        if color_image.shape[-1] < 3 or normal_map.shape[-1] < 3:
            raise ValueError(f"Input images must have at least 3 channels (RGB). Got shapes: Color {color_image.shape}, Normal {normal_map.shape}")

        # Ensure we work with RGB only, discarding alpha if present
        color_srgb = color_image[..., :3].clone()  # BHWC, range [0, 1]
        normal_rgb = normal_map[..., :3].clone()   # BHWC, range [0, 1]

        # Determine device and dtype from input tensors
        device = color_srgb.device
        dtype = color_srgb.dtype
        batch_size, height, width, _ = color_srgb.shape

        # --- Algorithm Implementation (Vectorized) ---
        # 1. Decode Normal Map (from [0, 1] to [-1, 1]) and Normalize
        N_decoded = normal_rgb * 2.0 - 1.0
        N = F.normalize(N_decoded, p=2, dim=-1)  # Normalize along the channel dimension

        # 2. Prepare Light Vector and Normalize
        light_direction_vec = torch.tensor([light_direction_x, light_direction_y, light_direction_z], device=device, dtype=dtype)
        L_norm = F.normalize(light_direction_vec, p=2, dim=0)
        # Reshape L for broadcasting: (3,) -> (1, 1, 1, 3)
        L = L_norm.view(1, 1, 1, 3)

        # 3. Calculate Lambertian Shading (Dot product N . L)
        # N shape: (B, H, W, 3), L shape: (1, 1, 1, 3) -> Result shape (B, H, W, 1) after sum
        lambertian = torch.sum(N * L, dim=-1, keepdim=True)
        lambertian = torch.clamp(lambertian, min=0.0)  # max(0.0, dot(N, L))

        # 4. Calculate Base Shading (Incorporate ambient)
        baseShading = torch.clamp(lambertian, min=ambient_amount)  # max(ambientAmount, lambertian)

        # 5. Calculate Safe Divisor
        safeDivisor = torch.clamp(baseShading, min=MIN_DIVISOR)  # max(baseShading, MIN_DIVISOR)

        # 6. Linearize Input Color (sRGB to Linear)
        # Add a small epsilon to avoid pow(0, gamma) issues if gamma > 1, although the clamp below helps
        originalLinear = torch.pow(color_srgb + 1e-6, gamma)

        # 7. Calculate Delighted Linear Color
        delightedLinear = originalLinear / safeDivisor  # Broadcasting safeDivisor (B,H,W,1) over originalLinear (B,H,W,3)

        # 8. Convert Delighted Color back to sRGB (Linear to sRGB)
        # Clamp delightedLinear before inverse gamma to prevent potential issues with large values
        delightedLinear_clamped = torch.clamp(delightedLinear, 0.0, 1.0)  # Clamp prevents extreme values before pow
        delightedSRGB = torch.pow(delightedLinear_clamped + 1e-6, 1.0 / gamma)

        # 9. Calculate Brightness Mask
        # 9a. Calculate Luminance (using Linear colors)
        luma_weights = torch.tensor(LUMA_WEIGHTS_NP, device=device, dtype=dtype).view(1, 1, 1, 3)
        # originalLinear shape: (B, H, W, 3), luma_weights shape: (1, 1, 1, 3) -> Result shape (B, H, W, 1) after sum
        brightness_linear = torch.sum(originalLinear * luma_weights, dim=-1, keepdim=True)
        brightness = torch.clamp(brightness_linear, 0.0, 1.0)  # Clamp luminance to [0, 1] range

        # 9b. Invert brightness
        maskWeightBase = 1.0 - brightness

        # 9c. Apply power for contrast
        # Add epsilon to base to avoid pow(0, power) if power < 1
        maskWeightPowered = torch.pow(maskWeightBase + 1e-6, mask_power)
        maskWeightPowered = torch.clamp(maskWeightPowered, 0.0, 1.0)  # Ensure mask weight stays in [0, 1]

        # 10. Calculate Final Blend Weight
        finalBlendWeight = maskWeightPowered * blend_factor  # Broadcasting blend_factor (scalar)

        # 11. Blend Colors (Mix in sRGB space)
        # mix(A, B, w) = A * (1-w) + B * w
        # color_srgb shape: (B, H, W, 3), delightedSRGB shape: (B, H, W, 3), finalBlendWeight shape: (B, H, W, 1)
        blendedColor_sRGB = color_srgb * (1.0 - finalBlendWeight) + delightedSRGB * finalBlendWeight

        # 12. Final Clamp
        final_image = torch.clamp(blendedColor_sRGB, 0.0, 1.0)

        # --- Output ---
        # ComfyUI expects BHWC format, which we already have.
        # If the original image had alpha, we could add it back; for now, just return RGB.
        # If alpha handling is needed:
        # if color_image.shape[-1] == 4:
        #     alpha_channel = color_image[..., 3:4]
        #     final_image = torch.cat((final_image, alpha_channel), dim=-1)
        return (final_image,)


# --- ComfyUI Registration ---
NODE_CLASS_MAPPINGS = {
    "DelightNormalMask": DelightNormalMask
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "DelightNormalMask": "Delight (Normal+Mask)"
}