@joey00072
Created November 27, 2025 03:48
import torch
import torch.nn.functional as F

x1 = torch.randn(5)
x2 = x1.clone()
x1.requires_grad = True
x2.requires_grad = True

alpha = 1 - 0.5    # 0.5
beta = 1 + 0.53    # 1.53

def random_function_for_unique_grad(x):
    # arbitrary nonlinearity so each element gets a distinct gradient
    return x**2 + F.sigmoid(x)
x1_o = random_function_for_unique_grad(x1)
x2_o = random_function_for_unique_grad(x2)

def masked_loss(x, alpha, beta):
    # keep only x in [alpha, beta]
    mask = (x >= alpha) & (x <= beta)  # boolean mask for "in range"
    mask = mask.to(x.dtype)            # convert to same dtype as x
    return x * mask

def clipped_loss(x, alpha, beta):
    return torch.clamp(x, alpha, beta)
masked_out = masked_loss(x2_o, alpha, beta)     # gradients flow back to x2
masked_out.sum().backward()

clipped_out = clipped_loss(x1_o, alpha, beta)   # gradients flow back to x1
clipped_out.sum().backward()

print("=== X ===")
print("x1", x1)
print("x2", x2)
print("=== X_o ===")
print("masked_out", masked_out)
print("clipped_out", clipped_out)
print("=== Grad ===")
print("x1.grad", x1.grad)
print("x2.grad", x2.grad)
# === X ===
# x1 tensor([ 1.0200, 0.1265, 0.7952, -1.0116, -1.1075], requires_grad=True)
# x2 tensor([ 1.0200, 0.1265, 0.7952, -1.0116, -1.1075], requires_grad=True)
# === X_o ===
# masked_out tensor([0.0000, 0.5476, 1.3213, 1.2900, 1.4748], grad_fn=<MulBackward0>)
# clipped_out tensor([1.5300, 0.5476, 1.3213, 1.2900, 1.4748], grad_fn=<ClampBackward1>)
# === Grad ===
# x1.grad tensor([ 0.0000, 0.5020, 1.8047, -1.8277, -2.0283])
# x2.grad tensor([ 0.0000, 0.5020, 1.8047, -1.8277, -2.0283])
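
# Note (added for clarity): the gradients match because torch.clamp has a local
# derivative of 1 where its input lies inside [alpha, beta] and 0 outside, while
# the masked version multiplies by 1 inside the range and 0 outside -- the same
# local derivative, so backward() produces identical grads for x1 and x2.
# A minimal sanity check, assuming the script above has just been run:
assert torch.allclose(x1.grad, x2.grad)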