WebGPU: Draw only to frag_depth then read via compute shader

view on jsgist

canvas {
  margin: 3px;
  width: 100px;
  height: 20px;
  image-rendering: pixelated;
  display: block;
}
const adapter = await navigator.gpu.requestAdapter();
const device = await adapter.requestDevice();
device.addEventListener('uncapturederror', e => console.error(e.error.message));
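// The depth values we write via frag_depth and expect to read back.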
const expected = [0.25, 0.5, 0.75];
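// For a given depth format: create a 3x1 depth texture, fill it by drawing a
// fullscreen triangle whose fragment shader outputs frag_depth from a storage
// buffer, then read the depth values back with a compute shader.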
async function test(format) {
  const depthTex = device.createTexture({
    size: [3],
    format,
    usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,
  });
  const data = new Float32Array(expected);
  const buffer = device.createBuffer({
    size: 12,
    usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.STORAGE,
  });
  device.queue.writeBuffer(buffer, 0, data);
  // Never written: a new buffer is zero-initialized, so Params stays all zeros
  // (srcOffset 0, bytesPerRow 0, dstOrigin (0,0)), which is what this test needs.
  const uniformBuffer = device.createBuffer({
    size: 16,
    usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.UNIFORM,
  });
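  // One WGSL module with all the entry points:
  //  - vert_fullscreen_quad: a fullscreen triangle
  //  - blit_buffer_to_texture: writes src_buf values to frag_depth (no color targets)
  //  - draw: visualizes the depth texture to a color target
  //  - cs: copies one depth texel per invocation into a storage buffer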
  const module = device.createShaderModule({
    code: `
      const kPixelSize = 4;

      @vertex fn vert_fullscreen_quad(
        @builtin(vertex_index) vertex_index : u32
      ) -> @builtin(position) vec4f {
        const pos = array(
          vec2f(-1.0, -1.0),
          vec2f( 3.0, -1.0),
          vec2f(-1.0,  3.0));
        return vec4f(pos[vertex_index], 0.0, 1.0);
      }

      struct Params {
        srcOffset : u32,
        bytesPerRow : u32,
        dstOrigin : vec2u,
      };

      @group(0) @binding(0) var<storage, read> src_buf : array<f32>;
      @group(0) @binding(1) var<uniform> params : Params;

      @fragment fn blit_buffer_to_texture(
        @builtin(position) screen_position : vec4f
      ) -> @builtin(frag_depth) f32 {
        let iposition = vec2u(screen_position.xy) - params.dstOrigin;
        let srcOffset = params.srcOffset + iposition.x * kPixelSize + iposition.y * params.bytesPerRow;
        return src_buf[srcOffset >> 2];
      }

      @group(0) @binding(0) var tex: texture_2d<f32>;

      @fragment fn draw(
        @builtin(position) screen_position : vec4f
      ) -> @location(0) vec4f {
        let uv = vec2u(screen_position.xy) % textureDimensions(tex, 0);
        return textureLoad(tex, uv, 0);
      }

      @group(0) @binding(1) var<storage, read_write> result: array<f32>;

      @compute @workgroup_size(1) fn cs(@builtin(global_invocation_id) gid: vec3u) {
        result[gid.x] = textureLoad(tex, vec2u(gid.x, 0), 0).r;
      }
    `,
  });
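  // Depth-only render pipeline: no color targets; depthCompare 'always' with
  // depth writes enabled so every fragment's frag_depth value lands in the texture.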
  const pipeline = device.createRenderPipeline({
    layout: 'auto',
    vertex: { module },
    fragment: { module, entryPoint: 'blit_buffer_to_texture', targets: [] },
    depthStencil: {
      format,
      depthWriteEnabled: true,
      depthCompare: 'always',
    },
  });
  const bindGroup = device.createBindGroup({
    layout: pipeline.getBindGroupLayout(0),
    entries: [
      { binding: 0, resource: buffer },
      { binding: 1, resource: uniformBuffer },
    ],
  });
  const copyPipeline = device.createComputePipeline({
    layout: 'auto',
    compute: { module },
  });
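  // Pass 1: draw a fullscreen triangle that blits the storage buffer into the
  // depth texture via frag_depth.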
  const encoder = device.createCommandEncoder();
  {
    const pass = encoder.beginRenderPass({
      colorAttachments: [],
      depthStencilAttachment: {
        view: depthTex,
        depthLoadOp: 'load',
        depthStoreOp: 'store',
        depthClearValue: 0,
        ...(format.includes('stencil') ? {
          stencilReadOnly: true,
        } : {}),
      },
    });
    pass.setPipeline(pipeline);
    pass.setBindGroup(0, bindGroup);
    pass.draw(3);
    pass.end();
  }
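  // Pass 2: a compute shader reads the depth-only aspect of the depth texture and
  // writes each texel into resultBuffer, which is then copied to a mappable buffer.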
  const resultBuffer = device.createBuffer({
    size: depthTex.width * 4,
    usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
  });
  const copyBuffer = device.createBuffer({
    size: resultBuffer.size,
    usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
  });
  {
    const bindGroup = device.createBindGroup({
      layout: copyPipeline.getBindGroupLayout(0),
      entries: [
        { binding: 0, resource: depthTex.createView({aspect: 'depth-only'}) },
        { binding: 1, resource: resultBuffer },
      ],
    });
    const pass = encoder.beginComputePass();
    pass.setPipeline(copyPipeline);
    pass.setBindGroup(0, bindGroup);
    pass.dispatchWorkgroups(depthTex.width);
    pass.end();
    encoder.copyBufferToBuffer(resultBuffer, 0, copyBuffer, 0, copyBuffer.size);
  }
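  // Pass 3: also draw the depth texture to a small canvas so the values are visible.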
  const preferredFormat = navigator.gpu.getPreferredCanvasFormat();
  const canvas = document.createElement('canvas');
  canvas.width = 3;
  canvas.height = 1;
  document.body.append(canvas);
  const context = canvas.getContext('webgpu');
  context.configure({
    device,
    format: preferredFormat,
  });
  {
    const pipeline = device.createRenderPipeline({
      layout: 'auto',
      vertex: { module },
      fragment: { module, entryPoint: 'draw', targets: [{ format: preferredFormat }] },
    });
    const bindGroup = device.createBindGroup({
      layout: pipeline.getBindGroupLayout(0),
      entries: [
        { binding: 0, resource: depthTex.createView({aspect: 'depth-only'}) },
      ],
    });
    const pass = encoder.beginRenderPass({
      colorAttachments: [{
        view: context.getCurrentTexture(),
        loadOp: 'clear',
        storeOp: 'store',
        clearValue: [1, 0, 0, 1],
      }],
    });
    pass.setPipeline(pipeline);
    pass.setBindGroup(0, bindGroup);
    pass.draw(3);
    pass.end();
  }
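  // Submit all three passes, then map the copy buffer and log what was read back.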
  device.queue.submit([encoder.finish()]);
  await copyBuffer.mapAsync(GPUMapMode.READ);
  const result = new Float32Array(copyBuffer.getMappedRange()).slice();
  copyBuffer.unmap();
  console.log(`${format}: ${[...result].map(v => v.toFixed(3))}`);
}
console.log(`expected: ${[...expected].map(v => v.toFixed(3))}`);
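// Run the test for each depth format (lower-precision formats may quantize the values slightly).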
for (const format of ['depth16unorm', 'depth24plus', 'depth24plus-stencil8', 'depth32float']) {
  await test(format);
}
{"name":"WebGPU: Draw only to frag_depth then read via compute shader","settings":{},"filenames":["index.html","index.css","index.js"]}