From 1d50673e2817e301ee0964890ad14e2469791362 Mon Sep 17 00:00:00 2001 From: Iwo Plaza Date: Wed, 15 Oct 2025 10:51:23 +0200 Subject: [PATCH 1/6] feat: Always flushing --- .../content/docs/fundamentals/pipelines.mdx | 9 +--- .../examples/image-processing/blur/index.ts | 5 +-- .../src/examples/rendering/3d-fish/index.ts | 2 - .../rendering/cubemap-reflection/icosphere.ts | 2 - .../rendering/cubemap-reflection/index.ts | 2 - .../examples/rendering/simple-shadow/index.ts | 1 - .../src/examples/rendering/two-boxes/index.ts | 1 - .../simulation/slime-mold-3d/index.ts | 14 +++---- .../src/examples/tests/texture-test/index.ts | 4 +- packages/typegpu/src/core/buffer/buffer.ts | 41 +++++++------------ .../src/core/pipeline/computePipeline.ts | 4 +- .../src/core/pipeline/renderPipeline.ts | 38 +++++++++-------- .../typegpu/src/core/pipeline/timeable.ts | 5 ++- .../typegpu/src/core/querySet/querySet.ts | 22 +++++----- packages/typegpu/src/core/root/init.ts | 21 ++-------- packages/typegpu/src/core/root/rootTypes.ts | 11 ----- packages/typegpu/src/prepareDispatch.ts | 1 - 17 files changed, 71 insertions(+), 112 deletions(-) diff --git a/apps/typegpu-docs/src/content/docs/fundamentals/pipelines.mdx b/apps/typegpu-docs/src/content/docs/fundamentals/pipelines.mdx index 1f29da63b..ddd85a669 100644 --- a/apps/typegpu-docs/src/content/docs/fundamentals/pipelines.mdx +++ b/apps/typegpu-docs/src/content/docs/fundamentals/pipelines.mdx @@ -325,9 +325,6 @@ It accepts the number of vertices and optionally the instance count, first verte After calling the method, the shader is set for execution immediately. Compute pipelines are executed using the `dispatchWorkgroups` method, which accepts the number of workgroups in each dimension. -Unlike render pipelines, after running this method, the execution is not submitted to the GPU immediately. -In order to do so, `root['~unstable'].flush()` needs to be run. -However, that is usually not necessary, as it is done automatically when trying to read the result of computation. ### Drawing with `drawIndexed` @@ -376,14 +373,14 @@ const mainFragment = tgpu['~unstable'].fragmentFn({ const indexBuffer = root .createBuffer(d.arrayOf(d.u16, 6), [0, 2, 1, 0, 3, 2]) .$usage('index'); - + const pipeline = root['~unstable'] .withVertex(vertex, { color: vertexLayout.attrib }) .withFragment(mainFragment, { format: presentationFormat }) .createPipeline() .withIndexBuffer(indexBuffer); - pipeline + pipeline .with(vertexLayout, colorBuffer) .drawIndexed(6); ``` @@ -407,8 +404,6 @@ root['~unstable'].beginRenderPass( pass.draw(3); }, ); - -root['~unstable'].flush(); ``` It is also possible to access the underlying WebGPU resources for the TypeGPU pipelines, by calling `root.unwrap(pipeline)`. 
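The lines removed above change the user-facing contract: a compute dispatch is now submitted to the GPU queue as soon as it is issued, so reading back results no longer depends on an explicit `root['~unstable'].flush()`. A minimal sketch of the updated flow, assuming a `mainCompute` entry-point function and a buffer shape that are not part of this diff:

```ts
import tgpu from 'typegpu';
import * as d from 'typegpu/data';

const root = await tgpu.init();

// Hypothetical storage buffer that `mainCompute` (a compute entry function
// defined elsewhere, not shown in this patch) writes its results into.
const counters = root.createBuffer(d.arrayOf(d.u32, 64)).$usage('storage');

const pipeline = root['~unstable']
  .withCompute(mainCompute)
  .createPipeline();

// The dispatch is encoded and submitted to the GPU queue right here...
pipeline.dispatchWorkgroups(1);

// ...so the readback below no longer needs a `root['~unstable'].flush()` call.
const result = await counters.read();
console.log(result);
```
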
diff --git a/apps/typegpu-docs/src/examples/image-processing/blur/index.ts b/apps/typegpu-docs/src/examples/image-processing/blur/index.ts index 6cd1c8973..32af105d9 100644 --- a/apps/typegpu-docs/src/examples/image-processing/blur/index.ts +++ b/apps/typegpu-docs/src/examples/image-processing/blur/index.ts @@ -65,7 +65,7 @@ const ioLayout = tgpu.bindGroupLayout({ outTexture: { storageTexture: d.textureStorage2d('rgba8unorm') }, }); -const tileData = tgpu['~unstable'].workgroupVar( +const tileData = tgpu.workgroupVar( d.arrayOf(d.arrayOf(d.vec3f, 128), 4), ); @@ -191,7 +191,7 @@ function render() { for (const i of indices) { computePipeline - .with(ioLayout, ioBindGroups[i]) + .with(ioBindGroups[i]) .dispatchWorkgroups( Math.ceil(srcWidth / settings.blockDim), Math.ceil(srcHeight / 4), @@ -203,7 +203,6 @@ function render() { loadOp: 'clear', storeOp: 'store', }).draw(3); - root['~unstable'].flush(); } render(); diff --git a/apps/typegpu-docs/src/examples/rendering/3d-fish/index.ts b/apps/typegpu-docs/src/examples/rendering/3d-fish/index.ts index e7e8550b1..bd61f2bc0 100644 --- a/apps/typegpu-docs/src/examples/rendering/3d-fish/index.ts +++ b/apps/typegpu-docs/src/examples/rendering/3d-fish/index.ts @@ -306,8 +306,6 @@ function frame(timestamp: DOMHighResTimeStamp) { .with(renderFishBindGroups[odd ? 1 : 0]) .draw(fishModel.polygonCount, p.fishAmount); - root['~unstable'].flush(); - requestAnimationFrame(frame); } enqueuePresetChanges(); diff --git a/apps/typegpu-docs/src/examples/rendering/cubemap-reflection/icosphere.ts b/apps/typegpu-docs/src/examples/rendering/cubemap-reflection/icosphere.ts index e313ffd86..a192c30be 100644 --- a/apps/typegpu-docs/src/examples/rendering/cubemap-reflection/icosphere.ts +++ b/apps/typegpu-docs/src/examples/rendering/cubemap-reflection/icosphere.ts @@ -310,8 +310,6 @@ export class IcosphereGenerator { .with(bindGroup) .dispatchWorkgroups(xGroups, yGroups, 1); - this.root['~unstable'].flush(); - return nextBuffer; } diff --git a/apps/typegpu-docs/src/examples/rendering/cubemap-reflection/index.ts b/apps/typegpu-docs/src/examples/rendering/cubemap-reflection/index.ts index 2808485dd..c3d46890f 100644 --- a/apps/typegpu-docs/src/examples/rendering/cubemap-reflection/index.ts +++ b/apps/typegpu-docs/src/examples/rendering/cubemap-reflection/index.ts @@ -315,8 +315,6 @@ function render() { .with(renderBindGroup) .with(textureBindGroup) .draw(vertexBuffer.dataType.elementCount); - - root['~unstable'].flush(); } function loop() { diff --git a/apps/typegpu-docs/src/examples/rendering/simple-shadow/index.ts b/apps/typegpu-docs/src/examples/rendering/simple-shadow/index.ts index 839b3f8a8..33c336b83 100644 --- a/apps/typegpu-docs/src/examples/rendering/simple-shadow/index.ts +++ b/apps/typegpu-docs/src/examples/rendering/simple-shadow/index.ts @@ -352,7 +352,6 @@ function render() { } }, ); - root['~unstable'].flush(); } frameId = requestAnimationFrame(render); diff --git a/apps/typegpu-docs/src/examples/rendering/two-boxes/index.ts b/apps/typegpu-docs/src/examples/rendering/two-boxes/index.ts index fb21379bd..6f8d25b94 100644 --- a/apps/typegpu-docs/src/examples/rendering/two-boxes/index.ts +++ b/apps/typegpu-docs/src/examples/rendering/two-boxes/index.ts @@ -303,7 +303,6 @@ function render() { drawObject(cubeBuffer, bindGroup, 36, 'clear'); drawObject(secondCubeBuffer, secondBindGroup, 36, 'load'); drawObject(planeBuffer, planeBindGroup, 6, 'load'); - root['~unstable'].flush(); } function frame() { diff --git 
a/apps/typegpu-docs/src/examples/simulation/slime-mold-3d/index.ts b/apps/typegpu-docs/src/examples/simulation/slime-mold-3d/index.ts index 24140b8d3..513aa237f 100644 --- a/apps/typegpu-docs/src/examples/simulation/slime-mold-3d/index.ts +++ b/apps/typegpu-docs/src/examples/simulation/slime-mold-3d/index.ts @@ -487,14 +487,16 @@ function frame() { params.writePartial({ deltaTime }); - blurPipeline.with(computeLayout, bindGroups[currentTexture]) + blurPipeline + .with(bindGroups[currentTexture]) .dispatchWorkgroups( Math.ceil(resolution.x / BLUR_WORKGROUP_SIZE[0]), Math.ceil(resolution.y / BLUR_WORKGROUP_SIZE[1]), Math.ceil(resolution.z / BLUR_WORKGROUP_SIZE[2]), ); - computePipeline.with(computeLayout, bindGroups[currentTexture]) + computePipeline + .with(bindGroups[currentTexture]) .dispatchWorkgroups( Math.ceil(NUM_AGENTS / AGENT_WORKGROUP_SIZE), ); @@ -505,12 +507,8 @@ function frame() { loadOp: 'clear', storeOp: 'store', }) - .with( - renderLayout, - renderBindGroups[1 - currentTexture], - ).draw(3); - - root['~unstable'].flush(); + .with(renderBindGroups[1 - currentTexture]) + .draw(3); currentTexture = 1 - currentTexture; diff --git a/apps/typegpu-docs/src/examples/tests/texture-test/index.ts b/apps/typegpu-docs/src/examples/tests/texture-test/index.ts index 8b97f52ba..8d6536242 100644 --- a/apps/typegpu-docs/src/examples/tests/texture-test/index.ts +++ b/apps/typegpu-docs/src/examples/tests/texture-test/index.ts @@ -80,14 +80,14 @@ const pipeline = root['~unstable'] function render() { pipeline - .with(layout, bindGroup) + .with(bindGroup) .withColorAttachment({ view: context.getCurrentTexture().createView(), loadOp: 'clear', storeOp: 'store', }) .draw(3); - root['~unstable'].flush(); + requestAnimationFrame(render); } requestAnimationFrame(render); diff --git a/packages/typegpu/src/core/buffer/buffer.ts b/packages/typegpu/src/core/buffer/buffer.ts index 61ded5373..05727db14 100644 --- a/packages/typegpu/src/core/buffer/buffer.ts +++ b/packages/typegpu/src/core/buffer/buffer.ts @@ -176,6 +176,8 @@ class TgpuBufferImpl implements TgpuBuffer { public readonly resourceType = 'buffer'; public flags: GPUBufferUsageFlags = GPUBufferUsage.COPY_DST | GPUBufferUsage.COPY_SRC; + + readonly #device: GPUDevice; private _buffer: GPUBuffer | null = null; private _ownBuffer: boolean; private _destroyed = false; @@ -189,12 +191,13 @@ class TgpuBufferImpl implements TgpuBuffer { usableAsIndex = false; constructor( - private readonly _group: ExperimentalTgpuRoot, + root: ExperimentalTgpuRoot, public readonly dataType: TData, public readonly initialOrBuffer?: Infer | GPUBuffer | undefined, private readonly _disallowedUsages?: ('uniform' | 'storage' | 'vertex' | 'index')[], ) { + this.#device = root.device; if (isGPUBuffer(initialOrBuffer)) { this._ownBuffer = false; this._buffer = initialOrBuffer; @@ -205,14 +208,12 @@ class TgpuBufferImpl implements TgpuBuffer { } get buffer() { - const device = this._group.device; - if (this._destroyed) { throw new Error('This buffer has been destroyed'); } if (!this._buffer) { - this._buffer = device.createBuffer({ + this._buffer = this.#device.createBuffer({ size: sizeOf(this.dataType), usage: this.flags, mappedAtCreation: !!this.initial, @@ -317,7 +318,6 @@ class TgpuBufferImpl implements TgpuBuffer { write(data: Infer): void { const gpuBuffer = this.buffer; - const device = this._group.device; if (gpuBuffer.mapState === 'mapped') { const mapped = gpuBuffer.getMappedRange(); @@ -330,16 +330,12 @@ class TgpuBufferImpl implements TgpuBuffer { this._hostBuffer = 
new ArrayBuffer(size); } - // Flushing any commands yet to be encoded. - this._group.flush(); - this._writeToTarget(this._hostBuffer, data); - device.queue.writeBuffer(gpuBuffer, 0, this._hostBuffer, 0, size); + this.#device.queue.writeBuffer(gpuBuffer, 0, this._hostBuffer, 0, size); } public writePartial(data: InferPartial): void { const gpuBuffer = this.buffer; - const device = this._group.device; const instructions = getWriteInstructions(this.dataType, data); @@ -352,7 +348,7 @@ class TgpuBufferImpl implements TgpuBuffer { } } else { for (const instruction of instructions) { - device.queue.writeBuffer( + this.#device.queue.writeBuffer( gpuBuffer, instruction.data.byteOffset, instruction.data, @@ -365,19 +361,15 @@ class TgpuBufferImpl implements TgpuBuffer { public clear(): void { const gpuBuffer = this.buffer; - const device = this._group.device; if (gpuBuffer.mapState === 'mapped') { new Uint8Array(gpuBuffer.getMappedRange()).fill(0); return; } - // Flushing any commands yet to be encoded. - this._group.flush(); - - const encoder = device.createCommandEncoder(); + const encoder = this.#device.createCommandEncoder(); encoder.clearBuffer(gpuBuffer); - device.queue.submit([encoder.finish()]); + this.#device.queue.submit([encoder.finish()]); } copyFrom(srcBuffer: TgpuBuffer>): void { @@ -386,16 +378,13 @@ class TgpuBufferImpl implements TgpuBuffer { } const size = sizeOf(this.dataType); - const encoder = this._group.commandEncoder; + const encoder = this.#device.createCommandEncoder(); encoder.copyBufferToBuffer(srcBuffer.buffer, 0, this.buffer, 0, size); + this.#device.queue.submit([encoder.finish()]); } async read(): Promise> { - // Flushing any commands yet to be encoded. - this._group.flush(); - const gpuBuffer = this.buffer; - const device = this._group.device; if (gpuBuffer.mapState === 'mapped') { const mapped = gpuBuffer.getMappedRange(); @@ -410,12 +399,12 @@ class TgpuBufferImpl implements TgpuBuffer { return res; } - const stagingBuffer = device.createBuffer({ + const stagingBuffer = this.#device.createBuffer({ size: sizeOf(this.dataType), usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ, }); - const commandEncoder = device.createCommandEncoder(); + const commandEncoder = this.#device.createCommandEncoder(); commandEncoder.copyBufferToBuffer( gpuBuffer, 0, @@ -424,8 +413,8 @@ class TgpuBufferImpl implements TgpuBuffer { sizeOf(this.dataType), ); - device.queue.submit([commandEncoder.finish()]); - await device.queue.onSubmittedWorkDone(); + this.#device.queue.submit([commandEncoder.finish()]); + await this.#device.queue.onSubmittedWorkDone(); await stagingBuffer.mapAsync(GPUMapMode.READ, 0, sizeOf(this.dataType)); const res = readData( diff --git a/packages/typegpu/src/core/pipeline/computePipeline.ts b/packages/typegpu/src/core/pipeline/computePipeline.ts index 7abe63512..00e11c3a1 100644 --- a/packages/typegpu/src/core/pipeline/computePipeline.ts +++ b/packages/typegpu/src/core/pipeline/computePipeline.ts @@ -190,7 +190,8 @@ class TgpuComputePipelineImpl implements TgpuComputePipeline { ...setupTimestampWrites(this._priors, branch), }; - const pass = branch.commandEncoder.beginComputePass(passDescriptor); + const commandEncoder = branch.device.createCommandEncoder(); + const pass = commandEncoder.beginComputePass(passDescriptor); pass.setPipeline(memo.pipeline); @@ -216,6 +217,7 @@ class TgpuComputePipelineImpl implements TgpuComputePipeline { pass.dispatchWorkgroups(x, y, z); pass.end(); + branch.device.queue.submit([commandEncoder.finish()]); if (memo.logResources) { 
logDataFromGPU(memo.logResources); diff --git a/packages/typegpu/src/core/pipeline/renderPipeline.ts b/packages/typegpu/src/core/pipeline/renderPipeline.ts index b5b78addd..35a28aef1 100644 --- a/packages/typegpu/src/core/pipeline/renderPipeline.ts +++ b/packages/typegpu/src/core/pipeline/renderPipeline.ts @@ -489,7 +489,7 @@ class TgpuRenderPipelineImpl implements TgpuRenderPipeline { }) as unknown as this & HasIndexBuffer; } - private setupRenderPass(): GPURenderPassEncoder { + private setupRenderPass(encoder: GPUCommandEncoder): GPURenderPassEncoder { const internals = this[$internal]; const memo = internals.core.unwrap(); const { branch, fragmentFn } = internals.core.options; @@ -532,7 +532,7 @@ class TgpuRenderPipelineImpl implements TgpuRenderPipeline { } } - const pass = branch.commandEncoder.beginRenderPass(renderPassDescriptor); + const pass = encoder.beginRenderPass(renderPassDescriptor); pass.setPipeline(memo.pipeline); @@ -581,24 +581,27 @@ class TgpuRenderPipelineImpl implements TgpuRenderPipeline { firstInstance?: number, ): void { const internals = this[$internal]; - const pass = this.setupRenderPass(); - const { logResources } = internals.core.unwrap(); const { branch } = internals.core.options; + const { logResources } = internals.core.unwrap(); - pass.draw(vertexCount, instanceCount, firstVertex, firstInstance); + const commandEncoder = branch.device.createCommandEncoder(); + const pass = this.setupRenderPass(commandEncoder); + pass.draw(vertexCount, instanceCount, firstVertex, firstInstance); pass.end(); + branch.device.queue.submit([commandEncoder.finish()]); + if (logResources) { logDataFromGPU(logResources); } - internals.priors.performanceCallback - ? triggerPerformanceCallback({ + if (internals.priors.performanceCallback) { + triggerPerformanceCallback({ root: branch, priors: internals.priors, - }) - : branch.flush(); + }); + } } drawIndexed( @@ -614,12 +617,13 @@ class TgpuRenderPipelineImpl implements TgpuRenderPipeline { throw new Error('No index buffer set for this render pipeline.'); } + const { logResources } = internals.core.unwrap(); + const { branch } = internals.core.options; const { buffer, indexFormat, offsetBytes, sizeBytes } = internals.priors.indexBuffer; - const pass = this.setupRenderPass(); - const { logResources } = internals.core.unwrap(); - const { branch } = internals.core.options; + const commandEncoder = branch.device.createCommandEncoder(); + const pass = this.setupRenderPass(commandEncoder); if (isGPUBuffer(buffer)) { pass.setIndexBuffer(buffer, indexFormat, offsetBytes, sizeBytes); @@ -642,16 +646,18 @@ class TgpuRenderPipelineImpl implements TgpuRenderPipeline { pass.end(); + branch.device.queue.submit([commandEncoder.finish()]); + if (logResources) { logDataFromGPU(logResources); } - internals.priors.performanceCallback - ? 
triggerPerformanceCallback({ + if (internals.priors.performanceCallback) { + triggerPerformanceCallback({ root: branch, priors: internals.priors, - }) - : branch.flush(); + }); + } } } diff --git a/packages/typegpu/src/core/pipeline/timeable.ts b/packages/typegpu/src/core/pipeline/timeable.ts index 18d6c6956..409f6bfcd 100644 --- a/packages/typegpu/src/core/pipeline/timeable.ts +++ b/packages/typegpu/src/core/pipeline/timeable.ts @@ -151,15 +151,16 @@ export function triggerPerformanceCallback({ ); } - root.commandEncoder.resolveQuerySet( + const commandEncoder = root.device.createCommandEncoder(); + commandEncoder.resolveQuerySet( root.unwrap(querySet), 0, querySet.count, querySet[$internal].resolveBuffer, 0, ); + root.device.queue.submit([commandEncoder.finish()]); - root.flush(); root.device.queue.onSubmittedWorkDone().then(async () => { if (!querySet.available) { return; diff --git a/packages/typegpu/src/core/querySet/querySet.ts b/packages/typegpu/src/core/querySet/querySet.ts index c9bd7c83d..a24563d93 100644 --- a/packages/typegpu/src/core/querySet/querySet.ts +++ b/packages/typegpu/src/core/querySet/querySet.ts @@ -39,6 +39,8 @@ export function isQuerySet( class TgpuQuerySetImpl implements TgpuQuerySet { public readonly resourceType = 'query-set' as const; + + readonly #device: GPUDevice; private _querySet: GPUQuerySet | null = null; private readonly _ownQuerySet: boolean; private _destroyed = false; @@ -47,11 +49,12 @@ class TgpuQuerySetImpl implements TgpuQuerySet { private _resolveBuffer: GPUBuffer | null = null; constructor( - private readonly _group: ExperimentalTgpuRoot, + root: ExperimentalTgpuRoot, public readonly type: T, public readonly count: number, private readonly rawQuerySet?: GPUQuerySet, ) { + this.#device = root.device; this._ownQuerySet = !rawQuerySet; this._querySet = rawQuerySet || null; } @@ -67,7 +70,7 @@ class TgpuQuerySetImpl implements TgpuQuerySet { return this._querySet; } - this._querySet = this._group.device.createQuerySet({ + this._querySet = this.#device.createQuerySet({ type: this.type, count: this.count, }); @@ -87,7 +90,7 @@ class TgpuQuerySetImpl implements TgpuQuerySet { return { get readBuffer(): GPUBuffer { if (!self._readBuffer) { - self._readBuffer = self._group.device.createBuffer({ + self._readBuffer = self.#device.createBuffer({ size: self.count * BigUint64Array.BYTES_PER_ELEMENT, usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ, }); @@ -96,7 +99,7 @@ class TgpuQuerySetImpl implements TgpuQuerySet { }, get resolveBuffer(): GPUBuffer { if (!self._resolveBuffer) { - self._resolveBuffer = self._group.device.createBuffer({ + self._resolveBuffer = self.#device.createBuffer({ size: self.count * BigUint64Array.BYTES_PER_ELEMENT, usage: GPUBufferUsage.QUERY_RESOLVE | GPUBufferUsage.COPY_SRC, }); @@ -122,7 +125,7 @@ class TgpuQuerySetImpl implements TgpuQuerySet { throw new Error('This QuerySet is busy resolving or reading.'); } - const commandEncoder = this._group.device.createCommandEncoder(); + const commandEncoder = this.#device.createCommandEncoder(); commandEncoder.resolveQuerySet( this.querySet, 0, @@ -130,18 +133,17 @@ class TgpuQuerySetImpl implements TgpuQuerySet { this[$internal].resolveBuffer, 0, ); - this._group.device.queue.submit([commandEncoder.finish()]); + this.#device.queue.submit([commandEncoder.finish()]); } async read(): Promise { - this._group.flush(); if (!this._resolveBuffer) { throw new Error('QuerySet must be resolved before reading.'); } this._available = false; try { - const commandEncoder = 
this._group.device.createCommandEncoder(); + const commandEncoder = this.#device.createCommandEncoder(); commandEncoder.copyBufferToBuffer( this[$internal].resolveBuffer, 0, @@ -149,8 +151,8 @@ class TgpuQuerySetImpl implements TgpuQuerySet { 0, this.count * BigUint64Array.BYTES_PER_ELEMENT, ); - this._group.device.queue.submit([commandEncoder.finish()]); - await this._group.device.queue.onSubmittedWorkDone(); + this.#device.queue.submit([commandEncoder.finish()]); + await this.#device.queue.onSubmittedWorkDone(); const readBuffer = this[$internal].readBuffer; await readBuffer.mapAsync(GPUMapMode.READ); diff --git a/packages/typegpu/src/core/root/init.ts b/packages/typegpu/src/core/root/init.ts index 9b047914a..edfc13d3e 100644 --- a/packages/typegpu/src/core/root/init.ts +++ b/packages/typegpu/src/core/root/init.ts @@ -264,8 +264,6 @@ class TgpuRootImpl extends WithBindingImpl key.unwrap(this) ); - private _commandEncoder: GPUCommandEncoder | null = null; - [$internal]: { logOptions: LogGeneratorOptions; }; @@ -285,14 +283,6 @@ class TgpuRootImpl extends WithBindingImpl }; } - get commandEncoder() { - if (!this._commandEncoder) { - this._commandEncoder = this.device.createCommandEncoder(); - } - - return this._commandEncoder; - } - get enabledFeatures() { return new Set(this.device.features) as ReadonlySet; } @@ -499,7 +489,8 @@ class TgpuRootImpl extends WithBindingImpl descriptor: GPURenderPassDescriptor, callback: (pass: RenderPass) => void, ): void { - const pass = this.commandEncoder.beginRenderPass(descriptor); + const commandEncoder = this.device.createCommandEncoder(); + const pass = commandEncoder.beginRenderPass(descriptor); const bindGroups = new Map< TgpuBindGroupLayout, @@ -646,15 +637,11 @@ class TgpuRootImpl extends WithBindingImpl }); pass.end(); + this.device.queue.submit([commandEncoder.finish()]); } flush() { - if (!this._commandEncoder) { - return; - } - - this.device.queue.submit([this._commandEncoder.finish()]); - this._commandEncoder = null; + console.warn('flush() has been deprecated, and has no effect.'); } } diff --git a/packages/typegpu/src/core/root/rootTypes.ts b/packages/typegpu/src/core/root/rootTypes.ts index 66f734121..32156bbe1 100644 --- a/packages/typegpu/src/core/root/rootTypes.ts +++ b/packages/typegpu/src/core/root/rootTypes.ts @@ -675,11 +675,6 @@ export interface ExperimentalTgpuRoot extends TgpuRoot, WithBinding { readonly shaderGenerator?: | ShaderGenerator | undefined; - /** - * The current command encoder. This property will - * hold the same value until `flush()` is called. - */ - readonly commandEncoder: GPUCommandEncoder; createTexture< TWidth extends number, @@ -718,10 +713,4 @@ export interface ExperimentalTgpuRoot extends TgpuRoot, WithBinding { descriptor: GPURenderPassDescriptor, callback: (pass: RenderPass) => void, ): void; - - /** - * Causes all commands enqueued by pipelines to be - * submitted to the GPU. 
- */ - flush(): void; } diff --git a/packages/typegpu/src/prepareDispatch.ts b/packages/typegpu/src/prepareDispatch.ts index 76989090a..e91762743 100644 --- a/packages/typegpu/src/prepareDispatch.ts +++ b/packages/typegpu/src/prepareDispatch.ts @@ -116,7 +116,6 @@ export function prepareDispatch( workgroupCount.y, workgroupCount.z, ); - root['~unstable'].flush(); }) as DispatchForArgs; return new PreparedDispatch(createDispatch, pipeline); From a7d5c000e991b7fe1b93f5bab258a0bce84e0c09 Mon Sep 17 00:00:00 2001 From: Iwo Plaza Date: Wed, 15 Oct 2025 11:00:36 +0200 Subject: [PATCH 2/6] Update renderPipeline.test.ts --- packages/typegpu/tests/renderPipeline.test.ts | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/packages/typegpu/tests/renderPipeline.test.ts b/packages/typegpu/tests/renderPipeline.test.ts index 2fbb77445..3ff441ba1 100644 --- a/packages/typegpu/tests/renderPipeline.test.ts +++ b/packages/typegpu/tests/renderPipeline.test.ts @@ -795,7 +795,7 @@ describe('TgpuRenderPipeline', () => { expect(() => pipelineWithIndex.drawIndexed(3)).not.toThrow(); }); - it('works when combining timestamp writes and index buffer', ({ root, device }) => { + it('works when combining timestamp writes and index buffer', ({ root, device, commandEncoder }) => { const vertexFn = tgpu['~unstable'] .vertexFn({ out: { pos: d.builtin.position }, @@ -811,7 +811,7 @@ describe('TgpuRenderPipeline', () => { const querySet = root.createQuerySet('timestamp', 2); const indexBuffer = root.createBuffer(d.arrayOf(d.u16, 2)).$usage('index'); - const beginRenderPassSpy = vi.spyOn(root.commandEncoder, 'beginRenderPass'); + const beginRenderPassSpy = vi.spyOn(commandEncoder, 'beginRenderPass'); const pipeline = root .withVertex(vertexFn, {}) @@ -867,7 +867,7 @@ describe('TgpuRenderPipeline', () => { }); }); - it('should handle a combination of timestamp writes, index buffer, and performance callback', ({ root, device }) => { + it('should handle a combination of timestamp writes, index buffer, and performance callback', ({ root, device, commandEncoder }) => { const vertexFn = tgpu['~unstable'] .vertexFn({ out: { pos: d.builtin.position }, @@ -882,8 +882,8 @@ describe('TgpuRenderPipeline', () => { const querySet = root.createQuerySet('timestamp', 2); const indexBuffer = root.createBuffer(d.arrayOf(d.u16, 2)).$usage('index'); - const beginRenderPassSpy = vi.spyOn(root.commandEncoder, 'beginRenderPass'); - const resolveQuerySetSpy = vi.spyOn(root.commandEncoder, 'resolveQuerySet'); + const beginRenderPassSpy = vi.spyOn(commandEncoder, 'beginRenderPass'); + const resolveQuerySetSpy = vi.spyOn(commandEncoder, 'resolveQuerySet'); const callback = vi.fn(); @@ -926,7 +926,7 @@ describe('TgpuRenderPipeline', () => { count: 2, }); - expect(root.commandEncoder.beginRenderPass).toHaveBeenCalledWith({ + expect(commandEncoder.beginRenderPass).toHaveBeenCalledWith({ colorAttachments: [ { loadOp: 'clear', From 15b85684c492b8aa47ee20630734fcb94f8f525d Mon Sep 17 00:00:00 2001 From: Iwo Plaza Date: Mon, 20 Oct 2025 14:31:27 +0200 Subject: [PATCH 3/6] Update init.ts --- packages/typegpu/src/core/root/init.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/packages/typegpu/src/core/root/init.ts b/packages/typegpu/src/core/root/init.ts index 0983d0680..c6c22461d 100644 --- a/packages/typegpu/src/core/root/init.ts +++ b/packages/typegpu/src/core/root/init.ts @@ -193,8 +193,6 @@ export class PreparedDispatchImpl workgroupCount.y, workgroupCount.z, ); - // Yeah, i know we flush here... 
but it's only a matter of time! - this.#root.flush(); } } From 8d07fb22d05250f8f8b040112933545406e79947 Mon Sep 17 00:00:00 2001 From: Iwo Plaza Date: Mon, 20 Oct 2025 16:30:37 +0200 Subject: [PATCH 4/6] Review fixes --- packages/typegpu/src/core/buffer/buffer.ts | 1 - packages/typegpu/src/core/querySet/querySet.ts | 1 - packages/typegpu/tests/querySet.test.ts | 1 - 3 files changed, 3 deletions(-) diff --git a/packages/typegpu/src/core/buffer/buffer.ts b/packages/typegpu/src/core/buffer/buffer.ts index 05727db14..6632f279b 100644 --- a/packages/typegpu/src/core/buffer/buffer.ts +++ b/packages/typegpu/src/core/buffer/buffer.ts @@ -414,7 +414,6 @@ class TgpuBufferImpl implements TgpuBuffer { ); this.#device.queue.submit([commandEncoder.finish()]); - await this.#device.queue.onSubmittedWorkDone(); await stagingBuffer.mapAsync(GPUMapMode.READ, 0, sizeOf(this.dataType)); const res = readData( diff --git a/packages/typegpu/src/core/querySet/querySet.ts b/packages/typegpu/src/core/querySet/querySet.ts index a24563d93..1d993fb2a 100644 --- a/packages/typegpu/src/core/querySet/querySet.ts +++ b/packages/typegpu/src/core/querySet/querySet.ts @@ -152,7 +152,6 @@ class TgpuQuerySetImpl implements TgpuQuerySet { this.count * BigUint64Array.BYTES_PER_ELEMENT, ); this.#device.queue.submit([commandEncoder.finish()]); - await this.#device.queue.onSubmittedWorkDone(); const readBuffer = this[$internal].readBuffer; await readBuffer.mapAsync(GPUMapMode.READ); diff --git a/packages/typegpu/tests/querySet.test.ts b/packages/typegpu/tests/querySet.test.ts index 386147d8a..0ec62f5ab 100644 --- a/packages/typegpu/tests/querySet.test.ts +++ b/packages/typegpu/tests/querySet.test.ts @@ -87,7 +87,6 @@ describe('TgpuQuerySet', () => { 2 * BigUint64Array.BYTES_PER_ELEMENT, ); expect(device.queue.submit).toHaveBeenCalled(); - expect(device.queue.onSubmittedWorkDone).toHaveBeenCalled(); expect(readBuffer.mapAsync).toHaveBeenCalledWith(GPUMapMode.READ); expect(readBuffer.getMappedRange).toHaveBeenCalled(); expect(readBuffer.unmap).toHaveBeenCalled(); From 0a1d0ae680e2ab6125b873862a763eed8857edf4 Mon Sep 17 00:00:00 2001 From: Iwo Plaza Date: Mon, 20 Oct 2025 16:36:02 +0200 Subject: [PATCH 5/6] Self review --- packages/typegpu/src/prepareDispatch.ts | 122 ------------------------ 1 file changed, 122 deletions(-) delete mode 100644 packages/typegpu/src/prepareDispatch.ts diff --git a/packages/typegpu/src/prepareDispatch.ts b/packages/typegpu/src/prepareDispatch.ts deleted file mode 100644 index e91762743..000000000 --- a/packages/typegpu/src/prepareDispatch.ts +++ /dev/null @@ -1,122 +0,0 @@ -import { builtin } from './builtin.ts'; -import { computeFn } from './core/function/tgpuComputeFn.ts'; -import { fn } from './core/function/tgpuFn.ts'; -import type { TgpuComputePipeline } from './core/pipeline/computePipeline.ts'; -import type { TgpuRoot } from './core/root/rootTypes.ts'; -import { u32 } from './data/numeric.ts'; -import { vec3f, vec3u } from './data/vector.ts'; -import type { v3u } from './data/wgslTypes.ts'; -import { ceil } from './std/numeric.ts'; -import type { TgpuBindGroup } from './tgpuBindGroupLayout.ts'; - -/** - * Changes the given array to a vec of 3 numbers, filling missing values with 1. - */ -function toVec3(arr: readonly (number | undefined)[]): v3u { - if (arr.includes(0)) { - throw new Error('Size and workgroupSize cannot contain zeroes.'); - } - return vec3u(arr[0] ?? 1, arr[1] ?? 1, arr[2] ?? 1); -} - -type DispatchForArgs = TArgs extends { length: infer TLength } - ? TLength extends 0 ? 
(() => void) - : TLength extends 1 ? ((x: number) => void) - : TLength extends 2 ? ((x: number, y: number) => void) - : TLength extends 3 ? ((x: number, y: number, z: number) => void) - : never - : never; - -class PreparedDispatch { - #pipeline: TgpuComputePipeline; - #createDispatch: (pipeline: TgpuComputePipeline) => DispatchForArgs; - constructor( - createDispatch: (pipeline: TgpuComputePipeline) => DispatchForArgs, - pipeline: TgpuComputePipeline, - ) { - this.#createDispatch = createDispatch; - this.#pipeline = pipeline; - } - - /** - * Returns a new PreparedDispatch with the specified bind group bound. - * Analogous to `TgpuComputePipeline.with(bindGroup)`. - */ - with(bindGroup: TgpuBindGroup): PreparedDispatch { - return new PreparedDispatch( - this.#createDispatch, - this.#pipeline.with(bindGroup), - ); - } - - /** - * Run the prepared dispatch. - * Unlike `TgpuComputePipeline.dispatchWorkgroups()`, - * this method takes in the number of threads to run in each dimension. - */ - get dispatch(): DispatchForArgs { - return this.#createDispatch(this.#pipeline); - } -} - -const workgroupSizeConfigs = [ - vec3u(1, 1, 1), - vec3u(256, 1, 1), - vec3u(16, 16, 1), - vec3u(8, 8, 4), -] as const; - -/** - * Creates a dispatch function for a compute pipeline. - * - * The returned function can be called multiple times to run GPU computations. - * - * @param root A TgpuRoot instance. - * @param callback A function converted to WGSL and executed on the GPU. Its arguments correspond to the global invocation IDs. - */ -export function prepareDispatch( - root: TgpuRoot, - callback: (...args: TArgs) => undefined, -): PreparedDispatch { - if (callback.length >= 4) { - throw new Error('Dispatch only supports up to three dimensions.'); - } - const workgroupSize = workgroupSizeConfigs[callback.length] as v3u; - const wrappedCallback = fn([u32, u32, u32])( - callback as (...args: number[]) => void, - ); - - const sizeUniform = root.createUniform(vec3u); - - // raw WGSL instead of TGSL - // because we do not run unplugin before shipping typegpu package - const mainCompute = computeFn({ - workgroupSize: workgroupSize, - in: { id: builtin.globalInvocationId }, - })`{ - if (any(in.id >= sizeUniform)) { - return; - } - wrappedCallback(in.id.x, in.id.y, in.id.z); -}`.$uses({ sizeUniform, wrappedCallback }); - - const pipeline = root['~unstable'] - .withCompute(mainCompute) - .createPipeline(); - - const createDispatch = (pipeline: TgpuComputePipeline) => - ((...size: (number | undefined)[]) => { - const sanitizedSize = toVec3(size); - const workgroupCount = ceil( - vec3f(sanitizedSize).div(vec3f(workgroupSize)), - ); - sizeUniform.write(sanitizedSize); - pipeline.dispatchWorkgroups( - workgroupCount.x, - workgroupCount.y, - workgroupCount.z, - ); - }) as DispatchForArgs; - - return new PreparedDispatch(createDispatch, pipeline); -} From 532136703c7fef19c0faa12c923e308cd8cf6a5b Mon Sep 17 00:00:00 2001 From: Iwo Plaza Date: Mon, 3 Nov 2025 12:30:45 +0100 Subject: [PATCH 6/6] Update rootTypes.ts --- packages/typegpu/src/core/root/rootTypes.ts | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/packages/typegpu/src/core/root/rootTypes.ts b/packages/typegpu/src/core/root/rootTypes.ts index a6d6073d9..38045b504 100644 --- a/packages/typegpu/src/core/root/rootTypes.ts +++ b/packages/typegpu/src/core/root/rootTypes.ts @@ -794,4 +794,11 @@ export interface ExperimentalTgpuRoot extends TgpuRoot, WithBinding { createComparisonSampler( props: WgslComparisonSamplerProps, ): TgpuFixedComparisonSampler; + + /** + * 
@deprecated Previously caused all commands enqueued by pipelines to be
+   * submitted to the GPU. Commands are now submitted immediately,
+   * so calling this method is no longer necessary and has no effect.
+   */
+  flush(): void;
 }
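Taken together, the series leaves `flush()` in place only as a deprecated no-op (the warning added in PATCH 1/6, the doc comment above in PATCH 6/6), so downstream code can simply drop the call without a behavioral change. A before/after sketch, where `pipeline`, `bindGroup`, and `view` stand in for whatever the caller already has:

```ts
// Before this series: frame code ended with an explicit flush so that any work
// still sitting on the shared root-level encoder (notably compute dispatches)
// was submitted to the GPU.
pipeline
  .with(bindGroup)
  .withColorAttachment({ view, loadOp: 'clear', storeOp: 'store' })
  .draw(3);
root['~unstable'].flush();

// After: every draw()/dispatchWorkgroups() submits its own command buffer,
// so the trailing flush can simply be removed (calling it now only logs a
// deprecation warning).
pipeline
  .with(bindGroup)
  .withColorAttachment({ view, loadOp: 'clear', storeOp: 'store' })
  .draw(3);
```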