diff --git a/apps/typegpu-docs/src/content/docs/fundamentals/pipelines.mdx b/apps/typegpu-docs/src/content/docs/fundamentals/pipelines.mdx
index 1f29da63bf..ddd85a6692 100644
--- a/apps/typegpu-docs/src/content/docs/fundamentals/pipelines.mdx
+++ b/apps/typegpu-docs/src/content/docs/fundamentals/pipelines.mdx
@@ -325,9 +325,6 @@ It accepts the number of vertices and optionally the instance count, first vertex and first instance index.
 After calling the method, the shader is set for execution immediately.
 
 Compute pipelines are executed using the `dispatchWorkgroups` method, which accepts the number of workgroups in each dimension.
-Unlike render pipelines, after running this method, the execution is not submitted to the GPU immediately.
-In order to do so, `root['~unstable'].flush()` needs to be run.
-However, that is usually not necessary, as it is done automatically when trying to read the result of computation.
 
 ### Drawing with `drawIndexed`
 
@@ -376,14 +373,14 @@ const mainFragment = tgpu['~unstable'].fragmentFn({
 const indexBuffer = root
   .createBuffer(d.arrayOf(d.u16, 6), [0, 2, 1, 0, 3, 2])
   .$usage('index');
-  
+
 const pipeline = root['~unstable']
   .withVertex(vertex, { color: vertexLayout.attrib })
   .withFragment(mainFragment, { format: presentationFormat })
   .createPipeline()
   .withIndexBuffer(indexBuffer);
 
-pipeline 
+pipeline
   .with(vertexLayout, colorBuffer)
   .drawIndexed(6);
 ```
@@ -407,8 +404,6 @@ root['~unstable'].beginRenderPass(
     pass.draw(3);
   },
 );
-
-root['~unstable'].flush();
 ```
 
 It is also possible to access the underlying WebGPU resources for the TypeGPU pipelines, by calling `root.unwrap(pipeline)`.
diff --git a/apps/typegpu-docs/src/examples/image-processing/blur/index.ts b/apps/typegpu-docs/src/examples/image-processing/blur/index.ts
index 7c7dd83f25..ab1ae41579 100644
--- a/apps/typegpu-docs/src/examples/image-processing/blur/index.ts
+++ b/apps/typegpu-docs/src/examples/image-processing/blur/index.ts
@@ -66,7 +66,7 @@ const ioLayout = tgpu.bindGroupLayout({
   outTexture: { storageTexture: d.textureStorage2d('rgba8unorm') },
 });
 
-const tileData = tgpu['~unstable'].workgroupVar(
+const tileData = tgpu.workgroupVar(
   d.arrayOf(d.arrayOf(d.vec3f, 128), 4),
 );
 
@@ -191,7 +191,6 @@ function render() {
     loadOp: 'clear',
     storeOp: 'store',
   }).draw(3);
-  root['~unstable'].flush();
 }
 
 render();
diff --git a/apps/typegpu-docs/src/examples/rendering/3d-fish/index.ts b/apps/typegpu-docs/src/examples/rendering/3d-fish/index.ts
index 335b9d1928..57aa4afb1f 100644
--- a/apps/typegpu-docs/src/examples/rendering/3d-fish/index.ts
+++ b/apps/typegpu-docs/src/examples/rendering/3d-fish/index.ts
@@ -306,8 +306,6 @@ function frame(timestamp: DOMHighResTimeStamp) {
     .with(renderFishBindGroups[odd ? 1 : 0])
     .draw(fishModel.polygonCount, p.fishAmount);
 
-  root['~unstable'].flush();
-
   requestAnimationFrame(frame);
 }
 
 enqueuePresetChanges();
diff --git a/apps/typegpu-docs/src/examples/rendering/cubemap-reflection/icosphere.ts b/apps/typegpu-docs/src/examples/rendering/cubemap-reflection/icosphere.ts
index e313ffd867..a192c30be2 100644
--- a/apps/typegpu-docs/src/examples/rendering/cubemap-reflection/icosphere.ts
+++ b/apps/typegpu-docs/src/examples/rendering/cubemap-reflection/icosphere.ts
@@ -310,8 +310,6 @@ export class IcosphereGenerator {
       .with(bindGroup)
       .dispatchWorkgroups(xGroups, yGroups, 1);
 
-    this.root['~unstable'].flush();
-
     return nextBuffer;
   }
 
diff --git a/apps/typegpu-docs/src/examples/rendering/cubemap-reflection/index.ts b/apps/typegpu-docs/src/examples/rendering/cubemap-reflection/index.ts
index 79d2460484..8731a57e14 100644
--- a/apps/typegpu-docs/src/examples/rendering/cubemap-reflection/index.ts
+++ b/apps/typegpu-docs/src/examples/rendering/cubemap-reflection/index.ts
@@ -315,8 +315,6 @@ function render() {
     .with(renderBindGroup)
     .with(textureBindGroup)
     .draw(vertexBuffer.dataType.elementCount);
-
-  root['~unstable'].flush();
 }
 
 function loop() {
diff --git a/apps/typegpu-docs/src/examples/rendering/simple-shadow/index.ts b/apps/typegpu-docs/src/examples/rendering/simple-shadow/index.ts
index a2609a5782..58efdb6bc4 100644
--- a/apps/typegpu-docs/src/examples/rendering/simple-shadow/index.ts
+++ b/apps/typegpu-docs/src/examples/rendering/simple-shadow/index.ts
@@ -352,7 +352,6 @@ function render() {
       }
     },
   );
-  root['~unstable'].flush();
 }
 
 frameId = requestAnimationFrame(render);
diff --git a/apps/typegpu-docs/src/examples/rendering/two-boxes/index.ts b/apps/typegpu-docs/src/examples/rendering/two-boxes/index.ts
index fb21379bdb..6f8d25b940 100644
--- a/apps/typegpu-docs/src/examples/rendering/two-boxes/index.ts
+++ b/apps/typegpu-docs/src/examples/rendering/two-boxes/index.ts
@@ -303,7 +303,6 @@ function render() {
   drawObject(cubeBuffer, bindGroup, 36, 'clear');
   drawObject(secondCubeBuffer, secondBindGroup, 36, 'load');
   drawObject(planeBuffer, planeBindGroup, 6, 'load');
-  root['~unstable'].flush();
 }
 
 function frame() {
diff --git a/apps/typegpu-docs/src/examples/simulation/slime-mold-3d/index.ts b/apps/typegpu-docs/src/examples/simulation/slime-mold-3d/index.ts
index addddc647b..2a6e2cc004 100644
--- a/apps/typegpu-docs/src/examples/simulation/slime-mold-3d/index.ts
+++ b/apps/typegpu-docs/src/examples/simulation/slime-mold-3d/index.ts
@@ -498,8 +498,6 @@ function frame() {
     .with(renderBindGroups[1 - currentTexture])
     .draw(3);
 
-  root['~unstable'].flush();
-
   currentTexture = 1 - currentTexture;
 
   requestAnimationFrame(frame);
diff --git a/apps/typegpu-docs/src/examples/tests/texture-test/index.ts b/apps/typegpu-docs/src/examples/tests/texture-test/index.ts
index 3e7df9128d..193b781a2e 100644
--- a/apps/typegpu-docs/src/examples/tests/texture-test/index.ts
+++ b/apps/typegpu-docs/src/examples/tests/texture-test/index.ts
@@ -76,7 +76,7 @@ function render() {
       storeOp: 'store',
     })
     .draw(3);
-  root['~unstable'].flush();
+
   requestAnimationFrame(render);
 }
 requestAnimationFrame(render);
diff --git a/packages/typegpu/src/core/buffer/buffer.ts b/packages/typegpu/src/core/buffer/buffer.ts
index 61ded5373b..6632f279bd 100644
--- a/packages/typegpu/src/core/buffer/buffer.ts
+++ b/packages/typegpu/src/core/buffer/buffer.ts
@@ -176,6 +176,8 @@ class TgpuBufferImpl implements TgpuBuffer {
   public readonly resourceType = 'buffer';
   public flags: GPUBufferUsageFlags = GPUBufferUsage.COPY_DST
     | GPUBufferUsage.COPY_SRC;
+
+  readonly #device: GPUDevice;
   private _buffer: GPUBuffer | null = null;
   private _ownBuffer: boolean;
   private _destroyed = false;
@@ -189,12 +191,13 @@ class TgpuBufferImpl implements TgpuBuffer {
   usableAsIndex = false;
 
   constructor(
-    private readonly _group: ExperimentalTgpuRoot,
+    root: ExperimentalTgpuRoot,
     public readonly dataType: TData,
     public readonly initialOrBuffer?: Infer<TData> | GPUBuffer | undefined,
     private readonly _disallowedUsages?:
       ('uniform' | 'storage' | 'vertex' | 'index')[],
   ) {
+    this.#device = root.device;
     if (isGPUBuffer(initialOrBuffer)) {
       this._ownBuffer = false;
       this._buffer = initialOrBuffer;
@@ -205,14 +208,12 @@ class TgpuBufferImpl implements TgpuBuffer {
   }
 
   get buffer() {
-    const device = this._group.device;
-
     if (this._destroyed) {
       throw new Error('This buffer has been destroyed');
     }
 
     if (!this._buffer) {
-      this._buffer = device.createBuffer({
+      this._buffer = this.#device.createBuffer({
         size: sizeOf(this.dataType),
         usage: this.flags,
         mappedAtCreation: !!this.initial,
@@ -317,7 +318,6 @@ class TgpuBufferImpl implements TgpuBuffer {
 
   write(data: Infer<TData>): void {
     const gpuBuffer = this.buffer;
-    const device = this._group.device;
 
     if (gpuBuffer.mapState === 'mapped') {
       const mapped = gpuBuffer.getMappedRange();
@@ -330,16 +330,12 @@ class TgpuBufferImpl implements TgpuBuffer {
       this._hostBuffer = new ArrayBuffer(size);
     }
 
-    // Flushing any commands yet to be encoded.
-    this._group.flush();
-
     this._writeToTarget(this._hostBuffer, data);
-    device.queue.writeBuffer(gpuBuffer, 0, this._hostBuffer, 0, size);
+    this.#device.queue.writeBuffer(gpuBuffer, 0, this._hostBuffer, 0, size);
   }
 
   public writePartial(data: InferPartial<TData>): void {
     const gpuBuffer = this.buffer;
-    const device = this._group.device;
 
     const instructions = getWriteInstructions(this.dataType, data);
 
@@ -352,7 +348,7 @@ class TgpuBufferImpl implements TgpuBuffer {
       }
     } else {
       for (const instruction of instructions) {
-        device.queue.writeBuffer(
+        this.#device.queue.writeBuffer(
           gpuBuffer,
           instruction.data.byteOffset,
           instruction.data,
@@ -365,19 +361,15 @@ class TgpuBufferImpl implements TgpuBuffer {
 
   public clear(): void {
     const gpuBuffer = this.buffer;
-    const device = this._group.device;
 
     if (gpuBuffer.mapState === 'mapped') {
      new Uint8Array(gpuBuffer.getMappedRange()).fill(0);
      return;
     }
 
-    // Flushing any commands yet to be encoded.
-    this._group.flush();
-
-    const encoder = device.createCommandEncoder();
+    const encoder = this.#device.createCommandEncoder();
     encoder.clearBuffer(gpuBuffer);
-    device.queue.submit([encoder.finish()]);
+    this.#device.queue.submit([encoder.finish()]);
   }
 
   copyFrom(srcBuffer: TgpuBuffer<MemIdentity<TData>>): void {
@@ -386,16 +378,13 @@ class TgpuBufferImpl implements TgpuBuffer {
     }
 
     const size = sizeOf(this.dataType);
-    const encoder = this._group.commandEncoder;
+    const encoder = this.#device.createCommandEncoder();
     encoder.copyBufferToBuffer(srcBuffer.buffer, 0, this.buffer, 0, size);
+    this.#device.queue.submit([encoder.finish()]);
   }
 
   async read(): Promise<Infer<TData>> {
-    // Flushing any commands yet to be encoded.
-    this._group.flush();
-
     const gpuBuffer = this.buffer;
-    const device = this._group.device;
 
     if (gpuBuffer.mapState === 'mapped') {
       const mapped = gpuBuffer.getMappedRange();
@@ -410,12 +399,12 @@ class TgpuBufferImpl implements TgpuBuffer {
       return res;
     }
 
-    const stagingBuffer = device.createBuffer({
+    const stagingBuffer = this.#device.createBuffer({
       size: sizeOf(this.dataType),
       usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
     });
 
-    const commandEncoder = device.createCommandEncoder();
+    const commandEncoder = this.#device.createCommandEncoder();
     commandEncoder.copyBufferToBuffer(
       gpuBuffer,
       0,
@@ -424,8 +413,7 @@ class TgpuBufferImpl implements TgpuBuffer {
       sizeOf(this.dataType),
     );
 
-    device.queue.submit([commandEncoder.finish()]);
-    await device.queue.onSubmittedWorkDone();
+    this.#device.queue.submit([commandEncoder.finish()]);
     await stagingBuffer.mapAsync(GPUMapMode.READ, 0, sizeOf(this.dataType));
 
     const res = readData(
diff --git a/packages/typegpu/src/core/pipeline/computePipeline.ts b/packages/typegpu/src/core/pipeline/computePipeline.ts
index 7abe63512c..00e11c3a15 100644
--- a/packages/typegpu/src/core/pipeline/computePipeline.ts
+++ b/packages/typegpu/src/core/pipeline/computePipeline.ts
@@ -190,7 +190,8 @@ class TgpuComputePipelineImpl implements TgpuComputePipeline {
       ...setupTimestampWrites(this._priors, branch),
     };
 
-    const pass = branch.commandEncoder.beginComputePass(passDescriptor);
+    const commandEncoder = branch.device.createCommandEncoder();
+    const pass = commandEncoder.beginComputePass(passDescriptor);
 
     pass.setPipeline(memo.pipeline);
 
@@ -216,6 +217,7 @@ class TgpuComputePipelineImpl implements TgpuComputePipeline {
 
     pass.dispatchWorkgroups(x, y, z);
     pass.end();
+    branch.device.queue.submit([commandEncoder.finish()]);
 
     if (memo.logResources) {
       logDataFromGPU(memo.logResources);
diff --git a/packages/typegpu/src/core/pipeline/renderPipeline.ts b/packages/typegpu/src/core/pipeline/renderPipeline.ts
index b5b78addd4..35a28aef1e 100644
--- a/packages/typegpu/src/core/pipeline/renderPipeline.ts
+++ b/packages/typegpu/src/core/pipeline/renderPipeline.ts
@@ -489,7 +489,7 @@ class TgpuRenderPipelineImpl implements TgpuRenderPipeline {
     }) as unknown as this & HasIndexBuffer;
   }
 
-  private setupRenderPass(): GPURenderPassEncoder {
+  private setupRenderPass(encoder: GPUCommandEncoder): GPURenderPassEncoder {
     const internals = this[$internal];
     const memo = internals.core.unwrap();
     const { branch, fragmentFn } = internals.core.options;
@@ -532,7 +532,7 @@ class TgpuRenderPipelineImpl implements TgpuRenderPipeline {
       }
     }
 
-    const pass = branch.commandEncoder.beginRenderPass(renderPassDescriptor);
+    const pass = encoder.beginRenderPass(renderPassDescriptor);
 
     pass.setPipeline(memo.pipeline);
 
@@ -581,24 +581,27 @@ class TgpuRenderPipelineImpl implements TgpuRenderPipeline {
     firstInstance?: number,
   ): void {
     const internals = this[$internal];
-    const pass = this.setupRenderPass();
-    const { logResources } = internals.core.unwrap();
     const { branch } = internals.core.options;
+    const { logResources } = internals.core.unwrap();
 
-    pass.draw(vertexCount, instanceCount, firstVertex, firstInstance);
+    const commandEncoder = branch.device.createCommandEncoder();
+    const pass = this.setupRenderPass(commandEncoder);
 
+    pass.draw(vertexCount, instanceCount, firstVertex, firstInstance);
     pass.end();
 
+    branch.device.queue.submit([commandEncoder.finish()]);
+
     if (logResources) {
       logDataFromGPU(logResources);
     }
 
-    internals.priors.performanceCallback
-      ? triggerPerformanceCallback({
+    if (internals.priors.performanceCallback) {
+      triggerPerformanceCallback({
         root: branch,
         priors: internals.priors,
-      })
-      : branch.flush();
+      });
+    }
   }
 
   drawIndexed(
@@ -614,12 +617,13 @@ class TgpuRenderPipelineImpl implements TgpuRenderPipeline {
       throw new Error('No index buffer set for this render pipeline.');
     }
 
+    const { logResources } = internals.core.unwrap();
+    const { branch } = internals.core.options;
     const { buffer, indexFormat, offsetBytes, sizeBytes } =
       internals.priors.indexBuffer;
 
-    const pass = this.setupRenderPass();
-    const { logResources } = internals.core.unwrap();
-    const { branch } = internals.core.options;
+    const commandEncoder = branch.device.createCommandEncoder();
+    const pass = this.setupRenderPass(commandEncoder);
 
     if (isGPUBuffer(buffer)) {
       pass.setIndexBuffer(buffer, indexFormat, offsetBytes, sizeBytes);
@@ -642,16 +646,18 @@ class TgpuRenderPipelineImpl implements TgpuRenderPipeline {
 
     pass.end();
 
+    branch.device.queue.submit([commandEncoder.finish()]);
+
     if (logResources) {
       logDataFromGPU(logResources);
     }
 
-    internals.priors.performanceCallback
-      ? triggerPerformanceCallback({
+    if (internals.priors.performanceCallback) {
+      triggerPerformanceCallback({
         root: branch,
         priors: internals.priors,
-      })
-      : branch.flush();
+      });
+    }
   }
 }
 
diff --git a/packages/typegpu/src/core/pipeline/timeable.ts b/packages/typegpu/src/core/pipeline/timeable.ts
index 18d6c6956d..409f6bfcd2 100644
--- a/packages/typegpu/src/core/pipeline/timeable.ts
+++ b/packages/typegpu/src/core/pipeline/timeable.ts
@@ -151,15 +151,16 @@ export function triggerPerformanceCallback({
     );
   }
 
-  root.commandEncoder.resolveQuerySet(
+  const commandEncoder = root.device.createCommandEncoder();
+  commandEncoder.resolveQuerySet(
     root.unwrap(querySet),
     0,
     querySet.count,
     querySet[$internal].resolveBuffer,
     0,
   );
+  root.device.queue.submit([commandEncoder.finish()]);
 
-  root.flush();
   root.device.queue.onSubmittedWorkDone().then(async () => {
     if (!querySet.available) {
       return;
diff --git a/packages/typegpu/src/core/querySet/querySet.ts b/packages/typegpu/src/core/querySet/querySet.ts
index c9bd7c83db..1d993fb2ad 100644
--- a/packages/typegpu/src/core/querySet/querySet.ts
+++ b/packages/typegpu/src/core/querySet/querySet.ts
@@ -39,6 +39,8 @@ export function isQuerySet(
 
 class TgpuQuerySetImpl implements TgpuQuerySet {
   public readonly resourceType = 'query-set' as const;
+
+  readonly #device: GPUDevice;
   private _querySet: GPUQuerySet | null = null;
   private readonly _ownQuerySet: boolean;
   private _destroyed = false;
@@ -47,11 +49,12 @@ class TgpuQuerySetImpl implements TgpuQuerySet {
   private _resolveBuffer: GPUBuffer | null = null;
 
   constructor(
-    private readonly _group: ExperimentalTgpuRoot,
+    root: ExperimentalTgpuRoot,
     public readonly type: T,
     public readonly count: number,
     private readonly rawQuerySet?: GPUQuerySet,
   ) {
+    this.#device = root.device;
     this._ownQuerySet = !rawQuerySet;
     this._querySet = rawQuerySet || null;
   }
@@ -67,7 +70,7 @@ class TgpuQuerySetImpl implements TgpuQuerySet {
       return this._querySet;
     }
 
-    this._querySet = this._group.device.createQuerySet({
+    this._querySet = this.#device.createQuerySet({
       type: this.type,
       count: this.count,
     });
@@ -87,7 +90,7 @@ class TgpuQuerySetImpl implements TgpuQuerySet {
     return {
       get readBuffer(): GPUBuffer {
         if (!self._readBuffer) {
-          self._readBuffer = self._group.device.createBuffer({
+          self._readBuffer = self.#device.createBuffer({
            size: self.count * BigUint64Array.BYTES_PER_ELEMENT,
            usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
          });
@@ -96,7 +99,7 @@ class TgpuQuerySetImpl implements TgpuQuerySet {
       },
       get resolveBuffer(): GPUBuffer {
         if (!self._resolveBuffer) {
-          self._resolveBuffer = self._group.device.createBuffer({
+          self._resolveBuffer = self.#device.createBuffer({
            size: self.count * BigUint64Array.BYTES_PER_ELEMENT,
            usage: GPUBufferUsage.QUERY_RESOLVE | GPUBufferUsage.COPY_SRC,
          });
@@ -122,7 +125,7 @@ class TgpuQuerySetImpl implements TgpuQuerySet {
       throw new Error('This QuerySet is busy resolving or reading.');
     }
 
-    const commandEncoder = this._group.device.createCommandEncoder();
+    const commandEncoder = this.#device.createCommandEncoder();
     commandEncoder.resolveQuerySet(
       this.querySet,
       0,
@@ -130,18 +133,17 @@ class TgpuQuerySetImpl implements TgpuQuerySet {
       this[$internal].resolveBuffer,
       0,
     );
-    this._group.device.queue.submit([commandEncoder.finish()]);
+    this.#device.queue.submit([commandEncoder.finish()]);
   }
 
   async read(): Promise<bigint[]> {
-    this._group.flush();
     if (!this._resolveBuffer) {
       throw new Error('QuerySet must be resolved before reading.');
     }
 
     this._available = false;
     try {
-      const commandEncoder = this._group.device.createCommandEncoder();
+      const commandEncoder = this.#device.createCommandEncoder();
       commandEncoder.copyBufferToBuffer(
         this[$internal].resolveBuffer,
         0,
@@ -149,8 +151,7 @@ class TgpuQuerySetImpl implements TgpuQuerySet {
         0,
         this.count * BigUint64Array.BYTES_PER_ELEMENT,
       );
-      this._group.device.queue.submit([commandEncoder.finish()]);
-      await this._group.device.queue.onSubmittedWorkDone();
+      this.#device.queue.submit([commandEncoder.finish()]);
 
       const readBuffer = this[$internal].readBuffer;
       await readBuffer.mapAsync(GPUMapMode.READ);
diff --git a/packages/typegpu/src/core/root/init.ts b/packages/typegpu/src/core/root/init.ts
index 86856bf60c..ec83817641 100644
--- a/packages/typegpu/src/core/root/init.ts
+++ b/packages/typegpu/src/core/root/init.ts
@@ -184,8 +184,6 @@ export class TgpuGuardedComputePipelineImpl
       workgroupCount.y,
       workgroupCount.z,
     );
-    // Yeah, i know we flush here... but it's only a matter of time!
-    this.#root.flush();
   }
 }
 
@@ -394,8 +392,6 @@ class TgpuRootImpl extends WithBindingImpl
     key.unwrap(this)
   );
 
-  private _commandEncoder: GPUCommandEncoder | null = null;
-
   [$internal]: {
     logOptions: LogGeneratorOptions;
   };
@@ -415,14 +411,6 @@ class TgpuRootImpl extends WithBindingImpl
     };
   }
 
-  get commandEncoder() {
-    if (!this._commandEncoder) {
-      this._commandEncoder = this.device.createCommandEncoder();
-    }
-
-    return this._commandEncoder;
-  }
-
   get enabledFeatures() {
     return new Set(this.device.features) as ReadonlySet<GPUFeatureName>;
   }
@@ -632,7 +620,8 @@ class TgpuRootImpl extends WithBindingImpl
     descriptor: GPURenderPassDescriptor,
     callback: (pass: RenderPass) => void,
   ): void {
-    const pass = this.commandEncoder.beginRenderPass(descriptor);
+    const commandEncoder = this.device.createCommandEncoder();
+    const pass = commandEncoder.beginRenderPass(descriptor);
 
     const bindGroups = new Map<
       TgpuBindGroupLayout,
@@ -779,15 +768,11 @@ class TgpuRootImpl extends WithBindingImpl
     });
 
     pass.end();
+    this.device.queue.submit([commandEncoder.finish()]);
   }
 
   flush() {
-    if (!this._commandEncoder) {
-      return;
-    }
-
-    this.device.queue.submit([this._commandEncoder.finish()]);
-    this._commandEncoder = null;
+    console.warn('flush() has been deprecated and has no effect.');
   }
 }
 
diff --git a/packages/typegpu/src/core/root/rootTypes.ts b/packages/typegpu/src/core/root/rootTypes.ts
index ec1cd338ba..38045b5047 100644
--- a/packages/typegpu/src/core/root/rootTypes.ts
+++ b/packages/typegpu/src/core/root/rootTypes.ts
@@ -750,11 +750,6 @@ export interface ExperimentalTgpuRoot extends TgpuRoot, WithBinding {
   readonly shaderGenerator?:
     | ShaderGenerator
     | undefined;
-  /**
-   * The current command encoder. This property will
-   * hold the same value until `flush()` is called.
-   */
-  readonly commandEncoder: GPUCommandEncoder;
 
   createTexture<
     TWidth extends number,
@@ -801,8 +796,9 @@ export interface ExperimentalTgpuRoot extends TgpuRoot, WithBinding {
   ): TgpuFixedComparisonSampler;
 
   /**
-   * Causes all commands enqueued by pipelines to be
-   * submitted to the GPU.
+   * @deprecated Commands are now submitted to the GPU as soon as they are
+   * encoded, so there is nothing left to flush. Calling this method is no
+   * longer necessary and has no effect.
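+   *
+   * @example
+   * A sketch of the new flow (assuming an existing compute pipeline and a
+   * readable buffer); no explicit flush is needed before reading back:
+   * ```ts
+   * pipeline.dispatchWorkgroups(1);
+   * const result = await buffer.read(); // the dispatch was already submitted
+   * ```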
   */
  flush(): void;
 }
diff --git a/packages/typegpu/tests/querySet.test.ts b/packages/typegpu/tests/querySet.test.ts
index 386147d8ac..0ec62f5ab1 100644
--- a/packages/typegpu/tests/querySet.test.ts
+++ b/packages/typegpu/tests/querySet.test.ts
@@ -87,7 +87,6 @@ describe('TgpuQuerySet', () => {
       2 * BigUint64Array.BYTES_PER_ELEMENT,
     );
     expect(device.queue.submit).toHaveBeenCalled();
-    expect(device.queue.onSubmittedWorkDone).toHaveBeenCalled();
     expect(readBuffer.mapAsync).toHaveBeenCalledWith(GPUMapMode.READ);
     expect(readBuffer.getMappedRange).toHaveBeenCalled();
     expect(readBuffer.unmap).toHaveBeenCalled();
diff --git a/packages/typegpu/tests/renderPipeline.test.ts b/packages/typegpu/tests/renderPipeline.test.ts
index 9adb4ecc83..c4fc9b9983 100644
--- a/packages/typegpu/tests/renderPipeline.test.ts
+++ b/packages/typegpu/tests/renderPipeline.test.ts
@@ -795,7 +795,7 @@ describe('TgpuRenderPipeline', () => {
     expect(() => pipelineWithIndex.drawIndexed(3)).not.toThrow();
   });
 
-  it('works when combining timestamp writes and index buffer', ({ root, device }) => {
+  it('works when combining timestamp writes and index buffer', ({ root, device, commandEncoder }) => {
     const vertexFn = tgpu['~unstable']
       .vertexFn({
         out: { pos: d.builtin.position },
@@ -811,7 +811,7 @@ describe('TgpuRenderPipeline', () => {
     const querySet = root.createQuerySet('timestamp', 2);
     const indexBuffer = root.createBuffer(d.arrayOf(d.u16, 2)).$usage('index');
 
-    const beginRenderPassSpy = vi.spyOn(root.commandEncoder, 'beginRenderPass');
+    const beginRenderPassSpy = vi.spyOn(commandEncoder, 'beginRenderPass');
 
     const pipeline = root
       .withVertex(vertexFn, {})
@@ -867,7 +867,7 @@ describe('TgpuRenderPipeline', () => {
     });
   });
 
-  it('should handle a combination of timestamp writes, index buffer, and performance callback', ({ root, device }) => {
+  it('should handle a combination of timestamp writes, index buffer, and performance callback', ({ root, device, commandEncoder }) => {
     const vertexFn = tgpu['~unstable']
       .vertexFn({
         out: { pos: d.builtin.position },
@@ -882,8 +882,8 @@ describe('TgpuRenderPipeline', () => {
     const querySet = root.createQuerySet('timestamp', 2);
     const indexBuffer = root.createBuffer(d.arrayOf(d.u16, 2)).$usage('index');
 
-    const beginRenderPassSpy = vi.spyOn(root.commandEncoder, 'beginRenderPass');
-    const resolveQuerySetSpy = vi.spyOn(root.commandEncoder, 'resolveQuerySet');
+    const beginRenderPassSpy = vi.spyOn(commandEncoder, 'beginRenderPass');
+    const resolveQuerySetSpy = vi.spyOn(commandEncoder, 'resolveQuerySet');
 
     const callback = vi.fn();
 
@@ -926,7 +926,7 @@ describe('TgpuRenderPipeline', () => {
       count: 2,
     });
 
-    expect(root.commandEncoder.beginRenderPass).toHaveBeenCalledWith({
+    expect(commandEncoder.beginRenderPass).toHaveBeenCalledWith({
       colorAttachments: [
         {
           loadOp: 'clear',
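
For reference, a minimal sketch of the usage this change implies, using only calls that already appear above (`dispatchWorkgroups`, `with`, `draw`, `read`); the root, pipeline, and buffer bindings are assumed to already exist:

```ts
// Compute: dispatching now encodes and submits its own command buffer immediately.
computePipeline.dispatchWorkgroups(64);

// Render: draw() likewise submits right away; no root['~unstable'].flush() afterwards.
renderPipeline
  .with(vertexLayout, colorBuffer)
  .draw(3);

// Reading back no longer needs an explicit flush beforehand.
const data = await resultBuffer.read();
```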