9 changes: 2 additions & 7 deletions apps/typegpu-docs/src/content/docs/fundamentals/pipelines.mdx
@@ -325,9 +325,6 @@ It accepts the number of vertices and optionally the instance count, first vertex index, and first instance index.
After calling the method, the shader is set for execution immediately.
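
For example, a minimal draw call could look like this (a sketch; `pipeline` is assumed to come from `createPipeline()` and `context` from earlier canvas setup):

```ts
pipeline
  .withColorAttachment({
    view: context.getCurrentTexture().createView(),
    loadOp: 'clear',
    storeOp: 'store',
  })
  .draw(3); // three vertices, a single instance
```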

Compute pipelines are executed using the `dispatchWorkgroups` method, which accepts the number of workgroups in each dimension.
Unlike render pipelines, after running this method, the execution is not submitted to the GPU immediately.
In order to do so, `root['~unstable'].flush()` needs to be run.
However, that is usually not necessary, as it is done automatically when trying to read the result of computation.
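
A minimal dispatch could look like this (a sketch; `computePipeline`, `bindGroup`, `width`, `height`, and the 16×16 workgroup size are placeholders from earlier setup):

```ts
computePipeline
  .with(bindGroup)
  .dispatchWorkgroups(
    Math.ceil(width / 16),
    Math.ceil(height / 16),
  );
```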

### Drawing with `drawIndexed`

@@ -376,14 +373,14 @@ const mainFragment = tgpu['~unstable'].fragmentFn({
const indexBuffer = root
.createBuffer(d.arrayOf(d.u16, 6), [0, 2, 1, 0, 3, 2])
.$usage('index');

const pipeline = root['~unstable']
.withVertex(vertex, { color: vertexLayout.attrib })
.withFragment(mainFragment, { format: presentationFormat })
.createPipeline()
.withIndexBuffer(indexBuffer);

pipeline
pipeline
.with(vertexLayout, colorBuffer)
.drawIndexed(6);
```
@@ -407,8 +404,6 @@ root['~unstable'].beginRenderPass(
pass.draw(3);
},
);

root['~unstable'].flush();
```

It is also possible to access the underlying WebGPU resources of TypeGPU pipelines by calling `root.unwrap(pipeline)`.
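
For instance (a sketch, assuming `pipeline` is the render pipeline created above):

```ts
const rawPipeline = root.unwrap(pipeline); // the plain GPURenderPipeline
// From here it can be used directly with the raw WebGPU API.
```
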
5 changes: 2 additions & 3 deletions apps/typegpu-docs/src/examples/image-processing/blur/index.ts
@@ -65,7 +65,7 @@ const ioLayout = tgpu.bindGroupLayout({
outTexture: { storageTexture: d.textureStorage2d('rgba8unorm') },
});

const tileData = tgpu['~unstable'].workgroupVar(
const tileData = tgpu.workgroupVar(
d.arrayOf(d.arrayOf(d.vec3f, 128), 4),
);

@@ -191,7 +191,7 @@ function render() {

for (const i of indices) {
computePipeline
.with(ioLayout, ioBindGroups[i])
.with(ioBindGroups[i])
.dispatchWorkgroups(
Math.ceil(srcWidth / settings.blockDim),
Math.ceil(srcHeight / 4),
@@ -203,7 +203,6 @@
loadOp: 'clear',
storeOp: 'store',
}).draw(3);
root['~unstable'].flush();
}
render();

2 changes: 0 additions & 2 deletions apps/typegpu-docs/src/examples/rendering/3d-fish/index.ts
@@ -304,8 +304,6 @@ function frame(timestamp: DOMHighResTimeStamp) {
.with(renderFishBindGroups[odd ? 1 : 0])
.draw(fishModel.polygonCount, p.fishAmount);

root['~unstable'].flush();

requestAnimationFrame(frame);
}
enqueuePresetChanges();
@@ -310,8 +310,6 @@ export class IcosphereGenerator {
.with(bindGroup)
.dispatchWorkgroups(xGroups, yGroups, 1);

this.root['~unstable'].flush();

return nextBuffer;
}

@@ -315,8 +315,6 @@ function render() {
.with(renderBindGroup)
.with(textureBindGroup)
.draw(vertexBuffer.dataType.elementCount);

root['~unstable'].flush();
}

function loop() {
@@ -352,7 +352,6 @@ function render() {
}
},
);
root['~unstable'].flush();
}
frameId = requestAnimationFrame(render);

@@ -303,7 +303,6 @@ function render() {
drawObject(cubeBuffer, bindGroup, 36, 'clear');
drawObject(secondCubeBuffer, secondBindGroup, 36, 'load');
drawObject(planeBuffer, planeBindGroup, 6, 'load');
root['~unstable'].flush();
}

function frame() {
14 changes: 6 additions & 8 deletions apps/typegpu-docs/src/examples/simulation/slime-mold-3d/index.ts
@@ -487,14 +487,16 @@ function frame() {

params.writePartial({ deltaTime });

blurPipeline.with(computeLayout, bindGroups[currentTexture])
blurPipeline
.with(bindGroups[currentTexture])
.dispatchWorkgroups(
Math.ceil(resolution.x / BLUR_WORKGROUP_SIZE[0]),
Math.ceil(resolution.y / BLUR_WORKGROUP_SIZE[1]),
Math.ceil(resolution.z / BLUR_WORKGROUP_SIZE[2]),
);

computePipeline.with(computeLayout, bindGroups[currentTexture])
computePipeline
.with(bindGroups[currentTexture])
.dispatchWorkgroups(
Math.ceil(NUM_AGENTS / AGENT_WORKGROUP_SIZE),
);
@@ -505,12 +507,8 @@ function frame() {
loadOp: 'clear',
storeOp: 'store',
})
.with(
renderLayout,
renderBindGroups[1 - currentTexture],
).draw(3);

root['~unstable'].flush();
.with(renderBindGroups[1 - currentTexture])
.draw(3);

currentTexture = 1 - currentTexture;

4 changes: 2 additions & 2 deletions apps/typegpu-docs/src/examples/tests/texture-test/index.ts
@@ -80,14 +80,14 @@ const pipeline = root['~unstable']

function render() {
pipeline
.with(layout, bindGroup)
.with(bindGroup)
.withColorAttachment({
view: context.getCurrentTexture().createView(),
loadOp: 'clear',
storeOp: 'store',
})
.draw(3);
root['~unstable'].flush();

requestAnimationFrame(render);
}
requestAnimationFrame(render);
40 changes: 14 additions & 26 deletions packages/typegpu/src/core/buffer/buffer.ts
@@ -176,6 +176,8 @@ class TgpuBufferImpl<TData extends AnyData> implements TgpuBuffer<TData> {
public readonly resourceType = 'buffer';
public flags: GPUBufferUsageFlags = GPUBufferUsage.COPY_DST |
GPUBufferUsage.COPY_SRC;

readonly #device: GPUDevice;
private _buffer: GPUBuffer | null = null;
private _ownBuffer: boolean;
private _destroyed = false;
@@ -189,12 +191,13 @@ class TgpuBufferImpl<TData extends AnyData> implements TgpuBuffer<TData> {
usableAsIndex = false;

constructor(
private readonly _group: ExperimentalTgpuRoot,
root: ExperimentalTgpuRoot,
public readonly dataType: TData,
public readonly initialOrBuffer?: Infer<TData> | GPUBuffer | undefined,
private readonly _disallowedUsages?:
('uniform' | 'storage' | 'vertex' | 'index')[],
) {
this.#device = root.device;
if (isGPUBuffer(initialOrBuffer)) {
this._ownBuffer = false;
this._buffer = initialOrBuffer;
@@ -205,14 +208,12 @@ class TgpuBufferImpl<TData extends AnyData> implements TgpuBuffer<TData> {
}

get buffer() {
const device = this._group.device;

if (this._destroyed) {
throw new Error('This buffer has been destroyed');
}

if (!this._buffer) {
this._buffer = device.createBuffer({
this._buffer = this.#device.createBuffer({
size: sizeOf(this.dataType),
usage: this.flags,
mappedAtCreation: !!this.initial,
@@ -317,7 +318,6 @@ class TgpuBufferImpl<TData extends AnyData> implements TgpuBuffer<TData> {

write(data: Infer<TData>): void {
const gpuBuffer = this.buffer;
const device = this._group.device;

if (gpuBuffer.mapState === 'mapped') {
const mapped = gpuBuffer.getMappedRange();
@@ -330,16 +330,12 @@ class TgpuBufferImpl<TData extends AnyData> implements TgpuBuffer<TData> {
this._hostBuffer = new ArrayBuffer(size);
}

// Flushing any commands yet to be encoded.
this._group.flush();

this._writeToTarget(this._hostBuffer, data);
device.queue.writeBuffer(gpuBuffer, 0, this._hostBuffer, 0, size);
this.#device.queue.writeBuffer(gpuBuffer, 0, this._hostBuffer, 0, size);
}

public writePartial(data: InferPartial<TData>): void {
const gpuBuffer = this.buffer;
const device = this._group.device;

const instructions = getWriteInstructions(this.dataType, data);

@@ -352,7 +348,7 @@ class TgpuBufferImpl<TData extends AnyData> implements TgpuBuffer<TData> {
}
} else {
for (const instruction of instructions) {
device.queue.writeBuffer(
this.#device.queue.writeBuffer(
gpuBuffer,
instruction.data.byteOffset,
instruction.data,
@@ -365,19 +361,15 @@ class TgpuBufferImpl<TData extends AnyData> implements TgpuBuffer<TData> {

public clear(): void {
const gpuBuffer = this.buffer;
const device = this._group.device;

if (gpuBuffer.mapState === 'mapped') {
new Uint8Array(gpuBuffer.getMappedRange()).fill(0);
return;
}

// Flushing any commands yet to be encoded.
this._group.flush();

const encoder = device.createCommandEncoder();
const encoder = this.#device.createCommandEncoder();
encoder.clearBuffer(gpuBuffer);
device.queue.submit([encoder.finish()]);
this.#device.queue.submit([encoder.finish()]);
}

copyFrom(srcBuffer: TgpuBuffer<MemIdentity<TData>>): void {
@@ -386,16 +378,13 @@ class TgpuBufferImpl<TData extends AnyData> implements TgpuBuffer<TData> {
}

const size = sizeOf(this.dataType);
const encoder = this._group.commandEncoder;
const encoder = this.#device.createCommandEncoder();
encoder.copyBufferToBuffer(srcBuffer.buffer, 0, this.buffer, 0, size);
this.#device.queue.submit([encoder.finish()]);
}

async read(): Promise<Infer<TData>> {
// Flushing any commands yet to be encoded.
this._group.flush();

const gpuBuffer = this.buffer;
const device = this._group.device;

if (gpuBuffer.mapState === 'mapped') {
const mapped = gpuBuffer.getMappedRange();
@@ -410,12 +399,12 @@ class TgpuBufferImpl<TData extends AnyData> implements TgpuBuffer<TData> {
return res;
}

const stagingBuffer = device.createBuffer({
const stagingBuffer = this.#device.createBuffer({
size: sizeOf(this.dataType),
usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
});

const commandEncoder = device.createCommandEncoder();
const commandEncoder = this.#device.createCommandEncoder();
commandEncoder.copyBufferToBuffer(
gpuBuffer,
0,
@@ -424,8 +413,7 @@ class TgpuBufferImpl<TData extends AnyData> implements TgpuBuffer<TData> {
sizeOf(this.dataType),
);

device.queue.submit([commandEncoder.finish()]);
await device.queue.onSubmittedWorkDone();
this.#device.queue.submit([commandEncoder.finish()]);
await stagingBuffer.mapAsync(GPUMapMode.READ, 0, sizeOf(this.dataType));

const res = readData(
4 changes: 3 additions & 1 deletion packages/typegpu/src/core/pipeline/computePipeline.ts
@@ -190,7 +190,8 @@ class TgpuComputePipelineImpl implements TgpuComputePipeline {
...setupTimestampWrites(this._priors, branch),
};

const pass = branch.commandEncoder.beginComputePass(passDescriptor);
const commandEncoder = branch.device.createCommandEncoder();
const pass = commandEncoder.beginComputePass(passDescriptor);

pass.setPipeline(memo.pipeline);

@@ -216,6 +217,7 @@

pass.dispatchWorkgroups(x, y, z);
pass.end();
branch.device.queue.submit([commandEncoder.finish()]);

if (memo.logResources) {
logDataFromGPU(memo.logResources);