66 changes: 50 additions & 16 deletions lib/std/Progress.zig
@@ -40,7 +40,7 @@ draw_buffer: []u8,
node_parents: []Node.Parent,
node_storage: []Node.Storage,
node_freelist: []Node.OptionalIndex,
node_freelist_first: Node.OptionalIndex,
node_freelist_first: Node.GenOptIndex,
node_end_index: u32,

pub const TerminalMode = union(enum) {
@@ -142,6 +142,30 @@ pub const Node = struct {
}
};

/// Generational optional index. The generation tag lets compare-and-swap
/// operations on the freelist head detect a recycled index (the ABA problem).
const GenOptIndex = packed struct(u32) {
opt_index: u8,
generation: u24,

const none_gen0: @This() = .{
.generation = 0,
.opt_index = @intFromEnum(OptionalIndex.none),
};

fn unwrap_index(i: @This()) ?Index {
Collaborator: Function names in Zig are camel case
const oi: OptionalIndex = @enumFromInt(i.opt_index);
return oi.unwrap();
}

fn without_generation(i: @This()) OptionalIndex {
return @enumFromInt(i.opt_index);
}

fn next_generation(i: @This()) u24 {
return i.generation +% 1;
}
};
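
Since the whole head fits in a `u32`, the freelist head stays updatable with a single 32-bit `@cmpxchgWeak`, and bumping the 24-bit generation on every successful swap makes a stale copy of the head compare unequal even after its 8-bit index has been recycled. A minimal, self-contained sketch of that property (`TaggedHead` is a hypothetical stand-in for `GenOptIndex`):

```zig
const std = @import("std");

// Same packing idea as GenOptIndex: 8-bit index + 24-bit generation in one u32.
const TaggedHead = packed struct(u32) {
    opt_index: u8,
    generation: u24,
};

test "a recycled index with a bumped generation loses a stale CAS" {
    const before: TaggedHead = .{ .opt_index = 3, .generation = 0 };
    const after: TaggedHead = .{ .opt_index = 3, .generation = 1 };
    // Same slot index, different raw u32 value: a thread still holding
    // `before` as its expected value cannot win a compare-and-swap.
    try std.testing.expect(@as(u32, @bitCast(before)) != @as(u32, @bitCast(after)));
}
```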

pub const OptionalIndex = enum(u8) {
none = std.math.maxInt(u8),
/// Index into `node_storage`.
@@ -156,6 +180,10 @@ pub const Node = struct {
assert(@intFromEnum(i) != @intFromEnum(Parent.unused));
return @enumFromInt(@intFromEnum(i));
}

fn withGeneration(i: @This(), gen: u24) GenOptIndex {
return .{ .generation = gen, .opt_index = @intFromEnum(i) };
}
};

/// Index into `node_storage`.
@@ -185,22 +213,26 @@ pub const Node = struct {
const parent = node_index.toParent();

const freelist_head = &global_progress.node_freelist_first;
var opt_free_index = @atomicLoad(Node.OptionalIndex, freelist_head, .seq_cst);
while (opt_free_index.unwrap()) |free_index| {
var opt_free_index = @atomicLoad(Node.GenOptIndex, freelist_head, .seq_cst);
while (opt_free_index.unwrap_index()) |free_index| {
const freelist_ptr = freelistByIndex(free_index);
const next = @atomicLoad(Node.OptionalIndex, freelist_ptr, .seq_cst);
opt_free_index = @cmpxchgWeak(Node.OptionalIndex, freelist_head, opt_free_index, next, .seq_cst, .seq_cst) orelse {
const new_free = next.withGeneration(opt_free_index.next_generation());
opt_free_index = @cmpxchgWeak(Node.GenOptIndex, freelist_head, opt_free_index, new_free, .seq_cst, .seq_cst) orelse {
// We won the allocation race.
return init(free_index, parent, name, estimated_total_items);
};
}

const free_index = @atomicRmw(u32, &global_progress.node_end_index, .Add, 1, .monotonic);
if (free_index >= global_progress.node_storage.len) {
// Ran out of node storage memory. Progress for this node will not be tracked.
_ = @atomicRmw(u32, &global_progress.node_end_index, .Sub, 1, .monotonic);
return Node.none;
}
var cur_free_index = @atomicLoad(u32, &global_progress.node_end_index, .monotonic);
const free_index = brk: while (true) {
const new_node_end_index = cur_free_index + 1;
if (new_node_end_index >= global_progress.node_storage.len) {
// Ran out of node storage memory. Progress for this node will not be tracked.
return Node.none;
}
cur_free_index = @cmpxchgWeak(u32, &global_progress.node_end_index, cur_free_index, new_node_end_index, .monotonic, .monotonic) orelse break :brk cur_free_index;
};

return init(@enumFromInt(free_index), parent, name, estimated_total_items);
}
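
The hunk above also replaces the old fetch-add/fetch-sub pair with a CAS loop. The benefit, as I read it (the diff doesn't say so explicitly), is that the old version could transiently publish a `node_end_index` past the end of `node_storage` before the compensating subtract landed, whereas the CAS loop only ever stores in-range values. A sketch of the pattern as a standalone helper (`reserveSlot` is a hypothetical name; the bound check mirrors the diff, which stops one slot short of capacity):

```zig
const std = @import("std");

// Reserve the next slot, or fail without ever storing an out-of-range index.
fn reserveSlot(end_index: *u32, capacity: u32) ?u32 {
    var cur = @atomicLoad(u32, end_index, .monotonic);
    while (cur + 1 < capacity) {
        cur = @cmpxchgWeak(u32, end_index, cur, cur + 1, .monotonic, .monotonic) orelse return cur;
    }
    return null; // storage exhausted; the caller falls back to Node.none
}

test "reserveSlot reserves then reports exhaustion" {
    var end: u32 = 0;
    try std.testing.expectEqual(@as(?u32, 0), reserveSlot(&end, 2));
    try std.testing.expectEqual(@as(?u32, null), reserveSlot(&end, 2));
}
```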
@@ -243,15 +275,16 @@ pub const Node = struct {
}
const index = n.index.unwrap() orelse return;
const parent_ptr = parentByIndex(index);
if (parent_ptr.unwrap()) |parent_index| {
if (@atomicLoad(Parent, parent_ptr, .acquire).unwrap()) |parent_index| {
Contributor Author: I added the acquire-load to match the other acquire-load of parentByIndex() in Node.init(). I think it makes sense, but tbh I'm not 100% sure it's necessary.
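
For context on why the pairing makes sense: `Node.init` publishes a node with a `.release` store to its parent slot, so an `.acquire` load of the same slot is what entitles the loader to also see the stores to the node's storage made before publication. A generic sketch of that message-passing idiom (hypothetical names, not code from this PR):

```zig
var payload: u32 = 0;
var published: bool = false;

// The plain store to `payload` is ordered before the .release store.
fn publish() void {
    payload = 42;
    @atomicStore(bool, &published, true, .release);
}

// A reader that observes `published` with .acquire is guaranteed to
// see the payload written before the matching release.
fn consume() ?u32 {
    if (!@atomicLoad(bool, &published, .acquire)) return null;
    return payload;
}
```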

_ = @atomicRmw(u32, &storageByIndex(parent_index).completed_count, .Add, 1, .monotonic);
@atomicStore(Node.Parent, parent_ptr, .unused, .seq_cst);

const freelist_head = &global_progress.node_freelist_first;
var first = @atomicLoad(Node.OptionalIndex, freelist_head, .seq_cst);
var first = @atomicLoad(Node.GenOptIndex, freelist_head, .seq_cst);
while (true) {
@atomicStore(Node.OptionalIndex, freelistByIndex(index), first, .seq_cst);
first = @cmpxchgWeak(Node.OptionalIndex, freelist_head, first, index.toOptional(), .seq_cst, .seq_cst) orelse break;
@atomicStore(Node.OptionalIndex, freelistByIndex(index), first.without_generation(), .seq_cst);
const new_first = index.toOptional().withGeneration(first.next_generation());
first = @cmpxchgWeak(Node.GenOptIndex, freelist_head, first, new_first, .seq_cst, .seq_cst) orelse break;
}
} else {
@atomicStore(bool, &global_progress.done, true, .seq_cst);
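
This push loop is one half of a tagged Treiber stack; the matching pop lives in `Node.start` above. The generation bump is what closes the classic ABA window: a thread that read the head, stalled, and woke after the same index had been popped and pushed back would otherwise still win its CAS against a now-stale next link. A self-contained sketch of the pair (hypothetical names; per-node links stay untagged `u8`s, as in the diff):

```zig
const std = @import("std");

const TaggedHead = packed struct(u32) {
    index: u8, // 0xFF plays the role of OptionalIndex.none
    generation: u24,
};

fn push(head: *TaggedHead, links: []u8, index: u8) void {
    var first = @atomicLoad(TaggedHead, head, .seq_cst);
    while (true) {
        @atomicStore(u8, &links[index], first.index, .seq_cst); // node.next = old head
        const tagged: TaggedHead = .{ .index = index, .generation = first.generation +% 1 };
        first = @cmpxchgWeak(TaggedHead, head, first, tagged, .seq_cst, .seq_cst) orelse break;
    }
}

fn pop(head: *TaggedHead, links: []u8) ?u8 {
    var first = @atomicLoad(TaggedHead, head, .seq_cst);
    while (first.index != 0xFF) {
        const next = @atomicLoad(u8, &links[first.index], .seq_cst);
        const tagged: TaggedHead = .{ .index = next, .generation = first.generation +% 1 };
        first = @cmpxchgWeak(TaggedHead, head, first, tagged, .seq_cst, .seq_cst) orelse return first.index;
    }
    return null;
}

test "push then pop round-trips an index" {
    var head: TaggedHead = .{ .index = 0xFF, .generation = 0 };
    var links: [4]u8 = .{ 0xFF, 0xFF, 0xFF, 0xFF };
    push(&head, &links, 2);
    try std.testing.expectEqual(@as(?u8, 2), pop(&head, &links));
    try std.testing.expectEqual(@as(?u8, null), pop(&head, &links));
}
```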
@@ -307,7 +340,8 @@ pub const Node = struct {
@atomicStore(u8, &storage.name[name_len], 0, .monotonic);

const parent_ptr = parentByIndex(free_index);
assert(parent_ptr.* == .unused);
const current_parent = @atomicLoad(Node.Parent, parent_ptr, .acquire);
Contributor Author: In non-safe builds this introduces an unnecessary acquire-load. Not sure what to do about that.

assert(current_parent == .unused);
@atomicStore(Node.Parent, parent_ptr, parent, .release);

return .{ .index = free_index.toOptional() };
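
On the author's note above about non-safe builds: assuming the acquire ordering is only needed to make the assert meaningful (and not for publication ordering in its own right), one untested option that is not part of this PR would be to gate the check on `std.debug.runtime_safety`, so the load is compiled out along with the assert:

```zig
// Hypothetical alternative, not in the diff: only pay for the atomic
// load when runtime safety checks are enabled.
if (std.debug.runtime_safety) {
    const current_parent = @atomicLoad(Node.Parent, parent_ptr, .acquire);
    assert(current_parent == .unused);
}
@atomicStore(Node.Parent, parent_ptr, parent, .release);
```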
@@ -330,7 +364,7 @@ var global_progress: Progress = .{
.node_parents = &node_parents_buffer,
.node_storage = &node_storage_buffer,
.node_freelist = &node_freelist_buffer,
.node_freelist_first = .none,
.node_freelist_first = .none_gen0,
.node_end_index = 0,
};
