From 1d46c1317ce45702b908066f3ef1cc8ada1a7088 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=ADctor=20Rold=C3=A1n=20Betancort?= Date: Wed, 21 Feb 2024 09:40:43 +0000 Subject: [PATCH] reduces chunking allocations for wide relations For most situations, when the number of elements to dispatch is below the default 100 elements, the slice with elements to dispatch was appropriately pre-allocated. However, when dispatching a large number of elements, the slice will grow from 1 up to the number of chunks, causing wasteful allocations in the critical path. To simplify the logic, +1 is added to account for the situation where the number of elements is less than the chunk size. For values above that it will cause one excess slice entry allocation, but seems like a reasonable tradeoff w.r.t extra annoying code to handle it. --- internal/graph/check.go | 10 ++++++++-- pkg/tuple/onrbytypeset.go | 14 ++++++++++++-- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/internal/graph/check.go b/internal/graph/check.go index 70d7f716d3..e8958c0b43 100644 --- a/internal/graph/check.go +++ b/internal/graph/check.go @@ -383,7 +383,10 @@ func (cc *ConcurrentChecker) checkDirect(ctx context.Context, crc currentRequest it.Close() // Convert the subjects into batched requests. - toDispatch := make([]directDispatch, 0, subjectsToDispatch.Len()) + // To simplify the logic, +1 is added to account for the situation where + // the number of elements is less than the chunk size, and spare us some annoying code. 
+ expectedNumberOfChunks := subjectsToDispatch.ValueLen()/int(crc.maxDispatchCount) + 1 + toDispatch := make([]directDispatch, 0, expectedNumberOfChunks) subjectsToDispatch.ForEachType(func(rr *core.RelationReference, resourceIds []string) { chunkCount := 0.0 slicez.ForEachChunk(resourceIds, crc.maxDispatchCount, func(resourceIdChunk []string) { @@ -601,7 +604,10 @@ func (cc *ConcurrentChecker) checkTupleToUserset(ctx context.Context, crc curren it.Close() // Convert the subjects into batched requests. - toDispatch := make([]directDispatch, 0, subjectsToDispatch.Len()) + // To simplify the logic, +1 is added to account for the situation where + // the number of elements is less than the chunk size, and spare us some annoying code. + expectedNumberOfChunks := subjectsToDispatch.ValueLen()/int(crc.maxDispatchCount) + 1 + toDispatch := make([]directDispatch, 0, expectedNumberOfChunks) subjectsToDispatch.ForEachType(func(rr *core.RelationReference, resourceIds []string) { chunkCount := 0.0 slicez.ForEachChunk(resourceIds, crc.maxDispatchCount, func(resourceIdChunk []string) { diff --git a/pkg/tuple/onrbytypeset.go b/pkg/tuple/onrbytypeset.go index 2d02481ee0..56da96ddbc 100644 --- a/pkg/tuple/onrbytypeset.go +++ b/pkg/tuple/onrbytypeset.go @@ -66,7 +66,17 @@ func (s *ONRByTypeSet) IsEmpty() bool { return len(s.byType) == 0 } -// Len returns the number of keys in the set. -func (s *ONRByTypeSet) Len() int { +// KeyLen returns the number of keys in the set. +func (s *ONRByTypeSet) KeyLen() int { return len(s.byType) } + +// ValueLen returns the number of values in the set. +func (s *ONRByTypeSet) ValueLen() int { + var total int + for _, vals := range s.byType { + total += len(vals) + } + + return total +}