Skip to content

Commit

Permalink
MemArena max pending free is now a template parameter
Browse files Browse the repository at this point in the history
VMemVector is much smaller now thanks to this (40 bytes instead of 144) since it doesn't need to support out of order frees.
  • Loading branch information
jlaumon committed Dec 15, 2024
1 parent 9a23140 commit 2a498d9
Show file tree
Hide file tree
Showing 8 changed files with 223 additions and 158 deletions.
42 changes: 26 additions & 16 deletions Bedrock/Allocator.h
Original file line number Diff line number Diff line change
Expand Up @@ -33,13 +33,14 @@ struct TempAllocator
};



// Allocates from an externally provided MemArena.
template <typename taType>
struct ArenaAllocator
template <typename taType, int taMaxPendingFrees>
struct ArenaAllocatorBase
{
ArenaAllocator() = default;
ArenaAllocator(MemArena& inArena) : mArena(&inArena) {}
using MemArenaType = MemArena<taMaxPendingFrees>;

ArenaAllocatorBase() = default;
ArenaAllocatorBase(MemArenaType& inArena) : mArena(&inArena) {}

// Allocate memory.
taType* Allocate(int inSize) { return (taType*)mArena->Alloc(inSize * sizeof(taType)).mPtr; }
Expand All @@ -48,22 +49,28 @@ struct ArenaAllocator
// Try changing the size of an existing allocation, return false if unsuccessful.
bool TryRealloc(taType* inPtr, int inCurrentSize, int inNewSize);

MemArena* GetArena() { return mArena; }
const MemArena* GetArena() const { return mArena; }
MemArenaType* GetArena() { return mArena; }
const MemArenaType* GetArena() const { return mArena; }

private:
MemArena* mArena = nullptr;
MemArenaType* mArena = nullptr;
};

// Shorter version with default number of allowed out of order frees.
// This alias is needed because containers only accept allocators with a single template parameter.
template <typename taType>
using ArenaAllocator = ArenaAllocatorBase<taType, cDefaultMaxPendingFrees>;


// Allocates from an internal VMemArena which uses virtual memory.
// The VMemArena can grow as necessary by committing more virtual memory.
template <typename taType>
struct VMemAllocator
{
static constexpr int64 cDefaultReservedSize = VMemArena::cDefaultReservedSize; // By default the arena will reserve that much virtual memory.
static constexpr int64 cDefaultCommitSize = VMemArena::cDefaultCommitSize; // By default the arena will commit that much virtual memory every time it grows.
using VMemArenaType = VMemArena<0>; // Don't need to support any out of order free since the arena isn't shared.

static constexpr int64 cDefaultReservedSize = VMemArenaType::cDefaultReservedSize; // By default the arena will reserve that much virtual memory.
static constexpr int64 cDefaultCommitSize = VMemArenaType::cDefaultCommitSize; // By default the arena will commit that much virtual memory every time it grows.

VMemAllocator() = default;
VMemAllocator(int inReserveSizeInBytes, int inCommitIncreaseSizeInBytes = cDefaultCommitSize)
Expand All @@ -77,7 +84,7 @@ struct VMemAllocator
bool TryRealloc(taType* inPtr, int inCurrentSize, int inNewSize);

private:
VMemArena mArena;
VMemArenaType mArena;
};


Expand Down Expand Up @@ -118,8 +125,8 @@ bool TempAllocator<taType>::TryRealloc(taType* inPtr, int inCurrentSize, int inN
}


template <typename taType> bool
ArenaAllocator<taType>::TryRealloc(taType* inPtr, int inCurrentSize, int inNewSize)
template <typename taType, int taMaxPendingFrees>
bool ArenaAllocatorBase<taType, taMaxPendingFrees>::TryRealloc(taType* inPtr, int inCurrentSize, int inNewSize)
{
gAssert(inPtr != nullptr); // Call Allocate instead.

Expand All @@ -128,17 +135,20 @@ ArenaAllocator<taType>::TryRealloc(taType* inPtr, int inCurrentSize, int inNewSi
}


template <typename taType> taType* VMemAllocator<taType>::Allocate(int inSize)
template <typename taType>
taType* VMemAllocator<taType>::Allocate(int inSize)
{
// If the arena wasn't initialized yet, do it now (with default values).
// It's better to do it lazily than reserving virtual memory in every container default constructor.
if (mArena.GetMemBlock() == nullptr) [[unlikely]]
mArena = VMemArena(cDefaultReservedSize, cDefaultCommitSize);
mArena = VMemArenaType(cDefaultReservedSize, cDefaultCommitSize);

return (taType*)mArena.Alloc(inSize * sizeof(taType)).mPtr;
}

template <typename taType> bool VMemAllocator<taType>::TryRealloc(taType* inPtr, int inCurrentSize, int inNewSize)

template <typename taType>
bool VMemAllocator<taType>::TryRealloc(taType* inPtr, int inCurrentSize, int inNewSize)
{
gAssert(inPtr != nullptr); // Call Allocate instead.

Expand Down
9 changes: 8 additions & 1 deletion Bedrock/Assert.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -33,5 +33,12 @@ bool gReportAssert(const char* inCondition, const char* inFile, int inLine)
return true;
}

#endif

#endif

// Print a message then deliberately crash the process.
// Declared [[noreturn]] in Assert.h; CRASH (__builtin_trap/__ud2) never returns.
void gCrash(const char* inMessage)
{
	// Log the message first so it shows up in the trace output before the crash.
	gTrace("%s", inMessage);

	CRASH;
}
20 changes: 15 additions & 5 deletions Bedrock/Assert.h
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,18 @@
#pragma once

// Break to the debugger (or crash if no debugger is attached).
#define breakpoint __debugbreak()
#define BREAKPOINT __debugbreak()

// Cause a crash.
// Expands to an intrinsic that executes an invalid instruction (ud2 on x86),
// so the process dies immediately with no chance of the "crash" being skipped.
#if defined(__clang__) || defined(__GNUC__)
// Clang and GCC both provide __builtin_trap (previously only Clang was handled,
// making GCC fall into the #error branch for no reason).
#define CRASH __builtin_trap()
#elif defined(_MSC_VER)
// MSVC intrinsic emitting the ud2 instruction. Note: use defined(_MSC_VER)
// rather than a bare #elif _MSC_VER, which silently relies on undefined
// macros evaluating to 0 in preprocessor conditions.
extern "C" void __ud2();
#define CRASH __ud2()
#else
#error Unknown compiler
#endif


#ifdef ASSERTS_ENABLED

Expand All @@ -11,7 +22,7 @@
do \
{ \
if (!(condition) && gReportAssert(#condition, __FILE__, __LINE__)) [[unlikely]] \
breakpoint; \
BREAKPOINT; \
} while (0)

// Internal assert report function. Return true if it should break.
Expand All @@ -27,6 +38,5 @@ bool gReportAssert(const char* inCondition, const char* inFile, int inLine);
#define gBoundsCheck(index, size) gAssert((index) >= 0 && (index) < (size))


// Force a crash.
// TODO: This needs to be improved. Do something with the message, do a proper crash instead of a breakpoint.
[[noreturn]] inline void gCrash(const char* inMessage) { breakpoint; }
// Print a message then crash.
[[noreturn]] void gCrash(const char* inMessage);
100 changes: 1 addition & 99 deletions Bedrock/MemoryArena.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2,109 +2,11 @@
#include <Bedrock/MemoryArena.h>
#include <Bedrock/Test.h>

// Record a block that was freed out of order (ie. not from the top of the arena).
// The pending list is kept sorted by offset and adjacent blocks are coalesced,
// so TryRemovePendingFrees only ever has to look at the last entry.
// Crashes if more than cMaxPendingFrees non-mergeable blocks accumulate.
void MemArena::AddPendingFree(FreeBlock inFreeBlock)
{
	// Walk the sorted list to find where this block belongs.
	for (int i = 0; i < mNumPendingFrees; i++)
	{
		if (inFreeBlock.mEndOffset < mPendingFrees[i].BeginOffset())
		{
			// Inserting before.
			if (mNumPendingFrees == cMaxPendingFrees) [[unlikely]]
				gCrash("MemArena: too many out of order frees");

			// Move all the blocks towards the back to make room.
			gMemMove(&mPendingFrees[i + 1], &mPendingFrees[i], sizeof(FreeBlock) * (mNumPendingFrees - i));

			// Place the new block.
			mPendingFrees[i] = inFreeBlock;
			mNumPendingFrees++;

			return;
		}

		if (inFreeBlock.mEndOffset == mPendingFrees[i].BeginOffset())
		{
			// Inserting just before, merge instead.
			// (BeginOffset is presumably mEndOffset - mSize, so growing mSize while
			// keeping mEndOffset extends the block backwards — TODO confirm.)
			mPendingFrees[i].mSize += inFreeBlock.mSize;

			return;
		}

		if (inFreeBlock.BeginOffset() == mPendingFrees[i].mEndOffset)
		{
			// Inserting just after, merge instead.
			mPendingFrees[i].mEndOffset = inFreeBlock.mEndOffset;
			mPendingFrees[i].mSize += inFreeBlock.mSize;

			// Check if the next block can be merged as well now
			// (the new block may have bridged the gap between block i and i + 1).
			if ((i + 1) < mNumPendingFrees)
			{
				if (mPendingFrees[i].mEndOffset == mPendingFrees[i + 1].BeginOffset())
				{
					// Merge the next block.
					mPendingFrees[i].mEndOffset = mPendingFrees[i + 1].mEndOffset;
					mPendingFrees[i].mSize += mPendingFrees[i + 1].mSize;

					// Move all the following blocks towards the front to fill the gap.
					gMemMove(&mPendingFrees[i + 1], &mPendingFrees[i + 2], sizeof(FreeBlock) * (mNumPendingFrees - 2 - i));
					mNumPendingFrees--;
				}
			}

			return;
		}
	}

	// Otherwise add it at the back of the list.
	if (mNumPendingFrees == cMaxPendingFrees)
		gCrash("MemArena: too many out of order frees");

	mPendingFrees[mNumPendingFrees] = inFreeBlock;
	mNumPendingFrees++;
}


void MemArena::TryRemovePendingFrees()
{
if (mNumPendingFrees == 0)
return;

// Pending blocks are sorted and coalesced, so we only need to check the last one.
if (mPendingFrees[mNumPendingFrees - 1].mEndOffset == mCurrentOffset)
{
// Free it.
mCurrentOffset -= mPendingFrees[mNumPendingFrees - 1].mSize;
mNumPendingFrees--;
}
}


// Release the reserved virtual memory range on destruction (RAII).
VMemArena::~VMemArena()
{
	FreeReserved();
}

void VMemArena::CommitMore(int inNewEndOffset)
{
gAssert(inNewEndOffset > mEndOffset);

int64 commit_size = gMax(mCommitIncreaseSize, (inNewEndOffset - mEndOffset));
MemBlock committed_mem = gVMemCommit({ mBeginPtr + mEndOffset, commit_size });

mEndOffset = (int)(committed_mem.mPtr + committed_mem.mSize - mBeginPtr);
}

// Give the whole reserved virtual memory range back to the system.
void VMemArena::FreeReserved()
{
	// No reservation to release.
	if (mBeginPtr == nullptr)
		return;

	gVMemFree({ mBeginPtr, mEndReservedOffset });
}



REGISTER_TEST("MemArena")
{
alignas(MemArena::cAlignment) uint8 buffer[MemArena::cAlignment * 5];
alignas(MemArena<>::cAlignment) uint8 buffer[MemArena<>::cAlignment * 5];
MemArena arena({ buffer, sizeof(buffer) });

MemBlock b1 = arena.Alloc(1);
Expand Down
Loading

0 comments on commit 2a498d9

Please sign in to comment.