
Commit

Merge pull request eclipse-omr#7176 from kangyining/criu-serialization-again

Clear Inactive Objects at Checkpoint
babsingh authored Nov 11, 2023
2 parents 3a12f6a + 7c33b95 commit 576733c
Showing 11 changed files with 112 additions and 18 deletions.
10 changes: 10 additions & 0 deletions gc/base/GCCode.cpp
@@ -262,3 +262,13 @@ MM_GCCode::isRASDumpGC() const
{
return J9MMCONSTANT_EXPLICIT_GC_RASDUMP_COMPACT == _gcCode;
}

/**
* Determine if the GC should clear bits for objects marked as deleted.
* @return true if we should clear the heap (currently only at snapshot)
*/
bool
MM_GCCode::shouldClearHeap() const
{
return J9MMCONSTANT_EXPLICIT_GC_PREPARE_FOR_CHECKPOINT == _gcCode;
}
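
The new predicate only fires for the checkpoint GC code. A minimal sketch of how a caller might gate on it (the function below is hypothetical; the real call sites are in ParallelGlobalGC.cpp later in this diff):

/* Hypothetical caller, for illustration only. */
void
exampleGateClearingOnGCCode(MM_EnvironmentBase *env)
{
	const MM_GCCode gcCode = env->_cycleState->_gcCode;

	if (gcCode.shouldClearHeap()) {
		/* Only J9MMCONSTANT_EXPLICIT_GC_PREPARE_FOR_CHECKPOINT reaches this branch,
		 * so unused free-entry storage can be scrubbed before the checkpoint image is taken.
		 */
	}
}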
6 changes: 6 additions & 0 deletions gc/base/GCCode.hpp
@@ -85,6 +85,12 @@ class MM_GCCode {
* @return true if OOM can be thrown at the end of this GC
*/
bool isOutOfMemoryGC() const;

/**
* Determine if the GC should clear bits for objects marked as deleted.
* @return true if we should clear the heap (currently only at snapshot)
*/
bool shouldClearHeap() const;
};

#endif /* GCCODE_HPP_ */
4 changes: 2 additions & 2 deletions gc/base/ParallelHeapWalker.cpp
@@ -147,7 +147,7 @@ MM_ParallelHeapWalker::allObjectsDoParallel(MM_EnvironmentBase *env, MM_HeapWalk
* otherwise walk all objects in the heap in a single threaded linear fashion.
*/
void
MM_ParallelHeapWalker::allObjectsDo(MM_EnvironmentBase *env, MM_HeapWalkerObjectFunc function, void *userData, uintptr_t walkFlags, bool parallel, bool prepareHeapForWalk)
MM_ParallelHeapWalker::allObjectsDo(MM_EnvironmentBase *env, MM_HeapWalkerObjectFunc function, void *userData, uintptr_t walkFlags, bool parallel, bool prepareHeapForWalk, bool includeDeadObjects)
{
if (parallel) {
GC_OMRVMInterface::flushCachesForWalk(env->getOmrVM());
@@ -158,7 +158,7 @@ MM_ParallelHeapWalker::allObjectsDo(MM_EnvironmentBase *env, MM_HeapWalkerObject
MM_ParallelObjectDoTask objectDoTask(env, this, function, userData, walkFlags, parallel);
env->getExtensions()->dispatcher->run(env, &objectDoTask);
} else {
MM_HeapWalker::allObjectsDo(env, function, userData, walkFlags, parallel, prepareHeapForWalk);
MM_HeapWalker::allObjectsDo(env, function, userData, walkFlags, parallel, prepareHeapForWalk, includeDeadObjects);
}
}
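
The only caller-visible change is the extra trailing flag. A hedged sketch of the two call shapes, borrowing the arguments from call sites added later in this diff (ParallelGlobalGC.cpp):

/* Existing callers keep the live-object-only walk by passing false... */
_heapWalker->allObjectsDo(env, walkFunction, &fixedObjectCount, walkFlags, true, false, false);

/* ...while the checkpoint clearing pass passes true so dead objects (holes) are visited as well. */
_heapWalker->allObjectsDo(env, walkFunction, &counter, MEMORY_TYPE_RAM, false, false, true);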

2 changes: 1 addition & 1 deletion gc/base/ParallelHeapWalker.hpp
@@ -59,7 +59,7 @@ class MM_ParallelHeapWalker : public MM_HeapWalker
* If parallel is set to true, task is dispatched to GC threads and walks the heap segments in parallel,
* otherwise walk all objects in the heap in a single threaded linear fashion.
*/
virtual void allObjectsDo(MM_EnvironmentBase *env, MM_HeapWalkerObjectFunc function, void *userData, uintptr_t walkFlags, bool parallel, bool prepareHeapForWalk);
virtual void allObjectsDo(MM_EnvironmentBase *env, MM_HeapWalkerObjectFunc function, void *userData, uintptr_t walkFlags, bool parallel, bool prepareHeapForWalk, bool includeDeadObjects);

MM_MarkMap *getMarkMap() {
return _markMap;
2 changes: 2 additions & 0 deletions gc/base/j9mm.tdf
@@ -1025,3 +1025,5 @@ TraceEvent=Trc_MM_MSSSS_flip_backout Overhead=1 Level=1 Group=scavenger Template
TraceEvent=Trc_MM_MSSSS_flip_restore_tilt_after_percolate Overhead=1 Level=1 Group=scavenger Template="MSSSS::flip restore_tilt_after_percolate last free entry %zx size %zu"
TraceEvent=Trc_MM_MSSSS_flip_restore_tilt_after_percolate_with_stats Overhead=1 Level=1 Group=scavenge Template="MSSSS::flip restore_tilt_after_percolate heapAlignedLastFreeEntry %zu section (%zu) aligned size %zu"
TraceEvent=Trc_MM_MSSSS_flip_restore_tilt_after_percolate_current_status Overhead=1 Level=1 Group=scavenge Template="MSSSS::flip restore_tilt_after_percolate %sallocateSize %zu survivorSize %zu"

TraceEvent=Trc_MM_clearheap Overhead=1 Level=1 Template="clearheap with free memory size: %zu, object size: %zu"
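
For reference, a tracepoint defined in the tdf is emitted through its generated Trc_ macro; the invocation added later in this diff passes the calling thread plus the two %zu counters from the template:

Trc_MM_clearheap(env->getLanguageVMThread(), counter.freeBytes, counter.objectBytes);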
6 changes: 3 additions & 3 deletions gc/base/standard/HeapWalker.cpp
@@ -189,7 +189,7 @@ MM_HeapWalker::allObjectSlotsDo(MM_EnvironmentBase *env, MM_HeapWalkerSlotFunc f
modifiedWalkFlags &= ~J9_MU_WALK_NEW_AND_REMEMBERED_ONLY;
}

allObjectsDo(env, heapWalkerObjectSlotsDo, (void *)&slotObjectDoUserData, modifiedWalkFlags, parallel, prepareHeapForWalk);
allObjectsDo(env, heapWalkerObjectSlotsDo, (void *)&slotObjectDoUserData, modifiedWalkFlags, parallel, prepareHeapForWalk, false);

#if defined(OMR_GC_MODRON_SCAVENGER)
/* If J9_MU_WALK_NEW_AND_REMEMBERED_ONLY is specified, allObjectsDo will only walk
@@ -205,7 +205,7 @@ MM_HeapWalker::allObjectSlotsDo(MM_EnvironmentBase *env, MM_HeapWalkerSlotFunc f
* Walk all objects in the heap in a single threaded linear fashion.
*/
void
MM_HeapWalker::allObjectsDo(MM_EnvironmentBase *env, MM_HeapWalkerObjectFunc function, void *userData, uintptr_t walkFlags, bool parallel, bool prepareHeapForWalk)
MM_HeapWalker::allObjectsDo(MM_EnvironmentBase *env, MM_HeapWalkerObjectFunc function, void *userData, uintptr_t walkFlags, bool parallel, bool prepareHeapForWalk, bool includeDeadObjects)
{
uintptr_t typeFlags = 0;

@@ -225,7 +225,7 @@ MM_HeapWalker::allObjectsDo(MM_EnvironmentBase *env, MM_HeapWalkerObjectFunc fun
if (typeFlags == (region->getTypeFlags() & typeFlags)) {
/* Optimization to avoid virtual dispatch for every slot in the system */
omrobjectptr_t object = NULL;
GC_ObjectHeapIteratorAddressOrderedList liveObjectIterator(extensions, region, false);
GC_ObjectHeapIteratorAddressOrderedList liveObjectIterator(extensions, region, includeDeadObjects);

while (NULL != (object = liveObjectIterator.nextObject())) {
function(omrVMThread, region, object, userData);
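
A hedged sketch of what includeDeadObjects changes at the iterator level: when the flag is true, the region iterator also yields dead objects (heap holes), which a callback can tell apart through the object model. extensions and region are assumed to be in scope, exactly as in the loop above.

GC_ObjectHeapIteratorAddressOrderedList objectIterator(extensions, region, true /* includeDeadObjects */);
omrobjectptr_t object = NULL;
while (NULL != (object = objectIterator.nextObject())) {
	if (extensions->objectModel.isDeadObject(object)) {
		/* a hole: only seen when includeDeadObjects is true */
	} else {
		/* a live object: seen in either mode */
	}
}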
2 changes: 1 addition & 1 deletion gc/base/standard/HeapWalker.hpp
@@ -51,7 +51,7 @@ class MM_HeapWalker : public MM_BaseVirtual

public:
virtual void allObjectSlotsDo(MM_EnvironmentBase *env, MM_HeapWalkerSlotFunc function, void *userData, uintptr_t walkFlags, bool parallel, bool prepareHeapForWalk);
virtual void allObjectsDo(MM_EnvironmentBase *env, MM_HeapWalkerObjectFunc function, void *userData, uintptr_t walkFlags, bool parallel, bool prepareHeapForWalk);
virtual void allObjectsDo(MM_EnvironmentBase *env, MM_HeapWalkerObjectFunc function, void *userData, uintptr_t walkFlags, bool parallel, bool prepareHeapForWalk, bool includeDeadObjects);

static MM_HeapWalker *newInstance(MM_EnvironmentBase *env);
virtual void kill(MM_EnvironmentBase *env);
92 changes: 82 additions & 10 deletions gc/base/standard/ParallelGlobalGC.cpp
@@ -185,6 +185,39 @@ fixObject(OMR_VMThread *omrVMThread, MM_HeapRegionDescriptor *region, omrobjectp
}
}

struct HeapSizes {
uintptr_t freeBytes;
uintptr_t objectBytes;
};

static void
clearFreeEntry(OMR_VMThread *omrVMThread, MM_HeapRegionDescriptor *region, omrobjectptr_t object, void *userData)
{
MM_GCExtensionsBase *extensions = MM_GCExtensionsBase::getExtensions(omrVMThread->_vm);
MM_ParallelGlobalGC *collector = (MM_ParallelGlobalGC *)extensions->getGlobalCollector();
HeapSizes *counter = (HeapSizes *)userData;

if (extensions->objectModel.isDeadObject(object)) {
if (extensions->objectModel.isSingleSlotDeadObject(object)) {
counter->freeBytes += extensions->objectModel.getSizeInBytesSingleSlotDeadObject(object);
} else {
/* Clear both linked and unlinked free entries. */
memset(
(void *)((uintptr_t)object + sizeof(MM_HeapLinkedFreeHeader)),
0,
extensions->objectModel.getSizeInBytesMultiSlotDeadObject(object) - sizeof(MM_HeapLinkedFreeHeader));

counter->freeBytes += extensions->objectModel.getSizeInBytesMultiSlotDeadObject(object);
}
} else {
/* An assertion is used to ensure that for every object which is not
* a hole (not cleared), it must be a marked object.
*/
counter->objectBytes += extensions->objectModel.getConsumedSizeInBytesWithHeader(object);
Assert_MM_true(collector->getMarkingScheme()->isMarked(object));
}
}
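
The memset above is deliberately offset: the MM_HeapLinkedFreeHeader at the start of a multi-slot hole carries the size/link information the free list and heap walk rely on, so only the stale payload behind it is zeroed. A restatement of that arithmetic, as an illustration only:

/* What the memset clears for one multi-slot hole (illustrative names only). */
uintptr_t holeSize = extensions->objectModel.getSizeInBytesMultiSlotDeadObject(object);
void *payload = (void *)((uintptr_t)object + sizeof(MM_HeapLinkedFreeHeader)); /* skip the free-entry header */
uintptr_t payloadSize = holeSize - sizeof(MM_HeapLinkedFreeHeader);            /* stale bytes to zero */
memset(payload, 0, payloadSize);
/* Single-slot holes have no payload beyond the header, so they are only counted, never cleared. */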

#if defined(OMR_GC_MODRON_SCAVENGER)
/**
* Fix the heap if the remembered set for the scavenger is in an overflow state.
@@ -468,7 +501,7 @@ MM_ParallelGlobalGC::mainThreadGarbageCollect(MM_EnvironmentBase *env, MM_Alloca
_delegate.postMarkProcessing(env);

sweep(env, allocDescription, rebuildMarkBits);

const MM_GCCode gcCode = env->_cycleState->_gcCode;

#if defined(OMR_GC_MODRON_COMPACTION)
/* If a compaction was required, then do one */
@@ -477,8 +510,11 @@ MM_ParallelGlobalGC::mainThreadGarbageCollect(MM_EnvironmentBase *env, MM_Alloca
if (GLOBALGC_ESTIMATE_FRAGMENTATION == (_extensions->estimateFragmentation & GLOBALGC_ESTIMATE_FRAGMENTATION)) {
_collectionStatistics._tenureFragmentation |= MACRO_FRAGMENTATION;
}

mainThreadCompact(env, allocDescription, rebuildMarkBits);
/* Although normally walking the heap after a compact doesn't require fixing the mark map,
* the clearFreeEntry callback asserts that everything that is not cleared is a marked
* object. So the mark map is also rebuilt before calling the clearFreeEntry callback.
*/
mainThreadCompact(env, allocDescription, rebuildMarkBits || gcCode.shouldClearHeap());
_collectionStatistics._tenureFragmentation = NO_FRAGMENTATION;
if (_extensions->processLargeAllocateStats) {
processLargeAllocateStatsAfterCompact(env);
@@ -506,8 +542,10 @@ MM_ParallelGlobalGC::mainThreadGarbageCollect(MM_EnvironmentBase *env, MM_Alloca
compactedThisCycle = _compactThisCycle;
#endif /* OMR_GC_MODRON_COMPACTION */

/* If the delegate has isAllowUserHeapWalk set, fix the heap so that it can be walked */
if (_delegate.isAllowUserHeapWalk() || env->_cycleState->_gcCode.isRASDumpGC()) {
/* If the delegate has isAllowUserHeapWalk set, or should prepare for a snapshot,
* fix the heap so that it can be walked
*/
if (_delegate.isAllowUserHeapWalk() || gcCode.isRASDumpGC() || gcCode.shouldClearHeap()) {
if (!_fixHeapForWalkCompleted) {
#if defined(OMR_GC_MODRON_COMPACTION)
if (compactedThisCycle) {
@@ -532,7 +570,7 @@ MM_ParallelGlobalGC::mainThreadGarbageCollect(MM_EnvironmentBase *env, MM_Alloca
* any expand or contract target.
* Concurrent Scavenger requires this be done after fixup heap for walk pass.
*/
env->_cycleState->_activeSubSpace->checkResize(env, allocDescription, env->_cycleState->_gcCode.isExplicitGC());
env->_cycleState->_activeSubSpace->checkResize(env, allocDescription, gcCode.isExplicitGC());
}
#endif

@@ -1092,7 +1130,10 @@ MM_ParallelGlobalGC::internalPostCollect(MM_EnvironmentBase *env, MM_MemorySubSp
MM_GlobalCollector::internalPostCollect(env, subSpace);

tenureMemoryPoolPostCollect(env);

/* The clear pass should execute before reporting the cycle end, where heap walks and fixups are reported */
if (env->_cycleState->_gcCode.shouldClearHeap()) {
clearHeap(env, clearFreeEntry);
}
reportGCCycleFinalIncrementEnding(env);
reportGlobalGCIncrementEnd(env);
reportGCIncrementEnd(env);
@@ -1282,7 +1323,7 @@ MM_ParallelGlobalGC::fixHeapForWalk(MM_EnvironmentBase *env, UDATA walkFlags, ui
OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
U_64 startTime = omrtime_hires_clock();

_heapWalker->allObjectsDo(env, walkFunction, &fixedObjectCount, walkFlags, true, false);
_heapWalker->allObjectsDo(env, walkFunction, &fixedObjectCount, walkFlags, true, false, false);

_extensions->globalGCStats.fixHeapForWalkTime = omrtime_hires_delta(startTime, omrtime_hires_clock(), OMRPORT_TIME_DELTA_IN_MICROSECONDS);
_extensions->globalGCStats.fixHeapForWalkReason = walkReason;
@@ -1292,6 +1333,37 @@ MM_ParallelGlobalGC::fixHeapForWalk(MM_EnvironmentBase *env, UDATA walkFlags, ui
return fixedObjectCount;
}

/**
* Clearing all dead multi-slot objects, whether linked or unlinked.
* Currently only called at snapshot time.
* @param[in] env The environment for the calling thread
* @param[in] walkFunction the callback function to operate on each object
*/
void
MM_ParallelGlobalGC::clearHeap(MM_EnvironmentBase *env, MM_HeapWalkerObjectFunc walkFunction)
{
HeapSizes counter = {0, 0};
OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
U_64 startTime = omrtime_hires_clock();

/* This is the second heap walk which clears all holes. */
_heapWalker->allObjectsDo(env, walkFunction, &counter, MEMORY_TYPE_RAM, false, false, true);
/* Report clearing as one pass with the cumulative time of the two heap walks
* including fixHeapForWalk, and clearHeap runs.
*/
MM_GlobalGCStats *stats = &_extensions->globalGCStats;
stats->fixHeapForWalkTime += omrtime_hires_delta(startTime, omrtime_hires_clock(), OMRPORT_TIME_DELTA_IN_MICROSECONDS);
Assert_MM_true(FIXUP_NONE != stats->fixHeapForWalkReason);
stats->fixHeapForWalkReason = FIXUP_AND_CLEAR_HEAP;

Trc_MM_clearheap(env->getLanguageVMThread(), counter.freeBytes, counter.objectBytes);
/* Assertion to ensure that all items of the heap are visited, so that
* the sum of sizes of all marked objects and holes is equal to the
* heap size.
*/
Assert_MM_true(counter.freeBytes + counter.objectBytes == _extensions->heap->getMemorySize());
}
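
The closing assertion is a whole-heap accounting check: every byte visited by the clearing walk is attributed either to a hole or to a marked object, and the two sums must equal the heap size. A made-up example of that balance for a hypothetical 64 MB heap:

/* Hypothetical numbers, for illustration only:
 *   counter.freeBytes   = 41943040   (40 MB of holes, payloads now zeroed)
 *   counter.objectBytes = 25165824   (24 MB of marked live objects)
 *   sum                 = 67108864 == _extensions->heap->getMemorySize() (64 MB)
 */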

/* (non-doxygen)
* @see MM_GlobalCollector::heapAddRange()
*/
@@ -1899,14 +1971,14 @@ void
MM_ParallelGlobalGC::poisonHeap(MM_EnvironmentBase *env)
{
/* This will poison only the heap slots */
_heapWalker->allObjectsDo(env, poisonReferenceSlots, NULL, 0, true, false);
_heapWalker->allObjectsDo(env, poisonReferenceSlots, NULL, 0, true, false, false);
}

void
MM_ParallelGlobalGC::healHeap(MM_EnvironmentBase *env)
{
/* This will heal only the heap slots */
_heapWalker->allObjectsDo(env, healReferenceSlots, NULL, 0, false, false);
_heapWalker->allObjectsDo(env, healReferenceSlots, NULL, 0, false, false, false);
/* Don't have the mark map at the start of gc so we'll have to iterate over
* in a sequential manner
*/
1 change: 1 addition & 0 deletions gc/base/standard/ParallelGlobalGC.hpp
@@ -285,6 +285,7 @@ class MM_ParallelGlobalGC : public MM_GlobalCollector
*/
uintptr_t fixHeapForWalk(MM_EnvironmentBase *env, UDATA walkFlags, uintptr_t walkReason, MM_HeapWalkerObjectFunc walkFunction);
MM_HeapWalker *getHeapWalker() { return _heapWalker; }
void clearHeap(MM_EnvironmentBase *env, MM_HeapWalkerObjectFunc walkFunction);
virtual void prepareHeapForWalk(MM_EnvironmentBase *env);

virtual bool heapAddRange(MM_EnvironmentBase *env, MM_MemorySubSpace *subspace, uintptr_t size, void *lowAddress, void *highAddress);
2 changes: 2 additions & 0 deletions gc/verbose/VerboseHandlerOutput.cpp
@@ -279,6 +279,8 @@ MM_VerboseHandlerOutput::getHeapFixupReasonString(uintptr_t reason)
return "class unloading";
case FIXUP_DEBUG_TOOLING:
return "debug tooling";
case FIXUP_AND_CLEAR_HEAP:
return "fixup and clear heap";
default:
return "unknown";
}
3 changes: 2 additions & 1 deletion include_core/omrgcconsts.h
@@ -465,7 +465,8 @@ typedef enum {
typedef enum {
FIXUP_NONE = 0,
FIXUP_CLASS_UNLOADING,
FIXUP_DEBUG_TOOLING
FIXUP_DEBUG_TOOLING,
FIXUP_AND_CLEAR_HEAP
} FixUpReason;

typedef enum {
