(0.37) Handle continuation scanning in the pending-to-be-mounted case #17046

Merged · 2 commits · Mar 28, 2023
53 changes: 34 additions & 19 deletions runtime/gc_base/GCExtensions.cpp
@@ -311,29 +311,44 @@ MM_GCExtensions::releaseNativesForContinuationObject(MM_EnvironmentBase* env, j9
}

bool
MM_GCExtensions::needScanStacksForContinuationObject(J9VMThread *vmThread, j9object_t objectPtr, bool isGlobalGC)
MM_GCExtensions::needScanStacksForContinuationObject(J9VMThread *vmThread, j9object_t objectPtr, bool isConcurrentGC, bool isGlobalGC, bool beingMounted)
{
bool needScan = false;
#if JAVA_SPEC_VERSION >= 19
jboolean started = J9VMJDKINTERNALVMCONTINUATION_STARTED(vmThread, objectPtr);
jboolean finished = J9VMJDKINTERNALVMCONTINUATION_FINISHED(vmThread, objectPtr);
J9VMContinuation *continuation = J9VMJDKINTERNALVMCONTINUATION_VMREF(vmThread, objectPtr);
/**
* We don't scan mounted continuations:
*
* for concurrent GCs, since stack is actively changing. Instead, we scan them during preMount or during root scanning if already mounted at cycle start or during postUnmount (might be indirectly via card cleaning) or during final STW (via root re-scan) if still mounted at cycle end
* for sliding compacts to avoid double slot fixups
*
* For fully STW GCs, there is no harm to scan them, but it's a waste of time since they are scanned during root scanning already.
*
* We don't scan currently scanned for the same collector either - one scan is enough for the same collector, but there could be concurrent scavenger(local collector) and concurrent marking(global collector) overlapping,
* they are irrelevant and both are concurrent, we handle them independently and separately, they are not blocked or ignored each other.
*
* we don't scan the continuation object before started and after finished - java stack does not exist.
*/
if (started && !finished) {
Assert_MM_true(NULL != continuation);
needScan = !VM_VMHelpers::isContinuationMountedOrConcurrentlyScanned(continuation, isGlobalGC);
if (NULL != continuation) {
/**
 * We don't scan mounted continuations:
 *
 * - for concurrent GCs, since the stack is actively changing. Instead, we scan them during preMount,
 *   or during root scanning if already mounted at cycle start, or during postUnmount (possibly
 *   indirectly via card cleaning), or during the final STW (via root re-scan) if still mounted
 *   at cycle end;
 * - for sliding compacts, to avoid double slot fixups.
 *
 * If the continuation is currently being mounted by this thread, we must be in the preMount/postUnmount
 * callback and must scan.
 *
 * For fully STW GCs there is no harm in scanning them, but it is a waste of time since they are
 * already scanned during root scanning.
 *
 * We also don't scan a continuation that is currently being scanned by the same collector - one scan
 * is enough per collector. A concurrent scavenger (local collector) and concurrent marking (global
 * collector) may overlap, but they are independent: we handle them separately and neither blocks
 * or ignores the other.
 *
 * We don't scan the continuation object before it is started or after it is finished - the java stack
 * does not exist.
 */
if (isConcurrentGC) {
needScan = VM_VMHelpers::tryWinningConcurrentGCScan(continuation, isGlobalGC, beingMounted);
} else {
/* for STW GCs */
uintptr_t continuationState = continuation->state;
Assert_MM_false(beingMounted);
Assert_MM_false(VM_VMHelpers::isConcurrentlyScanned(continuationState));
needScan = VM_VMHelpers::isActive(continuationState) && !VM_VMHelpers::isContinuationFullyMounted(continuationState);
}
}
#endif /* JAVA_SPEC_VERSION >= 19 */
return needScan;
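The concurrent branch above relies on VM_VMHelpers::tryWinningConcurrentGCScan, whose body is not part of this diff. The sketch below illustrates how such a "win the scan" protocol can be built from a compare-and-swap over the continuation state; the bit names, layout, and retry details are assumptions for illustration only, not the actual VM_VMHelpers implementation.

#include <stdint.h>

/* Hypothetical state bits packed into continuation->state (illustrative only). */
static const uintptr_t STATE_FULLY_MOUNTED         = 0x1; /* a carrier thread has completed the mount */
static const uintptr_t STATE_SCAN_LOCAL_COLLECTOR  = 0x2; /* concurrent scavenge currently owns the scan */
static const uintptr_t STATE_SCAN_GLOBAL_COLLECTOR = 0x4; /* concurrent mark currently owns the scan */

static bool
tryWinningConcurrentGCScanSketch(volatile uintptr_t *state, bool isGlobalGC, bool beingMounted)
{
	uintptr_t scanBit = isGlobalGC ? STATE_SCAN_GLOBAL_COLLECTOR : STATE_SCAN_LOCAL_COLLECTOR;
	for (;;) {
		uintptr_t oldState = *state;
		if (0 != (oldState & scanBit)) {
			/* The same collector is already scanning this continuation - one scan is enough. */
			return false;
		}
		if ((0 != (oldState & STATE_FULLY_MOUNTED)) && !beingMounted) {
			/* Fully mounted and we are not the mounting thread: the mount/unmount callbacks cover it. */
			return false;
		}
		if (__sync_bool_compare_and_swap(state, oldState, oldState | scanBit)) {
			return true; /* we won the scan for this collector */
		}
		/* Lost the race against the mounting thread or the other collector - re-read and retry. */
	}
}

static void
exitConcurrentGCScanSketch(volatile uintptr_t *state, bool isGlobalGC)
{
	uintptr_t scanBit = isGlobalGC ? STATE_SCAN_GLOBAL_COLLECTOR : STATE_SCAN_LOCAL_COLLECTOR;
	/* Release the scan so a carrier thread waiting to mount this continuation can proceed. */
	__sync_fetch_and_and(state, ~scanBit);
}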
2 changes: 1 addition & 1 deletion runtime/gc_base/GCExtensions.hpp
@@ -307,7 +307,7 @@ class MM_GCExtensions : public MM_GCExtensionsBase {
* @param[in] isGlobalGC
* @return true if we need to scan the java stack
*/
static bool needScanStacksForContinuationObject(J9VMThread *vmThread, j9object_t objectPtr, bool isGlobalGC);
static bool needScanStacksForContinuationObject(J9VMThread *vmThread, j9object_t objectPtr, bool isConcurrentGC, bool isGlobalGC, bool beingMounted);

/**
* Create a GCExtensions object
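Note that the visible part of the header change still documents only isGlobalGC. A possible updated doc comment for the new signature, with parameter descriptions inferred from how GCExtensions.cpp uses them (the wording is a suggestion, not part of this PR):

/**
 * Check whether the java stack of a continuation object has to be scanned.
 *
 * @param[in] vmThread the current J9VMThread
 * @param[in] objectPtr the continuation object
 * @param[in] isConcurrentGC true if the collector may run concurrently with mutator threads
 * @param[in] isGlobalGC true for a global collector, false for a local collector (scavenger)
 * @param[in] beingMounted true if called from the pre-mount callback on the mounting thread
 * @return true if we need to scan the java stack
 */
static bool needScanStacksForContinuationObject(J9VMThread *vmThread, j9object_t objectPtr, bool isConcurrentGC, bool isGlobalGC, bool beingMounted);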
7 changes: 4 additions & 3 deletions runtime/gc_glue_java/CompactSchemeFixupObject.cpp
@@ -75,15 +75,16 @@ MM_CompactSchemeFixupObject::fixupContinuationNativeSlots(MM_EnvironmentStandard
* mounted Virtual threads later during root fixup, we will skip it during this heap fixup pass
* (hence passing true for scanOnlyUnmounted parameter).
*/
const bool isConcurrentGC = false;
const bool isGlobalGC = true;
if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) {
const bool beingMounted = false;
if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isConcurrentGC, isGlobalGC, beingMounted)) {
StackIteratorData4CompactSchemeFixupObject localData;
localData.compactSchemeFixupObject = this;
localData.env = env;
localData.fromObject = objectPtr;
const bool isConcurrentGC = false;

GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForCompactScheme, false, false, isConcurrentGC, isGlobalGC);
GC_VMThreadStackSlotIterator::scanContinuationSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForCompactScheme, false, false);
}
}

8 changes: 4 additions & 4 deletions runtime/gc_glue_java/HeapWalkerDelegate.cpp
@@ -60,17 +60,17 @@ MM_HeapWalkerDelegate::doContinuationNativeSlots(MM_EnvironmentBase *env, omrobj
{
J9VMThread *currentThread = (J9VMThread *)env->getLanguageVMThread();

const bool isConcurrentGC = false;
const bool isGlobalGC = true;
if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) {
const bool beingMounted = false;
if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isConcurrentGC, isGlobalGC, beingMounted)) {
StackIteratorData4HeapWalker localData;
localData.heapWalker = _heapWalker;
localData.env = env;
localData.fromObject = objectPtr;
localData.function = function;
localData.userData = userData;
/* so far there is no case we need ClassWalk for heapwalker, so we set stackFrameClassWalkNeeded = false */
const bool isConcurrentGC = false;

GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForHeapWalker, false, false, isConcurrentGC, isGlobalGC);
GC_VMThreadStackSlotIterator::scanContinuationSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForHeapWalker, false, false);
}
}
12 changes: 8 additions & 4 deletions runtime/gc_glue_java/MarkingDelegate.cpp
@@ -261,8 +261,11 @@ void
MM_MarkingDelegate::scanContinuationNativeSlots(MM_EnvironmentBase *env, omrobjectptr_t objectPtr)
{
J9VMThread *currentThread = (J9VMThread *)env->getLanguageVMThread();
/* In STW GC there are no racing carrier threads doing mount and no need for the synchronization. */
bool isConcurrentGC = J9_ARE_ANY_BITS_SET(currentThread->privateFlags, J9_PRIVATE_FLAGS_CONCURRENT_MARK_ACTIVE);
const bool isGlobalGC = true;
if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) {
const bool beingMounted = false;
if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isConcurrentGC, isGlobalGC, beingMounted)) {
StackIteratorData4MarkingDelegate localData;
localData.markingDelegate = this;
localData.env = env;
@@ -272,10 +275,11 @@ MM_MarkingDelegate::scanContinuationNativeSlots(MM_EnvironmentBase *env, omrobje
#if defined(J9VM_GC_DYNAMIC_CLASS_UNLOADING)
stackFrameClassWalkNeeded = isDynamicClassUnloadingEnabled();
#endif /* J9VM_GC_DYNAMIC_CLASS_UNLOADING */
/* In STW GC there are no racing carrier threads doing mount and no need for the synchronization. */
bool isConcurrentGC = J9_ARE_ANY_BITS_SET(currentThread->privateFlags, J9_PRIVATE_FLAGS_CONCURRENT_MARK_ACTIVE);

GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForMarkingDelegate, stackFrameClassWalkNeeded, false, isConcurrentGC, isGlobalGC);
GC_VMThreadStackSlotIterator::scanContinuationSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForMarkingDelegate, stackFrameClassWalkNeeded, false);
if (isConcurrentGC) {
VM_VMHelpers::exitConcurrentGCScan(currentThread, objectPtr, isGlobalGC);
}
}
}

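The same calling pattern repeats in the remaining delegates (Metronome, Scavenger, CopyForward, GlobalMarking): decide/win the scan, walk the continuation stack, and, for concurrent collectors only, release the scan afterwards. A condensed restatement of that pattern, with the delegate-specific iterator callback and data passed in as parameters (this helper does not exist in the code base; it only summarizes the shape of each call site):

static void
scanContinuationNativeSlotsPattern(J9VMThread *currentThread, j9object_t objectPtr,
	J9MODRON_OSLOTITERATOR *slotIterator, void *localData,
	bool isConcurrentGC, bool isGlobalGC, bool beingMounted)
{
	if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isConcurrentGC, isGlobalGC, beingMounted)) {
		GC_VMThreadStackSlotIterator::scanContinuationSlots(currentThread, objectPtr, localData, slotIterator, false, false);
		if (isConcurrentGC) {
			/* Every won concurrent scan must be paired with an exit, otherwise a carrier thread
			 * waiting to mount this continuation could be left blocked. */
			VM_VMHelpers::exitConcurrentGCScan(currentThread, objectPtr, isGlobalGC);
		}
	}
}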
11 changes: 7 additions & 4 deletions runtime/gc_glue_java/MetronomeDelegate.cpp
@@ -1644,11 +1644,12 @@ stackSlotIteratorForRealtimeGC(J9JavaVM *javaVM, J9Object **slotPtr, void *local
}

void
MM_MetronomeDelegate::scanContinuationNativeSlots(MM_EnvironmentRealtime *env, J9Object *objectPtr)
MM_MetronomeDelegate::scanContinuationNativeSlots(MM_EnvironmentRealtime *env, J9Object *objectPtr, bool beingMounted)
{
J9VMThread *currentThread = (J9VMThread *)env->getLanguageVMThread();
bool isConcurrentGC = _realtimeGC->isCollectorConcurrentTracing();
const bool isGlobalGC = true;
if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) {
if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isConcurrentGC, isGlobalGC, beingMounted)) {
StackIteratorData4RealtimeMarkingScheme localData;
localData.realtimeMarkingScheme = _markingScheme;
localData.env = env;
@@ -1659,9 +1660,11 @@ MM_MetronomeDelegate::scanContinuationNativeSlots(MM_EnvironmentRealtime *env, J
stackFrameClassWalkNeeded = isDynamicClassUnloadingEnabled();
#endif /* J9VM_GC_DYNAMIC_CLASS_UNLOADING */
/* In STW GC there are no racing carrier threads doing mount and no need for the synchronization. */
bool isConcurrentGC = _realtimeGC->isCollectorConcurrentTracing();

GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForRealtimeGC, stackFrameClassWalkNeeded, false, isConcurrentGC, isGlobalGC);
GC_VMThreadStackSlotIterator::scanContinuationSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForRealtimeGC, stackFrameClassWalkNeeded, false);
if (isConcurrentGC) {
VM_VMHelpers::exitConcurrentGCScan(currentThread, objectPtr, isGlobalGC);
}
}
}

2 changes: 1 addition & 1 deletion runtime/gc_glue_java/MetronomeDelegate.hpp
@@ -176,7 +176,7 @@ class MM_MetronomeDelegate : public MM_BaseNonVirtual
void setUnmarkedImpliesCleared();
void unsetUnmarkedImpliesCleared();

void scanContinuationNativeSlots(MM_EnvironmentRealtime *env, J9Object *objectPtr);
void scanContinuationNativeSlots(MM_EnvironmentRealtime *env, J9Object *objectPtr, bool beingMounted = false);
UDATA scanContinuationObject(MM_EnvironmentRealtime *env, J9Object *objectPtr);

#if defined(J9VM_GC_DYNAMIC_CLASS_UNLOADING)
20 changes: 15 additions & 5 deletions runtime/gc_glue_java/ScavengerDelegate.cpp
@@ -340,22 +340,32 @@ stackSlotIteratorForScavenge(J9JavaVM *javaVM, J9Object **slotPtr, void *localDa
}

bool
MM_ScavengerDelegate::scanContinuationNativeSlots(MM_EnvironmentStandard *env, omrobjectptr_t objectPtr, MM_ScavengeScanReason reason)
MM_ScavengerDelegate::scanContinuationNativeSlots(MM_EnvironmentStandard *env, omrobjectptr_t objectPtr, MM_ScavengeScanReason reason, bool beingMounted)
{
bool shouldRemember = false;

J9VMThread *currentThread = (J9VMThread *)env->getLanguageVMThread();
/* In STW GC there are no racing carrier threads doing mount and no need for the synchronization. */
bool isConcurrentGC = false;
if (MUTATOR_THREAD == env->getThreadType()) {
isConcurrentGC = _extensions->isConcurrentScavengerInProgress();
} else {
#if defined(OMR_GC_CONCURRENT_SCAVENGER)
isConcurrentGC = _extensions->scavenger->isCurrentPhaseConcurrent();
#endif /* defined(OMR_GC_CONCURRENT_SCAVENGER) */
}
const bool isGlobalGC = false;
if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) {
if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isConcurrentGC, isGlobalGC, beingMounted)) {
StackIteratorData4Scavenge localData;
localData.scavengerDelegate = this;
localData.env = env;
localData.reason = reason;
localData.shouldRemember = &shouldRemember;
/* In STW GC there are no racing carrier threads doing mount and no need for the synchronization. */
bool isConcurrentGC = _extensions->isConcurrentScavengerInProgress();

GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForScavenge, false, false, isConcurrentGC, isGlobalGC);
GC_VMThreadStackSlotIterator::scanContinuationSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForScavenge, false, false);
if (isConcurrentGC) {
VM_VMHelpers::exitConcurrentGCScan(currentThread, objectPtr, isGlobalGC);
}
}
return shouldRemember;
}
2 changes: 1 addition & 1 deletion runtime/gc_glue_java/ScavengerDelegate.hpp
@@ -156,7 +156,7 @@ class MM_ScavengerDelegate : public MM_BaseNonVirtual {

void setShouldScavengeUnfinalizedObjects(bool shouldScavenge) { _shouldScavengeUnfinalizedObjects = shouldScavenge; }
void setShouldScavengeContinuationObjects(bool shouldScavenge) { _shouldScavengeContinuationObjects = shouldScavenge; }
bool scanContinuationNativeSlots(MM_EnvironmentStandard *env, omrobjectptr_t objectPtr, MM_ScavengeScanReason reason);
bool scanContinuationNativeSlots(MM_EnvironmentStandard *env, omrobjectptr_t objectPtr, MM_ScavengeScanReason reason, bool beingMounted = false);

volatile bool getShouldScavengeFinalizableObjects() { return _shouldScavengeFinalizableObjects; }
volatile bool getShouldScavengeUnfinalizedObjects() { return _shouldScavengeUnfinalizedObjects; }
5 changes: 3 additions & 2 deletions runtime/gc_modron_standard/StandardAccessBarrier.cpp
@@ -1042,10 +1042,11 @@ MM_StandardAccessBarrier::preMountContinuation(J9VMThread *vmThread, j9object_t
{
#if defined(OMR_GC_CONCURRENT_SCAVENGER)
if (_extensions->isConcurrentScavengerInProgress()) {
/* concurrent scavenger in progress */
/* concurrent scavenger is active */
MM_EnvironmentStandard *env = MM_EnvironmentStandard::getEnvironment(vmThread->omrVMThread);
MM_ScavengeScanReason reason = SCAN_REASON_SCAVENGE;
_scavenger->getDelegate()->scanContinuationNativeSlots(env, contObject, reason);
const bool beingMounted = true;
_scavenger->getDelegate()->scanContinuationNativeSlots(env, contObject, reason, beingMounted);
}
#endif /* OMR_GC_CONCURRENT_SCAVENGER */
}
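For context, preMountContinuation is the access-barrier hook the VM invokes on the carrier thread just before it switches onto the continuation's stack, which is why beingMounted is true here and why the scan must not be skipped even though the mount is already in progress. A paraphrased view of where the hook sits in the mount path (the surrounding code and the barrier wiring are illustrative, not the actual VM source):

/* Illustrative mount sequence, assuming a pointer to the barrier type changed in this file. */
static void
mountContinuationSketch(J9VMThread *carrierThread, j9object_t contObject, MM_StandardAccessBarrier *barrier)
{
	/* 1. GC hook: if a concurrent cycle is in progress, scan the continuation's stack now,
	 *    with beingMounted = true so needScanStacksForContinuationObject does not skip it. */
	barrier->preMountContinuation(carrierThread, contObject);

	/* 2. Mark the continuation fully mounted and switch the carrier thread onto its stack. */

	/* 3. On unmount, the matching post-unmount hook scans slots created while mounted. */
}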
3 changes: 2 additions & 1 deletion runtime/gc_realtime/RealtimeAccessBarrier.cpp
@@ -955,7 +955,8 @@ MM_RealtimeAccessBarrier::preMountContinuation(J9VMThread *vmThread, j9object_t
{
MM_EnvironmentRealtime *env = MM_EnvironmentRealtime::getEnvironment(vmThread->omrVMThread);
if (isBarrierActive(env)) {
_realtimeGC->getRealtimeDelegate()->scanContinuationNativeSlots(env, contObject);
const bool beingMounted = true;
_realtimeGC->getRealtimeDelegate()->scanContinuationNativeSlots(env, contObject, beingMounted);
}
}

13 changes: 7 additions & 6 deletions runtime/gc_structs/VMThreadStackSlotIterator.cpp
@@ -130,21 +130,22 @@ GC_VMThreadStackSlotIterator::scanSlots(
}

void
GC_VMThreadStackSlotIterator::scanSlots(
GC_VMThreadStackSlotIterator::scanContinuationSlots(
J9VMThread *vmThread,
j9object_t continuationObjectPtr,
void *userData,
J9MODRON_OSLOTITERATOR *oSlotIterator,
bool includeStackFrameClassReferences,
bool trackVisibleFrameDepth,
bool isConcurrentGC,
bool isGlobalGC
bool trackVisibleFrameDepth
)
{
J9StackWalkState stackWalkState;

initializeStackWalkState(&stackWalkState, vmThread, userData, oSlotIterator, includeStackFrameClassReferences, trackVisibleFrameDepth);
VM_VMHelpers::walkContinuationStackFramesWrapper(vmThread, continuationObjectPtr, &stackWalkState, isConcurrentGC, isGlobalGC);

#if JAVA_SPEC_VERSION >= 19
J9VMContinuation *continuation = J9VMJDKINTERNALVMCONTINUATION_VMREF(vmThread, continuationObjectPtr);
vmThread->javaVM->internalVMFunctions->walkContinuationStackFrames(vmThread, continuation, &stackWalkState);
#endif /* JAVA_SPEC_VERSION >= 19 */
}

#if JAVA_SPEC_VERSION >= 19
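For callers, the rename also drops the isConcurrentGC/isGlobalGC pair: winning and exiting the concurrent scan now happens in the delegates (via needScanStacksForContinuationObject and VM_VMHelpers::exitConcurrentGCScan) rather than inside the stack-walk wrapper, so the continuation variant only walks frames. Roughly, at each call site:

/* before: synchronization handled inside the iterator wrapper */
GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIterator, false, false, isConcurrentGC, isGlobalGC);

/* after: the iterator only walks frames; callers win/exit the concurrent scan themselves */
GC_VMThreadStackSlotIterator::scanContinuationSlots(currentThread, objectPtr, (void *)&localData, stackSlotIterator, false, false);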
6 changes: 2 additions & 4 deletions runtime/gc_structs/VMThreadStackSlotIterator.hpp
@@ -60,15 +60,13 @@ class GC_VMThreadStackSlotIterator
bool includeStackFrameClassReferences,
bool trackVisibleFrameDepth);

static void scanSlots(
static void scanContinuationSlots(
J9VMThread *vmThread,
j9object_t continuationObjectPtr,
void *userData,
J9MODRON_OSLOTITERATOR *oSlotIterator,
bool includeStackFrameClassReferences,
bool trackVisibleFrameDepth,
bool isConcurrentGC,
bool isGlobalGC);
bool trackVisibleFrameDepth);

#if JAVA_SPEC_VERSION >= 19
static void scanSlots(
7 changes: 4 additions & 3 deletions runtime/gc_vlhgc/CopyForwardScheme.cpp
@@ -2322,8 +2322,10 @@ MMINLINE void
MM_CopyForwardScheme::scanContinuationNativeSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *objectPtr, ScanReason reason)
{
J9VMThread *currentThread = (J9VMThread *)env->getLanguageVMThread();
const bool isConcurrentGC = false;
const bool isGlobalGC = false;
if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) {
const bool beingMounted = false;
if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isConcurrentGC, isGlobalGC, beingMounted)) {
StackIteratorData4CopyForward localData;
localData.copyForwardScheme = this;
localData.env = env;
@@ -2333,9 +2335,8 @@ MM_CopyForwardScheme::scanContinuationNativeSlots(MM_EnvironmentVLHGC *env, MM_A
#if defined(J9VM_GC_DYNAMIC_CLASS_UNLOADING)
stackFrameClassWalkNeeded = isDynamicClassUnloadingEnabled();
#endif /* J9VM_GC_DYNAMIC_CLASS_UNLOADING */
const bool isConcurrentGC = false;

GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForCopyForwardScheme, stackFrameClassWalkNeeded, false, isConcurrentGC, isGlobalGC);
GC_VMThreadStackSlotIterator::scanContinuationSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForCopyForwardScheme, stackFrameClassWalkNeeded, false);
}
}

7 changes: 4 additions & 3 deletions runtime/gc_vlhgc/GlobalMarkCardScrubber.cpp
@@ -192,16 +192,17 @@ bool MM_GlobalMarkCardScrubber::scrubContinuationNativeSlots(MM_EnvironmentVLHGC
{
bool doScrub = true;
J9VMThread *currentThread = (J9VMThread *)env->getLanguageVMThread();
const bool isConcurrentGC = false;
const bool isGlobalGC = true;
if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) {
const bool beingMounted = false;
if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isConcurrentGC, isGlobalGC, beingMounted)) {
StackIteratorData4GlobalMarkCardScrubber localData;
localData.globalMarkCardScrubber = this;
localData.env = env;
localData.doScrub = &doScrub;
localData.fromObject = objectPtr;
const bool isConcurrentGC = false;

GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForGlobalMarkCardScrubber, false, false, isConcurrentGC, isGlobalGC);
GC_VMThreadStackSlotIterator::scanContinuationSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForGlobalMarkCardScrubber, false, false);
}
return doScrub;
}
12 changes: 8 additions & 4 deletions runtime/gc_vlhgc/GlobalMarkingScheme.cpp
@@ -792,8 +792,11 @@ void
MM_GlobalMarkingScheme::scanContinuationNativeSlots(MM_EnvironmentVLHGC *env, J9Object *objectPtr, ScanReason reason)
{
J9VMThread *currentThread = (J9VMThread *)env->getLanguageVMThread();
/* In STW GC there are no racing carrier threads doing mount and no need for the synchronization. */
bool isConcurrentGC = (MM_VLHGCIncrementStats::mark_concurrent == static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._globalMarkIncrementType);
const bool isGlobalGC = true;
if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) {
const bool beingMounted = false;
if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isConcurrentGC, isGlobalGC, beingMounted)) {
StackIteratorData4GlobalMarkingScheme localData;
localData.globalMarkingScheme = this;
localData.env = env;
@@ -802,10 +805,11 @@ MM_GlobalMarkingScheme::scanContinuationNativeSlots(MM_EnvironmentVLHGC *env, J9
#if defined(J9VM_GC_DYNAMIC_CLASS_UNLOADING)
stackFrameClassWalkNeeded = isDynamicClassUnloadingEnabled();
#endif /* J9VM_GC_DYNAMIC_CLASS_UNLOADING */
/* In STW GC there are no racing carrier threads doing mount and no need for the synchronization. */
bool isConcurrentGC = (MM_VLHGCIncrementStats::mark_concurrent == static_cast<MM_CycleStateVLHGC*>(env->_cycleState)->_vlhgcIncrementStats._globalMarkIncrementType);

GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForGlobalMarkingScheme, stackFrameClassWalkNeeded, false, isConcurrentGC, isGlobalGC);
GC_VMThreadStackSlotIterator::scanContinuationSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForGlobalMarkingScheme, stackFrameClassWalkNeeded, false);
if (isConcurrentGC) {
VM_VMHelpers::exitConcurrentGCScan(currentThread, objectPtr, isGlobalGC);
}
}
}
