diff --git a/runtime/gc_base/GCExtensions.cpp b/runtime/gc_base/GCExtensions.cpp
index 2d6adf1a015..66928a9dfc3 100644
--- a/runtime/gc_base/GCExtensions.cpp
+++ b/runtime/gc_base/GCExtensions.cpp
@@ -311,29 +311,44 @@ MM_GCExtensions::releaseNativesForContinuationObject(MM_EnvironmentBase* env, j9
 }
 
 bool
-MM_GCExtensions::needScanStacksForContinuationObject(J9VMThread *vmThread, j9object_t objectPtr, bool isGlobalGC)
+MM_GCExtensions::needScanStacksForContinuationObject(J9VMThread *vmThread, j9object_t objectPtr, bool isConcurrentGC, bool isGlobalGC, bool beingMounted)
 {
 	bool needScan = false;
 #if JAVA_SPEC_VERSION >= 19
-	jboolean started = J9VMJDKINTERNALVMCONTINUATION_STARTED(vmThread, objectPtr);
-	jboolean finished = J9VMJDKINTERNALVMCONTINUATION_FINISHED(vmThread, objectPtr);
 	J9VMContinuation *continuation = J9VMJDKINTERNALVMCONTINUATION_VMREF(vmThread, objectPtr);
-	/**
-	 * We don't scan mounted continuations:
-	 *
-	 * for concurrent GCs, since stack is actively changing. Instead, we scan them during preMount or during root scanning if already mounted at cycle start or during postUnmount (might be indirectly via card cleaning) or during final STW (via root re-scan) if still mounted at cycle end
-	 * for sliding compacts to avoid double slot fixups
-	 *
-	 * For fully STW GCs, there is no harm to scan them, but it's a waste of time since they are scanned during root scanning already.
-	 *
-	 * We don't scan currently scanned for the same collector either - one scan is enough for the same collector, but there could be concurrent scavenger(local collector) and concurrent marking(global collector) overlapping,
-	 * they are irrelevant and both are concurrent, we handle them independently and separately, they are not blocked or ignored each other.
-	 *
-	 * we don't scan the continuation object before started and after finished - java stack does not exist.
-	 */
-	if (started && !finished) {
-		Assert_MM_true(NULL != continuation);
-		needScan = !VM_VMHelpers::isContinuationMountedOrConcurrentlyScanned(continuation, isGlobalGC);
+	if (NULL != continuation) {
+		/**
+		 * We don't scan mounted continuations:
+		 *
+		 * for concurrent GCs, since the stack is actively changing. Instead, we scan them during preMount,
+		 * or during root scanning if already mounted at cycle start, or during postUnmount (possibly
+		 * indirectly via card cleaning), or during the final STW (via root re-scan) if still mounted
+		 * at cycle end;
+		 * for sliding compacts, to avoid double slot fixups.
+		 * If the continuation is currently being mounted by this thread, we must be in the preMount/postUnmount
+		 * callback and must scan.
+		 *
+		 * For fully STW GCs, there is no harm in scanning them, but it's a waste of time since they are
+		 * scanned during root scanning already.
+		 *
+		 * We don't scan a continuation already being scanned by the same collector either - one scan is
+		 * enough per collector. A concurrent scavenge (local collector) and concurrent mark (global
+		 * collector) may overlap, but they are independent and both concurrent, so we handle them
+		 * separately; neither blocks or ignores the other.
+		 *
+		 * We don't scan the continuation object before it is started or after it is finished - the java
+		 * stack does not exist.
+		 */
+		if (isConcurrentGC) {
+			needScan = VM_VMHelpers::tryWinningConcurrentGCScan(continuation, isGlobalGC, beingMounted);
+		} else {
+			/* for STW GCs */
+			uintptr_t continuationState = continuation->state;
+			Assert_MM_false(beingMounted);
+			Assert_MM_false(VM_VMHelpers::isConcurrentlyScanned(continuationState));
+			needScan = VM_VMHelpers::isActive(continuationState) && !VM_VMHelpers::isContinuationFullyMounted(continuationState);
+		}
 	}
 #endif /* JAVA_SPEC_VERSION >= 19 */
 	return needScan;
diff --git a/runtime/gc_base/GCExtensions.hpp b/runtime/gc_base/GCExtensions.hpp
index 8afe67db258..7a5dce2d90a 100644
--- a/runtime/gc_base/GCExtensions.hpp
+++ b/runtime/gc_base/GCExtensions.hpp
@@ -307,7 +307,7 @@ class MM_GCExtensions : public MM_GCExtensionsBase {
 	 * @param[in] isGlobalGC
 	 * @return true if we need to scan the java stack
 	 */
-	static bool needScanStacksForContinuationObject(J9VMThread *vmThread, j9object_t objectPtr, bool isGlobalGC);
+	static bool needScanStacksForContinuationObject(J9VMThread *vmThread, j9object_t objectPtr, bool isConcurrentGC, bool isGlobalGC, bool beingMounted);
 
 	/**
 	 * Create a GCExtensions object
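For readers not familiar with the new state encoding, the following is a minimal, self-contained sketch of the decision that needScanStacksForContinuationObject() now makes on the STW path: a continuation needs a stack scan only if it has been started, has not finished, and is not fully mounted on a carrier thread. This is not the J9 source; the names below are invented for illustration, and the flag values mirror the J9_GC_CONTINUATION_STATE_* constants added later in this patch (j9consts.h).

#include <cstdint>
#include <iostream>

/* Illustrative flag values mirroring the new J9_GC_CONTINUATION_STATE_* constants. */
constexpr uintptr_t STATE_STARTED               = 0x1;
constexpr uintptr_t STATE_FINISHED              = 0x2;
constexpr uintptr_t STATE_PENDING_TO_BE_MOUNTED = 0x4;
constexpr uintptr_t STATE_CARRIERID_MASK        = ~(uintptr_t)0xff; /* carrier thread pointer lives above the flag bits */

bool isActive(uintptr_t state) { return (state & STATE_STARTED) && !(state & STATE_FINISHED); }

bool isFullyMounted(uintptr_t state)
{
	/* A carrier ID alone is not enough: a carrier still blocked behind a concurrent scan
	 * is only "pending to be mounted" and its stack is not live yet. */
	return (0 != (state & STATE_CARRIERID_MASK)) && (0 == (state & STATE_PENDING_TO_BE_MOUNTED));
}

/* The STW decision from the patch: scan only active, not-fully-mounted continuations. */
bool needScanSTW(uintptr_t state) { return isActive(state) && !isFullyMounted(state); }

int main()
{
	uintptr_t carrier = 0x7f1100; /* fake, suitably aligned carrier thread address */
	std::cout << needScanSTW(STATE_STARTED) << "\n";                                          /* 1: started, unmounted */
	std::cout << needScanSTW(STATE_STARTED | carrier) << "\n";                                /* 0: fully mounted */
	std::cout << needScanSTW(STATE_STARTED | carrier | STATE_PENDING_TO_BE_MOUNTED) << "\n";  /* 1: mount still pending */
	std::cout << needScanSTW(STATE_STARTED | STATE_FINISHED) << "\n";                         /* 0: finished */
	return 0;
}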
diff --git a/runtime/gc_glue_java/CompactSchemeFixupObject.cpp b/runtime/gc_glue_java/CompactSchemeFixupObject.cpp
index 0a44ead7770..45ad6b51af7 100644
--- a/runtime/gc_glue_java/CompactSchemeFixupObject.cpp
+++ b/runtime/gc_glue_java/CompactSchemeFixupObject.cpp
@@ -75,15 +75,16 @@ MM_CompactSchemeFixupObject::fixupContinuationNativeSlots(MM_EnvironmentStandard
 	 * mounted Virtual threads later during root fixup, we will skip it during this heap fixup pass
 	 * (hence passing true for scanOnlyUnmounted parameter).
 	 */
+	const bool isConcurrentGC = false;
 	const bool isGlobalGC = true;
-	if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) {
+	const bool beingMounted = false;
+	if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isConcurrentGC, isGlobalGC, beingMounted)) {
 		StackIteratorData4CompactSchemeFixupObject localData;
 		localData.compactSchemeFixupObject = this;
 		localData.env = env;
 		localData.fromObject = objectPtr;
-		const bool isConcurrentGC = false;
-		GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForCompactScheme, false, false, isConcurrentGC, isGlobalGC);
+		GC_VMThreadStackSlotIterator::scanContinuationSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForCompactScheme, false, false);
 	}
 }
diff --git a/runtime/gc_glue_java/HeapWalkerDelegate.cpp b/runtime/gc_glue_java/HeapWalkerDelegate.cpp
index b4e51489781..252a4952c5a 100644
--- a/runtime/gc_glue_java/HeapWalkerDelegate.cpp
+++ b/runtime/gc_glue_java/HeapWalkerDelegate.cpp
@@ -60,17 +60,17 @@ MM_HeapWalkerDelegate::doContinuationNativeSlots(MM_EnvironmentBase *env, omrobj
 {
 	J9VMThread *currentThread = (J9VMThread *)env->getLanguageVMThread();
+	const bool isConcurrentGC = false;
 	const bool isGlobalGC = true;
-	if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) {
+	const bool beingMounted = false;
+	if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isConcurrentGC, isGlobalGC, beingMounted)) {
 		StackIteratorData4HeapWalker localData;
 		localData.heapWalker = _heapWalker;
 		localData.env = env;
 		localData.fromObject = objectPtr;
 		localData.function = function;
 		localData.userData = userData;
-		/* so far there is no case we need ClassWalk for heapwalker, so we set stackFrameClassWalkNeeded = false */
-		const bool isConcurrentGC = false;
-		GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForHeapWalker, false, false, isConcurrentGC, isGlobalGC);
+		GC_VMThreadStackSlotIterator::scanContinuationSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForHeapWalker, false, false);
 	}
 }
diff --git a/runtime/gc_glue_java/MarkingDelegate.cpp b/runtime/gc_glue_java/MarkingDelegate.cpp
index 8820e47ff61..fa1fff81ce2 100644
--- a/runtime/gc_glue_java/MarkingDelegate.cpp
+++ b/runtime/gc_glue_java/MarkingDelegate.cpp
@@ -261,8 +261,11 @@ void
 MM_MarkingDelegate::scanContinuationNativeSlots(MM_EnvironmentBase *env, omrobjectptr_t objectPtr)
 {
 	J9VMThread *currentThread = (J9VMThread *)env->getLanguageVMThread();
+	/* In STW GC there are no racing carrier threads doing mounts and no need for synchronization. */
+	bool isConcurrentGC = J9_ARE_ANY_BITS_SET(currentThread->privateFlags, J9_PRIVATE_FLAGS_CONCURRENT_MARK_ACTIVE);
 	const bool isGlobalGC = true;
-	if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) {
+	const bool beingMounted = false;
+	if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isConcurrentGC, isGlobalGC, beingMounted)) {
 		StackIteratorData4MarkingDelegate localData;
 		localData.markingDelegate = this;
 		localData.env = env;
@@ -272,10 +275,11 @@ MM_MarkingDelegate::scanContinuationNativeSlots(MM_EnvironmentBase *env, omrobje
 #if defined(J9VM_GC_DYNAMIC_CLASS_UNLOADING)
 		stackFrameClassWalkNeeded = isDynamicClassUnloadingEnabled();
 #endif /* J9VM_GC_DYNAMIC_CLASS_UNLOADING */
-		/* In STW GC there are no racing carrier threads doing mount and no need for the synchronization. */
-		bool isConcurrentGC = J9_ARE_ANY_BITS_SET(currentThread->privateFlags, J9_PRIVATE_FLAGS_CONCURRENT_MARK_ACTIVE);
-		GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForMarkingDelegate, stackFrameClassWalkNeeded, false, isConcurrentGC, isGlobalGC);
+		GC_VMThreadStackSlotIterator::scanContinuationSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForMarkingDelegate, stackFrameClassWalkNeeded, false);
+		if (isConcurrentGC) {
+			VM_VMHelpers::exitConcurrentGCScan(currentThread, objectPtr, isGlobalGC);
+		}
 	}
 }
diff --git a/runtime/gc_glue_java/MetronomeDelegate.cpp b/runtime/gc_glue_java/MetronomeDelegate.cpp
index 4a7f2709d4d..04e0f191e0a 100644
--- a/runtime/gc_glue_java/MetronomeDelegate.cpp
+++ b/runtime/gc_glue_java/MetronomeDelegate.cpp
@@ -1644,11 +1644,12 @@ stackSlotIteratorForRealtimeGC(J9JavaVM *javaVM, J9Object **slotPtr, void *local
 }
 
 void
-MM_MetronomeDelegate::scanContinuationNativeSlots(MM_EnvironmentRealtime *env, J9Object *objectPtr)
+MM_MetronomeDelegate::scanContinuationNativeSlots(MM_EnvironmentRealtime *env, J9Object *objectPtr, bool beingMounted)
 {
 	J9VMThread *currentThread = (J9VMThread *)env->getLanguageVMThread();
+	bool isConcurrentGC = _realtimeGC->isCollectorConcurrentTracing();
 	const bool isGlobalGC = true;
-	if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) {
+	if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isConcurrentGC, isGlobalGC, beingMounted)) {
 		StackIteratorData4RealtimeMarkingScheme localData;
 		localData.realtimeMarkingScheme = _markingScheme;
 		localData.env = env;
@@ -1659,9 +1660,11 @@ MM_MetronomeDelegate::scanContinuationNativeSlots(MM_EnvironmentRealtime *env, J
 		stackFrameClassWalkNeeded = isDynamicClassUnloadingEnabled();
 #endif /* J9VM_GC_DYNAMIC_CLASS_UNLOADING */
 		/* In STW GC there are no racing carrier threads doing mount and no need for the synchronization. */
-		bool isConcurrentGC = _realtimeGC->isCollectorConcurrentTracing();
-		GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForRealtimeGC, stackFrameClassWalkNeeded, false, isConcurrentGC, isGlobalGC);
+		GC_VMThreadStackSlotIterator::scanContinuationSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForRealtimeGC, stackFrameClassWalkNeeded, false);
+		if (isConcurrentGC) {
+			VM_VMHelpers::exitConcurrentGCScan(currentThread, objectPtr, isGlobalGC);
+		}
 	}
 }
diff --git a/runtime/gc_glue_java/MetronomeDelegate.hpp b/runtime/gc_glue_java/MetronomeDelegate.hpp
index e875c3e0c61..bec37998c21 100644
--- a/runtime/gc_glue_java/MetronomeDelegate.hpp
+++ b/runtime/gc_glue_java/MetronomeDelegate.hpp
@@ -176,7 +176,7 @@ class MM_MetronomeDelegate : public MM_BaseNonVirtual
 	void setUnmarkedImpliesCleared();
 	void unsetUnmarkedImpliesCleared();
-	void scanContinuationNativeSlots(MM_EnvironmentRealtime *env, J9Object *objectPtr);
+	void scanContinuationNativeSlots(MM_EnvironmentRealtime *env, J9Object *objectPtr, bool beingMounted = false);
 	UDATA scanContinuationObject(MM_EnvironmentRealtime *env, J9Object *objectPtr);
 #if defined(J9VM_GC_DYNAMIC_CLASS_UNLOADING)
diff --git a/runtime/gc_glue_java/ScavengerDelegate.cpp b/runtime/gc_glue_java/ScavengerDelegate.cpp
index 2b2f6b67225..6cc42c3a766 100644
--- a/runtime/gc_glue_java/ScavengerDelegate.cpp
+++ b/runtime/gc_glue_java/ScavengerDelegate.cpp
@@ -340,22 +340,32 @@ stackSlotIteratorForScavenge(J9JavaVM *javaVM, J9Object **slotPtr, void *localDa
 }
 
 bool
-MM_ScavengerDelegate::scanContinuationNativeSlots(MM_EnvironmentStandard *env, omrobjectptr_t objectPtr, MM_ScavengeScanReason reason)
+MM_ScavengerDelegate::scanContinuationNativeSlots(MM_EnvironmentStandard *env, omrobjectptr_t objectPtr, MM_ScavengeScanReason reason, bool beingMounted)
 {
 	bool shouldRemember = false;
 	J9VMThread *currentThread = (J9VMThread *)env->getLanguageVMThread();
+	/* In STW GC there are no racing carrier threads doing mounts and no need for synchronization. */
+	bool isConcurrentGC = false;
+	if (MUTATOR_THREAD == env->getThreadType()) {
+		isConcurrentGC = _extensions->isConcurrentScavengerInProgress();
+	} else {
+#if defined(OMR_GC_CONCURRENT_SCAVENGER)
+		isConcurrentGC = _extensions->scavenger->isCurrentPhaseConcurrent();
+#endif /* defined(OMR_GC_CONCURRENT_SCAVENGER) */
+	}
 	const bool isGlobalGC = false;
-	if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) {
+	if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isConcurrentGC, isGlobalGC, beingMounted)) {
 		StackIteratorData4Scavenge localData;
 		localData.scavengerDelegate = this;
 		localData.env = env;
 		localData.reason = reason;
 		localData.shouldRemember = &shouldRemember;
-		/* In STW GC there are no racing carrier threads doing mount and no need for the synchronization. */
-		bool isConcurrentGC = _extensions->isConcurrentScavengerInProgress();
-		GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForScavenge, false, false, isConcurrentGC, isGlobalGC);
+		GC_VMThreadStackSlotIterator::scanContinuationSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForScavenge, false, false);
+		if (isConcurrentGC) {
+			VM_VMHelpers::exitConcurrentGCScan(currentThread, objectPtr, isGlobalGC);
+		}
 	}
 	return shouldRemember;
 }
diff --git a/runtime/gc_glue_java/ScavengerDelegate.hpp b/runtime/gc_glue_java/ScavengerDelegate.hpp
index e7c27000c37..44be2673757 100644
--- a/runtime/gc_glue_java/ScavengerDelegate.hpp
+++ b/runtime/gc_glue_java/ScavengerDelegate.hpp
@@ -156,7 +156,7 @@ class MM_ScavengerDelegate : public MM_BaseNonVirtual {
 	void setShouldScavengeUnfinalizedObjects(bool shouldScavenge) { _shouldScavengeUnfinalizedObjects = shouldScavenge; }
 	void setShouldScavengeContinuationObjects(bool shouldScavenge) { _shouldScavengeContinuationObjects = shouldScavenge; }
-	bool scanContinuationNativeSlots(MM_EnvironmentStandard *env, omrobjectptr_t objectPtr, MM_ScavengeScanReason reason);
+	bool scanContinuationNativeSlots(MM_EnvironmentStandard *env, omrobjectptr_t objectPtr, MM_ScavengeScanReason reason, bool beingMounted = false);
 	volatile bool getShouldScavengeFinalizableObjects() { return _shouldScavengeFinalizableObjects; }
 	volatile bool getShouldScavengeUnfinalizedObjects() { return _shouldScavengeUnfinalizedObjects; }
diff --git a/runtime/gc_modron_standard/StandardAccessBarrier.cpp b/runtime/gc_modron_standard/StandardAccessBarrier.cpp
index 889488511c1..560d28ff498 100644
--- a/runtime/gc_modron_standard/StandardAccessBarrier.cpp
+++ b/runtime/gc_modron_standard/StandardAccessBarrier.cpp
@@ -1042,10 +1042,11 @@ MM_StandardAccessBarrier::preMountContinuation(J9VMThread *vmThread, j9object_t
 {
 #if defined(OMR_GC_CONCURRENT_SCAVENGER)
 	if (_extensions->isConcurrentScavengerInProgress()) {
-		/* concurrent scavenger in progress */
+		/* concurrent scavenger is active */
 		MM_EnvironmentStandard *env = MM_EnvironmentStandard::getEnvironment(vmThread->omrVMThread);
 		MM_ScavengeScanReason reason = SCAN_REASON_SCAVENGE;
-		_scavenger->getDelegate()->scanContinuationNativeSlots(env, contObject, reason);
+		const bool beingMounted = true;
+		_scavenger->getDelegate()->scanContinuationNativeSlots(env, contObject, reason, beingMounted);
 	}
 #endif /* OMR_GC_CONCURRENT_SCAVENGER */
 }
diff --git a/runtime/gc_realtime/RealtimeAccessBarrier.cpp b/runtime/gc_realtime/RealtimeAccessBarrier.cpp
index cbc2bbc2211..2451a8b4c72 100644
--- a/runtime/gc_realtime/RealtimeAccessBarrier.cpp
+++ b/runtime/gc_realtime/RealtimeAccessBarrier.cpp
@@ -955,7 +955,8 @@ MM_RealtimeAccessBarrier::preMountContinuation(J9VMThread *vmThread, j9object_t
 {
 	MM_EnvironmentRealtime *env = MM_EnvironmentRealtime::getEnvironment(vmThread->omrVMThread);
 	if (isBarrierActive(env)) {
-		_realtimeGC->getRealtimeDelegate()->scanContinuationNativeSlots(env, contObject);
+		const bool beingMounted = true;
+		_realtimeGC->getRealtimeDelegate()->scanContinuationNativeSlots(env, contObject, beingMounted);
 	}
 }
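The delegates and barriers above all follow the same shape, which is worth calling out: when the GC is concurrent, a true result from needScanStacksForContinuationObject() means this thread won the scan bit, so it must pair the scan with exitConcurrentGCScan(). Below is a minimal standalone model of that pairing, not the J9 code; ContinuationStateModel, tryWinScan, exitScan and scanStacks are invented stand-ins, and the real exitConcurrentGCScan() additionally notifies a carrier thread blocked on mounting.

#include <atomic>
#include <cstdint>

constexpr uintptr_t SCAN_LOCAL  = 0x8;
constexpr uintptr_t SCAN_GLOBAL = 0x10;

struct ContinuationStateModel {
	std::atomic<uintptr_t> state{0};
};

/* Try to atomically set this GC type's scan bit; success means we own the scan. */
bool tryWinScan(ContinuationStateModel &c, bool isGlobalGC)
{
	uintptr_t mask = isGlobalGC ? SCAN_GLOBAL : SCAN_LOCAL;
	uintptr_t old = c.state.load();
	while (0 == (old & mask)) {
		if (c.state.compare_exchange_weak(old, old | mask)) {
			return true;
		}
	}
	return false; /* another thread of the same GC type already owns the scan */
}

void exitScan(ContinuationStateModel &c, bool isGlobalGC)
{
	c.state.fetch_and(~(isGlobalGC ? SCAN_GLOBAL : SCAN_LOCAL));
}

void scanStacks(ContinuationStateModel &) { /* stand-in for GC_VMThreadStackSlotIterator::scanContinuationSlots() */ }

/* The shape each delegate now follows for a concurrent cycle. */
void scanContinuationLikeADelegate(ContinuationStateModel &c, bool isConcurrentGC, bool isGlobalGC)
{
	bool needScan = isConcurrentGC ? tryWinScan(c, isGlobalGC) : true; /* STW path simplified */
	if (needScan) {
		scanStacks(c);
		if (isConcurrentGC) {
			exitScan(c, isGlobalGC); /* release the scan bit (and, in J9, notify a blocked carrier) */
		}
	}
}

int main()
{
	ContinuationStateModel c;
	scanContinuationLikeADelegate(c, true, true);
	return (0 != c.state.load()) ? 1 : 0; /* expect 0: the scan bit was released */
}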
diff --git a/runtime/gc_structs/VMThreadStackSlotIterator.cpp b/runtime/gc_structs/VMThreadStackSlotIterator.cpp
index 39322e66b4d..77270e105c9 100644
--- a/runtime/gc_structs/VMThreadStackSlotIterator.cpp
+++ b/runtime/gc_structs/VMThreadStackSlotIterator.cpp
@@ -130,21 +130,22 @@ GC_VMThreadStackSlotIterator::scanSlots(
 }
 
 void
-GC_VMThreadStackSlotIterator::scanSlots(
+GC_VMThreadStackSlotIterator::scanContinuationSlots(
 		J9VMThread *vmThread,
 		j9object_t continuationObjectPtr,
 		void *userData,
 		J9MODRON_OSLOTITERATOR *oSlotIterator,
 		bool includeStackFrameClassReferences,
-		bool trackVisibleFrameDepth,
-		bool isConcurrentGC,
-		bool isGlobalGC
+		bool trackVisibleFrameDepth
 		)
 {
 	J9StackWalkState stackWalkState;
-	initializeStackWalkState(&stackWalkState, vmThread, userData, oSlotIterator, includeStackFrameClassReferences, trackVisibleFrameDepth);
-	VM_VMHelpers::walkContinuationStackFramesWrapper(vmThread, continuationObjectPtr, &stackWalkState, isConcurrentGC, isGlobalGC);
+
+#if JAVA_SPEC_VERSION >= 19
+	J9VMContinuation *continuation = J9VMJDKINTERNALVMCONTINUATION_VMREF(vmThread, continuationObjectPtr);
+	vmThread->javaVM->internalVMFunctions->walkContinuationStackFrames(vmThread, continuation, &stackWalkState);
+#endif /* JAVA_SPEC_VERSION >= 19 */
 }
 
 #if JAVA_SPEC_VERSION >= 19
diff --git a/runtime/gc_structs/VMThreadStackSlotIterator.hpp b/runtime/gc_structs/VMThreadStackSlotIterator.hpp
index 76830f9f587..06115a664fb 100644
--- a/runtime/gc_structs/VMThreadStackSlotIterator.hpp
+++ b/runtime/gc_structs/VMThreadStackSlotIterator.hpp
@@ -60,15 +60,13 @@ class GC_VMThreadStackSlotIterator
 		bool includeStackFrameClassReferences,
 		bool trackVisibleFrameDepth);
 
-	static void scanSlots(
+	static void scanContinuationSlots(
 		J9VMThread *vmThread,
 		j9object_t continuationObjectPtr,
 		void *userData,
 		J9MODRON_OSLOTITERATOR *oSlotIterator,
 		bool includeStackFrameClassReferences,
-		bool trackVisibleFrameDepth,
-		bool isConcurrentGC,
-		bool isGlobalGC);
+		bool trackVisibleFrameDepth);
 
 #if JAVA_SPEC_VERSION >= 19
 	static void scanSlots(
diff --git a/runtime/gc_vlhgc/CopyForwardScheme.cpp b/runtime/gc_vlhgc/CopyForwardScheme.cpp
index 40d8fcef3d1..9dce70e502e 100644
--- a/runtime/gc_vlhgc/CopyForwardScheme.cpp
+++ b/runtime/gc_vlhgc/CopyForwardScheme.cpp
@@ -2322,8 +2322,10 @@ MMINLINE void
 MM_CopyForwardScheme::scanContinuationNativeSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *objectPtr, ScanReason reason)
 {
 	J9VMThread *currentThread = (J9VMThread *)env->getLanguageVMThread();
+	const bool isConcurrentGC = false;
 	const bool isGlobalGC = false;
-	if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) {
+	const bool beingMounted = false;
+	if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isConcurrentGC, isGlobalGC, beingMounted)) {
 		StackIteratorData4CopyForward localData;
 		localData.copyForwardScheme = this;
 		localData.env = env;
@@ -2333,9 +2335,8 @@ MM_CopyForwardScheme::scanContinuationNativeSlots(MM_EnvironmentVLHGC *env, MM_A
 #if defined(J9VM_GC_DYNAMIC_CLASS_UNLOADING)
 		stackFrameClassWalkNeeded = isDynamicClassUnloadingEnabled();
 #endif /* J9VM_GC_DYNAMIC_CLASS_UNLOADING */
-		const bool isConcurrentGC = false;
-		GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForCopyForwardScheme, stackFrameClassWalkNeeded, false, isConcurrentGC, isGlobalGC);
+		GC_VMThreadStackSlotIterator::scanContinuationSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForCopyForwardScheme, stackFrameClassWalkNeeded, false);
 	}
 }
diff --git a/runtime/gc_vlhgc/GlobalMarkCardScrubber.cpp b/runtime/gc_vlhgc/GlobalMarkCardScrubber.cpp
index 4004f75a8da..542185fc0c1 100644
--- a/runtime/gc_vlhgc/GlobalMarkCardScrubber.cpp
+++ b/runtime/gc_vlhgc/GlobalMarkCardScrubber.cpp
@@ -192,16 +192,17 @@ bool MM_GlobalMarkCardScrubber::scrubContinuationNativeSlots(MM_EnvironmentVLHGC
 {
 	bool doScrub = true;
 	J9VMThread *currentThread = (J9VMThread *)env->getLanguageVMThread();
+	const bool isConcurrentGC = false;
 	const bool isGlobalGC = true;
-	if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) {
+	const bool beingMounted = false;
+	if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isConcurrentGC, isGlobalGC, beingMounted)) {
 		StackIteratorData4GlobalMarkCardScrubber localData;
 		localData.globalMarkCardScrubber = this;
 		localData.env = env;
 		localData.doScrub = &doScrub;
 		localData.fromObject = objectPtr;
-		const bool isConcurrentGC = false;
-		GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForGlobalMarkCardScrubber, false, false, isConcurrentGC, isGlobalGC);
+		GC_VMThreadStackSlotIterator::scanContinuationSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForGlobalMarkCardScrubber, false, false);
 	}
 	return doScrub;
 }
diff --git a/runtime/gc_vlhgc/GlobalMarkingScheme.cpp b/runtime/gc_vlhgc/GlobalMarkingScheme.cpp
index 765c2f7d9e1..68d17de1862 100644
--- a/runtime/gc_vlhgc/GlobalMarkingScheme.cpp
+++ b/runtime/gc_vlhgc/GlobalMarkingScheme.cpp
@@ -792,8 +792,11 @@ void
 MM_GlobalMarkingScheme::scanContinuationNativeSlots(MM_EnvironmentVLHGC *env, J9Object *objectPtr, ScanReason reason)
 {
 	J9VMThread *currentThread = (J9VMThread *)env->getLanguageVMThread();
+	/* In STW GC there are no racing carrier threads doing mounts and no need for synchronization. */
+	bool isConcurrentGC = (MM_VLHGCIncrementStats::mark_concurrent == static_cast<MM_CycleStateVLHGC *>(env->_cycleState)->_vlhgcIncrementStats._globalMarkIncrementType);
 	const bool isGlobalGC = true;
-	if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) {
+	const bool beingMounted = false;
+	if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isConcurrentGC, isGlobalGC, beingMounted)) {
 		StackIteratorData4GlobalMarkingScheme localData;
 		localData.globalMarkingScheme = this;
 		localData.env = env;
@@ -802,10 +805,11 @@ MM_GlobalMarkingScheme::scanContinuationNativeSlots(MM_EnvironmentVLHGC *env, J9
 #if defined(J9VM_GC_DYNAMIC_CLASS_UNLOADING)
 		stackFrameClassWalkNeeded = isDynamicClassUnloadingEnabled();
 #endif /* J9VM_GC_DYNAMIC_CLASS_UNLOADING */
-		/* In STW GC there are no racing carrier threads doing mount and no need for the synchronization. */
-		bool isConcurrentGC = (MM_VLHGCIncrementStats::mark_concurrent == static_cast<MM_CycleStateVLHGC *>(env->_cycleState)->_vlhgcIncrementStats._globalMarkIncrementType);
-		GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForGlobalMarkingScheme, stackFrameClassWalkNeeded, false, isConcurrentGC, isGlobalGC);
+		GC_VMThreadStackSlotIterator::scanContinuationSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForGlobalMarkingScheme, stackFrameClassWalkNeeded, false);
+		if (isConcurrentGC) {
+			VM_VMHelpers::exitConcurrentGCScan(currentThread, objectPtr, isGlobalGC);
+		}
 	}
 }
diff --git a/runtime/gc_vlhgc/WriteOnceCompactor.cpp b/runtime/gc_vlhgc/WriteOnceCompactor.cpp
index 013d2a2b2cb..89627c0fd91 100644
--- a/runtime/gc_vlhgc/WriteOnceCompactor.cpp
+++ b/runtime/gc_vlhgc/WriteOnceCompactor.cpp
@@ -1240,15 +1240,16 @@ MM_WriteOnceCompactor::fixupContinuationNativeSlots(MM_EnvironmentVLHGC* env, J9
 	 * mounted Virtual threads later during root fixup, we will skip it during this heap fixup pass
 	 * (hence passing true for scanOnlyUnmounted parameter).
 	 */
+	const bool isConcurrentGC = false;
 	const bool isGlobalGC = MM_CycleState::CT_GLOBAL_GARBAGE_COLLECTION == env->_cycleState->_collectionType;
-	if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) {
+	const bool beingMounted = false;
+	if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isConcurrentGC, isGlobalGC, beingMounted)) {
 		StackIteratorData4WriteOnceCompactor localData;
 		localData.writeOnceCompactor = this;
 		localData.env = env;
 		localData.fromObject = objectPtr;
-		const bool isConcurrentGC = false;
-		GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForWriteOnceCompactor, false, false, isConcurrentGC, isGlobalGC);
+		GC_VMThreadStackSlotIterator::scanContinuationSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForWriteOnceCompactor, false, false);
 	}
 }
diff --git a/runtime/oti/VMHelpers.hpp b/runtime/oti/VMHelpers.hpp
index f6e2ec085c5..770746175a0 100644
--- a/runtime/oti/VMHelpers.hpp
+++ b/runtime/oti/VMHelpers.hpp
@@ -43,6 +43,7 @@
 #include "ute.h"
 #include "AtomicSupport.hpp"
 #include "ObjectAllocationAPI.hpp"
+#include "objhelp.h"
 
 typedef enum {
 	J9_BCLOOP_SEND_TARGET_INITIAL_STATIC = 0,
@@ -2050,7 +2051,38 @@ class VM_VMHelpers
 	}
 
 #if JAVA_SPEC_VERSION >= 19
-	static VMINLINE uintptr_t getConcurrentGCMask(bool isGlobalGC)
+	static VMINLINE bool
+	isStarted(ContinuationState continuationState)
+	{
+		return J9_ARE_ALL_BITS_SET(continuationState, J9_GC_CONTINUATION_STATE_STARTED);
+	}
+
+	static VMINLINE void
+	setContinuationStarted(J9VMContinuation *continuation)
+	{
+		continuation->state |= J9_GC_CONTINUATION_STATE_STARTED;
+	}
+
+	static VMINLINE bool
+	isFinished(ContinuationState continuationState)
+	{
+		return J9_ARE_ALL_BITS_SET(continuationState, J9_GC_CONTINUATION_STATE_FINISHED);
+	}
+
+	static VMINLINE void
+	setContinuationFinished(J9VMContinuation *continuation)
+	{
+		continuation->state |= J9_GC_CONTINUATION_STATE_FINISHED;
+	}
+
+	static VMINLINE bool
+	isActive(ContinuationState continuationState)
+	{
+		return isStarted(continuationState) && !isFinished(continuationState);
+	}
+
+	static VMINLINE uintptr_t
+	getConcurrentGCMask(bool isGlobalGC)
 	{
 		if (isGlobalGC) {
 			return J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_GLOBAL;
@@ -2067,80 +2099,150 @@ class VM_VMHelpers
 	 * if isGlobalGC == false, only check if local concurrent scanning case.
 	 */
 	static VMINLINE bool
-	isConcurrentlyScannedFromContinuationState(uintptr_t continuationState)
+	isConcurrentlyScanned(ContinuationState continuationState)
 	{
 		return J9_ARE_ANY_BITS_SET(continuationState, J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_ANY);
 	}
 
 	static VMINLINE bool
-	isConcurrentlyScannedFromContinuationState(uintptr_t continuationState, bool isGlobalGC)
+	isConcurrentlyScanned(ContinuationState continuationState, bool isGlobalGC)
 	{
 		uintptr_t concurrentGCMask = getConcurrentGCMask(isGlobalGC);
-		return J9_ARE_ANY_BITS_SET(continuationState, concurrentGCMask);
+		return J9_ARE_ALL_BITS_SET(continuationState, concurrentGCMask);
+	}
+
+	static VMINLINE void
+	setConcurrentlyScanned(ContinuationState *continuationState, bool isGlobalGC)
+	{
+		*continuationState |= getConcurrentGCMask(isGlobalGC);
+	}
+
+	static VMINLINE void
+	resetConcurrentlyScanned(ContinuationState *continuationState, bool isGlobalGC)
+	{
+		*continuationState &= ~getConcurrentGCMask(isGlobalGC);
+	}
+
+	static VMINLINE bool
+	isPendingToBeMounted(ContinuationState continuationState)
+	{
+		return J9_ARE_ALL_BITS_SET(continuationState, J9_GC_CONTINUATION_STATE_PENDING_TO_BE_MOUNTED);
+	}
+
+	static VMINLINE void
+	resetPendingState(ContinuationState *continuationState)
+	{
+		*continuationState &= ~J9_GC_CONTINUATION_STATE_PENDING_TO_BE_MOUNTED;
 	}
 
 	/**
 	 * Check if the related J9VMContinuation is mounted to carrier thread
-	 * @param[in] continuation the related J9VMContinuation
+	 * If the carrierThreadID has been set in J9VMContinuation->state, the continuation might be mounted;
+	 * there is also a pending-to-be-mounted case, where the mounting is blocked by concurrent continuation
+	 * scanning or related VM access.
+	 *
+	 * @param[in] continuationState the related J9VMContinuation->state
 	 * @return true if it is mounted.
 	 */
 	static VMINLINE bool
-	isContinuationMounted(J9VMContinuation *continuation)
+	isContinuationFullyMounted(ContinuationState continuationState)
 	{
-		return J9_ARE_ANY_BITS_SET(continuation->state, ~(uintptr_t)J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_ANY);
+		bool mounted = J9_ARE_ANY_BITS_SET(continuationState, J9_GC_CONTINUATION_STATE_CARRIERID_MASK);
+		if (mounted && isPendingToBeMounted(continuationState)) {
+			mounted = false;
+		}
+		return mounted;
 	}
 
 	static VMINLINE J9VMThread *
-	getCarrierThreadFromContinuationState(uintptr_t continuationState)
+	getCarrierThread(ContinuationState continuationState)
 	{
-		return (J9VMThread *)(continuationState & (~(uintptr_t)J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_ANY));
+		return (J9VMThread *)(continuationState & J9_GC_CONTINUATION_STATE_CARRIERID_MASK);
 	}
 
 	static VMINLINE bool
-	isContinuationMountedOrConcurrentlyScanned(J9VMContinuation *continuation, bool isGlobalGC)
+	isContinuationMountedWithCarrierThread(ContinuationState continuationState, J9VMThread *carrierThread)
 	{
-		return isContinuationMounted(continuation) || isConcurrentlyScannedFromContinuationState(continuation->state, isGlobalGC);
+		return carrierThread == getCarrierThread(continuationState);
+	}
+
+	static VMINLINE void
+	settingCarrierAndPendingState(ContinuationState *continuationState, J9VMThread *carrierThread)
+	{
+		/* also set PendingToBeMounted */
+		*continuationState |= (uintptr_t)carrierThread | J9_GC_CONTINUATION_STATE_PENDING_TO_BE_MOUNTED;
+	}
+
+	static VMINLINE void
+	resetContinuationCarrierID(J9VMContinuation *continuation)
+	{
+		continuation->state &= ~J9_GC_CONTINUATION_STATE_CARRIERID_MASK;
 	}
 
 	/*
 	 *
 	 * param[in] checkConcurrentState can be J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_LOCAL or J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_GLOBAL
 	 *
-	 * If WinningConcurrentGCScan set J9_GC_CONTINUATION_bit0:STATE_CONCURRENT_SCAN_LOCAL or bit1:J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_GLOBAL in the state base on checkConcurrentState
-	 * If low tagging(bit0 or bit1) failed due to either
+	 * There is no need to scan before the continuation is started or after it is finished.
+	 * WinningConcurrentGCScan sets J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_LOCAL (bit 3) or J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_GLOBAL (bit 4) in the state, based on checkConcurrentState.
	 * If low tagging (bit 3 or bit 4) failed, it is due to either:
 	 *
 	 * a carrier thread winning to mount, we don't need to do anything, since it will be compensated by pre/post mount actions
-	 * another GC thread winning to scan(bit0/bit1,bit0 and bit1 is irrelevant and independent), again don't do anything, and let the winning thread do the work, instead
+	 * (this includes the pending-to-be-mounted case, where another concurrent scan blocks the mounting), or
+	 * another GC thread winning to scan (bit 3 and bit 4 are independent of each other); again, don't do anything, and let the winning thread do the work instead.
 	 */
 	static VMINLINE bool
-	tryWinningConcurrentGCScan(J9VMContinuation *continuation, bool isGlobalGC)
+	tryWinningConcurrentGCScan(J9VMContinuation *continuation, bool isGlobalGC, bool beingMounted)
 	{
-		uintptr_t complementGCConcurrentState = J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_NONE;
-		uintptr_t returnedState = J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_NONE;
 		do {
-			/* preserve the concurrent GC state for the other type of GC */
-			complementGCConcurrentState = continuation->state & getConcurrentGCMask(!isGlobalGC);
-			returnedState = VM_AtomicSupport::lockCompareExchange(&continuation->state, complementGCConcurrentState, complementGCConcurrentState | getConcurrentGCMask(isGlobalGC));
-			/* if the other GC happened to change its concurrentGC state since us taking a snapshot of their state, we'll have to retry */
-		} while (complementGCConcurrentState != (returnedState & complementGCConcurrentState));
-
-		/* if returned state does not contain carrier ID, return that we won */
-		return (complementGCConcurrentState == returnedState);
+			uintptr_t oldContinuationState = continuation->state;
+			if (VM_VMHelpers::isActive(oldContinuationState)) {
+				/* If it's being concurrently scanned within the same type of GC by another thread, it's unnecessary to do it again. */
+				if (!isConcurrentlyScanned(oldContinuationState, isGlobalGC)) {
+					/* If it's fully mounted, it's unnecessary to scan now, since it will be compensated by pre/post mount actions.
+					 * If it's being mounted by this thread, we must be in pre/post mount (would be nice, but not trivial to assert it),
+					 * therefore we must scan to aid the concurrent GC.
+					 */
+					if (beingMounted || !isContinuationFullyMounted(oldContinuationState)) {
+						/* Try to set the scan bit for this GC type */
+						uintptr_t newContinuationState = oldContinuationState;
+						setConcurrentlyScanned(&newContinuationState, isGlobalGC);
+						uintptr_t returnedState = VM_AtomicSupport::lockCompareExchange(&continuation->state, oldContinuationState, newContinuationState);
+						/* If no other thread changed anything (mounted or won scanning for any GC), we succeeded, otherwise retry */
+						if (oldContinuationState == returnedState) {
+							return true;
+						} else {
+							continue;
+						}
+					}
+				}
+			}
+		} while (false);
+		/* We did not even try to win, since it was either mounted or already being scanned */
+		return false;
 	}
 
 	/**
-	 * clear CONCURRENTSCANNING flag bit0:for LocalConcurrentScanning /bit1:for GlobalConcurrentScanning base on checkConcurrentState,
-	 * if all CONCURRENTSCANNING bits(bit0 and bit1) are cleared and the continuation mounting is blocked by concurrent scanning, notify it.
+	 * clear the CONCURRENTSCANNING flag (bit 3 for LocalConcurrentScanning, bit 4 for GlobalConcurrentScanning) based on checkConcurrentState;
+	 * if all CONCURRENTSCANNING bits (bit 3 and bit 4) are cleared and the continuation mounting is blocked by concurrent scanning, notify it.
 	 * @param [in] checkConcurrentState can be J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_LOCAL or J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_GLOBAL
 	 */
 	static VMINLINE void
 	exitConcurrentGCScan(J9VMContinuation *continuation, bool isGlobalGC)
 	{
-		/* clear CONCURRENTSCANNING flag bit0:LocalConcurrentScanning /bit1:GlobalConcurrentScanning */
-		uintptr_t oldContinuationState = VM_AtomicSupport::bitAnd(&continuation->state, ~getConcurrentGCMask(isGlobalGC));
-		uintptr_t complementGCConcurrentState = oldContinuationState & getConcurrentGCMask(!isGlobalGC);
-		if (!complementGCConcurrentState) {
-			J9VMThread *carrierThread = getCarrierThreadFromContinuationState(oldContinuationState);
+		/* clear the CONCURRENTSCANNING flag: bit 3 (LocalConcurrentScanning) or bit 4 (GlobalConcurrentScanning) */
+		uintptr_t oldContinuationState = 0;
+		uintptr_t returnContinuationState = 0;
+		do {
+			oldContinuationState = continuation->state;
+			uintptr_t newContinuationState = oldContinuationState;
+			resetConcurrentlyScanned(&newContinuationState, isGlobalGC);
+			returnContinuationState = VM_AtomicSupport::lockCompareExchange(&continuation->state, oldContinuationState, newContinuationState);
+		} while (returnContinuationState != oldContinuationState);
+
+		if (!isConcurrentlyScanned(returnContinuationState, !isGlobalGC)) {
+			J9VMThread *carrierThread = getCarrierThread(returnContinuationState);
 			if (NULL != carrierThread) {
 				omrthread_monitor_enter(carrierThread->publicFlagsMutex);
 				/* notify the waiting carrierThread that we just finished scanning and we were the only/last GC to scan it, so that it can proceed with mounting.
 				 */
@@ -2152,24 +2254,13 @@ class VM_VMHelpers
 
 #endif /* JAVA_SPEC_VERSION >= 19 */
 
-	static VMINLINE UDATA
-	walkContinuationStackFramesWrapper(J9VMThread *vmThread, j9object_t continuationObject, J9StackWalkState *walkState, bool isConcurrentGC, bool isGlobalGC)
+	static VMINLINE void
+	exitConcurrentGCScan(J9VMThread *vmThread, j9object_t continuationObject, bool isGlobalGC)
 	{
-		UDATA rc = J9_STACKWALK_RC_NONE;
 #if JAVA_SPEC_VERSION >= 19
 		J9VMContinuation *continuation = J9VMJDKINTERNALVMCONTINUATION_VMREF(vmThread, continuationObject);
-		if (isConcurrentGC && (NULL != continuation)) {
-			if (!tryWinningConcurrentGCScan(continuation, isGlobalGC)) {
-				/* if continuation is mounted or already being scanned by another GC thread of the same GC type, we do nothing */
-				return rc;
-			}
-		}
-		rc = vmThread->javaVM->internalVMFunctions->walkContinuationStackFrames(vmThread, continuation, walkState);
-		if (isConcurrentGC && (NULL != continuation)) {
-			exitConcurrentGCScan(continuation, isGlobalGC);
-		}
+		exitConcurrentGCScan(continuation, isGlobalGC);
 #endif /* JAVA_SPEC_VERSION >= 19 */
-		return rc;
 	}
 
 	static VMINLINE UDATA
diff --git a/runtime/oti/j9consts.h b/runtime/oti/j9consts.h
index 68745425bb9..6564db9690e 100644
--- a/runtime/oti/j9consts.h
+++ b/runtime/oti/j9consts.h
@@ -492,10 +492,14 @@ extern "C" {
 #define J9_GC_MARK_MAP_LOG_SIZEOF_UDATA 0x5
 #define J9_GC_MARK_MAP_UDATA_MASK 0x1F
 #endif /* J9VM_ENV_DATA64 */
-#define J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_NONE 0
-#define J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_LOCAL 0x1
-#define J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_GLOBAL 0x2
-#define J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_ANY (J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_LOCAL | J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_GLOBAL)
+#define J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_NONE 0
+#define J9_GC_CONTINUATION_STATE_STARTED 0x1
+#define J9_GC_CONTINUATION_STATE_FINISHED 0x2
+#define J9_GC_CONTINUATION_STATE_PENDING_TO_BE_MOUNTED 0x4
+#define J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_LOCAL 0x8
+#define J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_GLOBAL 0x10
+#define J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_ANY (J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_LOCAL | J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_GLOBAL)
+#define J9_GC_CONTINUATION_STATE_CARRIERID_MASK (~(uintptr_t)0xff)
 
 #define J9VMGC_SIZECLASSES_MIN 0x1
 #define J9VMGC_SIZECLASSES_MIN_SMALL 0x1
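Since the low 8 bits of the state word are now flag bits, the carrier thread pointer stored in the same word must have its low 8 bits clear (J9_GC_CONTINUATION_STATE_CARRIERID_MASK is ~0xff), which appears to presume at least 256-byte alignment of J9VMThread. The following is a small, self-contained illustration of packing and unpacking the carrier ID alongside the flags; the names (FakeCarrierThread, packCarrierPending, etc.) are hypothetical, not the J9 helpers.

#include <cassert>
#include <cstdint>

constexpr uintptr_t FLAG_STARTED               = 0x1;
constexpr uintptr_t FLAG_PENDING_TO_BE_MOUNTED = 0x4;
constexpr uintptr_t CARRIERID_MASK             = ~(uintptr_t)0xff;

/* Stands in for J9VMThread; alignas(256) keeps the low 8 bits of its address clear. */
struct alignas(256) FakeCarrierThread { char pad[256]; };

/* Analogue of settingCarrierAndPendingState(): publish the carrier and mark the mount as pending. */
uintptr_t packCarrierPending(uintptr_t state, FakeCarrierThread *carrier)
{
	return state | reinterpret_cast<uintptr_t>(carrier) | FLAG_PENDING_TO_BE_MOUNTED;
}

/* Analogue of getCarrierThread(). */
FakeCarrierThread *unpackCarrier(uintptr_t state)
{
	return reinterpret_cast<FakeCarrierThread *>(state & CARRIERID_MASK);
}

/* Analogue of resetContinuationCarrierID(): drop the carrier ID, keep the flag bits. */
uintptr_t clearCarrier(uintptr_t state)
{
	return state & ~CARRIERID_MASK;
}

int main()
{
	static FakeCarrierThread carrier;
	uintptr_t state = FLAG_STARTED;
	state = packCarrierPending(state, &carrier);
	assert(unpackCarrier(state) == &carrier);
	assert(0 != (state & FLAG_PENDING_TO_BE_MOUNTED));
	state = clearCarrier(state);
	assert(unpackCarrier(state) == nullptr);
	assert(FLAG_STARTED == (state & FLAG_STARTED)); /* flag bits survive clearing the carrier ID */
	return 0;
}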
diff --git a/runtime/oti/j9nonbuilder.h b/runtime/oti/j9nonbuilder.h
index 4bfd3311924..006eb68cd51 100644
--- a/runtime/oti/j9nonbuilder.h
+++ b/runtime/oti/j9nonbuilder.h
@@ -5020,6 +5020,8 @@ typedef struct J9JITGPRSpillArea {
 } J9JITGPRSpillArea;
 
 #if JAVA_SPEC_VERSION >= 19
+typedef uintptr_t ContinuationState;
+
 typedef struct J9VMContinuation {
 	UDATA* arg0EA;
 	UDATA* bytecodes;
@@ -5034,7 +5036,10 @@ typedef struct J9VMContinuation {
 	struct J9JITGPRSpillArea jitGPRs;
 	struct J9I2JState i2jState;
 	struct J9VMEntryLocalStorage* oldEntryLocalStorage;
-	volatile UDATA state; /* it's a bit-wise struct of CarrierThread ID and ConcurrentlyScanned flag bit0:localConcurrentScan-J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_LOCAL, bit1:globalConcurrentScan-J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_GLOBAL */
+	/* A bit-wise struct of the carrier thread ID and continuation flags:
+	 * the low 8 bits are reserved for flags and the rest is the carrier thread ID.
+	 */
+	volatile ContinuationState state;
 	UDATA dropFlags;
 } J9VMContinuation;
 #endif /* JAVA_SPEC_VERSION >= 19 */
diff --git a/runtime/vm/BytecodeInterpreter.hpp b/runtime/vm/BytecodeInterpreter.hpp
index 2bcd4738266..d89ddec4bf7 100644
--- a/runtime/vm/BytecodeInterpreter.hpp
+++ b/runtime/vm/BytecodeInterpreter.hpp
@@ -5227,9 +5227,6 @@ class INTERPRETER_CLASS
 		buildInternalNativeStackFrame(REGISTER_ARGS);
 		updateVMStruct(REGISTER_ARGS);
-		/* Notify GC of Continuation stack swap */
-		_vm->memoryManagerFunctions->preMountContinuation(_currentThread, continuationObject);
-
 		if (enterContinuation(_currentThread, continuationObject)) {
 			_sendMethod = J9VMJDKINTERNALVMCONTINUATION_EXECUTE_METHOD(_currentThread->javaVM);
 			rc = GOTO_RUN_METHOD;
@@ -5257,10 +5254,6 @@ class INTERPRETER_CLASS
 		/* store the current Continuation state and swap to carrier thread stack */
 		yieldContinuation(_currentThread);
-		j9object_t continuationObject = J9VMJAVALANGTHREAD_CONT(_currentThread, _currentThread->carrierThreadObject);
-		/* Notify GC of Continuation stack swap */
-		_vm->memoryManagerFunctions->postUnmountContinuation(_currentThread, continuationObject);
-
 		VMStructHasBeenUpdated(REGISTER_ARGS);
 		restoreInternalNativeStackFrame(REGISTER_ARGS);
diff --git a/runtime/vm/ContinuationHelpers.cpp b/runtime/vm/ContinuationHelpers.cpp
index a88332c48ca..f184f7bdfee 100644
--- a/runtime/vm/ContinuationHelpers.cpp
+++ b/runtime/vm/ContinuationHelpers.cpp
@@ -90,28 +90,46 @@ createContinuation(J9VMThread *currentThread, j9object_t continuationObject)
 	return result;
 }
 
-void
-synchronizeWithConcurrentGCScan(J9VMThread *currentThread, J9VMContinuation *continuation)
+j9object_t
+synchronizeWithConcurrentGCScan(J9VMThread *currentThread, j9object_t continuationObject, J9VMContinuation *continuation)
 {
-	volatile uintptr_t *localAddr = &continuation->state;
-	/* atomically 'or' (not 'set') continuation->state with currentThread */
-	uintptr_t oldContinuationState = VM_AtomicSupport::bitOr(localAddr, (uintptr_t)currentThread);
+	uintptr_t oldContinuationState = 0;
+	uintptr_t returnContinuationState = 0;
+	do {
+		oldContinuationState = continuation->state;
+		uintptr_t newContinuationState = oldContinuationState;
+		VM_VMHelpers::settingCarrierAndPendingState(&newContinuationState, currentThread);
+		returnContinuationState = VM_AtomicSupport::lockCompareExchange(&continuation->state, oldContinuationState, newContinuationState);
+	} while (returnContinuationState != oldContinuationState);
+	Assert_VM_false(VM_VMHelpers::isPendingToBeMounted(returnContinuationState));
+	Assert_VM_Null(VM_VMHelpers::getCarrierThread(returnContinuationState));
 
-	Assert_VM_Null(VM_VMHelpers::getCarrierThreadFromContinuationState(oldContinuationState));
+	do {
+		if (VM_VMHelpers::isConcurrentlyScanned(returnContinuationState)) {
+			/* currentThread was low tagged (GC was already in progress), but by 'or'-ing our ID, we let GC know there is a pending mount */
-	if (VM_VMHelpers::isConcurrentlyScannedFromContinuationState(oldContinuationState)) {
-		/* currentThread was low tagged (GC was already in progress), but by 'or'-ing our ID, we let GC know there is a pending mount */
-		internalReleaseVMAccess(currentThread);
+			PUSH_OBJECT_IN_SPECIAL_FRAME(currentThread, continuationObject);
+			internalReleaseVMAccess(currentThread);
 
-		omrthread_monitor_enter(currentThread->publicFlagsMutex);
-		while (VM_VMHelpers::isConcurrentlyScannedFromContinuationState(*localAddr)) {
-			/* GC is still concurrently scanning the continuation(currentThread was still low tagged), wait for GC thread to notify us when it's done. */
-			omrthread_monitor_wait(currentThread->publicFlagsMutex);
-		}
-		omrthread_monitor_exit(currentThread->publicFlagsMutex);
+			omrthread_monitor_enter(currentThread->publicFlagsMutex);
+			while (VM_VMHelpers::isConcurrentlyScanned(continuation->state)) {
+				/* GC is still concurrently scanning the continuation (currentThread was still low tagged); wait for the GC thread to notify us when it's done. */
+				omrthread_monitor_wait(currentThread->publicFlagsMutex);
+			}
+			omrthread_monitor_exit(currentThread->publicFlagsMutex);
 
-		internalAcquireVMAccess(currentThread);
-	}
+			internalAcquireVMAccess(currentThread);
+			continuationObject = POP_OBJECT_IN_SPECIAL_FRAME(currentThread);
+		}
+		oldContinuationState = continuation->state;
+		Assert_VM_true(VM_VMHelpers::isContinuationMountedWithCarrierThread(oldContinuationState, currentThread));
+		Assert_VM_true(VM_VMHelpers::isPendingToBeMounted(oldContinuationState));
+		uintptr_t newContinuationState = oldContinuationState;
+		VM_VMHelpers::resetPendingState(&newContinuationState);
+		returnContinuationState = VM_AtomicSupport::lockCompareExchange(&continuation->state, oldContinuationState, newContinuationState);
+	} while (oldContinuationState != returnContinuationState);
+
+	return continuationObject;
 }
 
 BOOLEAN
@@ -133,12 +151,19 @@ enterContinuation(J9VMThread *currentThread, j9object_t continuationObject)
 	Assert_VM_notNull(continuation);
 
 	/* let GC know we are mounting, so they don't need to scan us, or if there is already ongoing scan wait till it's complete. */
-	synchronizeWithConcurrentGCScan(currentThread, continuation);
+	continuationObject = synchronizeWithConcurrentGCScan(currentThread, continuationObject, continuation);
+
+	/* Defer preMountContinuation() until after synchronizeWithConcurrentGCScan() to compensate for a potentially missed concurrent scan
+	 * between synchronizeWithConcurrentGCScan() and swapFieldsWithContinuation().
+	 */
+	if (started) {
+		/* Notify GC of Continuation stack swap */
+		currentThread->javaVM->memoryManagerFunctions->preMountContinuation(currentThread, continuationObject);
+	}
 
 	VM_ContinuationHelpers::swapFieldsWithContinuation(currentThread, continuation, started);
 
 	currentThread->currentContinuation = continuation;
-
 	/* Reset counters which determine if the current continuation is pinned. */
 	currentThread->continuationPinCount = 0;
 	currentThread->ownedMonitorCount = 0;
@@ -152,6 +177,8 @@ enterContinuation(J9VMThread *currentThread, j9object_t continuationObject)
 	} else {
 		/* start new Continuation execution */
 		J9VMJDKINTERNALVMCONTINUATION_SET_STARTED(currentThread, continuationObject, JNI_TRUE);
+		VM_VMHelpers::setContinuationStarted(continuation);
+
 		/* prepare callin frame, send method will be set by interpreter */
 		J9SFJNICallInFrame *frame = ((J9SFJNICallInFrame*)currentThread->sp) - 1;
@@ -198,8 +225,23 @@ yieldContinuation(J9VMThread *currentThread)
 	 *
 	 * must be maintained for weakly ordered CPUs, to unsure that once the continuation is again available for GC scan (on potentially remote CPUs), all CPUs see up-to-date stack .
 	 */
-	Assert_VM_true((uintptr_t)currentThread == continuation->state);
-	continuation->state = J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_NONE;
+	j9object_t continuationObject = J9VMJAVALANGTHREAD_CONT(currentThread, currentThread->carrierThreadObject);
+	/* Notify GC of Continuation stack swap */
+	jboolean finished = J9VMJDKINTERNALVMCONTINUATION_FINISHED(currentThread, continuationObject);
+
+	if (finished) {
+		VM_VMHelpers::setContinuationFinished(continuation);
+	}
+	Assert_VM_true(VM_VMHelpers::isContinuationMountedWithCarrierThread(continuation->state, currentThread));
+	VM_VMHelpers::resetContinuationCarrierID(continuation);
+	/* Logically postUnmountContinuation(), which adds the related continuation object to the remembered set or dirties the card for future concurrent-marking scans, should be called
+	 * before resetContinuationCarrierID(). However, if concurrent card cleaning happens, the scan could take place before resetContinuationCarrierID(), and the related compensating
+	 * scan would then be missed because the continuation would still appear mounted (we don't scan mounted continuations; they are scanned during root scanning via J9VMThread->currentContinuation).
+	 * So postUnmountContinuation() is called after resetContinuationCarrierID() to avoid the missed-scan case.
+	 */
+	if (!finished) {
+		currentThread->javaVM->memoryManagerFunctions->postUnmountContinuation(currentThread, continuationObject);
+	}
 
 	return result;
 }
@@ -219,6 +261,8 @@ freeContinuation(J9VMThread *currentThread, j9object_t continuationObject)
 		currentStack = previous;
 	} while (NULL != currentStack);
 
+	Assert_VM_true(VM_VMHelpers::isFinished(continuation->state));
+
 	/* Free the J9VMContinuation struct */
 	j9mem_free_memory(continuation);
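To make the hand-off at the end of this patch easier to follow, here is a compact, self-contained model of what synchronizeWithConcurrentGCScan() and exitConcurrentGCScan() implement: the carrier publishes its ID with the pending bit, waits while any concurrent scan bit is set, the last GC to finish scanning wakes it, and only then does the carrier clear the pending bit and complete the mount. This is a sketch under simplifying assumptions, not the J9 implementation: std::atomic and a condition variable stand in for continuation->state and publicFlagsMutex, and all names (ContinuationModel, gcScan, carrierMount) are invented.

#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <thread>

constexpr uintptr_t PENDING      = 0x4;
constexpr uintptr_t SCAN_ANY     = 0x8 | 0x10; /* local | global scan bits */
constexpr uintptr_t CARRIER_MASK = ~(uintptr_t)0xff;

struct ContinuationModel {
	std::atomic<uintptr_t> state{0x1}; /* started */
	std::mutex mtx;                    /* stands in for carrierThread->publicFlagsMutex */
	std::condition_variable cv;
};

/* GC side: scan while holding a scan bit, then clear it and wake a blocked carrier (exitConcurrentGCScan). */
void gcScan(ContinuationModel &c, uintptr_t scanBit)
{
	c.state.fetch_or(scanBit);          /* in J9 this is the CAS in tryWinningConcurrentGCScan() */
	/* ... walk the continuation's stack here ... */
	uintptr_t after = c.state.fetch_and(~scanBit) & ~scanBit;
	if (0 == (after & SCAN_ANY)) {      /* we were the only/last scanner */
		std::lock_guard<std::mutex> lock(c.mtx);
		c.cv.notify_all();
	}
}

/* Carrier side: publish carrier ID + pending, wait out any scan, then clear pending (synchronizeWithConcurrentGCScan). */
void carrierMount(ContinuationModel &c, uintptr_t carrierId)
{
	c.state.fetch_or(carrierId | PENDING);
	{
		std::unique_lock<std::mutex> lock(c.mtx);
		c.cv.wait(lock, [&] { return 0 == (c.state.load() & SCAN_ANY); });
	}
	c.state.fetch_and(~PENDING);        /* mount is now "full": GCs leave the stack to the pre/post mount hooks */
}

int main()
{
	ContinuationModel c;
	std::thread gc(gcScan, std::ref(c), (uintptr_t)0x10);
	std::thread carrier(carrierMount, std::ref(c), (uintptr_t)0x7f1100);
	gc.join();
	carrier.join();
	return (c.state.load() == (0x1 | 0x7f1100)) ? 0 : 1; /* expect started | carrier ID, all scan/pending bits clear */
}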