diff --git a/make/RunTests.gmk b/make/RunTests.gmk
index 45494b859b798..bfd55394b2fc2 100644
--- a/make/RunTests.gmk
+++ b/make/RunTests.gmk
@@ -853,11 +853,7 @@ define SetupRunJtregTestBody
   endif
 
   ifneq ($$(findstring -XX:+UseZGC, $$(JTREG_ALL_OPTIONS)), )
-    ifneq ($$(findstring -XX:-ZGenerational, $$(JTREG_ALL_OPTIONS)), )
-      JTREG_AUTO_PROBLEM_LISTS += ProblemList-zgc.txt
-    else
-      JTREG_AUTO_PROBLEM_LISTS += ProblemList-generational-zgc.txt
-    endif
+    JTREG_AUTO_PROBLEM_LISTS += ProblemList-zgc.txt
   endif
 
   ifneq ($$(JTREG_EXTRA_PROBLEM_LISTS), )
diff --git a/make/hotspot/gensrc/GensrcAdlc.gmk b/make/hotspot/gensrc/GensrcAdlc.gmk
index ddb2c3e33e513..ce3f268402672 100644
--- a/make/hotspot/gensrc/GensrcAdlc.gmk
+++ b/make/hotspot/gensrc/GensrcAdlc.gmk
@@ -193,8 +193,6 @@ ifeq ($(call check-jvm-feature, compiler2), true)
 
   ifeq ($(call check-jvm-feature, zgc), true)
     AD_SRC_FILES += $(call uniq, $(wildcard $(foreach d, $(AD_SRC_ROOTS), \
-        $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/x/x_$(HOTSPOT_TARGET_CPU).ad \
-        $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/x/x_$(HOTSPOT_TARGET_CPU_ARCH).ad \
         $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/z/z_$(HOTSPOT_TARGET_CPU).ad \
         $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/z/z_$(HOTSPOT_TARGET_CPU_ARCH).ad \
       )))
diff --git a/make/hotspot/lib/JvmFeatures.gmk b/make/hotspot/lib/JvmFeatures.gmk
index c4c030810fc44..b94031515f79e 100644
--- a/make/hotspot/lib/JvmFeatures.gmk
+++ b/make/hotspot/lib/JvmFeatures.gmk
@@ -150,7 +150,6 @@ endif
 ifneq ($(call check-jvm-feature, zgc), true)
   JVM_CFLAGS_FEATURES += -DINCLUDE_ZGC=0
   JVM_EXCLUDE_PATTERNS += gc/z
-  JVM_EXCLUDE_PATTERNS += gc/x
 endif
 
 ifneq ($(call check-jvm-feature, shenandoahgc), true)
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
index ebd8302715100..3d1be91e9b232 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
@@ -990,10 +990,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
       __ decode_heap_oop(dest->as_register());
     }
 
-    if (!(UseZGC && !ZGenerational)) {
-      // Load barrier has not yet been applied, so ZGC can't verify the oop here
-      __ verify_oop(dest->as_register());
-    }
+    __ verify_oop(dest->as_register());
   }
 }
 
diff --git a/src/hotspot/cpu/aarch64/gc/x/xBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/x/xBarrierSetAssembler_aarch64.cpp
deleted file mode 100644
index 5c891e8c170fb..0000000000000
--- a/src/hotspot/cpu/aarch64/gc/x/xBarrierSetAssembler_aarch64.cpp
+++ /dev/null
@@ -1,462 +0,0 @@
-/*
- * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "asm/macroAssembler.inline.hpp" -#include "code/codeBlob.hpp" -#include "code/vmreg.inline.hpp" -#include "gc/x/xBarrier.inline.hpp" -#include "gc/x/xBarrierSet.hpp" -#include "gc/x/xBarrierSetAssembler.hpp" -#include "gc/x/xBarrierSetRuntime.hpp" -#include "gc/x/xThreadLocalData.hpp" -#include "memory/resourceArea.hpp" -#include "runtime/sharedRuntime.hpp" -#include "utilities/macros.hpp" -#ifdef COMPILER1 -#include "c1/c1_LIRAssembler.hpp" -#include "c1/c1_MacroAssembler.hpp" -#include "gc/x/c1/xBarrierSetC1.hpp" -#endif // COMPILER1 -#ifdef COMPILER2 -#include "gc/x/c2/xBarrierSetC2.hpp" -#endif // COMPILER2 - -#ifdef PRODUCT -#define BLOCK_COMMENT(str) /* nothing */ -#else -#define BLOCK_COMMENT(str) __ block_comment(str) -#endif - -#undef __ -#define __ masm-> - -void XBarrierSetAssembler::load_at(MacroAssembler* masm, - DecoratorSet decorators, - BasicType type, - Register dst, - Address src, - Register tmp1, - Register tmp2) { - if (!XBarrierSet::barrier_needed(decorators, type)) { - // Barrier not needed - BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2); - return; - } - - assert_different_registers(rscratch1, rscratch2, src.base()); - assert_different_registers(rscratch1, rscratch2, dst); - - Label done; - - // Load bad mask into scratch register. - __ ldr(rscratch1, address_bad_mask_from_thread(rthread)); - __ lea(rscratch2, src); - __ ldr(dst, src); - - // Test reference against bad mask. If mask bad, then we need to fix it up. - __ tst(dst, rscratch1); - __ br(Assembler::EQ, done); - - __ enter(/*strip_ret_addr*/true); - - __ push_call_clobbered_registers_except(RegSet::of(dst)); - - if (c_rarg0 != dst) { - __ mov(c_rarg0, dst); - } - __ mov(c_rarg1, rscratch2); - - __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2); - - // Make sure dst has the return value. - if (dst != r0) { - __ mov(dst, r0); - } - - __ pop_call_clobbered_registers_except(RegSet::of(dst)); - __ leave(); - - __ bind(done); -} - -#ifdef ASSERT - -void XBarrierSetAssembler::store_at(MacroAssembler* masm, - DecoratorSet decorators, - BasicType type, - Address dst, - Register val, - Register tmp1, - Register tmp2, - Register tmp3) { - // Verify value - if (is_reference_type(type)) { - // Note that src could be noreg, which means we - // are storing null and can skip verification. - if (val != noreg) { - Label done; - - // tmp1, tmp2 and tmp3 are often set to noreg. 
- RegSet savedRegs = RegSet::of(rscratch1); - __ push(savedRegs, sp); - - __ ldr(rscratch1, address_bad_mask_from_thread(rthread)); - __ tst(val, rscratch1); - __ br(Assembler::EQ, done); - __ stop("Verify oop store failed"); - __ should_not_reach_here(); - __ bind(done); - __ pop(savedRegs, sp); - } - } - - // Store value - BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, noreg); -} - -#endif // ASSERT - -void XBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, - DecoratorSet decorators, - bool is_oop, - Register src, - Register dst, - Register count, - RegSet saved_regs) { - if (!is_oop) { - // Barrier not needed - return; - } - - BLOCK_COMMENT("XBarrierSetAssembler::arraycopy_prologue {"); - - assert_different_registers(src, count, rscratch1); - - __ push(saved_regs, sp); - - if (count == c_rarg0) { - if (src == c_rarg1) { - // exactly backwards!! - __ mov(rscratch1, c_rarg0); - __ mov(c_rarg0, c_rarg1); - __ mov(c_rarg1, rscratch1); - } else { - __ mov(c_rarg1, count); - __ mov(c_rarg0, src); - } - } else { - __ mov(c_rarg0, src); - __ mov(c_rarg1, count); - } - - __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_array_addr(), 2); - - __ pop(saved_regs, sp); - - BLOCK_COMMENT("} XBarrierSetAssembler::arraycopy_prologue"); -} - -void XBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, - Register jni_env, - Register robj, - Register tmp, - Label& slowpath) { - BLOCK_COMMENT("XBarrierSetAssembler::try_resolve_jobject_in_native {"); - - assert_different_registers(jni_env, robj, tmp); - - // Resolve jobject - BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, robj, tmp, slowpath); - - // The Address offset is too large to direct load - -784. Our range is +127, -128. - __ mov(tmp, (int64_t)(in_bytes(XThreadLocalData::address_bad_mask_offset()) - - in_bytes(JavaThread::jni_environment_offset()))); - - // Load address bad mask - __ add(tmp, jni_env, tmp); - __ ldr(tmp, Address(tmp)); - - // Check address bad mask - __ tst(robj, tmp); - __ br(Assembler::NE, slowpath); - - BLOCK_COMMENT("} XBarrierSetAssembler::try_resolve_jobject_in_native"); -} - -#ifdef COMPILER1 - -#undef __ -#define __ ce->masm()-> - -void XBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce, - LIR_Opr ref) const { - assert_different_registers(rscratch1, rthread, ref->as_register()); - - __ ldr(rscratch1, address_bad_mask_from_thread(rthread)); - __ tst(ref->as_register(), rscratch1); -} - -void XBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce, - XLoadBarrierStubC1* stub) const { - // Stub entry - __ bind(*stub->entry()); - - Register ref = stub->ref()->as_register(); - Register ref_addr = noreg; - Register tmp = noreg; - - if (stub->tmp()->is_valid()) { - // Load address into tmp register - ce->leal(stub->ref_addr(), stub->tmp()); - ref_addr = tmp = stub->tmp()->as_pointer_register(); - } else { - // Address already in register - ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register(); - } - - assert_different_registers(ref, ref_addr, noreg); - - // Save r0 unless it is the result or tmp register - // Set up SP to accommodate parameters and maybe r0.. 
- if (ref != r0 && tmp != r0) { - __ sub(sp, sp, 32); - __ str(r0, Address(sp, 16)); - } else { - __ sub(sp, sp, 16); - } - - // Setup arguments and call runtime stub - ce->store_parameter(ref_addr, 1); - ce->store_parameter(ref, 0); - - __ far_call(stub->runtime_stub()); - - // Verify result - __ verify_oop(r0); - - // Move result into place - if (ref != r0) { - __ mov(ref, r0); - } - - // Restore r0 unless it is the result or tmp register - if (ref != r0 && tmp != r0) { - __ ldr(r0, Address(sp, 16)); - __ add(sp, sp, 32); - } else { - __ add(sp, sp, 16); - } - - // Stub exit - __ b(*stub->continuation()); -} - -#undef __ -#define __ sasm-> - -void XBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, - DecoratorSet decorators) const { - __ prologue("zgc_load_barrier stub", false); - - __ push_call_clobbered_registers_except(RegSet::of(r0)); - - // Setup arguments - __ load_parameter(0, c_rarg0); - __ load_parameter(1, c_rarg1); - - __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2); - - __ pop_call_clobbered_registers_except(RegSet::of(r0)); - - __ epilogue(); -} -#endif // COMPILER1 - -#ifdef COMPILER2 - -OptoReg::Name XBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) { - if (!OptoReg::is_reg(opto_reg)) { - return OptoReg::Bad; - } - - const VMReg vm_reg = OptoReg::as_VMReg(opto_reg); - if (vm_reg->is_FloatRegister()) { - return opto_reg & ~1; - } - - return opto_reg; -} - -#undef __ -#define __ _masm-> - -class XSaveLiveRegisters { -private: - MacroAssembler* const _masm; - RegSet _gp_regs; - FloatRegSet _fp_regs; - PRegSet _p_regs; - -public: - void initialize(XLoadBarrierStubC2* stub) { - // Record registers that needs to be saved/restored - RegMaskIterator rmi(stub->live()); - while (rmi.has_next()) { - const OptoReg::Name opto_reg = rmi.next(); - if (OptoReg::is_reg(opto_reg)) { - const VMReg vm_reg = OptoReg::as_VMReg(opto_reg); - if (vm_reg->is_Register()) { - _gp_regs += RegSet::of(vm_reg->as_Register()); - } else if (vm_reg->is_FloatRegister()) { - _fp_regs += FloatRegSet::of(vm_reg->as_FloatRegister()); - } else if (vm_reg->is_PRegister()) { - _p_regs += PRegSet::of(vm_reg->as_PRegister()); - } else { - fatal("Unknown register type"); - } - } - } - - // Remove C-ABI SOE registers, scratch regs and _ref register that will be updated - _gp_regs -= RegSet::range(r19, r30) + RegSet::of(r8, r9, stub->ref()); - } - - XSaveLiveRegisters(MacroAssembler* masm, XLoadBarrierStubC2* stub) : - _masm(masm), - _gp_regs(), - _fp_regs(), - _p_regs() { - - // Figure out what registers to save/restore - initialize(stub); - - // Save registers - __ push(_gp_regs, sp); - __ push_fp(_fp_regs, sp); - __ push_p(_p_regs, sp); - } - - ~XSaveLiveRegisters() { - // Restore registers - __ pop_p(_p_regs, sp); - __ pop_fp(_fp_regs, sp); - - // External runtime call may clobber ptrue reg - __ reinitialize_ptrue(); - - __ pop(_gp_regs, sp); - } -}; - -#undef __ -#define __ _masm-> - -class XSetupArguments { -private: - MacroAssembler* const _masm; - const Register _ref; - const Address _ref_addr; - -public: - XSetupArguments(MacroAssembler* masm, XLoadBarrierStubC2* stub) : - _masm(masm), - _ref(stub->ref()), - _ref_addr(stub->ref_addr()) { - - // Setup arguments - if (_ref_addr.base() == noreg) { - // No self healing - if (_ref != c_rarg0) { - __ mov(c_rarg0, _ref); - } - __ mov(c_rarg1, 0); - } else { - // Self healing - if (_ref == c_rarg0) { - // _ref is already at correct place - __ lea(c_rarg1, _ref_addr); 
- } else if (_ref != c_rarg1) { - // _ref is in wrong place, but not in c_rarg1, so fix it first - __ lea(c_rarg1, _ref_addr); - __ mov(c_rarg0, _ref); - } else if (_ref_addr.base() != c_rarg0 && _ref_addr.index() != c_rarg0) { - assert(_ref == c_rarg1, "Mov ref first, vacating c_rarg0"); - __ mov(c_rarg0, _ref); - __ lea(c_rarg1, _ref_addr); - } else { - assert(_ref == c_rarg1, "Need to vacate c_rarg1 and _ref_addr is using c_rarg0"); - if (_ref_addr.base() == c_rarg0 || _ref_addr.index() == c_rarg0) { - __ mov(rscratch2, c_rarg1); - __ lea(c_rarg1, _ref_addr); - __ mov(c_rarg0, rscratch2); - } else { - ShouldNotReachHere(); - } - } - } - } - - ~XSetupArguments() { - // Transfer result - if (_ref != r0) { - __ mov(_ref, r0); - } - } -}; - -#undef __ -#define __ masm-> - -void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const { - BLOCK_COMMENT("XLoadBarrierStubC2"); - - // Stub entry - __ bind(*stub->entry()); - - { - XSaveLiveRegisters save_live_registers(masm, stub); - XSetupArguments setup_arguments(masm, stub); - __ mov(rscratch1, stub->slow_path()); - __ blr(rscratch1); - } - // Stub exit - __ b(*stub->continuation()); -} - -#endif // COMPILER2 - -#undef __ -#define __ masm-> - -void XBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) { - // Check if mask is good. - // verifies that XAddressBadMask & r0 == 0 - __ ldr(tmp2, Address(rthread, XThreadLocalData::address_bad_mask_offset())); - __ andr(tmp1, obj, tmp2); - __ cbnz(tmp1, error); - - BarrierSetAssembler::check_oop(masm, obj, tmp1, tmp2, error); -} - -#undef __ diff --git a/src/hotspot/cpu/aarch64/gc/x/xBarrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/x/xBarrierSetAssembler_aarch64.hpp deleted file mode 100644 index 8c1e9521757b4..0000000000000 --- a/src/hotspot/cpu/aarch64/gc/x/xBarrierSetAssembler_aarch64.hpp +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef CPU_AARCH64_GC_X_XBARRIERSETASSEMBLER_AARCH64_HPP -#define CPU_AARCH64_GC_X_XBARRIERSETASSEMBLER_AARCH64_HPP - -#include "code/vmreg.hpp" -#include "oops/accessDecorators.hpp" -#ifdef COMPILER2 -#include "opto/optoreg.hpp" -#endif // COMPILER2 - -#ifdef COMPILER1 -class LIR_Assembler; -class LIR_Opr; -class StubAssembler; -#endif // COMPILER1 - -#ifdef COMPILER2 -class Node; -#endif // COMPILER2 - -#ifdef COMPILER1 -class XLoadBarrierStubC1; -#endif // COMPILER1 - -#ifdef COMPILER2 -class XLoadBarrierStubC2; -#endif // COMPILER2 - -class XBarrierSetAssembler : public XBarrierSetAssemblerBase { -public: - virtual void load_at(MacroAssembler* masm, - DecoratorSet decorators, - BasicType type, - Register dst, - Address src, - Register tmp1, - Register tmp2); - -#ifdef ASSERT - virtual void store_at(MacroAssembler* masm, - DecoratorSet decorators, - BasicType type, - Address dst, - Register val, - Register tmp1, - Register tmp2, - Register tmp3); -#endif // ASSERT - - virtual void arraycopy_prologue(MacroAssembler* masm, - DecoratorSet decorators, - bool is_oop, - Register src, - Register dst, - Register count, - RegSet saved_regs); - - virtual void try_resolve_jobject_in_native(MacroAssembler* masm, - Register jni_env, - Register robj, - Register tmp, - Label& slowpath); - - virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; } - -#ifdef COMPILER1 - void generate_c1_load_barrier_test(LIR_Assembler* ce, - LIR_Opr ref) const; - - void generate_c1_load_barrier_stub(LIR_Assembler* ce, - XLoadBarrierStubC1* stub) const; - - void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, - DecoratorSet decorators) const; -#endif // COMPILER1 - -#ifdef COMPILER2 - OptoReg::Name refine_register(const Node* node, - OptoReg::Name opto_reg); - - void generate_c2_load_barrier_stub(MacroAssembler* masm, - XLoadBarrierStubC2* stub) const; -#endif // COMPILER2 - - void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error); -}; - -#endif // CPU_AARCH64_GC_X_XBARRIERSETASSEMBLER_AARCH64_HPP diff --git a/src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.cpp deleted file mode 100644 index a9c53da3d0186..0000000000000 --- a/src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.cpp +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/shared/gcLogPrecious.hpp" -#include "gc/shared/gc_globals.hpp" -#include "gc/x/xGlobals.hpp" -#include "runtime/globals.hpp" -#include "runtime/os.hpp" -#include "utilities/globalDefinitions.hpp" -#include "utilities/powerOfTwo.hpp" - -#ifdef LINUX -#include -#endif // LINUX - -// -// The heap can have three different layouts, depending on the max heap size. -// -// Address Space & Pointer Layout 1 -// -------------------------------- -// -// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB) -// . . -// . . -// . . -// +--------------------------------+ 0x0000014000000000 (20TB) -// | Remapped View | -// +--------------------------------+ 0x0000010000000000 (16TB) -// . . -// +--------------------------------+ 0x00000c0000000000 (12TB) -// | Marked1 View | -// +--------------------------------+ 0x0000080000000000 (8TB) -// | Marked0 View | -// +--------------------------------+ 0x0000040000000000 (4TB) -// . . -// +--------------------------------+ 0x0000000000000000 -// -// 6 4 4 4 4 -// 3 6 5 2 1 0 -// +--------------------+----+-----------------------------------------------+ -// |00000000 00000000 00|1111|11 11111111 11111111 11111111 11111111 11111111| -// +--------------------+----+-----------------------------------------------+ -// | | | -// | | * 41-0 Object Offset (42-bits, 4TB address space) -// | | -// | * 45-42 Metadata Bits (4-bits) 0001 = Marked0 (Address view 4-8TB) -// | 0010 = Marked1 (Address view 8-12TB) -// | 0100 = Remapped (Address view 16-20TB) -// | 1000 = Finalizable (Address view N/A) -// | -// * 63-46 Fixed (18-bits, always zero) -// -// -// Address Space & Pointer Layout 2 -// -------------------------------- -// -// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB) -// . . -// . . -// . . -// +--------------------------------+ 0x0000280000000000 (40TB) -// | Remapped View | -// +--------------------------------+ 0x0000200000000000 (32TB) -// . . -// +--------------------------------+ 0x0000180000000000 (24TB) -// | Marked1 View | -// +--------------------------------+ 0x0000100000000000 (16TB) -// | Marked0 View | -// +--------------------------------+ 0x0000080000000000 (8TB) -// . . -// +--------------------------------+ 0x0000000000000000 -// -// 6 4 4 4 4 -// 3 7 6 3 2 0 -// +------------------+-----+------------------------------------------------+ -// |00000000 00000000 0|1111|111 11111111 11111111 11111111 11111111 11111111| -// +-------------------+----+------------------------------------------------+ -// | | | -// | | * 42-0 Object Offset (43-bits, 8TB address space) -// | | -// | * 46-43 Metadata Bits (4-bits) 0001 = Marked0 (Address view 8-16TB) -// | 0010 = Marked1 (Address view 16-24TB) -// | 0100 = Remapped (Address view 32-40TB) -// | 1000 = Finalizable (Address view N/A) -// | -// * 63-47 Fixed (17-bits, always zero) -// -// -// Address Space & Pointer Layout 3 -// -------------------------------- -// -// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB) -// . . -// . . -// . . -// +--------------------------------+ 0x0000500000000000 (80TB) -// | Remapped View | -// +--------------------------------+ 0x0000400000000000 (64TB) -// . . -// +--------------------------------+ 0x0000300000000000 (48TB) -// | Marked1 View | -// +--------------------------------+ 0x0000200000000000 (32TB) -// | Marked0 View | -// +--------------------------------+ 0x0000100000000000 (16TB) -// . . 
-// +--------------------------------+ 0x0000000000000000 -// -// 6 4 4 4 4 -// 3 8 7 4 3 0 -// +------------------+----+-------------------------------------------------+ -// |00000000 00000000 |1111|1111 11111111 11111111 11111111 11111111 11111111| -// +------------------+----+-------------------------------------------------+ -// | | | -// | | * 43-0 Object Offset (44-bits, 16TB address space) -// | | -// | * 47-44 Metadata Bits (4-bits) 0001 = Marked0 (Address view 16-32TB) -// | 0010 = Marked1 (Address view 32-48TB) -// | 0100 = Remapped (Address view 64-80TB) -// | 1000 = Finalizable (Address view N/A) -// | -// * 63-48 Fixed (16-bits, always zero) -// - -// Default value if probing is not implemented for a certain platform -// Max address bit is restricted by implicit assumptions in the code, for instance -// the bit layout of XForwardingEntry or Partial array entry (see XMarkStackEntry) in mark stack -static const size_t DEFAULT_MAX_ADDRESS_BIT = 46; -// Minimum value returned, if probing fails -static const size_t MINIMUM_MAX_ADDRESS_BIT = 36; - -static size_t probe_valid_max_address_bit() { -#ifdef LINUX - size_t max_address_bit = 0; - const size_t page_size = os::vm_page_size(); - for (size_t i = DEFAULT_MAX_ADDRESS_BIT; i > MINIMUM_MAX_ADDRESS_BIT; --i) { - const uintptr_t base_addr = ((uintptr_t) 1U) << i; - if (msync((void*)base_addr, page_size, MS_ASYNC) == 0) { - // msync succeeded, the address is valid, and maybe even already mapped. - max_address_bit = i; - break; - } - if (errno != ENOMEM) { - // Some error occurred. This should never happen, but msync - // has some undefined behavior, hence ignore this bit. -#ifdef ASSERT - fatal("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno)); -#else // ASSERT - log_warning_p(gc)("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno)); -#endif // ASSERT - continue; - } - // Since msync failed with ENOMEM, the page might not be mapped. - // Try to map it, to see if the address is valid. 
- void* const result_addr = mmap((void*) base_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0); - if (result_addr != MAP_FAILED) { - munmap(result_addr, page_size); - } - if ((uintptr_t) result_addr == base_addr) { - // address is valid - max_address_bit = i; - break; - } - } - if (max_address_bit == 0) { - // probing failed, allocate a very high page and take that bit as the maximum - const uintptr_t high_addr = ((uintptr_t) 1U) << DEFAULT_MAX_ADDRESS_BIT; - void* const result_addr = mmap((void*) high_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0); - if (result_addr != MAP_FAILED) { - max_address_bit = BitsPerSize_t - count_leading_zeros((size_t) result_addr) - 1; - munmap(result_addr, page_size); - } - } - log_info_p(gc, init)("Probing address space for the highest valid bit: " SIZE_FORMAT, max_address_bit); - return MAX2(max_address_bit, MINIMUM_MAX_ADDRESS_BIT); -#else // LINUX - return DEFAULT_MAX_ADDRESS_BIT; -#endif // LINUX -} - -size_t XPlatformAddressOffsetBits() { - const static size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1; - const size_t max_address_offset_bits = valid_max_address_offset_bits - 3; - const size_t min_address_offset_bits = max_address_offset_bits - 2; - const size_t address_offset = round_up_power_of_2(MaxHeapSize * XVirtualToPhysicalRatio); - const size_t address_offset_bits = log2i_exact(address_offset); - return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits); -} - -size_t XPlatformAddressMetadataShift() { - return XPlatformAddressOffsetBits(); -} diff --git a/src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.hpp deleted file mode 100644 index 870b0d74d5766..0000000000000 --- a/src/hotspot/cpu/aarch64/gc/x/xGlobals_aarch64.hpp +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef CPU_AARCH64_GC_X_XGLOBALS_AARCH64_HPP -#define CPU_AARCH64_GC_X_XGLOBALS_AARCH64_HPP - -const size_t XPlatformHeapViews = 3; -const size_t XPlatformCacheLineSize = 64; - -size_t XPlatformAddressOffsetBits(); -size_t XPlatformAddressMetadataShift(); - -#endif // CPU_AARCH64_GC_X_XGLOBALS_AARCH64_HPP diff --git a/src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad b/src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad deleted file mode 100644 index 6e401724baa82..0000000000000 --- a/src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad +++ /dev/null @@ -1,249 +0,0 @@ -// -// Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved. -// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -// -// This code is free software; you can redistribute it and/or modify it -// under the terms of the GNU General Public License version 2 only, as -// published by the Free Software Foundation. -// -// This code is distributed in the hope that it will be useful, but WITHOUT -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -// version 2 for more details (a copy is included in the LICENSE file that -// accompanied this code). -// -// You should have received a copy of the GNU General Public License version -// 2 along with this work; if not, write to the Free Software Foundation, -// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -// -// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -// or visit www.oracle.com if you need additional information or have any -// questions. -// - -source_hpp %{ - -#include "gc/shared/gc_globals.hpp" -#include "gc/x/c2/xBarrierSetC2.hpp" -#include "gc/x/xThreadLocalData.hpp" - -%} - -source %{ - -static void x_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) { - if (barrier_data == XLoadBarrierElided) { - return; - } - XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data); - __ ldr(tmp, Address(rthread, XThreadLocalData::address_bad_mask_offset())); - __ andr(tmp, tmp, ref); - __ cbnz(tmp, *stub->entry()); - __ bind(*stub->continuation()); -} - -static void x_load_barrier_slow_path(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) { - XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong); - __ b(*stub->entry()); - __ bind(*stub->continuation()); -} - -%} - -// Load Pointer -instruct xLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr) -%{ - match(Set dst (LoadP mem)); - predicate(UseZGC && !ZGenerational && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() != 0)); - effect(TEMP dst, KILL cr); - - ins_cost(4 * INSN_COST); - - format %{ "ldr $dst, $mem" %} - - ins_encode %{ - Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); - if (ref_addr.getMode() == Address::base_plus_offset) { - // Fix up any out-of-range offsets. 
- assert_different_registers(rscratch1, as_Register($mem$$base)); - assert_different_registers(rscratch1, $dst$$Register); - ref_addr = __ legitimize_address(ref_addr, 8, rscratch1); - } - __ ldr($dst$$Register, ref_addr); - x_load_barrier(masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, barrier_data()); - %} - - ins_pipe(iload_reg_mem); -%} - -// Load Pointer Volatile -instruct xLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg cr) -%{ - match(Set dst (LoadP mem)); - predicate(UseZGC && !ZGenerational && needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0); - effect(TEMP dst, KILL cr); - - ins_cost(VOLATILE_REF_COST); - - format %{ "ldar $dst, $mem\t" %} - - ins_encode %{ - __ ldar($dst$$Register, $mem$$Register); - x_load_barrier(masm, this, Address($mem$$Register), $dst$$Register, rscratch2 /* tmp */, barrier_data()); - %} - - ins_pipe(pipe_serial); -%} - -instruct xCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ - match(Set res (CompareAndSwapP mem (Binary oldval newval))); - match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); - predicate(UseZGC && !ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong); - effect(KILL cr, TEMP_DEF res); - - ins_cost(2 * VOLATILE_REF_COST); - - format %{ "cmpxchg $mem, $oldval, $newval\n\t" - "cset $res, EQ" %} - - ins_encode %{ - guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, - false /* acquire */, true /* release */, false /* weak */, rscratch2); - __ cset($res$$Register, Assembler::EQ); - if (barrier_data() != XLoadBarrierElided) { - Label good; - __ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset())); - __ andr(rscratch1, rscratch1, rscratch2); - __ cbz(rscratch1, good); - x_load_barrier_slow_path(masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, - false /* acquire */, true /* release */, false /* weak */, rscratch2); - __ cset($res$$Register, Assembler::EQ); - __ bind(good); - } - %} - - ins_pipe(pipe_slow); -%} - -instruct xCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ - match(Set res (CompareAndSwapP mem (Binary oldval newval))); - match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); - predicate(UseZGC && !ZGenerational && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == XLoadBarrierStrong)); - effect(KILL cr, TEMP_DEF res); - - ins_cost(2 * VOLATILE_REF_COST); - - format %{ "cmpxchg $mem, $oldval, $newval\n\t" - "cset $res, EQ" %} - - ins_encode %{ - guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, - true /* acquire */, true /* release */, false /* weak */, rscratch2); - __ cset($res$$Register, Assembler::EQ); - if (barrier_data() != XLoadBarrierElided) { - Label good; - __ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset())); - __ andr(rscratch1, rscratch1, rscratch2); - __ cbz(rscratch1, good); - x_load_barrier_slow_path(masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */ ); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, - true /* acquire */, true /* release */, false /* 
weak */, rscratch2); - __ cset($res$$Register, Assembler::EQ); - __ bind(good); - } - %} - - ins_pipe(pipe_slow); -%} - -instruct xCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ - match(Set res (CompareAndExchangeP mem (Binary oldval newval))); - predicate(UseZGC && !ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong); - effect(TEMP_DEF res, KILL cr); - - ins_cost(2 * VOLATILE_REF_COST); - - format %{ "cmpxchg $res = $mem, $oldval, $newval" %} - - ins_encode %{ - guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, - false /* acquire */, true /* release */, false /* weak */, $res$$Register); - if (barrier_data() != XLoadBarrierElided) { - Label good; - __ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset())); - __ andr(rscratch1, rscratch1, $res$$Register); - __ cbz(rscratch1, good); - x_load_barrier_slow_path(masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, - false /* acquire */, true /* release */, false /* weak */, $res$$Register); - __ bind(good); - } - %} - - ins_pipe(pipe_slow); -%} - -instruct xCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ - match(Set res (CompareAndExchangeP mem (Binary oldval newval))); - predicate(UseZGC && !ZGenerational && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong); - effect(TEMP_DEF res, KILL cr); - - ins_cost(2 * VOLATILE_REF_COST); - - format %{ "cmpxchg $res = $mem, $oldval, $newval" %} - - ins_encode %{ - guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, - true /* acquire */, true /* release */, false /* weak */, $res$$Register); - if (barrier_data() != XLoadBarrierElided) { - Label good; - __ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset())); - __ andr(rscratch1, rscratch1, $res$$Register); - __ cbz(rscratch1, good); - x_load_barrier_slow_path(masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, - true /* acquire */, true /* release */, false /* weak */, $res$$Register); - __ bind(good); - } - %} - - ins_pipe(pipe_slow); -%} - -instruct xGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{ - match(Set prev (GetAndSetP mem newv)); - predicate(UseZGC && !ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0); - effect(TEMP_DEF prev, KILL cr); - - ins_cost(2 * VOLATILE_REF_COST); - - format %{ "atomic_xchg $prev, $newv, [$mem]" %} - - ins_encode %{ - __ atomic_xchg($prev$$Register, $newv$$Register, $mem$$Register); - x_load_barrier(masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data()); - %} - - ins_pipe(pipe_serial); -%} - -instruct xGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{ - match(Set prev (GetAndSetP mem newv)); - predicate(UseZGC && !ZGenerational && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() != 0)); - effect(TEMP_DEF prev, KILL cr); - - ins_cost(VOLATILE_REF_COST); - - format %{ "atomic_xchg_acq $prev, $newv, 
[$mem]" %} - - ins_encode %{ - __ atomic_xchgal($prev$$Register, $newv$$Register, $mem$$Register); - x_load_barrier(masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data()); - %} - ins_pipe(pipe_serial); -%} diff --git a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad index 088f92a01573e..47abaae3d5b77 100644 --- a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad +++ b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad @@ -104,7 +104,7 @@ static void z_store_barrier(MacroAssembler* masm, const MachNode* node, Address instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr) %{ match(Set dst (LoadP mem)); - predicate(UseZGC && ZGenerational && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0); + predicate(UseZGC && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0); effect(TEMP dst, KILL cr); ins_cost(4 * INSN_COST); @@ -130,7 +130,7 @@ instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr) instruct zLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg cr) %{ match(Set dst (LoadP mem)); - predicate(UseZGC && ZGenerational && needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0); + predicate(UseZGC && needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0); effect(TEMP dst, KILL cr); ins_cost(VOLATILE_REF_COST); @@ -149,7 +149,7 @@ instruct zLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg // Store Pointer instruct zStoreP(memory mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr) %{ - predicate(UseZGC && ZGenerational && !needs_releasing_store(n) && n->as_Store()->barrier_data() != 0); + predicate(UseZGC && !needs_releasing_store(n) && n->as_Store()->barrier_data() != 0); match(Set mem (StoreP mem src)); effect(TEMP tmp, KILL cr); @@ -166,7 +166,7 @@ instruct zStoreP(memory mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr) // Store Pointer Volatile instruct zStorePVolatile(indirect mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr) %{ - predicate(UseZGC && ZGenerational && needs_releasing_store(n) && n->as_Store()->barrier_data() != 0); + predicate(UseZGC && needs_releasing_store(n) && n->as_Store()->barrier_data() != 0); match(Set mem (StoreP mem src)); effect(TEMP tmp, KILL cr); @@ -183,7 +183,7 @@ instruct zStorePVolatile(indirect mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr) instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{ match(Set res (CompareAndSwapP mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); - predicate(UseZGC && ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0); + predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0); effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP res, KILL cr); ins_cost(2 * VOLATILE_REF_COST); @@ -207,7 +207,7 @@ instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newva instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{ match(Set res (CompareAndSwapP mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); - predicate(UseZGC && ZGenerational && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0); + predicate(UseZGC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0); effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP res, 
KILL cr); ins_cost(2 * VOLATILE_REF_COST); @@ -231,7 +231,7 @@ instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{ match(Set res (CompareAndExchangeP mem (Binary oldval newval))); - predicate(UseZGC && ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0); + predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0); effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP res, KILL cr); ins_cost(2 * VOLATILE_REF_COST); @@ -254,7 +254,7 @@ instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP n instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{ match(Set res (CompareAndExchangeP mem (Binary oldval newval))); - predicate(UseZGC && ZGenerational && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0); + predicate(UseZGC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0); effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP res, KILL cr); ins_cost(2 * VOLATILE_REF_COST); @@ -277,7 +277,7 @@ instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iReg instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{ match(Set prev (GetAndSetP mem newv)); - predicate(UseZGC && ZGenerational && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0); + predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0); effect(TEMP prev, KILL cr); ins_cost(2 * VOLATILE_REF_COST); @@ -295,7 +295,7 @@ instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{ instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{ match(Set prev (GetAndSetP mem newv)); - predicate(UseZGC && ZGenerational && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0); + predicate(UseZGC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0); effect(TEMP prev, KILL cr); ins_cost(2 * VOLATILE_REF_COST); diff --git a/src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.cpp deleted file mode 100644 index ca826e47352b5..0000000000000 --- a/src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.cpp +++ /dev/null @@ -1,585 +0,0 @@ -/* - * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2021, 2024 SAP SE. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "asm/register.hpp" -#include "asm/macroAssembler.inline.hpp" -#include "code/codeBlob.hpp" -#include "code/vmreg.inline.hpp" -#include "gc/x/xBarrier.inline.hpp" -#include "gc/x/xBarrierSet.hpp" -#include "gc/x/xBarrierSetAssembler.hpp" -#include "gc/x/xBarrierSetRuntime.hpp" -#include "gc/x/xThreadLocalData.hpp" -#include "memory/resourceArea.hpp" -#include "register_ppc.hpp" -#include "runtime/sharedRuntime.hpp" -#include "utilities/globalDefinitions.hpp" -#include "utilities/macros.hpp" -#ifdef COMPILER1 -#include "c1/c1_LIRAssembler.hpp" -#include "c1/c1_MacroAssembler.hpp" -#include "gc/x/c1/xBarrierSetC1.hpp" -#endif // COMPILER1 -#ifdef COMPILER2 -#include "gc/x/c2/xBarrierSetC2.hpp" -#endif // COMPILER2 - -#undef __ -#define __ masm-> - -void XBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, - Register base, RegisterOrConstant ind_or_offs, Register dst, - Register tmp1, Register tmp2, - MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null) { - __ block_comment("load_at (zgc) {"); - - // Check whether a special gc barrier is required for this particular load - // (e.g. whether it's a reference load or not) - if (!XBarrierSet::barrier_needed(decorators, type)) { - BarrierSetAssembler::load_at(masm, decorators, type, base, ind_or_offs, dst, - tmp1, tmp2, preservation_level, L_handle_null); - return; - } - - if (ind_or_offs.is_register()) { - assert_different_registers(base, ind_or_offs.as_register(), tmp1, tmp2, R0, noreg); - assert_different_registers(dst, ind_or_offs.as_register(), tmp1, tmp2, R0, noreg); - } else { - assert_different_registers(base, tmp1, tmp2, R0, noreg); - assert_different_registers(dst, tmp1, tmp2, R0, noreg); - } - - /* ==== Load the pointer using the standard implementation for the actual heap access - and the decompression of compressed pointers ==== */ - // Result of 'load_at' (standard implementation) will be written back to 'dst'. - // As 'base' is required for the C-call, it must be reserved in case of a register clash. - Register saved_base = base; - if (base == dst) { - __ mr(tmp2, base); - saved_base = tmp2; - } - - BarrierSetAssembler::load_at(masm, decorators, type, base, ind_or_offs, dst, - tmp1, noreg, preservation_level, L_handle_null); - - /* ==== Check whether pointer is dirty ==== */ - Label skip_barrier; - - // Load bad mask into scratch register. - __ ld(tmp1, (intptr_t) XThreadLocalData::address_bad_mask_offset(), R16_thread); - - // The color bits of the to-be-tested pointer do not have to be equivalent to the 'bad_mask' testing bits. - // A pointer is classified as dirty if any of the color bits that also match the bad mask is set. - // Conversely, it follows that the logical AND of the bad mask and the pointer must be zero - // if the pointer is not dirty. - // Only dirty pointers must be processed by this barrier, so we can skip it in case the latter condition holds true. 
- __ and_(tmp1, tmp1, dst); - __ beq(CCR0, skip_barrier); - - /* ==== Invoke barrier ==== */ - int nbytes_save = 0; - - const bool needs_frame = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR; - const bool preserve_gp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS; - const bool preserve_fp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_FP_REGS; - - const bool preserve_R3 = dst != R3_ARG1; - - if (needs_frame) { - if (preserve_gp_registers) { - nbytes_save = (preserve_fp_registers - ? MacroAssembler::num_volatile_gp_regs + MacroAssembler::num_volatile_fp_regs - : MacroAssembler::num_volatile_gp_regs) * BytesPerWord; - nbytes_save -= preserve_R3 ? 0 : BytesPerWord; - __ save_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers, preserve_R3); - } - - __ save_LR(tmp1); - __ push_frame_reg_args(nbytes_save, tmp1); - } - - // Setup arguments - if (saved_base != R3_ARG1) { - __ mr_if_needed(R3_ARG1, dst); - __ add(R4_ARG2, ind_or_offs, saved_base); - } else if (dst != R4_ARG2) { - __ add(R4_ARG2, ind_or_offs, saved_base); - __ mr(R3_ARG1, dst); - } else { - __ add(R0, ind_or_offs, saved_base); - __ mr(R3_ARG1, dst); - __ mr(R4_ARG2, R0); - } - - __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators)); - - Register result = R3_RET; - if (needs_frame) { - __ pop_frame(); - __ restore_LR(tmp1); - - if (preserve_R3) { - __ mr(R0, R3_RET); - result = R0; - } - - if (preserve_gp_registers) { - __ restore_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers, preserve_R3); - } - } - __ mr_if_needed(dst, result); - - __ bind(skip_barrier); - __ block_comment("} load_at (zgc)"); -} - -#ifdef ASSERT -// The Z store barrier only verifies the pointers it is operating on and is thus a sole debugging measure. -void XBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, - Register base, RegisterOrConstant ind_or_offs, Register val, - Register tmp1, Register tmp2, Register tmp3, - MacroAssembler::PreservationLevel preservation_level) { - __ block_comment("store_at (zgc) {"); - - // If the 'val' register is 'noreg', the to-be-stored value is a null pointer. - if (is_reference_type(type) && val != noreg) { - __ ld(tmp1, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread); - __ and_(tmp1, tmp1, val); - __ asm_assert_eq("Detected dirty pointer on the heap in Z store barrier"); - } - - // Store value - BarrierSetAssembler::store_at(masm, decorators, type, base, ind_or_offs, val, tmp1, tmp2, tmp3, preservation_level); - - __ block_comment("} store_at (zgc)"); -} -#endif // ASSERT - -void XBarrierSetAssembler::arraycopy_prologue(MacroAssembler *masm, DecoratorSet decorators, BasicType component_type, - Register src, Register dst, Register count, - Register preserve1, Register preserve2) { - __ block_comment("arraycopy_prologue (zgc) {"); - - /* ==== Check whether a special gc barrier is required for this particular load ==== */ - if (!is_reference_type(component_type)) { - return; - } - - Label skip_barrier; - - // Fast path: Array is of length zero - __ cmpdi(CCR0, count, 0); - __ beq(CCR0, skip_barrier); - - /* ==== Ensure register sanity ==== */ - Register tmp_R11 = R11_scratch1; - - assert_different_registers(src, dst, count, tmp_R11, noreg); - if (preserve1 != noreg) { - // Not technically required, but unlikely being intended. 
- assert_different_registers(preserve1, preserve2); - } - - /* ==== Invoke barrier (slowpath) ==== */ - int nbytes_save = 0; - - { - assert(!noreg->is_volatile(), "sanity"); - - if (preserve1->is_volatile()) { - __ std(preserve1, -BytesPerWord * ++nbytes_save, R1_SP); - } - - if (preserve2->is_volatile() && preserve1 != preserve2) { - __ std(preserve2, -BytesPerWord * ++nbytes_save, R1_SP); - } - - __ std(src, -BytesPerWord * ++nbytes_save, R1_SP); - __ std(dst, -BytesPerWord * ++nbytes_save, R1_SP); - __ std(count, -BytesPerWord * ++nbytes_save, R1_SP); - - __ save_LR(tmp_R11); - __ push_frame_reg_args(nbytes_save, tmp_R11); - } - - // XBarrierSetRuntime::load_barrier_on_oop_array_addr(src, count) - if (count == R3_ARG1) { - if (src == R4_ARG2) { - // Arguments are provided in reverse order - __ mr(tmp_R11, count); - __ mr(R3_ARG1, src); - __ mr(R4_ARG2, tmp_R11); - } else { - __ mr(R4_ARG2, count); - __ mr(R3_ARG1, src); - } - } else { - __ mr_if_needed(R3_ARG1, src); - __ mr_if_needed(R4_ARG2, count); - } - - __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_array_addr()); - - __ pop_frame(); - __ restore_LR(tmp_R11); - - { - __ ld(count, -BytesPerWord * nbytes_save--, R1_SP); - __ ld(dst, -BytesPerWord * nbytes_save--, R1_SP); - __ ld(src, -BytesPerWord * nbytes_save--, R1_SP); - - if (preserve2->is_volatile() && preserve1 != preserve2) { - __ ld(preserve2, -BytesPerWord * nbytes_save--, R1_SP); - } - - if (preserve1->is_volatile()) { - __ ld(preserve1, -BytesPerWord * nbytes_save--, R1_SP); - } - } - - __ bind(skip_barrier); - - __ block_comment("} arraycopy_prologue (zgc)"); -} - -void XBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env, - Register obj, Register tmp, Label& slowpath) { - __ block_comment("try_resolve_jobject_in_native (zgc) {"); - - assert_different_registers(jni_env, obj, tmp); - - // Resolve the pointer using the standard implementation for weak tag handling and pointer verification. - BarrierSetAssembler::try_resolve_jobject_in_native(masm, dst, jni_env, obj, tmp, slowpath); - - // Check whether pointer is dirty. - __ ld(tmp, - in_bytes(XThreadLocalData::address_bad_mask_offset() - JavaThread::jni_environment_offset()), - jni_env); - - __ and_(tmp, obj, tmp); - __ bne(CCR0, slowpath); - - __ block_comment("} try_resolve_jobject_in_native (zgc)"); -} - -#undef __ - -#ifdef COMPILER1 -#define __ ce->masm()-> - -// Code emitted by LIR node "LIR_OpXLoadBarrierTest" which in turn is emitted by XBarrierSetC1::load_barrier. -// The actual compare and branch instructions are represented as stand-alone LIR nodes. -void XBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce, - LIR_Opr ref) const { - __ block_comment("load_barrier_test (zgc) {"); - - __ ld(R0, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread); - __ andr(R0, R0, ref->as_pointer_register()); - __ cmpdi(CCR5 /* as mandated by LIR node */, R0, 0); - - __ block_comment("} load_barrier_test (zgc)"); -} - -// Code emitted by code stub "XLoadBarrierStubC1" which in turn is emitted by XBarrierSetC1::load_barrier. -// Invokes the runtime stub which is defined just below. 
-void XBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce, - XLoadBarrierStubC1* stub) const { - __ block_comment("c1_load_barrier_stub (zgc) {"); - - __ bind(*stub->entry()); - - /* ==== Determine relevant data registers and ensure register sanity ==== */ - Register ref = stub->ref()->as_register(); - Register ref_addr = noreg; - - // Determine reference address - if (stub->tmp()->is_valid()) { - // 'tmp' register is given, so address might have an index or a displacement. - ce->leal(stub->ref_addr(), stub->tmp()); - ref_addr = stub->tmp()->as_pointer_register(); - } else { - // 'tmp' register is not given, so address must have neither an index nor a displacement. - // The address' base register is thus usable as-is. - assert(stub->ref_addr()->as_address_ptr()->disp() == 0, "illegal displacement"); - assert(!stub->ref_addr()->as_address_ptr()->index()->is_valid(), "illegal index"); - - ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register(); - } - - assert_different_registers(ref, ref_addr, R0, noreg); - - /* ==== Invoke stub ==== */ - // Pass arguments via stack. The stack pointer will be bumped by the stub. - __ std(ref, (intptr_t) -1 * BytesPerWord, R1_SP); - __ std(ref_addr, (intptr_t) -2 * BytesPerWord, R1_SP); - - __ load_const_optimized(R0, stub->runtime_stub()); - __ call_stub(R0); - - // The runtime stub passes the result via the R0 register, overriding the previously-loaded stub address. - __ mr_if_needed(ref, R0); - __ b(*stub->continuation()); - - __ block_comment("} c1_load_barrier_stub (zgc)"); -} - -#undef __ -#define __ sasm-> - -// Code emitted by runtime code stub which in turn is emitted by XBarrierSetC1::generate_c1_runtime_stubs. -void XBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, - DecoratorSet decorators) const { - __ block_comment("c1_load_barrier_runtime_stub (zgc) {"); - - const int stack_parameters = 2; - const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_parameters) * BytesPerWord; - - __ save_volatile_gprs(R1_SP, -nbytes_save); - __ save_LR(R0); - - // Load arguments back again from the stack. 
- __ ld(R3_ARG1, (intptr_t) -1 * BytesPerWord, R1_SP); // ref - __ ld(R4_ARG2, (intptr_t) -2 * BytesPerWord, R1_SP); // ref_addr - - __ push_frame_reg_args(nbytes_save, R0); - - __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators)); - - __ verify_oop(R3_RET, "Bad pointer after barrier invocation"); - __ mr(R0, R3_RET); - - __ pop_frame(); - __ restore_LR(R3_RET); - __ restore_volatile_gprs(R1_SP, -nbytes_save); - - __ blr(); - - __ block_comment("} c1_load_barrier_runtime_stub (zgc)"); -} - -#undef __ -#endif // COMPILER1 - -#ifdef COMPILER2 - -OptoReg::Name XBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) const { - if (!OptoReg::is_reg(opto_reg)) { - return OptoReg::Bad; - } - - VMReg vm_reg = OptoReg::as_VMReg(opto_reg); - if ((vm_reg->is_Register() || vm_reg ->is_FloatRegister()) && (opto_reg & 1) != 0) { - return OptoReg::Bad; - } - - return opto_reg; -} - -#define __ _masm-> - -class XSaveLiveRegisters { - MacroAssembler* _masm; - RegMask _reg_mask; - Register _result_reg; - int _frame_size; - - public: - XSaveLiveRegisters(MacroAssembler *masm, XLoadBarrierStubC2 *stub) - : _masm(masm), _reg_mask(stub->live()), _result_reg(stub->ref()) { - - const int register_save_size = iterate_over_register_mask(ACTION_COUNT_ONLY) * BytesPerWord; - _frame_size = align_up(register_save_size, frame::alignment_in_bytes) - + frame::native_abi_reg_args_size; - - __ save_LR_CR(R0); - __ push_frame(_frame_size, R0); - - iterate_over_register_mask(ACTION_SAVE, _frame_size); - } - - ~XSaveLiveRegisters() { - iterate_over_register_mask(ACTION_RESTORE, _frame_size); - - __ addi(R1_SP, R1_SP, _frame_size); - __ restore_LR_CR(R0); - } - - private: - enum IterationAction : int { - ACTION_SAVE, - ACTION_RESTORE, - ACTION_COUNT_ONLY - }; - - int iterate_over_register_mask(IterationAction action, int offset = 0) { - int reg_save_index = 0; - RegMaskIterator live_regs_iterator(_reg_mask); - - while(live_regs_iterator.has_next()) { - const OptoReg::Name opto_reg = live_regs_iterator.next(); - - // Filter out stack slots (spilled registers, i.e., stack-allocated registers). - if (!OptoReg::is_reg(opto_reg)) { - continue; - } - - const VMReg vm_reg = OptoReg::as_VMReg(opto_reg); - if (vm_reg->is_Register()) { - Register std_reg = vm_reg->as_Register(); - - // '_result_reg' will hold the end result of the operation. Its content must thus not be preserved. - if (std_reg == _result_reg) { - continue; - } - - if (std_reg->encoding() >= R2->encoding() && std_reg->encoding() <= R12->encoding()) { - reg_save_index++; - - if (action == ACTION_SAVE) { - _masm->std(std_reg, offset - reg_save_index * BytesPerWord, R1_SP); - } else if (action == ACTION_RESTORE) { - _masm->ld(std_reg, offset - reg_save_index * BytesPerWord, R1_SP); - } else { - assert(action == ACTION_COUNT_ONLY, "Sanity"); - } - } - } else if (vm_reg->is_FloatRegister()) { - FloatRegister fp_reg = vm_reg->as_FloatRegister(); - if (fp_reg->encoding() >= F0->encoding() && fp_reg->encoding() <= F13->encoding()) { - reg_save_index++; - - if (action == ACTION_SAVE) { - _masm->stfd(fp_reg, offset - reg_save_index * BytesPerWord, R1_SP); - } else if (action == ACTION_RESTORE) { - _masm->lfd(fp_reg, offset - reg_save_index * BytesPerWord, R1_SP); - } else { - assert(action == ACTION_COUNT_ONLY, "Sanity"); - } - } - } else if (vm_reg->is_ConditionRegister()) { - // NOP. 
Conditions registers are covered by save_LR_CR - } else if (vm_reg->is_VectorSRegister()) { - assert(SuperwordUseVSX, "or should not reach here"); - VectorSRegister vs_reg = vm_reg->as_VectorSRegister(); - if (vs_reg->encoding() >= VSR32->encoding() && vs_reg->encoding() <= VSR51->encoding()) { - reg_save_index += 2; - - Register spill_addr = R0; - if (action == ACTION_SAVE) { - _masm->addi(spill_addr, R1_SP, offset - reg_save_index * BytesPerWord); - _masm->stxvd2x(vs_reg, spill_addr); - } else if (action == ACTION_RESTORE) { - _masm->addi(spill_addr, R1_SP, offset - reg_save_index * BytesPerWord); - _masm->lxvd2x(vs_reg, spill_addr); - } else { - assert(action == ACTION_COUNT_ONLY, "Sanity"); - } - } - } else { - if (vm_reg->is_SpecialRegister()) { - fatal("Special registers are unsupported. Found register %s", vm_reg->name()); - } else { - fatal("Register type is not known"); - } - } - } - - return reg_save_index; - } -}; - -#undef __ -#define __ _masm-> - -class XSetupArguments { - MacroAssembler* const _masm; - const Register _ref; - const Address _ref_addr; - - public: - XSetupArguments(MacroAssembler* masm, XLoadBarrierStubC2* stub) : - _masm(masm), - _ref(stub->ref()), - _ref_addr(stub->ref_addr()) { - - // Desired register/argument configuration: - // _ref: R3_ARG1 - // _ref_addr: R4_ARG2 - - // '_ref_addr' can be unspecified. In that case, the barrier will not heal the reference. - if (_ref_addr.base() == noreg) { - assert_different_registers(_ref, R0, noreg); - - __ mr_if_needed(R3_ARG1, _ref); - __ li(R4_ARG2, 0); - } else { - assert_different_registers(_ref, _ref_addr.base(), R0, noreg); - assert(!_ref_addr.index()->is_valid(), "reference addresses must not contain an index component"); - - if (_ref != R4_ARG2) { - // Calculate address first as the address' base register might clash with R4_ARG2 - __ addi(R4_ARG2, _ref_addr.base(), _ref_addr.disp()); - __ mr_if_needed(R3_ARG1, _ref); - } else if (_ref_addr.base() != R3_ARG1) { - __ mr(R3_ARG1, _ref); - __ addi(R4_ARG2, _ref_addr.base(), _ref_addr.disp()); // Clobbering _ref - } else { - // Arguments are provided in inverse order (i.e. _ref == R4_ARG2, _ref_addr == R3_ARG1) - __ mr(R0, _ref); - __ addi(R4_ARG2, _ref_addr.base(), _ref_addr.disp()); - __ mr(R3_ARG1, R0); - } - } - } -}; - -#undef __ -#define __ masm-> - -void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const { - __ block_comment("generate_c2_load_barrier_stub (zgc) {"); - - __ bind(*stub->entry()); - - Register ref = stub->ref(); - Address ref_addr = stub->ref_addr(); - - assert_different_registers(ref, ref_addr.base()); - - { - XSaveLiveRegisters save_live_registers(masm, stub); - XSetupArguments setup_arguments(masm, stub); - - __ call_VM_leaf(stub->slow_path()); - __ mr_if_needed(ref, R3_RET); - } - - __ b(*stub->continuation()); - - __ block_comment("} generate_c2_load_barrier_stub (zgc)"); -} - -#undef __ -#endif // COMPILER2 diff --git a/src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.hpp b/src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.hpp deleted file mode 100644 index 8dfd4524dfe5a..0000000000000 --- a/src/hotspot/cpu/ppc/gc/x/xBarrierSetAssembler_ppc.hpp +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2021, 2022 SAP SE. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef CPU_PPC_GC_X_XBARRIERSETASSEMBLER_PPC_HPP -#define CPU_PPC_GC_X_XBARRIERSETASSEMBLER_PPC_HPP - -#include "code/vmreg.hpp" -#include "oops/accessDecorators.hpp" -#ifdef COMPILER2 -#include "opto/optoreg.hpp" -#endif // COMPILER2 - -#ifdef COMPILER1 -class LIR_Assembler; -class LIR_Opr; -class StubAssembler; -#endif // COMPILER1 - -#ifdef COMPILER2 -class Node; -#endif // COMPILER2 - -#ifdef COMPILER1 -class XLoadBarrierStubC1; -#endif // COMPILER1 - -#ifdef COMPILER2 -class XLoadBarrierStubC2; -#endif // COMPILER2 - -class XBarrierSetAssembler : public XBarrierSetAssemblerBase { -public: - virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, - Register base, RegisterOrConstant ind_or_offs, Register dst, - Register tmp1, Register tmp2, - MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null = nullptr); - -#ifdef ASSERT - virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, - Register base, RegisterOrConstant ind_or_offs, Register val, - Register tmp1, Register tmp2, Register tmp3, - MacroAssembler::PreservationLevel preservation_level); -#endif // ASSERT - - virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, - Register src, Register dst, Register count, - Register preserve1, Register preserve2); - - virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env, - Register obj, Register tmp, Label& slowpath); - - virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; } - -#ifdef COMPILER1 - void generate_c1_load_barrier_test(LIR_Assembler* ce, - LIR_Opr ref) const; - - void generate_c1_load_barrier_stub(LIR_Assembler* ce, - XLoadBarrierStubC1* stub) const; - - void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, - DecoratorSet decorators) const; -#endif // COMPILER1 - -#ifdef COMPILER2 - OptoReg::Name refine_register(const Node* node, OptoReg::Name opto_reg) const; - - void generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const; -#endif // COMPILER2 -}; - -#endif // CPU_PPC_GC_X_XBARRIERSETASSEMBLER_PPC_HPP diff --git a/src/hotspot/cpu/ppc/gc/x/xGlobals_ppc.cpp b/src/hotspot/cpu/ppc/gc/x/xGlobals_ppc.cpp deleted file mode 100644 index 3218a765fc703..0000000000000 --- a/src/hotspot/cpu/ppc/gc/x/xGlobals_ppc.cpp +++ /dev/null @@ -1,203 +0,0 @@ -/* - * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2021 SAP SE. All rights reserved. 
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/shared/gcLogPrecious.hpp" -#include "gc/shared/gc_globals.hpp" -#include "gc/x/xGlobals.hpp" -#include "runtime/globals.hpp" -#include "runtime/os.hpp" -#include "utilities/globalDefinitions.hpp" -#include "utilities/powerOfTwo.hpp" -#include - -#ifdef LINUX -#include -#endif // LINUX - -// -// The overall memory layouts across different power platforms are similar and only differ with regards to -// the position of the highest addressable bit; the position of the metadata bits and the size of the actual -// addressable heap address space are adjusted accordingly. -// -// The following memory schema shows an exemplary layout in which bit '45' is the highest addressable bit. -// It is assumed that this virtual memory address space layout is predominant on the power platform. -// -// Standard Address Space & Pointer Layout -// --------------------------------------- -// -// +--------------------------------+ 0x00007FFFFFFFFFFF (127 TiB - 1) -// . . -// . . -// . . -// +--------------------------------+ 0x0000140000000000 (20 TiB) -// | Remapped View | -// +--------------------------------+ 0x0000100000000000 (16 TiB) -// . . -// +--------------------------------+ 0x00000c0000000000 (12 TiB) -// | Marked1 View | -// +--------------------------------+ 0x0000080000000000 (8 TiB) -// | Marked0 View | -// +--------------------------------+ 0x0000040000000000 (4 TiB) -// . . -// +--------------------------------+ 0x0000000000000000 -// -// 6 4 4 4 4 -// 3 6 5 2 1 0 -// +--------------------+----+-----------------------------------------------+ -// |00000000 00000000 00|1111|11 11111111 11111111 11111111 11111111 11111111| -// +--------------------+----+-----------------------------------------------+ -// | | | -// | | * 41-0 Object Offset (42-bits, 4TB address space) -// | | -// | * 45-42 Metadata Bits (4-bits) 0001 = Marked0 (Address view 4-8TB) -// | 0010 = Marked1 (Address view 8-12TB) -// | 0100 = Remapped (Address view 16-20TB) -// | 1000 = Finalizable (Address view N/A) -// | -// * 63-46 Fixed (18-bits, always zero) -// - -// Maximum value as per spec (Power ISA v2.07): 2 ^ 60 bytes, i.e. 1 EiB (exbibyte) -static const unsigned int MAXIMUM_MAX_ADDRESS_BIT = 60; - -// Most modern power processors provide an address space with not more than 45 bit addressable bit, -// that is an address space of 32 TiB in size. 
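The layout diagram above places the metadata bits at positions 45-42 and the object offset at bits 41-0. A small standalone sketch of that decomposition, with constants taken from the example layout only (not from the deleted sources):

    #include <cstdint>

    static const unsigned OFFSET_BITS  = 42;                               // bits 41-0
    static const uint64_t OFFSET_MASK  = (uint64_t(1) << OFFSET_BITS) - 1;
    static const uint64_t MARKED0_BIT  = uint64_t(1) << 42;                // view at 4-8 TiB
    static const uint64_t MARKED1_BIT  = uint64_t(1) << 43;                // view at 8-12 TiB
    static const uint64_t REMAPPED_BIT = uint64_t(1) << 44;                // view at 16-20 TiB

    static inline uint64_t object_offset(uint64_t colored) { return colored & OFFSET_MASK; }
    static inline bool     is_remapped(uint64_t colored)   { return (colored & REMAPPED_BIT) != 0; }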
-static const unsigned int DEFAULT_MAX_ADDRESS_BIT = 45; - -// Minimum value returned, if probing fails: 64 GiB -static const unsigned int MINIMUM_MAX_ADDRESS_BIT = 36; - -// Determines the highest addressable bit of the virtual address space (depends on platform) -// by trying to interact with memory in that address range, -// i.e. by syncing existing mappings (msync) or by temporarily mapping the memory area (mmap). -// If one of those operations succeeds, it is proven that the targeted memory area is within the virtual address space. -// -// To reduce the number of required system calls to a bare minimum, the DEFAULT_MAX_ADDRESS_BIT is intentionally set -// lower than what the ABI would theoretically permit. -// Such an avoidance strategy, however, might impose unnecessary limits on processors that exceed this limit. -// If DEFAULT_MAX_ADDRESS_BIT is addressable, the next higher bit will be tested as well to ensure that -// the made assumption does not artificially restrict the memory availability. -static unsigned int probe_valid_max_address_bit(size_t init_bit, size_t min_bit) { - assert(init_bit >= min_bit, "Sanity"); - assert(init_bit <= MAXIMUM_MAX_ADDRESS_BIT, "Test bit is outside the assumed address space range"); - -#ifdef LINUX - unsigned int max_valid_address_bit = 0; - void* last_allocatable_address = nullptr; - - const size_t page_size = os::vm_page_size(); - - for (size_t i = init_bit; i >= min_bit; --i) { - void* base_addr = (void*) (((unsigned long) 1U) << i); - - /* ==== Try msync-ing already mapped memory page ==== */ - if (msync(base_addr, page_size, MS_ASYNC) == 0) { - // The page of the given address was synced by the linux kernel and must thus be both, mapped and valid. - max_valid_address_bit = i; - break; - } - if (errno != ENOMEM) { - // An unexpected error occurred, i.e. an error not indicating that the targeted memory page is unmapped, - // but pointing out another type of issue. - // Even though this should never happen, those issues may come up due to undefined behavior. -#ifdef ASSERT - fatal("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno)); -#else // ASSERT - log_warning_p(gc)("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno)); -#endif // ASSERT - continue; - } - - /* ==== Try mapping memory page on our own ==== */ - last_allocatable_address = mmap(base_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0); - if (last_allocatable_address != MAP_FAILED) { - munmap(last_allocatable_address, page_size); - } - - if (last_allocatable_address == base_addr) { - // As the linux kernel mapped exactly the page we have requested, the address must be valid. - max_valid_address_bit = i; - break; - } - - log_info_p(gc, init)("Probe failed for bit '%zu'", i); - } - - if (max_valid_address_bit == 0) { - // Probing did not bring up any usable address bit. - // As an alternative, the VM evaluates the address returned by mmap as it is expected that the reserved page - // will be close to the probed address that was out-of-range. - // As per mmap(2), "the kernel [will take] [the address] as a hint about where to - // place the mapping; on Linux, the mapping will be created at a nearby page boundary". - // It should thus be a "close enough" approximation to the real virtual memory address space limit. - // - // This recovery strategy is only applied in production builds. 
- // In debug builds, an assertion in 'XPlatformAddressOffsetBits' will bail out the VM to indicate that - // the assumed address space is no longer up-to-date. - if (last_allocatable_address != MAP_FAILED) { - const unsigned int bitpos = BitsPerSize_t - count_leading_zeros((size_t) last_allocatable_address) - 1; - log_info_p(gc, init)("Did not find any valid addresses within the range, using address '%u' instead", bitpos); - return bitpos; - } - -#ifdef ASSERT - fatal("Available address space can not be determined"); -#else // ASSERT - log_warning_p(gc)("Cannot determine available address space. Falling back to default value."); - return DEFAULT_MAX_ADDRESS_BIT; -#endif // ASSERT - } else { - if (max_valid_address_bit == init_bit) { - // An usable address bit has been found immediately. - // To ensure that the entire virtual address space is exploited, the next highest bit will be tested as well. - log_info_p(gc, init)("Hit valid address '%u' on first try, retrying with next higher bit", max_valid_address_bit); - return MAX2(max_valid_address_bit, probe_valid_max_address_bit(init_bit + 1, init_bit + 1)); - } - } - - log_info_p(gc, init)("Found valid address '%u'", max_valid_address_bit); - return max_valid_address_bit; -#else // LINUX - return DEFAULT_MAX_ADDRESS_BIT; -#endif // LINUX -} - -size_t XPlatformAddressOffsetBits() { - const static unsigned int valid_max_address_offset_bits = - probe_valid_max_address_bit(DEFAULT_MAX_ADDRESS_BIT, MINIMUM_MAX_ADDRESS_BIT) + 1; - assert(valid_max_address_offset_bits >= MINIMUM_MAX_ADDRESS_BIT, - "Highest addressable bit is outside the assumed address space range"); - - const size_t max_address_offset_bits = valid_max_address_offset_bits - 3; - const size_t min_address_offset_bits = max_address_offset_bits - 2; - const size_t address_offset = round_up_power_of_2(MaxHeapSize * XVirtualToPhysicalRatio); - const size_t address_offset_bits = log2i_exact(address_offset); - - return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits); -} - -size_t XPlatformAddressMetadataShift() { - return XPlatformAddressOffsetBits(); -} diff --git a/src/hotspot/cpu/ppc/gc/x/xGlobals_ppc.hpp b/src/hotspot/cpu/ppc/gc/x/xGlobals_ppc.hpp deleted file mode 100644 index be88b05b02a82..0000000000000 --- a/src/hotspot/cpu/ppc/gc/x/xGlobals_ppc.hpp +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2021 SAP SE. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef CPU_PPC_GC_X_XGLOBALS_PPC_HPP -#define CPU_PPC_GC_X_XGLOBALS_PPC_HPP - -#include "globalDefinitions_ppc.hpp" - -const size_t XPlatformHeapViews = 3; -const size_t XPlatformCacheLineSize = DEFAULT_CACHE_LINE_SIZE; - -size_t XPlatformAddressOffsetBits(); -size_t XPlatformAddressMetadataShift(); - -#endif // CPU_PPC_GC_X_XGLOBALS_PPC_HPP diff --git a/src/hotspot/cpu/ppc/gc/x/x_ppc.ad b/src/hotspot/cpu/ppc/gc/x/x_ppc.ad deleted file mode 100644 index b206b6593fb45..0000000000000 --- a/src/hotspot/cpu/ppc/gc/x/x_ppc.ad +++ /dev/null @@ -1,298 +0,0 @@ -// -// Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. -// Copyright (c) 2021 SAP SE. All rights reserved. -// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -// -// This code is free software; you can redistribute it and/or modify it -// under the terms of the GNU General Public License version 2 only, as -// published by the Free Software Foundation. -// -// This code is distributed in the hope that it will be useful, but WITHOUT -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -// version 2 for more details (a copy is included in the LICENSE file that -// accompanied this code). -// -// You should have received a copy of the GNU General Public License version -// 2 along with this work; if not, write to the Free Software Foundation, -// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -// -// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -// or visit www.oracle.com if you need additional information or have any -// questions. -// - -source_hpp %{ - -#include "gc/shared/gc_globals.hpp" -#include "gc/x/c2/xBarrierSetC2.hpp" -#include "gc/x/xThreadLocalData.hpp" - -%} - -source %{ - -static void x_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, - Register tmp, uint8_t barrier_data) { - if (barrier_data == XLoadBarrierElided) { - return; - } - - XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data); - __ ld(tmp, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread); - __ and_(tmp, tmp, ref); - __ bne_far(CCR0, *stub->entry(), MacroAssembler::bc_far_optimize_on_relocate); - __ bind(*stub->continuation()); -} - -static void x_load_barrier_slow_path(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, - Register tmp) { - XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong); - __ b(*stub->entry()); - __ bind(*stub->continuation()); -} - -static void x_compare_and_swap(MacroAssembler* masm, const MachNode* node, - Register res, Register mem, Register oldval, Register newval, - Register tmp_xchg, Register tmp_mask, - bool weak, bool acquire) { - // z-specific load barrier requires strong CAS operations. - // Weak CAS operations are thus only emitted if the barrier is elided. - __ cmpxchgd(CCR0, tmp_xchg, oldval, newval, mem, - MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, nullptr, true, - weak && node->barrier_data() == XLoadBarrierElided); - - if (node->barrier_data() != XLoadBarrierElided) { - Label skip_barrier; - - __ ld(tmp_mask, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread); - __ and_(tmp_mask, tmp_mask, tmp_xchg); - __ beq(CCR0, skip_barrier); - - // CAS must have failed because pointer in memory is bad. 
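The sequence continued just below — strong CAS, test the observed value against the bad mask, heal via the slow path, then retry the CAS — can be sketched in plain C++ as follows (a toy two-color scheme assuming 64-bit pointers; none of these names come from the deleted file):

    #include <atomic>
    #include <cstdint>

    // Toy coloring, standing in for the real metadata bits.
    static const uintptr_t COLOR_A    = uintptr_t(1) << 62;
    static const uintptr_t COLOR_B    = uintptr_t(1) << 63;
    static const uintptr_t COLOR_MASK = COLOR_A | COLOR_B;

    static uintptr_t good_color = COLOR_A;   // flips at each GC phase change
    static uintptr_t bad_mask   = COLOR_B;   // colors that must be healed

    // Heal: rewrite the field so the same offset carries the good color.
    static void heal_field(std::atomic<uintptr_t>& field, uintptr_t stale) {
      uintptr_t healed = (stale & ~COLOR_MASK) | good_color;
      field.compare_exchange_strong(stale, healed);   // best effort
    }

    // Strong CAS that tolerates a stale-colored value in memory: on failure,
    // decide whether it was a real mismatch or only a color needing healing.
    static bool cas_oop(std::atomic<uintptr_t>& field, uintptr_t expected, uintptr_t desired) {
      uintptr_t observed = expected;
      if (field.compare_exchange_strong(observed, desired)) return true;
      if ((observed & bad_mask) == 0) return false;   // genuine value mismatch
      heal_field(field, observed);
      observed = expected;
      return field.compare_exchange_strong(observed, desired);
    }

One retry suffices in this model: after healing, the field holds a good-colored pointer, so a second failure can only be a genuine value mismatch.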
- x_load_barrier_slow_path(masm, node, Address(mem), tmp_xchg, res /* used as tmp */); - - __ cmpxchgd(CCR0, tmp_xchg, oldval, newval, mem, - MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, nullptr, true, weak); - - __ bind(skip_barrier); - } - - if (acquire) { - if (support_IRIW_for_not_multiple_copy_atomic_cpu) { - // Uses the isync instruction as an acquire barrier. - // This exploits the compare and the branch in the z load barrier (load, compare and branch, isync). - __ isync(); - } else { - __ sync(); - } - } -} - -static void x_compare_and_exchange(MacroAssembler* masm, const MachNode* node, - Register res, Register mem, Register oldval, Register newval, Register tmp, - bool weak, bool acquire) { - // z-specific load barrier requires strong CAS operations. - // Weak CAS operations are thus only emitted if the barrier is elided. - __ cmpxchgd(CCR0, res, oldval, newval, mem, - MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, nullptr, true, - weak && node->barrier_data() == XLoadBarrierElided); - - if (node->barrier_data() != XLoadBarrierElided) { - Label skip_barrier; - __ ld(tmp, in_bytes(XThreadLocalData::address_bad_mask_offset()), R16_thread); - __ and_(tmp, tmp, res); - __ beq(CCR0, skip_barrier); - - x_load_barrier_slow_path(masm, node, Address(mem), res, tmp); - - __ cmpxchgd(CCR0, res, oldval, newval, mem, - MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, nullptr, true, weak); - - __ bind(skip_barrier); - } - - if (acquire) { - if (support_IRIW_for_not_multiple_copy_atomic_cpu) { - // Uses the isync instruction as an acquire barrier. - // This exploits the compare and the branch in the z load barrier (load, compare and branch, isync). - __ isync(); - } else { - __ sync(); - } - } -} - -%} - -instruct xLoadP(iRegPdst dst, memoryAlg4 mem, iRegPdst tmp, flagsRegCR0 cr0) -%{ - match(Set dst (LoadP mem)); - effect(TEMP_DEF dst, TEMP tmp, KILL cr0); - ins_cost(MEMORY_REF_COST); - - predicate((UseZGC && !ZGenerational && n->as_Load()->barrier_data() != 0) - && (n->as_Load()->is_unordered() || followed_by_acquire(n))); - - format %{ "LD $dst, $mem" %} - ins_encode %{ - assert($mem$$index == 0, "sanity"); - __ ld($dst$$Register, $mem$$disp, $mem$$base$$Register); - x_load_barrier(masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data()); - %} - ins_pipe(pipe_class_default); -%} - -// Load Pointer Volatile -instruct xLoadP_acq(iRegPdst dst, memoryAlg4 mem, iRegPdst tmp, flagsRegCR0 cr0) -%{ - match(Set dst (LoadP mem)); - effect(TEMP_DEF dst, TEMP tmp, KILL cr0); - ins_cost(3 * MEMORY_REF_COST); - - // Predicate on instruction order is implicitly present due to the predicate of the cheaper zLoadP operation - predicate(UseZGC && !ZGenerational && n->as_Load()->barrier_data() != 0); - - format %{ "LD acq $dst, $mem" %} - ins_encode %{ - __ ld($dst$$Register, $mem$$disp, $mem$$base$$Register); - x_load_barrier(masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data()); - - // Uses the isync instruction as an acquire barrier. - // This exploits the compare and the branch in the z load barrier (load, compare and branch, isync). 
- __ isync(); - %} - ins_pipe(pipe_class_default); -%} - -instruct xCompareAndSwapP(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval, - iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{ - match(Set res (CompareAndSwapP mem (Binary oldval newval))); - effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0); - - predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong) - && (((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*) n)->order() != MemNode::seqcst)); - - format %{ "CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %} - ins_encode %{ - x_compare_and_swap(masm, this, - $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, - $tmp_xchg$$Register, $tmp_mask$$Register, - false /* weak */, false /* acquire */); - %} - ins_pipe(pipe_class_default); -%} - -instruct xCompareAndSwapP_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval, - iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{ - match(Set res (CompareAndSwapP mem (Binary oldval newval))); - effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0); - - predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong) - && (((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*) n)->order() == MemNode::seqcst)); - - format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %} - ins_encode %{ - x_compare_and_swap(masm, this, - $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, - $tmp_xchg$$Register, $tmp_mask$$Register, - false /* weak */, true /* acquire */); - %} - ins_pipe(pipe_class_default); -%} - -instruct xCompareAndSwapPWeak(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval, - iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{ - match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); - effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0); - - predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong) - && ((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*) n)->order() != MemNode::seqcst); - - format %{ "weak CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %} - ins_encode %{ - x_compare_and_swap(masm, this, - $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, - $tmp_xchg$$Register, $tmp_mask$$Register, - true /* weak */, false /* acquire */); - %} - ins_pipe(pipe_class_default); -%} - -instruct xCompareAndSwapPWeak_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval, - iRegPdst tmp_xchg, iRegPdst tmp_mask, flagsRegCR0 cr0) %{ - match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); - effect(TEMP_DEF res, TEMP tmp_xchg, TEMP tmp_mask, KILL cr0); - - predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong) - && (((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*) n)->order() == MemNode::seqcst)); - - format %{ "weak CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %} - ins_encode %{ - x_compare_and_swap(masm, this, - $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, - $tmp_xchg$$Register, $tmp_mask$$Register, - true /* weak */, true /* acquire */); - %} - ins_pipe(pipe_class_default); -%} - -instruct xCompareAndExchangeP(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval, - iRegPdst tmp, flagsRegCR0 cr0) %{ - match(Set res (CompareAndExchangeP mem (Binary oldval newval))); - 
effect(TEMP_DEF res, TEMP tmp, KILL cr0); - - predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong) - && ( - ((CompareAndSwapNode*)n)->order() != MemNode::acquire - && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst - )); - - format %{ "CMPXCHG $res, $mem, $oldval, $newval; as ptr; ptr" %} - ins_encode %{ - x_compare_and_exchange(masm, this, - $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register, - false /* weak */, false /* acquire */); - %} - ins_pipe(pipe_class_default); -%} - -instruct xCompareAndExchangeP_acq(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc newval, - iRegPdst tmp, flagsRegCR0 cr0) %{ - match(Set res (CompareAndExchangeP mem (Binary oldval newval))); - effect(TEMP_DEF res, TEMP tmp, KILL cr0); - - predicate((UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong) - && ( - ((CompareAndSwapNode*)n)->order() == MemNode::acquire - || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst - )); - - format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as ptr; ptr" %} - ins_encode %{ - x_compare_and_exchange(masm, this, - $res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register, - false /* weak */, true /* acquire */); - %} - ins_pipe(pipe_class_default); -%} - -instruct xGetAndSetP(iRegPdst res, iRegPdst mem, iRegPsrc newval, iRegPdst tmp, flagsRegCR0 cr0) %{ - match(Set res (GetAndSetP mem newval)); - effect(TEMP_DEF res, TEMP tmp, KILL cr0); - - predicate(UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() != 0); - - format %{ "GetAndSetP $res, $mem, $newval" %} - ins_encode %{ - __ getandsetd($res$$Register, $newval$$Register, $mem$$Register, MacroAssembler::cmpxchgx_hint_atomic_update()); - x_load_barrier(masm, this, Address(noreg, (intptr_t) 0), $res$$Register, $tmp$$Register, barrier_data()); - - if (support_IRIW_for_not_multiple_copy_atomic_cpu) { - __ isync(); - } else { - __ sync(); - } - %} - ins_pipe(pipe_class_default); -%} diff --git a/src/hotspot/cpu/ppc/gc/z/z_ppc.ad b/src/hotspot/cpu/ppc/gc/z/z_ppc.ad index bb696a4738f40..97b49bc1b026e 100644 --- a/src/hotspot/cpu/ppc/gc/z/z_ppc.ad +++ b/src/hotspot/cpu/ppc/gc/z/z_ppc.ad @@ -143,7 +143,7 @@ instruct zLoadP(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0) effect(TEMP_DEF dst, KILL cr0); ins_cost(MEMORY_REF_COST); - predicate((UseZGC && ZGenerational && n->as_Load()->barrier_data() != 0) + predicate((UseZGC && n->as_Load()->barrier_data() != 0) && (n->as_Load()->is_unordered() || followed_by_acquire(n))); format %{ "LD $dst, $mem" %} @@ -163,7 +163,7 @@ instruct zLoadP_acq(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0) ins_cost(3 * MEMORY_REF_COST); // Predicate on instruction order is implicitly present due to the predicate of the cheaper zLoadP operation - predicate(UseZGC && ZGenerational && n->as_Load()->barrier_data() != 0); + predicate(UseZGC && n->as_Load()->barrier_data() != 0); format %{ "LD acq $dst, $mem" %} ins_encode %{ @@ -181,7 +181,7 @@ instruct zLoadP_acq(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0) // Store Pointer instruct zStoreP(memoryAlg4 mem, iRegPsrc src, iRegPdst tmp, flagsRegCR0 cr0) %{ - predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0); + predicate(UseZGC && n->as_Store()->barrier_data() != 0); match(Set mem (StoreP mem src)); effect(TEMP tmp, KILL cr0); ins_cost(2 * MEMORY_REF_COST); @@ -195,7 +195,7 @@ instruct zStoreP(memoryAlg4 mem, iRegPsrc src, iRegPdst tmp, flagsRegCR0 cr0) instruct zStorePNull(memoryAlg4 
mem, immP_0 zero, iRegPdst tmp, flagsRegCR0 cr0) %{ - predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0); + predicate(UseZGC && n->as_Store()->barrier_data() != 0); match(Set mem (StoreP mem zero)); effect(TEMP tmp, KILL cr0); ins_cost(MEMORY_REF_COST); @@ -213,7 +213,7 @@ instruct zCompareAndSwapP(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); effect(TEMP_DEF res, TEMP tmp1, TEMP tmp2, KILL cr0); - predicate((UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0) + predicate((UseZGC && n->as_LoadStore()->barrier_data() != 0) && (((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*) n)->order() != MemNode::seqcst)); format %{ "CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %} @@ -232,7 +232,7 @@ instruct zCompareAndSwapP_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegP match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); effect(TEMP_DEF res, TEMP tmp1, TEMP tmp2, KILL cr0); - predicate((UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0) + predicate((UseZGC && n->as_LoadStore()->barrier_data() != 0) && (((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*) n)->order() == MemNode::seqcst)); format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %} @@ -250,7 +250,7 @@ instruct zCompareAndExchangeP(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegP match(Set res (CompareAndExchangeP mem (Binary oldval newval))); effect(TEMP_DEF res, TEMP tmp, KILL cr0); - predicate((UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0) + predicate((UseZGC && n->as_LoadStore()->barrier_data() != 0) && ( ((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst @@ -270,7 +270,7 @@ instruct zCompareAndExchangeP_acq(iRegPdst res, iRegPdst mem, iRegPsrc oldval, i match(Set res (CompareAndExchangeP mem (Binary oldval newval))); effect(TEMP_DEF res, TEMP tmp, KILL cr0); - predicate((UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0) + predicate((UseZGC && n->as_LoadStore()->barrier_data() != 0) && ( ((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst @@ -289,7 +289,7 @@ instruct zGetAndSetP(iRegPdst res, iRegPdst mem, iRegPsrc newval, iRegPdst tmp, match(Set res (GetAndSetP mem newval)); effect(TEMP_DEF res, TEMP tmp, KILL cr0); - predicate(UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0); + predicate(UseZGC && n->as_LoadStore()->barrier_data() != 0); format %{ "GetAndSetP $res, $mem, $newval" %} ins_encode %{ diff --git a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp index 206c161287fa2..b3ace8898ad63 100644 --- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp +++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp @@ -49,7 +49,6 @@ #include "utilities/align.hpp" #include "utilities/powerOfTwo.hpp" #if INCLUDE_ZGC -#include "gc/x/xBarrierSetAssembler.hpp" #include "gc/z/zBarrierSetAssembler.hpp" #endif @@ -1976,7 +1975,7 @@ class StubGenerator: public StubCodeGenerator { generate_conjoint_int_copy_core(aligned); } else { #if INCLUDE_ZGC - if (UseZGC && ZGenerational) { + if (UseZGC) { ZBarrierSetAssembler *zbs = (ZBarrierSetAssembler*)bs; zbs->generate_conjoint_oop_copy(_masm, dest_uninitialized); } else @@ -2019,7 +2018,7 @@ class StubGenerator: public StubCodeGenerator { generate_disjoint_int_copy_core(aligned); } else { #if 
INCLUDE_ZGC - if (UseZGC && ZGenerational) { + if (UseZGC) { ZBarrierSetAssembler *zbs = (ZBarrierSetAssembler*)bs; zbs->generate_disjoint_oop_copy(_masm, dest_uninitialized); } else @@ -2137,7 +2136,7 @@ class StubGenerator: public StubCodeGenerator { } else { __ bind(store_null); #if INCLUDE_ZGC - if (UseZGC && ZGenerational) { + if (UseZGC) { __ store_heap_oop(R10_oop, R8_offset, R4_to, R11_scratch1, R12_tmp, noreg, MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS, dest_uninitialized ? IS_DEST_UNINITIALIZED : 0); @@ -2153,7 +2152,7 @@ class StubGenerator: public StubCodeGenerator { // ======== loop entry is here ======== __ bind(load_element); #if INCLUDE_ZGC - if (UseZGC && ZGenerational) { + if (UseZGC) { __ load_heap_oop(R10_oop, R8_offset, R3_from, R11_scratch1, R12_tmp, MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS, diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp index a2bf4439463d6..21bf089118b45 100644 --- a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp @@ -838,10 +838,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch __ decode_heap_oop(dest->as_register()); } - if (!(UseZGC && !ZGenerational)) { - // Load barrier has not yet been applied, so ZGC can't verify the oop here - __ verify_oop(dest->as_register()); - } + __ verify_oop(dest->as_register()); } } diff --git a/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.cpp deleted file mode 100644 index eb8d4c44b88a1..0000000000000 --- a/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.cpp +++ /dev/null @@ -1,454 +0,0 @@ -/* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#include "precompiled.hpp" -#include "asm/macroAssembler.inline.hpp" -#include "code/codeBlob.hpp" -#include "code/vmreg.inline.hpp" -#include "gc/x/xBarrier.inline.hpp" -#include "gc/x/xBarrierSet.hpp" -#include "gc/x/xBarrierSetAssembler.hpp" -#include "gc/x/xBarrierSetRuntime.hpp" -#include "gc/x/xThreadLocalData.hpp" -#include "memory/resourceArea.hpp" -#include "runtime/sharedRuntime.hpp" -#include "utilities/macros.hpp" -#ifdef COMPILER1 -#include "c1/c1_LIRAssembler.hpp" -#include "c1/c1_MacroAssembler.hpp" -#include "gc/x/c1/xBarrierSetC1.hpp" -#endif // COMPILER1 -#ifdef COMPILER2 -#include "gc/x/c2/xBarrierSetC2.hpp" -#endif // COMPILER2 - -#ifdef PRODUCT -#define BLOCK_COMMENT(str) /* nothing */ -#else -#define BLOCK_COMMENT(str) __ block_comment(str) -#endif - -#undef __ -#define __ masm-> - -void XBarrierSetAssembler::load_at(MacroAssembler* masm, - DecoratorSet decorators, - BasicType type, - Register dst, - Address src, - Register tmp1, - Register tmp2) { - if (!XBarrierSet::barrier_needed(decorators, type)) { - // Barrier not needed - BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2); - return; - } - - assert_different_registers(t1, src.base()); - assert_different_registers(t0, t1, dst); - - Label done; - - // Load bad mask into temp register. - __ la(t0, src); - __ ld(t1, address_bad_mask_from_thread(xthread)); - __ ld(dst, Address(t0)); - - // Test reference against bad mask. If mask bad, then we need to fix it up. - __ andr(t1, dst, t1); - __ beqz(t1, done); - - __ enter(); - - __ push_call_clobbered_registers_except(RegSet::of(dst)); - - if (c_rarg0 != dst) { - __ mv(c_rarg0, dst); - } - - __ mv(c_rarg1, t0); - - __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2); - - // Make sure dst has the return value. - if (dst != x10) { - __ mv(dst, x10); - } - - __ pop_call_clobbered_registers_except(RegSet::of(dst)); - __ leave(); - - __ bind(done); -} - -#ifdef ASSERT - -void XBarrierSetAssembler::store_at(MacroAssembler* masm, - DecoratorSet decorators, - BasicType type, - Address dst, - Register val, - Register tmp1, - Register tmp2, - Register tmp3) { - // Verify value - if (is_reference_type(type)) { - // Note that src could be noreg, which means we - // are storing null and can skip verification. - if (val != noreg) { - Label done; - - // tmp1, tmp2 and tmp3 are often set to noreg. - RegSet savedRegs = RegSet::of(t0); - __ push_reg(savedRegs, sp); - - __ ld(t0, address_bad_mask_from_thread(xthread)); - __ andr(t0, val, t0); - __ beqz(t0, done); - __ stop("Verify oop store failed"); - __ should_not_reach_here(); - __ bind(done); - __ pop_reg(savedRegs, sp); - } - } - - // Store value - BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, noreg); -} - -#endif // ASSERT - -void XBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, - DecoratorSet decorators, - bool is_oop, - Register src, - Register dst, - Register count, - RegSet saved_regs) { - if (!is_oop) { - // Barrier not needed - return; - } - - BLOCK_COMMENT("XBarrierSetAssembler::arraycopy_prologue {"); - - assert_different_registers(src, count, t0); - - __ push_reg(saved_regs, sp); - - if (count == c_rarg0 && src == c_rarg1) { - // exactly backwards!! 
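The swap emitted just below uses the classic three-XOR exchange so no scratch register is needed when src and count arrive in each other's argument registers; as a standalone sketch (illustrative helper, not the deleted HotSpot code):

    #include <cstdint>

    // Swap two values in place without a temporary, mirroring the
    // xorr/xorr/xorr sequence the stub emits for c_rarg0/c_rarg1.
    static inline void xor_swap(uintptr_t& a, uintptr_t& b) {
      a ^= b;   // a now holds a ^ b
      b ^= a;   // b becomes b ^ (a ^ b) == original a
      a ^= b;   // a becomes (a ^ b) ^ original a == original b
    }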
- __ xorr(c_rarg0, c_rarg0, c_rarg1); - __ xorr(c_rarg1, c_rarg0, c_rarg1); - __ xorr(c_rarg0, c_rarg0, c_rarg1); - } else { - __ mv(c_rarg0, src); - __ mv(c_rarg1, count); - } - - __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_array_addr(), 2); - - __ pop_reg(saved_regs, sp); - - BLOCK_COMMENT("} XBarrierSetAssembler::arraycopy_prologue"); -} - -void XBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, - Register jni_env, - Register robj, - Register tmp, - Label& slowpath) { - BLOCK_COMMENT("XBarrierSetAssembler::try_resolve_jobject_in_native {"); - - assert_different_registers(jni_env, robj, tmp); - - // Resolve jobject - BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, robj, tmp, slowpath); - - // Compute the offset of address bad mask from the field of jni_environment - long int bad_mask_relative_offset = (long int) (in_bytes(XThreadLocalData::address_bad_mask_offset()) - - in_bytes(JavaThread::jni_environment_offset())); - - // Load the address bad mask - __ ld(tmp, Address(jni_env, bad_mask_relative_offset)); - - // Check address bad mask - __ andr(tmp, robj, tmp); - __ bnez(tmp, slowpath); - - BLOCK_COMMENT("} XBarrierSetAssembler::try_resolve_jobject_in_native"); -} - -#ifdef COMPILER2 - -OptoReg::Name XBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) { - if (!OptoReg::is_reg(opto_reg)) { - return OptoReg::Bad; - } - - const VMReg vm_reg = OptoReg::as_VMReg(opto_reg); - if (vm_reg->is_FloatRegister()) { - return opto_reg & ~1; - } - - return opto_reg; -} - -#undef __ -#define __ _masm-> - -class XSaveLiveRegisters { -private: - MacroAssembler* const _masm; - RegSet _gp_regs; - FloatRegSet _fp_regs; - VectorRegSet _vp_regs; - -public: - void initialize(XLoadBarrierStubC2* stub) { - // Record registers that needs to be saved/restored - RegMaskIterator rmi(stub->live()); - while (rmi.has_next()) { - const OptoReg::Name opto_reg = rmi.next(); - if (OptoReg::is_reg(opto_reg)) { - const VMReg vm_reg = OptoReg::as_VMReg(opto_reg); - if (vm_reg->is_Register()) { - _gp_regs += RegSet::of(vm_reg->as_Register()); - } else if (vm_reg->is_FloatRegister()) { - _fp_regs += FloatRegSet::of(vm_reg->as_FloatRegister()); - } else if (vm_reg->is_VectorRegister()) { - const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~(VectorRegister::max_slots_per_register - 1)); - _vp_regs += VectorRegSet::of(vm_reg_base->as_VectorRegister()); - } else { - fatal("Unknown register type"); - } - } - } - - // Remove C-ABI SOE registers, tmp regs and _ref register that will be updated - _gp_regs -= RegSet::range(x18, x27) + RegSet::of(x2) + RegSet::of(x8, x9) + RegSet::of(x5, stub->ref()); - } - - XSaveLiveRegisters(MacroAssembler* masm, XLoadBarrierStubC2* stub) : - _masm(masm), - _gp_regs(), - _fp_regs(), - _vp_regs() { - // Figure out what registers to save/restore - initialize(stub); - - // Save registers - __ push_reg(_gp_regs, sp); - __ push_fp(_fp_regs, sp); - __ push_v(_vp_regs, sp); - } - - ~XSaveLiveRegisters() { - // Restore registers - __ pop_v(_vp_regs, sp); - __ pop_fp(_fp_regs, sp); - __ pop_reg(_gp_regs, sp); - } -}; - -class XSetupArguments { -private: - MacroAssembler* const _masm; - const Register _ref; - const Address _ref_addr; - -public: - XSetupArguments(MacroAssembler* masm, XLoadBarrierStubC2* stub) : - _masm(masm), - _ref(stub->ref()), - _ref_addr(stub->ref_addr()) { - - // Setup arguments - if (_ref_addr.base() == noreg) { - // No self healing - if (_ref != c_rarg0) { - __ mv(c_rarg0, _ref); - } - __ mv(c_rarg1, 
zr); - } else { - // Self healing - if (_ref == c_rarg0) { - // _ref is already at correct place - __ la(c_rarg1, _ref_addr); - } else if (_ref != c_rarg1) { - // _ref is in wrong place, but not in c_rarg1, so fix it first - __ la(c_rarg1, _ref_addr); - __ mv(c_rarg0, _ref); - } else if (_ref_addr.base() != c_rarg0) { - assert(_ref == c_rarg1, "Mov ref first, vacating c_rarg0"); - __ mv(c_rarg0, _ref); - __ la(c_rarg1, _ref_addr); - } else { - assert(_ref == c_rarg1, "Need to vacate c_rarg1 and _ref_addr is using c_rarg0"); - if (_ref_addr.base() == c_rarg0) { - __ mv(t1, c_rarg1); - __ la(c_rarg1, _ref_addr); - __ mv(c_rarg0, t1); - } else { - ShouldNotReachHere(); - } - } - } - } - - ~XSetupArguments() { - // Transfer result - if (_ref != x10) { - __ mv(_ref, x10); - } - } -}; - -#undef __ -#define __ masm-> - -void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const { - BLOCK_COMMENT("XLoadBarrierStubC2"); - - // Stub entry - __ bind(*stub->entry()); - - { - XSaveLiveRegisters save_live_registers(masm, stub); - XSetupArguments setup_arguments(masm, stub); - - __ mv(t1, stub->slow_path()); - __ jalr(t1); - } - - // Stub exit - __ j(*stub->continuation()); -} - -#endif // COMPILER2 - -#ifdef COMPILER1 -#undef __ -#define __ ce->masm()-> - -void XBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce, - LIR_Opr ref) const { - assert_different_registers(xthread, ref->as_register(), t1); - __ ld(t1, address_bad_mask_from_thread(xthread)); - __ andr(t1, t1, ref->as_register()); -} - -void XBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce, - XLoadBarrierStubC1* stub) const { - // Stub entry - __ bind(*stub->entry()); - - Register ref = stub->ref()->as_register(); - Register ref_addr = noreg; - Register tmp = noreg; - - if (stub->tmp()->is_valid()) { - // Load address into tmp register - ce->leal(stub->ref_addr(), stub->tmp()); - ref_addr = tmp = stub->tmp()->as_pointer_register(); - } else { - // Address already in register - ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register(); - } - - assert_different_registers(ref, ref_addr, noreg); - - // Save x10 unless it is the result or tmp register - // Set up SP to accommodate parameters and maybe x10. 
- if (ref != x10 && tmp != x10) { - __ sub(sp, sp, 32); - __ sd(x10, Address(sp, 16)); - } else { - __ sub(sp, sp, 16); - } - - // Setup arguments and call runtime stub - ce->store_parameter(ref_addr, 1); - ce->store_parameter(ref, 0); - - __ far_call(stub->runtime_stub()); - - // Verify result - __ verify_oop(x10); - - - // Move result into place - if (ref != x10) { - __ mv(ref, x10); - } - - // Restore x10 unless it is the result or tmp register - if (ref != x10 && tmp != x10) { - __ ld(x10, Address(sp, 16)); - __ add(sp, sp, 32); - } else { - __ add(sp, sp, 16); - } - - // Stub exit - __ j(*stub->continuation()); -} - -#undef __ -#define __ sasm-> - -void XBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, - DecoratorSet decorators) const { - __ prologue("zgc_load_barrier stub", false); - - __ push_call_clobbered_registers_except(RegSet::of(x10)); - - // Setup arguments - __ load_parameter(0, c_rarg0); - __ load_parameter(1, c_rarg1); - - __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2); - - __ pop_call_clobbered_registers_except(RegSet::of(x10)); - - __ epilogue(); -} - -#endif // COMPILER1 - -#undef __ -#define __ masm-> - -void XBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) { - // Check if mask is good. - // verifies that XAddressBadMask & obj == 0 - __ ld(tmp2, Address(xthread, XThreadLocalData::address_bad_mask_offset())); - __ andr(tmp1, obj, tmp2); - __ bnez(tmp1, error); - - BarrierSetAssembler::check_oop(masm, obj, tmp1, tmp2, error); -} - -#undef __ diff --git a/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.hpp b/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.hpp deleted file mode 100644 index cbf5077999bfb..0000000000000 --- a/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.hpp +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#ifndef CPU_RISCV_GC_X_XBARRIERSETASSEMBLER_RISCV_HPP -#define CPU_RISCV_GC_X_XBARRIERSETASSEMBLER_RISCV_HPP - -#include "code/vmreg.hpp" -#include "oops/accessDecorators.hpp" -#ifdef COMPILER2 -#include "opto/optoreg.hpp" -#endif // COMPILER2 - -#ifdef COMPILER1 -class LIR_Assembler; -class LIR_Opr; -class StubAssembler; -#endif // COMPILER1 - -#ifdef COMPILER2 -class Node; -#endif // COMPILER2 - -#ifdef COMPILER1 -class XLoadBarrierStubC1; -#endif // COMPILER1 - -#ifdef COMPILER2 -class XLoadBarrierStubC2; -#endif // COMPILER2 - -class XBarrierSetAssembler : public XBarrierSetAssemblerBase { -public: - virtual void load_at(MacroAssembler* masm, - DecoratorSet decorators, - BasicType type, - Register dst, - Address src, - Register tmp1, - Register tmp2); - -#ifdef ASSERT - virtual void store_at(MacroAssembler* masm, - DecoratorSet decorators, - BasicType type, - Address dst, - Register val, - Register tmp1, - Register tmp2, - Register tmp3); -#endif // ASSERT - - virtual void arraycopy_prologue(MacroAssembler* masm, - DecoratorSet decorators, - bool is_oop, - Register src, - Register dst, - Register count, - RegSet saved_regs); - - virtual void try_resolve_jobject_in_native(MacroAssembler* masm, - Register jni_env, - Register robj, - Register tmp, - Label& slowpath); - - virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::conc_data_patch; } - -#ifdef COMPILER1 - void generate_c1_load_barrier_test(LIR_Assembler* ce, - LIR_Opr ref) const; - - void generate_c1_load_barrier_stub(LIR_Assembler* ce, - XLoadBarrierStubC1* stub) const; - - void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, - DecoratorSet decorators) const; -#endif // COMPILER1 - -#ifdef COMPILER2 - OptoReg::Name refine_register(const Node* node, - OptoReg::Name opto_reg); - - void generate_c2_load_barrier_stub(MacroAssembler* masm, - XLoadBarrierStubC2* stub) const; -#endif // COMPILER2 - - void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error); -}; - -#endif // CPU_RISCV_GC_X_XBARRIERSETASSEMBLER_RISCV_HPP diff --git a/src/hotspot/cpu/riscv/gc/x/xGlobals_riscv.cpp b/src/hotspot/cpu/riscv/gc/x/xGlobals_riscv.cpp deleted file mode 100644 index 602dab5674738..0000000000000 --- a/src/hotspot/cpu/riscv/gc/x/xGlobals_riscv.cpp +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#include "precompiled.hpp" -#include "gc/shared/gcLogPrecious.hpp" -#include "gc/shared/gc_globals.hpp" -#include "gc/x/xGlobals.hpp" -#include "runtime/globals.hpp" -#include "runtime/os.hpp" -#include "utilities/globalDefinitions.hpp" -#include "utilities/powerOfTwo.hpp" - -#ifdef LINUX -#include -#endif // LINUX - -// -// The heap can have three different layouts, depending on the max heap size. -// -// Address Space & Pointer Layout 1 -// -------------------------------- -// -// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB) -// . . -// . . -// . . -// +--------------------------------+ 0x0000014000000000 (20TB) -// | Remapped View | -// +--------------------------------+ 0x0000010000000000 (16TB) -// . . -// +--------------------------------+ 0x00000c0000000000 (12TB) -// | Marked1 View | -// +--------------------------------+ 0x0000080000000000 (8TB) -// | Marked0 View | -// +--------------------------------+ 0x0000040000000000 (4TB) -// . . -// +--------------------------------+ 0x0000000000000000 -// -// 6 4 4 4 4 -// 3 6 5 2 1 0 -// +--------------------+----+-----------------------------------------------+ -// |00000000 00000000 00|1111|11 11111111 11111111 11111111 11111111 11111111| -// +--------------------+----+-----------------------------------------------+ -// | | | -// | | * 41-0 Object Offset (42-bits, 4TB address space) -// | | -// | * 45-42 Metadata Bits (4-bits) 0001 = Marked0 (Address view 4-8TB) -// | 0010 = Marked1 (Address view 8-12TB) -// | 0100 = Remapped (Address view 16-20TB) -// | 1000 = Finalizable (Address view N/A) -// | -// * 63-46 Fixed (18-bits, always zero) -// -// -// Address Space & Pointer Layout 2 -// -------------------------------- -// -// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB) -// . . -// . . -// . . -// +--------------------------------+ 0x0000280000000000 (40TB) -// | Remapped View | -// +--------------------------------+ 0x0000200000000000 (32TB) -// . . -// +--------------------------------+ 0x0000180000000000 (24TB) -// | Marked1 View | -// +--------------------------------+ 0x0000100000000000 (16TB) -// | Marked0 View | -// +--------------------------------+ 0x0000080000000000 (8TB) -// . . -// +--------------------------------+ 0x0000000000000000 -// -// 6 4 4 4 4 -// 3 7 6 3 2 0 -// +------------------+-----+------------------------------------------------+ -// |00000000 00000000 0|1111|111 11111111 11111111 11111111 11111111 11111111| -// +-------------------+----+------------------------------------------------+ -// | | | -// | | * 42-0 Object Offset (43-bits, 8TB address space) -// | | -// | * 46-43 Metadata Bits (4-bits) 0001 = Marked0 (Address view 8-16TB) -// | 0010 = Marked1 (Address view 16-24TB) -// | 0100 = Remapped (Address view 32-40TB) -// | 1000 = Finalizable (Address view N/A) -// | -// * 63-47 Fixed (17-bits, always zero) -// -// -// Address Space & Pointer Layout 3 -// -------------------------------- -// -// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB) -// . . -// . . -// . . -// +--------------------------------+ 0x0000500000000000 (80TB) -// | Remapped View | -// +--------------------------------+ 0x0000400000000000 (64TB) -// . . -// +--------------------------------+ 0x0000300000000000 (48TB) -// | Marked1 View | -// +--------------------------------+ 0x0000200000000000 (32TB) -// | Marked0 View | -// +--------------------------------+ 0x0000100000000000 (16TB) -// . . 
-// +--------------------------------+ 0x0000000000000000 -// -// 6 4 4 4 4 -// 3 8 7 4 3 0 -// +------------------+----+-------------------------------------------------+ -// |00000000 00000000 |1111|1111 11111111 11111111 11111111 11111111 11111111| -// +------------------+----+-------------------------------------------------+ -// | | | -// | | * 43-0 Object Offset (44-bits, 16TB address space) -// | | -// | * 47-44 Metadata Bits (4-bits) 0001 = Marked0 (Address view 16-32TB) -// | 0010 = Marked1 (Address view 32-48TB) -// | 0100 = Remapped (Address view 64-80TB) -// | 1000 = Finalizable (Address view N/A) -// | -// * 63-48 Fixed (16-bits, always zero) -// - -// Default value if probing is not implemented for a certain platform: 128TB -static const size_t DEFAULT_MAX_ADDRESS_BIT = 47; -// Minimum value returned, if probing fails: 64GB -static const size_t MINIMUM_MAX_ADDRESS_BIT = 36; - -static size_t probe_valid_max_address_bit() { -#ifdef LINUX - size_t max_address_bit = 0; - const size_t page_size = os::vm_page_size(); - for (size_t i = DEFAULT_MAX_ADDRESS_BIT; i > MINIMUM_MAX_ADDRESS_BIT; --i) { - const uintptr_t base_addr = ((uintptr_t) 1U) << i; - if (msync((void*)base_addr, page_size, MS_ASYNC) == 0) { - // msync succeeded, the address is valid, and maybe even already mapped. - max_address_bit = i; - break; - } - if (errno != ENOMEM) { - // Some error occurred. This should never happen, but msync - // has some undefined behavior, hence ignore this bit. -#ifdef ASSERT - fatal("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno)); -#else // ASSERT - log_warning_p(gc)("Received '%s' while probing the address space for the highest valid bit", os::errno_name(errno)); -#endif // ASSERT - continue; - } - // Since msync failed with ENOMEM, the page might not be mapped. - // Try to map it, to see if the address is valid. 
- void* const result_addr = mmap((void*) base_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0); - if (result_addr != MAP_FAILED) { - munmap(result_addr, page_size); - } - if ((uintptr_t) result_addr == base_addr) { - // address is valid - max_address_bit = i; - break; - } - } - if (max_address_bit == 0) { - // probing failed, allocate a very high page and take that bit as the maximum - const uintptr_t high_addr = ((uintptr_t) 1U) << DEFAULT_MAX_ADDRESS_BIT; - void* const result_addr = mmap((void*) high_addr, page_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0); - if (result_addr != MAP_FAILED) { - max_address_bit = BitsPerSize_t - count_leading_zeros((size_t) result_addr) - 1; - munmap(result_addr, page_size); - } - } - log_info_p(gc, init)("Probing address space for the highest valid bit: " SIZE_FORMAT, max_address_bit); - return MAX2(max_address_bit, MINIMUM_MAX_ADDRESS_BIT); -#else // LINUX - return DEFAULT_MAX_ADDRESS_BIT; -#endif // LINUX -} - -size_t XPlatformAddressOffsetBits() { - const static size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1; - const size_t max_address_offset_bits = valid_max_address_offset_bits - 3; - const size_t min_address_offset_bits = max_address_offset_bits - 2; - const size_t address_offset = round_up_power_of_2(MaxHeapSize * XVirtualToPhysicalRatio); - const size_t address_offset_bits = log2i_exact(address_offset); - return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits); -} - -size_t XPlatformAddressMetadataShift() { - return XPlatformAddressOffsetBits(); -} diff --git a/src/hotspot/cpu/riscv/gc/x/xGlobals_riscv.hpp b/src/hotspot/cpu/riscv/gc/x/xGlobals_riscv.hpp deleted file mode 100644 index 836dc7aac0d1d..0000000000000 --- a/src/hotspot/cpu/riscv/gc/x/xGlobals_riscv.hpp +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#ifndef CPU_RISCV_GC_X_XGLOBALS_RISCV_HPP -#define CPU_RISCV_GC_X_XGLOBALS_RISCV_HPP - -const size_t XPlatformHeapViews = 3; -const size_t XPlatformCacheLineSize = 64; - -size_t XPlatformAddressOffsetBits(); -size_t XPlatformAddressMetadataShift(); - -#endif // CPU_RISCV_GC_X_XGLOBALS_RISCV_HPP diff --git a/src/hotspot/cpu/riscv/gc/x/x_riscv.ad b/src/hotspot/cpu/riscv/gc/x/x_riscv.ad deleted file mode 100644 index b93b7066425b9..0000000000000 --- a/src/hotspot/cpu/riscv/gc/x/x_riscv.ad +++ /dev/null @@ -1,229 +0,0 @@ -// -// Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. -// Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. -// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -// -// This code is free software; you can redistribute it and/or modify it -// under the terms of the GNU General Public License version 2 only, as -// published by the Free Software Foundation. -// -// This code is distributed in the hope that it will be useful, but WITHOUT -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -// version 2 for more details (a copy is included in the LICENSE file that -// accompanied this code). -// -// You should have received a copy of the GNU General Public License version -// 2 along with this work; if not, write to the Free Software Foundation, -// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -// -// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -// or visit www.oracle.com if you need additional information or have any -// questions. -// - -source_hpp %{ - -#include "gc/shared/gc_globals.hpp" -#include "gc/x/c2/xBarrierSetC2.hpp" -#include "gc/x/xThreadLocalData.hpp" - -%} - -source %{ - -static void x_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, int barrier_data) { - if (barrier_data == XLoadBarrierElided) { - return; - } - XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data); - __ ld(tmp, Address(xthread, XThreadLocalData::address_bad_mask_offset())); - __ andr(tmp, tmp, ref); - __ bnez(tmp, *stub->entry(), true /* far */); - __ bind(*stub->continuation()); -} - -static void x_load_barrier_slow_path(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) { - XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong); - __ j(*stub->entry()); - __ bind(*stub->continuation()); -} - -%} - -// Load Pointer -instruct xLoadP(iRegPNoSp dst, memory mem, iRegPNoSp tmp, rFlagsReg cr) -%{ - match(Set dst (LoadP mem)); - predicate(UseZGC && !ZGenerational && (n->as_Load()->barrier_data() != 0)); - effect(TEMP dst, TEMP tmp, KILL cr); - - ins_cost(4 * DEFAULT_COST); - - format %{ "ld $dst, $mem, #@zLoadP" %} - - ins_encode %{ - const Address ref_addr (as_Register($mem$$base), $mem$$disp); - __ ld($dst$$Register, ref_addr); - x_load_barrier(masm, this, ref_addr, $dst$$Register, $tmp$$Register /* tmp */, barrier_data()); - %} - - ins_pipe(iload_reg_mem); -%} - -instruct xCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{ - match(Set res (CompareAndSwapP mem (Binary oldval newval))); - match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); - predicate(UseZGC && !ZGenerational && !needs_acquiring_load_reserved(n) && 
n->as_LoadStore()->barrier_data() == XLoadBarrierStrong); - effect(TEMP_DEF res, TEMP tmp, KILL cr); - - ins_cost(2 * VOLATILE_REF_COST); - - format %{ "cmpxchg $mem, $oldval, $newval, #@zCompareAndSwapP\n\t" - "mv $res, $res == $oldval" %} - - ins_encode %{ - Label failed; - guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, - Assembler::relaxed /* acquire */, Assembler::rl /* release */, $tmp$$Register); - __ sub(t0, $tmp$$Register, $oldval$$Register); - __ seqz($res$$Register, t0); - if (barrier_data() != XLoadBarrierElided) { - Label good; - __ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset())); - __ andr(t0, t0, $tmp$$Register); - __ beqz(t0, good); - x_load_barrier_slow_path(masm, this, Address($mem$$Register), $tmp$$Register /* ref */, $res$$Register /* tmp */); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, - Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register, - true /* result_as_bool */); - __ bind(good); - } - %} - - ins_pipe(pipe_slow); -%} - -instruct xCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{ - match(Set res (CompareAndSwapP mem (Binary oldval newval))); - match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); - predicate(UseZGC && !ZGenerational && needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == XLoadBarrierStrong)); - effect(TEMP_DEF res, TEMP tmp, KILL cr); - - ins_cost(2 * VOLATILE_REF_COST); - - format %{ "cmpxchg $mem, $oldval, $newval, #@zCompareAndSwapPAcq\n\t" - "mv $res, $res == $oldval" %} - - ins_encode %{ - Label failed; - guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, - Assembler::aq /* acquire */, Assembler::rl /* release */, $tmp$$Register); - __ sub(t0, $tmp$$Register, $oldval$$Register); - __ seqz($res$$Register, t0); - if (barrier_data() != XLoadBarrierElided) { - Label good; - __ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset())); - __ andr(t0, t0, $tmp$$Register); - __ beqz(t0, good); - x_load_barrier_slow_path(masm, this, Address($mem$$Register), $tmp$$Register /* ref */, $res$$Register /* tmp */); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, - Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register, - true /* result_as_bool */); - __ bind(good); - } - %} - - ins_pipe(pipe_slow); -%} - -instruct xCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{ - match(Set res (CompareAndExchangeP mem (Binary oldval newval))); - predicate(UseZGC && !ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong); - effect(TEMP_DEF res, TEMP tmp, KILL cr); - - ins_cost(2 * VOLATILE_REF_COST); - - format %{ "cmpxchg $res = $mem, $oldval, $newval, #@zCompareAndExchangeP" %} - - ins_encode %{ - guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, - Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register); - if (barrier_data() != XLoadBarrierElided) { - Label good; - __ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset())); - __ andr(t0, t0, $res$$Register); - __ 
beqz(t0, good); - x_load_barrier_slow_path(masm, this, Address($mem$$Register), $res$$Register /* ref */, $tmp$$Register /* tmp */); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, - Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register); - __ bind(good); - } - %} - - ins_pipe(pipe_slow); -%} - -instruct xCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{ - match(Set res (CompareAndExchangeP mem (Binary oldval newval))); - predicate(UseZGC && !ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong); - effect(TEMP_DEF res, TEMP tmp, KILL cr); - - ins_cost(2 * VOLATILE_REF_COST); - - format %{ "cmpxchg $res = $mem, $oldval, $newval, #@zCompareAndExchangePAcq" %} - - ins_encode %{ - guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, - Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register); - if (barrier_data() != XLoadBarrierElided) { - Label good; - __ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset())); - __ andr(t0, t0, $res$$Register); - __ beqz(t0, good); - x_load_barrier_slow_path(masm, this, Address($mem$$Register), $res$$Register /* ref */, $tmp$$Register /* tmp */); - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64, - Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register); - __ bind(good); - } - %} - - ins_pipe(pipe_slow); -%} - -instruct xGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp, rFlagsReg cr) %{ - match(Set prev (GetAndSetP mem newv)); - predicate(UseZGC && !ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0); - effect(TEMP_DEF prev, TEMP tmp, KILL cr); - - ins_cost(2 * VOLATILE_REF_COST); - - format %{ "atomic_xchg $prev, $newv, [$mem], #@zGetAndSetP" %} - - ins_encode %{ - __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base)); - x_load_barrier(masm, this, Address(noreg, 0), $prev$$Register, $tmp$$Register /* tmp */, barrier_data()); - %} - - ins_pipe(pipe_serial); -%} - -instruct xGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp, rFlagsReg cr) %{ - match(Set prev (GetAndSetP mem newv)); - predicate(UseZGC && !ZGenerational && needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() != 0)); - effect(TEMP_DEF prev, TEMP tmp, KILL cr); - - ins_cost(VOLATILE_REF_COST); - - format %{ "atomic_xchg_acq $prev, $newv, [$mem], #@zGetAndSetPAcq" %} - - ins_encode %{ - __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base)); - x_load_barrier(masm, this, Address(noreg, 0), $prev$$Register, $tmp$$Register /* tmp */, barrier_data()); - %} - ins_pipe(pipe_serial); -%} diff --git a/src/hotspot/cpu/riscv/gc/z/z_riscv.ad b/src/hotspot/cpu/riscv/gc/z/z_riscv.ad index 24669f45eb4d2..8e33d514f46cb 100644 --- a/src/hotspot/cpu/riscv/gc/z/z_riscv.ad +++ b/src/hotspot/cpu/riscv/gc/z/z_riscv.ad @@ -94,7 +94,7 @@ static void z_store_barrier(MacroAssembler* masm, const MachNode* node, Address instruct zLoadP(iRegPNoSp dst, memory mem, iRegPNoSp tmp, rFlagsReg cr) %{ match(Set dst (LoadP mem)); - predicate(UseZGC && ZGenerational && n->as_Load()->barrier_data() != 0); + predicate(UseZGC && n->as_Load()->barrier_data() != 0); effect(TEMP dst, TEMP tmp, KILL cr); ins_cost(4 * DEFAULT_COST); @@ -113,7 
+113,7 @@ instruct zLoadP(iRegPNoSp dst, memory mem, iRegPNoSp tmp, rFlagsReg cr) // Store Pointer instruct zStoreP(memory mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2, rFlagsReg cr) %{ - predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0); + predicate(UseZGC && n->as_Store()->barrier_data() != 0); match(Set mem (StoreP mem src)); effect(TEMP tmp1, TEMP tmp2, KILL cr); @@ -131,7 +131,7 @@ instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newva iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, iRegPNoSp tmp1, rFlagsReg cr) %{ match(Set res (CompareAndSwapP mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); - predicate(UseZGC && ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0); + predicate(UseZGC && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0); effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP tmp1, TEMP_DEF res, KILL cr); ins_cost(2 * VOLATILE_REF_COST); @@ -154,7 +154,7 @@ instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, iRegPNoSp tmp1, rFlagsReg cr) %{ match(Set res (CompareAndSwapP mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); - predicate(UseZGC && ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0); + predicate(UseZGC && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0); effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP tmp1, TEMP_DEF res, KILL cr); ins_cost(2 * VOLATILE_REF_COST); @@ -176,7 +176,7 @@ instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, iRegPNoSp tmp1, rFlagsReg cr) %{ match(Set res (CompareAndExchangeP mem (Binary oldval newval))); - predicate(UseZGC && ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0); + predicate(UseZGC && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0); effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP tmp1, TEMP_DEF res, KILL cr); ins_cost(2 * VOLATILE_REF_COST); @@ -198,7 +198,7 @@ instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP n instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, iRegPNoSp tmp1, rFlagsReg cr) %{ match(Set res (CompareAndExchangeP mem (Binary oldval newval))); - predicate(UseZGC && ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0); + predicate(UseZGC && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0); effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP tmp1, TEMP_DEF res, KILL cr); ins_cost(2 * VOLATILE_REF_COST); @@ -219,7 +219,7 @@ instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iReg instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp, rFlagsReg cr) %{ match(Set prev (GetAndSetP mem newv)); - predicate(UseZGC && ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0); + predicate(UseZGC && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0); effect(TEMP_DEF prev, TEMP tmp, KILL cr); ins_cost(2 * VOLATILE_REF_COST); @@ -237,7 +237,7 @@ instruct zGetAndSetP(indirect mem, iRegP 
newv, iRegPNoSp prev, iRegPNoSp tmp, rF instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp, rFlagsReg cr) %{ match(Set prev (GetAndSetP mem newv)); - predicate(UseZGC && ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0); + predicate(UseZGC && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0); effect(TEMP_DEF prev, TEMP tmp, KILL cr); ins_cost(2 * VOLATILE_REF_COST); diff --git a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp index a4744dfc05c06..77a2b794a7eb4 100644 --- a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp +++ b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp @@ -946,7 +946,7 @@ class StubGenerator: public StubCodeGenerator { // The size of copy32_loop body increases significantly with ZGC GC barriers. // Need conditional far branches to reach a point beyond the loop in this case. - bool is_far = UseZGC && ZGenerational; + bool is_far = UseZGC; __ beqz(count, done, is_far); __ slli(cnt, count, exact_log2(granularity)); diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp index 6d9812c11ae6e..64265a9690940 100644 --- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp @@ -1333,10 +1333,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch } #endif - if (!(UseZGC && !ZGenerational)) { - // Load barrier has not yet been applied, so ZGC can't verify the oop here - __ verify_oop(dest->as_register()); - } + __ verify_oop(dest->as_register()); } } diff --git a/src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.cpp deleted file mode 100644 index a7dc34b17b1f6..0000000000000 --- a/src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.cpp +++ /dev/null @@ -1,734 +0,0 @@ -/* - * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "asm/macroAssembler.inline.hpp" -#include "code/codeBlob.hpp" -#include "code/vmreg.inline.hpp" -#include "gc/x/xBarrier.inline.hpp" -#include "gc/x/xBarrierSet.hpp" -#include "gc/x/xBarrierSetAssembler.hpp" -#include "gc/x/xBarrierSetRuntime.hpp" -#include "gc/x/xThreadLocalData.hpp" -#include "memory/resourceArea.hpp" -#include "runtime/sharedRuntime.hpp" -#include "utilities/macros.hpp" -#ifdef COMPILER1 -#include "c1/c1_LIRAssembler.hpp" -#include "c1/c1_MacroAssembler.hpp" -#include "gc/x/c1/xBarrierSetC1.hpp" -#endif // COMPILER1 -#ifdef COMPILER2 -#include "gc/x/c2/xBarrierSetC2.hpp" -#endif // COMPILER2 - -#ifdef PRODUCT -#define BLOCK_COMMENT(str) /* nothing */ -#else -#define BLOCK_COMMENT(str) __ block_comment(str) -#endif - -#undef __ -#define __ masm-> - -static void call_vm(MacroAssembler* masm, - address entry_point, - Register arg0, - Register arg1) { - // Setup arguments - if (arg1 == c_rarg0) { - if (arg0 == c_rarg1) { - __ xchgptr(c_rarg1, c_rarg0); - } else { - __ movptr(c_rarg1, arg1); - __ movptr(c_rarg0, arg0); - } - } else { - if (arg0 != c_rarg0) { - __ movptr(c_rarg0, arg0); - } - if (arg1 != c_rarg1) { - __ movptr(c_rarg1, arg1); - } - } - - // Call VM - __ MacroAssembler::call_VM_leaf_base(entry_point, 2); -} - -void XBarrierSetAssembler::load_at(MacroAssembler* masm, - DecoratorSet decorators, - BasicType type, - Register dst, - Address src, - Register tmp1, - Register tmp_thread) { - if (!XBarrierSet::barrier_needed(decorators, type)) { - // Barrier not needed - BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread); - return; - } - - BLOCK_COMMENT("XBarrierSetAssembler::load_at {"); - - // Allocate scratch register - Register scratch = tmp1; - if (tmp1 == noreg) { - scratch = r12; - __ push(scratch); - } - - assert_different_registers(dst, scratch); - - Label done; - - // - // Fast Path - // - - // Load address - __ lea(scratch, src); - - // Load oop at address - __ movptr(dst, Address(scratch, 0)); - - // Test address bad mask - __ testptr(dst, address_bad_mask_from_thread(r15_thread)); - __ jcc(Assembler::zero, done); - - // - // Slow path - // - - // Save registers - __ push(rax); - __ push(rcx); - __ push(rdx); - __ push(rdi); - __ push(rsi); - __ push(r8); - __ push(r9); - __ push(r10); - __ push(r11); - - // We may end up here from generate_native_wrapper, then the method may have - // floats as arguments, and we must spill them before calling the VM runtime - // leaf. From the interpreter all floats are passed on the stack. 
- assert(Argument::n_float_register_parameters_j == 8, "Assumption"); - const int xmm_size = wordSize * 2; - const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j; - __ subptr(rsp, xmm_spill_size); - __ movdqu(Address(rsp, xmm_size * 7), xmm7); - __ movdqu(Address(rsp, xmm_size * 6), xmm6); - __ movdqu(Address(rsp, xmm_size * 5), xmm5); - __ movdqu(Address(rsp, xmm_size * 4), xmm4); - __ movdqu(Address(rsp, xmm_size * 3), xmm3); - __ movdqu(Address(rsp, xmm_size * 2), xmm2); - __ movdqu(Address(rsp, xmm_size * 1), xmm1); - __ movdqu(Address(rsp, xmm_size * 0), xmm0); - - // Call VM - call_vm(masm, XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), dst, scratch); - - __ movdqu(xmm0, Address(rsp, xmm_size * 0)); - __ movdqu(xmm1, Address(rsp, xmm_size * 1)); - __ movdqu(xmm2, Address(rsp, xmm_size * 2)); - __ movdqu(xmm3, Address(rsp, xmm_size * 3)); - __ movdqu(xmm4, Address(rsp, xmm_size * 4)); - __ movdqu(xmm5, Address(rsp, xmm_size * 5)); - __ movdqu(xmm6, Address(rsp, xmm_size * 6)); - __ movdqu(xmm7, Address(rsp, xmm_size * 7)); - __ addptr(rsp, xmm_spill_size); - - __ pop(r11); - __ pop(r10); - __ pop(r9); - __ pop(r8); - __ pop(rsi); - __ pop(rdi); - __ pop(rdx); - __ pop(rcx); - - if (dst == rax) { - __ addptr(rsp, wordSize); - } else { - __ movptr(dst, rax); - __ pop(rax); - } - - __ bind(done); - - // Restore scratch register - if (tmp1 == noreg) { - __ pop(scratch); - } - - BLOCK_COMMENT("} XBarrierSetAssembler::load_at"); -} - -#ifdef ASSERT - -void XBarrierSetAssembler::store_at(MacroAssembler* masm, - DecoratorSet decorators, - BasicType type, - Address dst, - Register src, - Register tmp1, - Register tmp2, - Register tmp3) { - BLOCK_COMMENT("XBarrierSetAssembler::store_at {"); - - // Verify oop store - if (is_reference_type(type)) { - // Note that src could be noreg, which means we - // are storing null and can skip verification. 
- if (src != noreg) { - Label done; - __ testptr(src, address_bad_mask_from_thread(r15_thread)); - __ jcc(Assembler::zero, done); - __ stop("Verify oop store failed"); - __ should_not_reach_here(); - __ bind(done); - } - } - - // Store value - BarrierSetAssembler::store_at(masm, decorators, type, dst, src, tmp1, tmp2, tmp3); - - BLOCK_COMMENT("} XBarrierSetAssembler::store_at"); -} - -#endif // ASSERT - -void XBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, - DecoratorSet decorators, - BasicType type, - Register src, - Register dst, - Register count) { - if (!XBarrierSet::barrier_needed(decorators, type)) { - // Barrier not needed - return; - } - - BLOCK_COMMENT("XBarrierSetAssembler::arraycopy_prologue {"); - - // Save registers - __ pusha(); - - // Call VM - call_vm(masm, XBarrierSetRuntime::load_barrier_on_oop_array_addr(), src, count); - - // Restore registers - __ popa(); - - BLOCK_COMMENT("} XBarrierSetAssembler::arraycopy_prologue"); -} - -void XBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, - Register jni_env, - Register obj, - Register tmp, - Label& slowpath) { - BLOCK_COMMENT("XBarrierSetAssembler::try_resolve_jobject_in_native {"); - - // Resolve jobject - BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath); - - // Test address bad mask - __ testptr(obj, address_bad_mask_from_jni_env(jni_env)); - __ jcc(Assembler::notZero, slowpath); - - BLOCK_COMMENT("} XBarrierSetAssembler::try_resolve_jobject_in_native"); -} - -#ifdef COMPILER1 - -#undef __ -#define __ ce->masm()-> - -void XBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce, - LIR_Opr ref) const { - __ testptr(ref->as_register(), address_bad_mask_from_thread(r15_thread)); -} - -void XBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce, - XLoadBarrierStubC1* stub) const { - // Stub entry - __ bind(*stub->entry()); - - Register ref = stub->ref()->as_register(); - Register ref_addr = noreg; - Register tmp = noreg; - - if (stub->tmp()->is_valid()) { - // Load address into tmp register - ce->leal(stub->ref_addr(), stub->tmp()); - ref_addr = tmp = stub->tmp()->as_pointer_register(); - } else { - // Address already in register - ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register(); - } - - assert_different_registers(ref, ref_addr, noreg); - - // Save rax unless it is the result or tmp register - if (ref != rax && tmp != rax) { - __ push(rax); - } - - // Setup arguments and call runtime stub - __ subptr(rsp, 2 * BytesPerWord); - ce->store_parameter(ref_addr, 1); - ce->store_parameter(ref, 0); - __ call(RuntimeAddress(stub->runtime_stub())); - __ addptr(rsp, 2 * BytesPerWord); - - // Verify result - __ verify_oop(rax); - - // Move result into place - if (ref != rax) { - __ movptr(ref, rax); - } - - // Restore rax unless it is the result or tmp register - if (ref != rax && tmp != rax) { - __ pop(rax); - } - - // Stub exit - __ jmp(*stub->continuation()); -} - -#undef __ -#define __ sasm-> - -void XBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, - DecoratorSet decorators) const { - // Enter and save registers - __ enter(); - __ save_live_registers_no_oop_map(true /* save_fpu_registers */); - - // Setup arguments - __ load_parameter(1, c_rarg1); - __ load_parameter(0, c_rarg0); - - // Call VM - __ call_VM_leaf(XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1); - - // Restore registers and return - __ 
restore_live_registers_except_rax(true /* restore_fpu_registers */); - __ leave(); - __ ret(0); -} - -#endif // COMPILER1 - -#ifdef COMPILER2 - -OptoReg::Name XBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) { - if (!OptoReg::is_reg(opto_reg)) { - return OptoReg::Bad; - } - - const VMReg vm_reg = OptoReg::as_VMReg(opto_reg); - if (vm_reg->is_XMMRegister()) { - opto_reg &= ~15; - switch (node->ideal_reg()) { - case Op_VecX: - opto_reg |= 2; - break; - case Op_VecY: - opto_reg |= 4; - break; - case Op_VecZ: - opto_reg |= 8; - break; - default: - opto_reg |= 1; - break; - } - } - - return opto_reg; -} - -// We use the vec_spill_helper from the x86.ad file to avoid reinventing this wheel -extern void vec_spill_helper(C2_MacroAssembler *masm, bool is_load, - int stack_offset, int reg, uint ireg, outputStream* st); - -#undef __ -#define __ _masm-> - -class XSaveLiveRegisters { -private: - struct XMMRegisterData { - XMMRegister _reg; - int _size; - - // Used by GrowableArray::find() - bool operator == (const XMMRegisterData& other) { - return _reg == other._reg; - } - }; - - MacroAssembler* const _masm; - GrowableArray _gp_registers; - GrowableArray _opmask_registers; - GrowableArray _xmm_registers; - int _spill_size; - int _spill_offset; - - static int xmm_compare_register_size(XMMRegisterData* left, XMMRegisterData* right) { - if (left->_size == right->_size) { - return 0; - } - - return (left->_size < right->_size) ? -1 : 1; - } - - static int xmm_slot_size(OptoReg::Name opto_reg) { - // The low order 4 bytes denote what size of the XMM register is live - return (opto_reg & 15) << 3; - } - - static uint xmm_ideal_reg_for_size(int reg_size) { - switch (reg_size) { - case 8: - return Op_VecD; - case 16: - return Op_VecX; - case 32: - return Op_VecY; - case 64: - return Op_VecZ; - default: - fatal("Invalid register size %d", reg_size); - return 0; - } - } - - bool xmm_needs_vzeroupper() const { - return _xmm_registers.is_nonempty() && _xmm_registers.at(0)._size > 16; - } - - void xmm_register_save(const XMMRegisterData& reg_data) { - const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg()); - const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size); - _spill_offset -= reg_data._size; - C2_MacroAssembler c2_masm(__ code()); - vec_spill_helper(&c2_masm, false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty); - } - - void xmm_register_restore(const XMMRegisterData& reg_data) { - const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg()); - const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size); - C2_MacroAssembler c2_masm(__ code()); - vec_spill_helper(&c2_masm, true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty); - _spill_offset += reg_data._size; - } - - void gp_register_save(Register reg) { - _spill_offset -= 8; - __ movq(Address(rsp, _spill_offset), reg); - } - - void opmask_register_save(KRegister reg) { - _spill_offset -= 8; - __ kmov(Address(rsp, _spill_offset), reg); - } - - void gp_register_restore(Register reg) { - __ movq(reg, Address(rsp, _spill_offset)); - _spill_offset += 8; - } - - void opmask_register_restore(KRegister reg) { - __ kmov(reg, Address(rsp, _spill_offset)); - _spill_offset += 8; - } - - void initialize(XLoadBarrierStubC2* stub) { - // Create mask of caller saved registers that need to - // be saved/restored if live - RegMask caller_saved; - caller_saved.Insert(OptoReg::as_OptoReg(rax->as_VMReg())); - caller_saved.Insert(OptoReg::as_OptoReg(rcx->as_VMReg())); - 
caller_saved.Insert(OptoReg::as_OptoReg(rdx->as_VMReg())); - caller_saved.Insert(OptoReg::as_OptoReg(rsi->as_VMReg())); - caller_saved.Insert(OptoReg::as_OptoReg(rdi->as_VMReg())); - caller_saved.Insert(OptoReg::as_OptoReg(r8->as_VMReg())); - caller_saved.Insert(OptoReg::as_OptoReg(r9->as_VMReg())); - caller_saved.Insert(OptoReg::as_OptoReg(r10->as_VMReg())); - caller_saved.Insert(OptoReg::as_OptoReg(r11->as_VMReg())); - caller_saved.Remove(OptoReg::as_OptoReg(stub->ref()->as_VMReg())); - - if (UseAPX) { - caller_saved.Insert(OptoReg::as_OptoReg(r16->as_VMReg())); - caller_saved.Insert(OptoReg::as_OptoReg(r17->as_VMReg())); - caller_saved.Insert(OptoReg::as_OptoReg(r18->as_VMReg())); - caller_saved.Insert(OptoReg::as_OptoReg(r19->as_VMReg())); - caller_saved.Insert(OptoReg::as_OptoReg(r20->as_VMReg())); - caller_saved.Insert(OptoReg::as_OptoReg(r21->as_VMReg())); - caller_saved.Insert(OptoReg::as_OptoReg(r22->as_VMReg())); - caller_saved.Insert(OptoReg::as_OptoReg(r23->as_VMReg())); - caller_saved.Insert(OptoReg::as_OptoReg(r24->as_VMReg())); - caller_saved.Insert(OptoReg::as_OptoReg(r25->as_VMReg())); - caller_saved.Insert(OptoReg::as_OptoReg(r26->as_VMReg())); - caller_saved.Insert(OptoReg::as_OptoReg(r27->as_VMReg())); - caller_saved.Insert(OptoReg::as_OptoReg(r28->as_VMReg())); - caller_saved.Insert(OptoReg::as_OptoReg(r29->as_VMReg())); - caller_saved.Insert(OptoReg::as_OptoReg(r30->as_VMReg())); - caller_saved.Insert(OptoReg::as_OptoReg(r31->as_VMReg())); - } - - // Create mask of live registers - RegMask live = stub->live(); - if (stub->tmp() != noreg) { - live.Insert(OptoReg::as_OptoReg(stub->tmp()->as_VMReg())); - } - - int gp_spill_size = 0; - int opmask_spill_size = 0; - int xmm_spill_size = 0; - - // Record registers that needs to be saved/restored - RegMaskIterator rmi(live); - while (rmi.has_next()) { - const OptoReg::Name opto_reg = rmi.next(); - const VMReg vm_reg = OptoReg::as_VMReg(opto_reg); - - if (vm_reg->is_Register()) { - if (caller_saved.Member(opto_reg)) { - _gp_registers.append(vm_reg->as_Register()); - gp_spill_size += 8; - } - } else if (vm_reg->is_KRegister()) { - // All opmask registers are caller saved, thus spill the ones - // which are live. 
- if (_opmask_registers.find(vm_reg->as_KRegister()) == -1) { - _opmask_registers.append(vm_reg->as_KRegister()); - opmask_spill_size += 8; - } - } else if (vm_reg->is_XMMRegister()) { - // We encode in the low order 4 bits of the opto_reg, how large part of the register is live - const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~15); - const int reg_size = xmm_slot_size(opto_reg); - const XMMRegisterData reg_data = { vm_reg_base->as_XMMRegister(), reg_size }; - const int reg_index = _xmm_registers.find(reg_data); - if (reg_index == -1) { - // Not previously appended - _xmm_registers.append(reg_data); - xmm_spill_size += reg_size; - } else { - // Previously appended, update size - const int reg_size_prev = _xmm_registers.at(reg_index)._size; - if (reg_size > reg_size_prev) { - _xmm_registers.at_put(reg_index, reg_data); - xmm_spill_size += reg_size - reg_size_prev; - } - } - } else { - fatal("Unexpected register type"); - } - } - - // Sort by size, largest first - _xmm_registers.sort(xmm_compare_register_size); - - // On Windows, the caller reserves stack space for spilling register arguments - const int arg_spill_size = frame::arg_reg_save_area_bytes; - - // Stack pointer must be 16 bytes aligned for the call - _spill_offset = _spill_size = align_up(xmm_spill_size + gp_spill_size + opmask_spill_size + arg_spill_size, 16); - } - -public: - XSaveLiveRegisters(MacroAssembler* masm, XLoadBarrierStubC2* stub) : - _masm(masm), - _gp_registers(), - _opmask_registers(), - _xmm_registers(), - _spill_size(0), - _spill_offset(0) { - - // - // Stack layout after registers have been spilled: - // - // | ... | original rsp, 16 bytes aligned - // ------------------ - // | zmm0 high | - // | ... | - // | zmm0 low | 16 bytes aligned - // | ... | - // | ymm1 high | - // | ... | - // | ymm1 low | 16 bytes aligned - // | ... | - // | xmmN high | - // | ... | - // | xmmN low | 8 bytes aligned - // | reg0 | 8 bytes aligned - // | reg1 | - // | ... 
| - // | regN | new rsp, if 16 bytes aligned - // | | else new rsp, 16 bytes aligned - // ------------------ - // - - // Figure out what registers to save/restore - initialize(stub); - - // Allocate stack space - if (_spill_size > 0) { - __ subptr(rsp, _spill_size); - } - - // Save XMM/YMM/ZMM registers - for (int i = 0; i < _xmm_registers.length(); i++) { - xmm_register_save(_xmm_registers.at(i)); - } - - if (xmm_needs_vzeroupper()) { - __ vzeroupper(); - } - - // Save general purpose registers - for (int i = 0; i < _gp_registers.length(); i++) { - gp_register_save(_gp_registers.at(i)); - } - - // Save opmask registers - for (int i = 0; i < _opmask_registers.length(); i++) { - opmask_register_save(_opmask_registers.at(i)); - } - } - - ~XSaveLiveRegisters() { - // Restore opmask registers - for (int i = _opmask_registers.length() - 1; i >= 0; i--) { - opmask_register_restore(_opmask_registers.at(i)); - } - - // Restore general purpose registers - for (int i = _gp_registers.length() - 1; i >= 0; i--) { - gp_register_restore(_gp_registers.at(i)); - } - - __ vzeroupper(); - - // Restore XMM/YMM/ZMM registers - for (int i = _xmm_registers.length() - 1; i >= 0; i--) { - xmm_register_restore(_xmm_registers.at(i)); - } - - // Free stack space - if (_spill_size > 0) { - __ addptr(rsp, _spill_size); - } - } -}; - -class XSetupArguments { -private: - MacroAssembler* const _masm; - const Register _ref; - const Address _ref_addr; - -public: - XSetupArguments(MacroAssembler* masm, XLoadBarrierStubC2* stub) : - _masm(masm), - _ref(stub->ref()), - _ref_addr(stub->ref_addr()) { - - // Setup arguments - if (_ref_addr.base() == noreg) { - // No self healing - if (_ref != c_rarg0) { - __ movq(c_rarg0, _ref); - } - __ xorq(c_rarg1, c_rarg1); - } else { - // Self healing - if (_ref == c_rarg0) { - __ lea(c_rarg1, _ref_addr); - } else if (_ref != c_rarg1) { - __ lea(c_rarg1, _ref_addr); - __ movq(c_rarg0, _ref); - } else if (_ref_addr.base() != c_rarg0 && _ref_addr.index() != c_rarg0) { - __ movq(c_rarg0, _ref); - __ lea(c_rarg1, _ref_addr); - } else { - __ xchgq(c_rarg0, c_rarg1); - if (_ref_addr.base() == c_rarg0) { - __ lea(c_rarg1, Address(c_rarg1, _ref_addr.index(), _ref_addr.scale(), _ref_addr.disp())); - } else if (_ref_addr.index() == c_rarg0) { - __ lea(c_rarg1, Address(_ref_addr.base(), c_rarg1, _ref_addr.scale(), _ref_addr.disp())); - } else { - ShouldNotReachHere(); - } - } - } - } - - ~XSetupArguments() { - // Transfer result - if (_ref != rax) { - __ movq(_ref, rax); - } - } -}; - -#undef __ -#define __ masm-> - -void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, XLoadBarrierStubC2* stub) const { - BLOCK_COMMENT("XLoadBarrierStubC2"); - - // Stub entry - __ bind(*stub->entry()); - - { - XSaveLiveRegisters save_live_registers(masm, stub); - XSetupArguments setup_arguments(masm, stub); - __ call(RuntimeAddress(stub->slow_path())); - } - - // Stub exit - __ jmp(*stub->continuation()); -} - -#endif // COMPILER2 - -#undef __ -#define __ masm-> - -void XBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) { - // Check if metadata bits indicate a bad oop - __ testptr(obj, Address(r15_thread, XThreadLocalData::address_bad_mask_offset())); - __ jcc(Assembler::notZero, error); - BarrierSetAssembler::check_oop(masm, obj, tmp1, tmp2, error); -} - -#undef __ diff --git a/src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.hpp b/src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.hpp deleted file mode 100644 index 
52034ab786ec2..0000000000000 --- a/src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.hpp +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef CPU_X86_GC_X_XBARRIERSETASSEMBLER_X86_HPP -#define CPU_X86_GC_X_XBARRIERSETASSEMBLER_X86_HPP - -#include "code/vmreg.hpp" -#include "oops/accessDecorators.hpp" -#ifdef COMPILER2 -#include "opto/optoreg.hpp" -#endif // COMPILER2 - -class MacroAssembler; - -#ifdef COMPILER1 -class LIR_Assembler; -class LIR_Opr; -class StubAssembler; -#endif // COMPILER1 - -#ifdef COMPILER2 -class Node; -#endif // COMPILER2 - -#ifdef COMPILER1 -class XLoadBarrierStubC1; -#endif // COMPILER1 - -#ifdef COMPILER2 -class XLoadBarrierStubC2; -#endif // COMPILER2 - -class XBarrierSetAssembler : public XBarrierSetAssemblerBase { -public: - virtual void load_at(MacroAssembler* masm, - DecoratorSet decorators, - BasicType type, - Register dst, - Address src, - Register tmp1, - Register tmp_thread); - -#ifdef ASSERT - virtual void store_at(MacroAssembler* masm, - DecoratorSet decorators, - BasicType type, - Address dst, - Register src, - Register tmp1, - Register tmp2, - Register tmp3); -#endif // ASSERT - - virtual void arraycopy_prologue(MacroAssembler* masm, - DecoratorSet decorators, - BasicType type, - Register src, - Register dst, - Register count); - - virtual void try_resolve_jobject_in_native(MacroAssembler* masm, - Register jni_env, - Register obj, - Register tmp, - Label& slowpath); - -#ifdef COMPILER1 - void generate_c1_load_barrier_test(LIR_Assembler* ce, - LIR_Opr ref) const; - - void generate_c1_load_barrier_stub(LIR_Assembler* ce, - XLoadBarrierStubC1* stub) const; - - void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, - DecoratorSet decorators) const; -#endif // COMPILER1 - -#ifdef COMPILER2 - OptoReg::Name refine_register(const Node* node, - OptoReg::Name opto_reg); - - void generate_c2_load_barrier_stub(MacroAssembler* masm, - XLoadBarrierStubC2* stub) const; -#endif // COMPILER2 - - void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error); -}; - -#endif // CPU_X86_GC_X_XBARRIERSETASSEMBLER_X86_HPP diff --git a/src/hotspot/cpu/x86/gc/x/xGlobals_x86.cpp b/src/hotspot/cpu/x86/gc/x/xGlobals_x86.cpp deleted file mode 100644 index baa99ddd60db7..0000000000000 --- a/src/hotspot/cpu/x86/gc/x/xGlobals_x86.cpp +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. 
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/shared/gc_globals.hpp" -#include "gc/x/xGlobals.hpp" -#include "runtime/globals.hpp" -#include "utilities/globalDefinitions.hpp" -#include "utilities/powerOfTwo.hpp" - -// -// The heap can have three different layouts, depending on the max heap size. -// -// Address Space & Pointer Layout 1 -// -------------------------------- -// -// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB) -// . . -// . . -// . . -// +--------------------------------+ 0x0000014000000000 (20TB) -// | Remapped View | -// +--------------------------------+ 0x0000010000000000 (16TB) -// . . -// +--------------------------------+ 0x00000c0000000000 (12TB) -// | Marked1 View | -// +--------------------------------+ 0x0000080000000000 (8TB) -// | Marked0 View | -// +--------------------------------+ 0x0000040000000000 (4TB) -// . . -// +--------------------------------+ 0x0000000000000000 -// -// 6 4 4 4 4 -// 3 6 5 2 1 0 -// +--------------------+----+-----------------------------------------------+ -// |00000000 00000000 00|1111|11 11111111 11111111 11111111 11111111 11111111| -// +--------------------+----+-----------------------------------------------+ -// | | | -// | | * 41-0 Object Offset (42-bits, 4TB address space) -// | | -// | * 45-42 Metadata Bits (4-bits) 0001 = Marked0 (Address view 4-8TB) -// | 0010 = Marked1 (Address view 8-12TB) -// | 0100 = Remapped (Address view 16-20TB) -// | 1000 = Finalizable (Address view N/A) -// | -// * 63-46 Fixed (18-bits, always zero) -// -// -// Address Space & Pointer Layout 2 -// -------------------------------- -// -// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB) -// . . -// . . -// . . -// +--------------------------------+ 0x0000280000000000 (40TB) -// | Remapped View | -// +--------------------------------+ 0x0000200000000000 (32TB) -// . . -// +--------------------------------+ 0x0000180000000000 (24TB) -// | Marked1 View | -// +--------------------------------+ 0x0000100000000000 (16TB) -// | Marked0 View | -// +--------------------------------+ 0x0000080000000000 (8TB) -// . . 
-// +--------------------------------+ 0x0000000000000000 -// -// 6 4 4 4 4 -// 3 7 6 3 2 0 -// +------------------+-----+------------------------------------------------+ -// |00000000 00000000 0|1111|111 11111111 11111111 11111111 11111111 11111111| -// +-------------------+----+------------------------------------------------+ -// | | | -// | | * 42-0 Object Offset (43-bits, 8TB address space) -// | | -// | * 46-43 Metadata Bits (4-bits) 0001 = Marked0 (Address view 8-16TB) -// | 0010 = Marked1 (Address view 16-24TB) -// | 0100 = Remapped (Address view 32-40TB) -// | 1000 = Finalizable (Address view N/A) -// | -// * 63-47 Fixed (17-bits, always zero) -// -// -// Address Space & Pointer Layout 3 -// -------------------------------- -// -// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB) -// . . -// . . -// . . -// +--------------------------------+ 0x0000500000000000 (80TB) -// | Remapped View | -// +--------------------------------+ 0x0000400000000000 (64TB) -// . . -// +--------------------------------+ 0x0000300000000000 (48TB) -// | Marked1 View | -// +--------------------------------+ 0x0000200000000000 (32TB) -// | Marked0 View | -// +--------------------------------+ 0x0000100000000000 (16TB) -// . . -// +--------------------------------+ 0x0000000000000000 -// -// 6 4 4 4 4 -// 3 8 7 4 3 0 -// +------------------+----+-------------------------------------------------+ -// |00000000 00000000 |1111|1111 11111111 11111111 11111111 11111111 11111111| -// +------------------+----+-------------------------------------------------+ -// | | | -// | | * 43-0 Object Offset (44-bits, 16TB address space) -// | | -// | * 47-44 Metadata Bits (4-bits) 0001 = Marked0 (Address view 16-32TB) -// | 0010 = Marked1 (Address view 32-48TB) -// | 0100 = Remapped (Address view 64-80TB) -// | 1000 = Finalizable (Address view N/A) -// | -// * 63-48 Fixed (16-bits, always zero) -// - -size_t XPlatformAddressOffsetBits() { - const size_t min_address_offset_bits = 42; // 4TB - const size_t max_address_offset_bits = 44; // 16TB - const size_t address_offset = round_up_power_of_2(MaxHeapSize * XVirtualToPhysicalRatio); - const size_t address_offset_bits = log2i_exact(address_offset); - return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits); -} - -size_t XPlatformAddressMetadataShift() { - return XPlatformAddressOffsetBits(); -} diff --git a/src/hotspot/cpu/x86/gc/x/xGlobals_x86.hpp b/src/hotspot/cpu/x86/gc/x/xGlobals_x86.hpp deleted file mode 100644 index dd00d4ddadcf1..0000000000000 --- a/src/hotspot/cpu/x86/gc/x/xGlobals_x86.hpp +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef CPU_X86_GC_X_XGLOBALS_X86_HPP -#define CPU_X86_GC_X_XGLOBALS_X86_HPP - -const size_t XPlatformHeapViews = 3; -const size_t XPlatformCacheLineSize = 64; - -size_t XPlatformAddressOffsetBits(); -size_t XPlatformAddressMetadataShift(); - -#endif // CPU_X86_GC_X_XGLOBALS_X86_HPP diff --git a/src/hotspot/cpu/x86/gc/x/x_x86_64.ad b/src/hotspot/cpu/x86/gc/x/x_x86_64.ad deleted file mode 100644 index ba4b3cb6df05f..0000000000000 --- a/src/hotspot/cpu/x86/gc/x/x_x86_64.ad +++ /dev/null @@ -1,156 +0,0 @@ -// -// Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. -// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -// -// This code is free software; you can redistribute it and/or modify it -// under the terms of the GNU General Public License version 2 only, as -// published by the Free Software Foundation. -// -// This code is distributed in the hope that it will be useful, but WITHOUT -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -// version 2 for more details (a copy is included in the LICENSE file that -// accompanied this code). -// -// You should have received a copy of the GNU General Public License version -// 2 along with this work; if not, write to the Free Software Foundation, -// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -// -// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -// or visit www.oracle.com if you need additional information or have any -// questions. -// - -source_hpp %{ - -#include "gc/shared/gc_globals.hpp" -#include "gc/x/c2/xBarrierSetC2.hpp" -#include "gc/x/xThreadLocalData.hpp" - -%} - -source %{ - -#include "c2_intelJccErratum_x86.hpp" - -static void x_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) { - if (barrier_data == XLoadBarrierElided) { - return; - } - XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data); - { - IntelJccErratumAlignment intel_alignment(masm, 10 /* jcc_size */); - __ testptr(ref, Address(r15_thread, XThreadLocalData::address_bad_mask_offset())); - __ jcc(Assembler::notZero, *stub->entry()); - } - __ bind(*stub->continuation()); -} - -static void x_load_barrier_cmpxchg(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, Label& good) { - XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong); - { - IntelJccErratumAlignment intel_alignment(masm, 10 /* jcc_size */); - __ testptr(ref, Address(r15_thread, XThreadLocalData::address_bad_mask_offset())); - __ jcc(Assembler::zero, good); - } - { - IntelJccErratumAlignment intel_alignment(masm, 5 /* jcc_size */); - __ jmp(*stub->entry()); - } - __ bind(*stub->continuation()); -} - -static void x_cmpxchg_common(MacroAssembler* masm, const MachNode* node, Register mem_reg, Register newval, Register tmp) { - // Compare value (oldval) is in rax - const Address mem = Address(mem_reg, 0); - - if (node->barrier_data() != XLoadBarrierElided) { - __ movptr(tmp, rax); - } - - __ lock(); - __ cmpxchgptr(newval, mem); - - if (node->barrier_data() != XLoadBarrierElided) { - Label good; - x_load_barrier_cmpxchg(masm, node, mem, rax, tmp, good); - 
__ movptr(rax, tmp); - __ lock(); - __ cmpxchgptr(newval, mem); - __ bind(good); - } -} - -%} - -// Load Pointer -instruct xLoadP(rRegP dst, memory mem, rFlagsReg cr) -%{ - predicate(UseZGC && !ZGenerational && n->as_Load()->barrier_data() != 0); - match(Set dst (LoadP mem)); - effect(KILL cr, TEMP dst); - - ins_cost(125); - - format %{ "movq $dst, $mem" %} - - ins_encode %{ - __ movptr($dst$$Register, $mem$$Address); - x_load_barrier(masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, barrier_data()); - %} - - ins_pipe(ialu_reg_mem); -%} - -instruct xCompareAndExchangeP(indirect mem, rax_RegP oldval, rRegP newval, rRegP tmp, rFlagsReg cr) %{ - match(Set oldval (CompareAndExchangeP mem (Binary oldval newval))); - predicate(UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong); - effect(KILL cr, TEMP tmp); - - format %{ "lock\n\t" - "cmpxchgq $newval, $mem" %} - - ins_encode %{ - precond($oldval$$Register == rax); - x_cmpxchg_common(masm, this, $mem$$Register, $newval$$Register, $tmp$$Register); - %} - - ins_pipe(pipe_cmpxchg); -%} - -instruct xCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rFlagsReg cr, rax_RegP oldval) %{ - match(Set res (CompareAndSwapP mem (Binary oldval newval))); - match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); - predicate(UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong); - effect(KILL cr, KILL oldval, TEMP tmp); - - format %{ "lock\n\t" - "cmpxchgq $newval, $mem\n\t" - "setcc $res \t# emits sete + movzbl or setzue for APX" %} - - ins_encode %{ - precond($oldval$$Register == rax); - x_cmpxchg_common(masm, this, $mem$$Register, $newval$$Register, $tmp$$Register); - if (barrier_data() != XLoadBarrierElided) { - __ cmpptr($tmp$$Register, rax); - } - __ setcc(Assembler::equal, $res$$Register); - %} - - ins_pipe(pipe_cmpxchg); -%} - -instruct xXChgP(indirect mem, rRegP newval, rFlagsReg cr) %{ - match(Set newval (GetAndSetP mem newval)); - predicate(UseZGC && !ZGenerational && n->as_LoadStore()->barrier_data() != 0); - effect(KILL cr); - - format %{ "xchgq $newval, $mem" %} - - ins_encode %{ - __ xchgptr($newval$$Register, Address($mem$$Register, 0)); - x_load_barrier(masm, this, Address(noreg, 0), $newval$$Register, noreg /* tmp */, barrier_data()); - %} - - ins_pipe(pipe_cmpxchg); -%} diff --git a/src/hotspot/cpu/x86/gc/z/z_x86_64.ad b/src/hotspot/cpu/x86/gc/z/z_x86_64.ad index f55ad70e8616e..9555cadd0227d 100644 --- a/src/hotspot/cpu/x86/gc/z/z_x86_64.ad +++ b/src/hotspot/cpu/x86/gc/z/z_x86_64.ad @@ -115,7 +115,7 @@ operand no_rax_RegP() // Load Pointer instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr) %{ - predicate(UseZGC && ZGenerational && n->as_Load()->barrier_data() != 0); + predicate(UseZGC && n->as_Load()->barrier_data() != 0); match(Set dst (LoadP mem)); effect(TEMP dst, KILL cr); @@ -134,7 +134,7 @@ instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr) // Load Pointer and Null Check instruct zLoadPNullCheck(rFlagsReg cr, memory op, immP0 zero) %{ - predicate(UseZGC && ZGenerational && n->in(1)->as_Load()->barrier_data() != 0); + predicate(UseZGC && n->in(1)->as_Load()->barrier_data() != 0); match(Set cr (CmpP (LoadP op) zero)); ins_cost(500); // XXX @@ -150,7 +150,7 @@ instruct zLoadPNullCheck(rFlagsReg cr, memory op, immP0 zero) // Store Pointer instruct zStoreP(memory mem, any_RegP src, rRegP tmp, rFlagsReg cr) %{ - predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0); + predicate(UseZGC && n->as_Store()->barrier_data() != 0); 
match(Set mem (StoreP mem src)); effect(TEMP tmp, KILL cr); @@ -166,7 +166,7 @@ instruct zStoreP(memory mem, any_RegP src, rRegP tmp, rFlagsReg cr) // Store Null Pointer instruct zStorePNull(memory mem, immP0 zero, rRegP tmp, rFlagsReg cr) %{ - predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0); + predicate(UseZGC && n->as_Store()->barrier_data() != 0); match(Set mem (StoreP mem zero)); effect(TEMP tmp, KILL cr); @@ -185,7 +185,7 @@ instruct zStorePNull(memory mem, immP0 zero, rRegP tmp, rFlagsReg cr) instruct zCompareAndExchangeP(indirect mem, no_rax_RegP newval, rRegP tmp, rax_RegP oldval, rFlagsReg cr) %{ match(Set oldval (CompareAndExchangeP mem (Binary oldval newval))); - predicate(UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0); + predicate(UseZGC && n->as_LoadStore()->barrier_data() != 0); effect(TEMP tmp, KILL cr); format %{ "lock\n\t" @@ -208,7 +208,7 @@ instruct zCompareAndExchangeP(indirect mem, no_rax_RegP newval, rRegP tmp, rax_R instruct zCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rax_RegP oldval, rFlagsReg cr) %{ match(Set res (CompareAndSwapP mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); - predicate(UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0); + predicate(UseZGC && n->as_LoadStore()->barrier_data() != 0); effect(TEMP tmp, KILL oldval, KILL cr); format %{ "lock\n\t" @@ -230,7 +230,7 @@ instruct zCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rax_ instruct zXChgP(indirect mem, rRegP newval, rRegP tmp, rFlagsReg cr) %{ match(Set newval (GetAndSetP mem newval)); - predicate(UseZGC && ZGenerational && n->as_LoadStore()->barrier_data() != 0); + predicate(UseZGC && n->as_LoadStore()->barrier_data() != 0); effect(TEMP tmp, KILL cr); format %{ "xchgq $newval, $mem" %} diff --git a/src/hotspot/os/bsd/gc/x/xLargePages_bsd.cpp b/src/hotspot/os/bsd/gc/x/xLargePages_bsd.cpp deleted file mode 100644 index 1c82e83120881..0000000000000 --- a/src/hotspot/os/bsd/gc/x/xLargePages_bsd.cpp +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
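The deleted xCompareAndExchangeP, xCompareAndSwapP and xXChgP instructs above (and the retained z* forms, whose predicates simply drop the ZGenerational test) all lower Java's compareAndExchange, compareAndSet and getAndSet on oop fields to lock cmpxchg / xchgq plus a GC barrier. As a plain standalone C++ analogy of the three matched semantics, with no barriers and no HotSpot types (illustrative only; none of these names come from the VM):

    #include <atomic>
    #include <cstdio>

    int main() {
      std::atomic<void*> field{nullptr};
      int a = 0, b = 0;

      // CompareAndExchangeP: returns the value observed in memory
      // (what ends up in rax after lock cmpxchg).
      void* expected = nullptr;
      field.compare_exchange_strong(expected, &a);                 // succeeds, field == &a

      // CompareAndSwapP / WeakCompareAndSwapP: boolean result
      // (the setcc emitted after lock cmpxchg).
      expected = nullptr;
      bool swapped = field.compare_exchange_strong(expected, &b);  // fails, expected now &a

      // GetAndSetP: unconditional swap (xchgq).
      void* previous = field.exchange(&b);                         // previous == &a, field == &b

      std::printf("swapped=%d previous_was_a=%d\n", swapped, previous == &a);
      return 0;
    }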
- */ - -#include "precompiled.hpp" -#include "gc/x/xLargePages.hpp" -#include "runtime/globals.hpp" - -void XLargePages::pd_initialize() { - if (UseLargePages) { - _state = Explicit; - } else { - _state = Disabled; - } -} diff --git a/src/hotspot/os/bsd/gc/x/xNUMA_bsd.cpp b/src/hotspot/os/bsd/gc/x/xNUMA_bsd.cpp deleted file mode 100644 index b0e23a1716ad9..0000000000000 --- a/src/hotspot/os/bsd/gc/x/xNUMA_bsd.cpp +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xNUMA.hpp" -#include "utilities/globalDefinitions.hpp" - -void XNUMA::pd_initialize() { - _enabled = false; -} - -uint32_t XNUMA::count() { - return 1; -} - -uint32_t XNUMA::id() { - return 0; -} - -uint32_t XNUMA::memory_id(uintptr_t addr) { - // NUMA support not enabled, assume everything belongs to node zero - return 0; -} diff --git a/src/hotspot/os/bsd/gc/x/xPhysicalMemoryBacking_bsd.cpp b/src/hotspot/os/bsd/gc/x/xPhysicalMemoryBacking_bsd.cpp deleted file mode 100644 index 2c64c3788d34d..0000000000000 --- a/src/hotspot/os/bsd/gc/x/xPhysicalMemoryBacking_bsd.cpp +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/shared/gcLogPrecious.hpp" -#include "gc/x/xErrno.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xLargePages.inline.hpp" -#include "gc/x/xPhysicalMemory.inline.hpp" -#include "gc/x/xPhysicalMemoryBacking_bsd.hpp" -#include "logging/log.hpp" -#include "runtime/globals.hpp" -#include "runtime/os.hpp" -#include "utilities/align.hpp" -#include "utilities/debug.hpp" - -#include -#include -#include -#include - -// The backing is represented by a reserved virtual address space, in which -// we commit and uncommit physical memory. Multi-mapping the different heap -// views is done by simply remapping the backing memory using mach_vm_remap(). - -static int vm_flags_superpage() { - if (!XLargePages::is_explicit()) { - return 0; - } - - const int page_size_in_megabytes = XGranuleSize >> 20; - return page_size_in_megabytes << VM_FLAGS_SUPERPAGE_SHIFT; -} - -static XErrno mremap(uintptr_t from_addr, uintptr_t to_addr, size_t size) { - mach_vm_address_t remap_addr = to_addr; - vm_prot_t remap_cur_prot; - vm_prot_t remap_max_prot; - - // Remap memory to an additional location - const kern_return_t res = mach_vm_remap(mach_task_self(), - &remap_addr, - size, - 0 /* mask */, - VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | vm_flags_superpage(), - mach_task_self(), - from_addr, - FALSE /* copy */, - &remap_cur_prot, - &remap_max_prot, - VM_INHERIT_COPY); - - return (res == KERN_SUCCESS) ? XErrno(0) : XErrno(EINVAL); -} - -XPhysicalMemoryBacking::XPhysicalMemoryBacking(size_t max_capacity) : - _base(0), - _initialized(false) { - - // Reserve address space for backing memory - _base = (uintptr_t)os::reserve_memory(max_capacity); - if (_base == 0) { - // Failed - log_error_pd(gc)("Failed to reserve address space for backing memory"); - return; - } - - // Successfully initialized - _initialized = true; -} - -bool XPhysicalMemoryBacking::is_initialized() const { - return _initialized; -} - -void XPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const { - // Does nothing -} - -bool XPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const { - assert(is_aligned(offset, os::vm_page_size()), "Invalid offset"); - assert(is_aligned(length, os::vm_page_size()), "Invalid length"); - - log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", - offset / M, (offset + length) / M, length / M); - - const uintptr_t addr = _base + offset; - const void* const res = mmap((void*)addr, length, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); - if (res == MAP_FAILED) { - XErrno err; - log_error(gc)("Failed to commit memory (%s)", err.to_string()); - return false; - } - - // Success - return true; -} - -size_t XPhysicalMemoryBacking::commit(size_t offset, size_t length) const { - // Try to commit the whole region - if (commit_inner(offset, length)) { - // Success - return length; - } - - // Failed, try to commit as much as possible - size_t start = offset; - size_t end = offset + length; - - for (;;) { - length = align_down((end - start) / 2, XGranuleSize); - if (length == 0) { - // Done, don't commit more - return start - offset; - } - - if (commit_inner(start, length)) { - // Success, try commit more - start += length; - } else { - // Failed, try commit less - end -= length; - } - } -} - -size_t XPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const { - assert(is_aligned(offset, os::vm_page_size()), "Invalid offset"); - assert(is_aligned(length, os::vm_page_size()), "Invalid length"); 
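The BSD commit() just above first tries to commit the whole range and, when that fails, keeps halving the remaining range (granule-aligned) so that as much memory as possible still gets committed. A minimal standalone sketch of that fallback strategy, assuming a 2M stand-in for XGranuleSize and a caller-supplied commit primitive (both names here are illustrative, not HotSpot's):

    #include <cstddef>
    #include <functional>

    static const size_t kGranule = 2 * 1024 * 1024;   // stand-in for XGranuleSize

    static size_t align_down(size_t value, size_t alignment) {
      return value - (value % alignment);
    }

    // Commit as much of [offset, offset + length) as possible and return the
    // number of bytes committed. 'commit_inner' commits one contiguous range.
    size_t commit_as_much_as_possible(size_t offset, size_t length,
                                      const std::function<bool(size_t, size_t)>& commit_inner) {
      if (commit_inner(offset, length)) {
        return length;                      // whole region committed
      }

      // Failed: repeatedly try a granule-aligned half of what is left.
      size_t start = offset;
      size_t end = offset + length;
      for (;;) {
        const size_t step = align_down((end - start) / 2, kGranule);
        if (step == 0) {
          return start - offset;            // nothing smaller left to try
        }
        if (commit_inner(start, step)) {
          start += step;                    // success, keep committing forward
        } else {
          end -= step;                      // failure, shrink the range and retry
        }
      }
    }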
- - log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", - offset / M, (offset + length) / M, length / M); - - const uintptr_t start = _base + offset; - const void* const res = mmap((void*)start, length, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0); - if (res == MAP_FAILED) { - XErrno err; - log_error(gc)("Failed to uncommit memory (%s)", err.to_string()); - return 0; - } - - return length; -} - -void XPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const { - const XErrno err = mremap(_base + offset, addr, size); - if (err) { - fatal("Failed to remap memory (%s)", err.to_string()); - } -} - -void XPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const { - // Note that we must keep the address space reservation intact and just detach - // the backing memory. For this reason we map a new anonymous, non-accessible - // and non-reserved page over the mapping instead of actually unmapping. - const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0); - if (res == MAP_FAILED) { - XErrno err; - fatal("Failed to map memory (%s)", err.to_string()); - } -} diff --git a/src/hotspot/os/bsd/gc/x/xPhysicalMemoryBacking_bsd.hpp b/src/hotspot/os/bsd/gc/x/xPhysicalMemoryBacking_bsd.hpp deleted file mode 100644 index 8b4747026ff23..0000000000000 --- a/src/hotspot/os/bsd/gc/x/xPhysicalMemoryBacking_bsd.hpp +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
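The BSD backing materializes additional heap views by remapping the reserved backing range with mach_vm_remap(), as in the mremap() helper above. A macOS-only sketch of that remap call, assuming from_addr is an existing mapping and to_addr lies inside already-reserved address space; the large-page flag and error reporting of the original are omitted:

    #include <cstddef>
    #include <cstdint>
    #include <mach/mach.h>
    #include <mach/mach_vm.h>
    #include <mach/vm_statistics.h>

    // Map the physical pages backing [from_addr, from_addr + size) at to_addr
    // as well, producing a second view of the same memory.
    bool remap_view(uintptr_t from_addr, uintptr_t to_addr, size_t size) {
      mach_vm_address_t remap_addr = to_addr;
      vm_prot_t cur_prot;
      vm_prot_t max_prot;

      const kern_return_t res = mach_vm_remap(mach_task_self(),
                                              &remap_addr,
                                              size,
                                              0,                                   // mask
                                              VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, // place exactly at to_addr
                                              mach_task_self(),
                                              from_addr,
                                              FALSE,                               // share, do not copy
                                              &cur_prot,
                                              &max_prot,
                                              VM_INHERIT_COPY);
      return res == KERN_SUCCESS;
    }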
- */ - -#ifndef OS_BSD_GC_X_XPHYSICALMEMORYBACKING_BSD_HPP -#define OS_BSD_GC_X_XPHYSICALMEMORYBACKING_BSD_HPP - -class XPhysicalMemoryBacking { -private: - uintptr_t _base; - bool _initialized; - - bool commit_inner(size_t offset, size_t length) const; - -public: - XPhysicalMemoryBacking(size_t max_capacity); - - bool is_initialized() const; - - void warn_commit_limits(size_t max_capacity) const; - - size_t commit(size_t offset, size_t length) const; - size_t uncommit(size_t offset, size_t length) const; - - void map(uintptr_t addr, size_t size, uintptr_t offset) const; - void unmap(uintptr_t addr, size_t size) const; -}; - -#endif // OS_BSD_GC_X_XPHYSICALMEMORYBACKING_BSD_HPP diff --git a/src/hotspot/os/linux/gc/x/xLargePages_linux.cpp b/src/hotspot/os/linux/gc/x/xLargePages_linux.cpp deleted file mode 100644 index 6ad956b1e63fe..0000000000000 --- a/src/hotspot/os/linux/gc/x/xLargePages_linux.cpp +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xLargePages.hpp" -#include "runtime/globals.hpp" - -void XLargePages::pd_initialize() { - if (UseLargePages) { - if (UseTransparentHugePages) { - _state = Transparent; - } else { - _state = Explicit; - } - } else { - _state = Disabled; - } -} diff --git a/src/hotspot/os/linux/gc/x/xMountPoint_linux.cpp b/src/hotspot/os/linux/gc/x/xMountPoint_linux.cpp deleted file mode 100644 index 96c0f2f92dbd2..0000000000000 --- a/src/hotspot/os/linux/gc/x/xMountPoint_linux.cpp +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/shared/gcLogPrecious.hpp" -#include "gc/x/xArray.inline.hpp" -#include "gc/x/xErrno.hpp" -#include "gc/x/xMountPoint_linux.hpp" -#include "runtime/globals.hpp" -#include "runtime/os.hpp" -#include "utilities/globalDefinitions.hpp" - -#include -#include - -// Mount information, see proc(5) for more details. -#define PROC_SELF_MOUNTINFO "/proc/self/mountinfo" - -XMountPoint::XMountPoint(const char* filesystem, const char** preferred_mountpoints) { - if (AllocateHeapAt != nullptr) { - // Use specified path - _path = os::strdup(AllocateHeapAt, mtGC); - } else { - // Find suitable path - _path = find_mountpoint(filesystem, preferred_mountpoints); - } -} - -XMountPoint::~XMountPoint() { - os::free(_path); - _path = nullptr; -} - -char* XMountPoint::get_mountpoint(const char* line, const char* filesystem) const { - char* line_mountpoint = nullptr; - char* line_filesystem = nullptr; - - // Parse line and return a newly allocated string containing the mount point if - // the line contains a matching filesystem and the mount point is accessible by - // the current user. - // sscanf, using %m, will return malloced memory. Need raw ::free, not os::free. - if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms", &line_mountpoint, &line_filesystem) != 2 || - strcmp(line_filesystem, filesystem) != 0 || - access(line_mountpoint, R_OK|W_OK|X_OK) != 0) { - // Not a matching or accessible filesystem - ALLOW_C_FUNCTION(::free, ::free(line_mountpoint);) - line_mountpoint = nullptr; - } - - ALLOW_C_FUNCTION(::free, ::free(line_filesystem);) - - return line_mountpoint; -} - -void XMountPoint::get_mountpoints(const char* filesystem, XArray* mountpoints) const { - FILE* fd = os::fopen(PROC_SELF_MOUNTINFO, "r"); - if (fd == nullptr) { - XErrno err; - log_error_p(gc)("Failed to open %s: %s", PROC_SELF_MOUNTINFO, err.to_string()); - return; - } - - char* line = nullptr; - size_t length = 0; - - while (getline(&line, &length, fd) != -1) { - char* const mountpoint = get_mountpoint(line, filesystem); - if (mountpoint != nullptr) { - mountpoints->append(mountpoint); - } - } - - // readline will return malloced memory. Need raw ::free, not os::free. 
- ALLOW_C_FUNCTION(::free, ::free(line);) - fclose(fd); -} - -void XMountPoint::free_mountpoints(XArray* mountpoints) const { - XArrayIterator iter(mountpoints); - for (char* mountpoint; iter.next(&mountpoint);) { - ALLOW_C_FUNCTION(::free, ::free(mountpoint);) // *not* os::free - } - mountpoints->clear(); -} - -char* XMountPoint::find_preferred_mountpoint(const char* filesystem, - XArray* mountpoints, - const char** preferred_mountpoints) const { - // Find preferred mount point - XArrayIterator iter1(mountpoints); - for (char* mountpoint; iter1.next(&mountpoint);) { - for (const char** preferred = preferred_mountpoints; *preferred != nullptr; preferred++) { - if (!strcmp(mountpoint, *preferred)) { - // Preferred mount point found - return os::strdup(mountpoint, mtGC); - } - } - } - - // Preferred mount point not found - log_error_p(gc)("More than one %s filesystem found:", filesystem); - XArrayIterator iter2(mountpoints); - for (char* mountpoint; iter2.next(&mountpoint);) { - log_error_p(gc)(" %s", mountpoint); - } - - return nullptr; -} - -char* XMountPoint::find_mountpoint(const char* filesystem, const char** preferred_mountpoints) const { - char* path = nullptr; - XArray mountpoints; - - get_mountpoints(filesystem, &mountpoints); - - if (mountpoints.length() == 0) { - // No mount point found - log_error_p(gc)("Failed to find an accessible %s filesystem", filesystem); - } else if (mountpoints.length() == 1) { - // One mount point found - path = os::strdup(mountpoints.at(0), mtGC); - } else { - // More than one mount point found - path = find_preferred_mountpoint(filesystem, &mountpoints, preferred_mountpoints); - } - - free_mountpoints(&mountpoints); - - return path; -} - -const char* XMountPoint::get() const { - return _path; -} diff --git a/src/hotspot/os/linux/gc/x/xMountPoint_linux.hpp b/src/hotspot/os/linux/gc/x/xMountPoint_linux.hpp deleted file mode 100644 index e0ca126e0667a..0000000000000 --- a/src/hotspot/os/linux/gc/x/xMountPoint_linux.hpp +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
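XMountPoint above discovers a usable tmpfs or hugetlbfs mount by scanning /proc/self/mountinfo, extracting the mount point and filesystem type with sscanf's allocating %m conversion, and keeping only mounts the process can read, write and traverse. A standalone Linux sketch of just that parsing step (relies on the POSIX.1-2008 %ms conversion, so glibc or musl is assumed):

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>
    #include <unistd.h>

    // Print every mount point of the given filesystem type (e.g. "tmpfs")
    // that the current process can read, write and traverse.
    static void list_mountpoints(const char* filesystem) {
      FILE* file = std::fopen("/proc/self/mountinfo", "r");
      if (file == nullptr) {
        std::perror("fopen");
        return;
      }

      char* line = nullptr;
      size_t length = 0;

      while (getline(&line, &length, file) != -1) {
        char* mountpoint = nullptr;
        char* fstype = nullptr;

        // Same format string as the deleted code: field 5 is the mount point and
        // the field after the "-" separator is the filesystem type. %m mallocs.
        if (std::sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms", &mountpoint, &fstype) == 2 &&
            std::strcmp(fstype, filesystem) == 0 &&
            access(mountpoint, R_OK | W_OK | X_OK) == 0) {
          std::printf("%s\n", mountpoint);
        }

        std::free(mountpoint);   // free(nullptr) is a no-op
        std::free(fstype);
      }

      std::free(line);           // getline's buffer is also malloc'ed
      std::fclose(file);
    }

    int main() {
      list_mountpoints("tmpfs");
      return 0;
    }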
- */ - -#ifndef OS_LINUX_GC_X_XMOUNTPOINT_LINUX_HPP -#define OS_LINUX_GC_X_XMOUNTPOINT_LINUX_HPP - -#include "gc/x/xArray.hpp" -#include "memory/allocation.hpp" - -class XMountPoint : public StackObj { -private: - char* _path; - - char* get_mountpoint(const char* line, - const char* filesystem) const; - void get_mountpoints(const char* filesystem, - XArray* mountpoints) const; - void free_mountpoints(XArray* mountpoints) const; - char* find_preferred_mountpoint(const char* filesystem, - XArray* mountpoints, - const char** preferred_mountpoints) const; - char* find_mountpoint(const char* filesystem, - const char** preferred_mountpoints) const; - -public: - XMountPoint(const char* filesystem, const char** preferred_mountpoints); - ~XMountPoint(); - - const char* get() const; -}; - -#endif // OS_LINUX_GC_X_XMOUNTPOINT_LINUX_HPP diff --git a/src/hotspot/os/linux/gc/x/xNUMA_linux.cpp b/src/hotspot/os/linux/gc/x/xNUMA_linux.cpp deleted file mode 100644 index 0cc557dde6e86..0000000000000 --- a/src/hotspot/os/linux/gc/x/xNUMA_linux.cpp +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "gc/x/xCPU.inline.hpp" -#include "gc/x/xErrno.hpp" -#include "gc/x/xNUMA.hpp" -#include "gc/x/xSyscall_linux.hpp" -#include "os_linux.hpp" -#include "runtime/globals.hpp" -#include "runtime/os.hpp" -#include "utilities/debug.hpp" - -void XNUMA::pd_initialize() { - _enabled = UseNUMA; -} - -uint32_t XNUMA::count() { - if (!_enabled) { - // NUMA support not enabled - return 1; - } - - return os::Linux::numa_max_node() + 1; -} - -uint32_t XNUMA::id() { - if (!_enabled) { - // NUMA support not enabled - return 0; - } - - return os::Linux::get_node_by_cpu(XCPU::id()); -} - -uint32_t XNUMA::memory_id(uintptr_t addr) { - if (!_enabled) { - // NUMA support not enabled, assume everything belongs to node zero - return 0; - } - - uint32_t id = (uint32_t)-1; - - if (XSyscall::get_mempolicy((int*)&id, nullptr, 0, (void*)addr, MPOL_F_NODE | MPOL_F_ADDR) == -1) { - XErrno err; - fatal("Failed to get NUMA id for memory at " PTR_FORMAT " (%s)", addr, err.to_string()); - } - - assert(id < count(), "Invalid NUMA id"); - - return id; -} diff --git a/src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.cpp b/src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.cpp deleted file mode 100644 index 35625f613d349..0000000000000 --- a/src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.cpp +++ /dev/null @@ -1,724 +0,0 @@ -/* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. 
All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/shared/gcLogPrecious.hpp" -#include "gc/x/xArray.inline.hpp" -#include "gc/x/xErrno.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xLargePages.inline.hpp" -#include "gc/x/xMountPoint_linux.hpp" -#include "gc/x/xNUMA.inline.hpp" -#include "gc/x/xPhysicalMemoryBacking_linux.hpp" -#include "gc/x/xSyscall_linux.hpp" -#include "logging/log.hpp" -#include "os_linux.hpp" -#include "runtime/init.hpp" -#include "runtime/os.hpp" -#include "runtime/safefetch.hpp" -#include "utilities/align.hpp" -#include "utilities/debug.hpp" -#include "utilities/growableArray.hpp" - -#include -#include -#include -#include -#include -#include -#include - -// -// Support for building on older Linux systems -// - -// memfd_create(2) flags -#ifndef MFD_CLOEXEC -#define MFD_CLOEXEC 0x0001U -#endif -#ifndef MFD_HUGETLB -#define MFD_HUGETLB 0x0004U -#endif -#ifndef MFD_HUGE_2MB -#define MFD_HUGE_2MB 0x54000000U -#endif - -// open(2) flags -#ifndef O_CLOEXEC -#define O_CLOEXEC 02000000 -#endif -#ifndef O_TMPFILE -#define O_TMPFILE (020000000 | O_DIRECTORY) -#endif - -// fallocate(2) flags -#ifndef FALLOC_FL_KEEP_SIZE -#define FALLOC_FL_KEEP_SIZE 0x01 -#endif -#ifndef FALLOC_FL_PUNCH_HOLE -#define FALLOC_FL_PUNCH_HOLE 0x02 -#endif - -// Filesystem types, see statfs(2) -#ifndef TMPFS_MAGIC -#define TMPFS_MAGIC 0x01021994 -#endif -#ifndef HUGETLBFS_MAGIC -#define HUGETLBFS_MAGIC 0x958458f6 -#endif - -// Filesystem names -#define XFILESYSTEM_TMPFS "tmpfs" -#define XFILESYSTEM_HUGETLBFS "hugetlbfs" - -// Proc file entry for max map mount -#define XFILENAME_PROC_MAX_MAP_COUNT "/proc/sys/vm/max_map_count" - -// Sysfs file for transparent huge page on tmpfs -#define XFILENAME_SHMEM_ENABLED "/sys/kernel/mm/transparent_hugepage/shmem_enabled" - -// Java heap filename -#define XFILENAME_HEAP "java_heap" - -// Preferred tmpfs mount points, ordered by priority -static const char* z_preferred_tmpfs_mountpoints[] = { - "/dev/shm", - "/run/shm", - nullptr -}; - -// Preferred hugetlbfs mount points, ordered by priority -static const char* z_preferred_hugetlbfs_mountpoints[] = { - "/dev/hugepages", - "/hugepages", - nullptr -}; - -static int z_fallocate_hugetlbfs_attempts = 3; -static bool z_fallocate_supported = true; - -XPhysicalMemoryBacking::XPhysicalMemoryBacking(size_t max_capacity) : - _fd(-1), - _filesystem(0), - _block_size(0), - _available(0), - _initialized(false) { - - // Create backing file - _fd = create_fd(XFILENAME_HEAP); - if (_fd == -1) { - return; - } - - // Truncate backing file - 
while (ftruncate(_fd, max_capacity) == -1) { - if (errno != EINTR) { - XErrno err; - log_error_p(gc)("Failed to truncate backing file (%s)", err.to_string()); - return; - } - } - - // Get filesystem statistics - struct statfs buf; - if (fstatfs(_fd, &buf) == -1) { - XErrno err; - log_error_p(gc)("Failed to determine filesystem type for backing file (%s)", err.to_string()); - return; - } - - _filesystem = buf.f_type; - _block_size = buf.f_bsize; - _available = buf.f_bavail * _block_size; - - log_info_p(gc, init)("Heap Backing Filesystem: %s (" UINT64_FORMAT_X ")", - is_tmpfs() ? XFILESYSTEM_TMPFS : is_hugetlbfs() ? XFILESYSTEM_HUGETLBFS : "other", _filesystem); - - // Make sure the filesystem type matches requested large page type - if (XLargePages::is_transparent() && !is_tmpfs()) { - log_error_p(gc)("-XX:+UseTransparentHugePages can only be enabled when using a %s filesystem", - XFILESYSTEM_TMPFS); - return; - } - - if (XLargePages::is_transparent() && !tmpfs_supports_transparent_huge_pages()) { - log_error_p(gc)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel", - XFILESYSTEM_TMPFS); - return; - } - - if (XLargePages::is_explicit() && !is_hugetlbfs()) { - log_error_p(gc)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled " - "when using a %s filesystem", XFILESYSTEM_HUGETLBFS); - return; - } - - if (!XLargePages::is_explicit() && is_hugetlbfs()) { - log_error_p(gc)("-XX:+UseLargePages must be enabled when using a %s filesystem", - XFILESYSTEM_HUGETLBFS); - return; - } - - // Make sure the filesystem block size is compatible - if (XGranuleSize % _block_size != 0) { - log_error_p(gc)("Filesystem backing the heap has incompatible block size (" SIZE_FORMAT ")", - _block_size); - return; - } - - if (is_hugetlbfs() && _block_size != XGranuleSize) { - log_error_p(gc)("%s filesystem has unexpected block size " SIZE_FORMAT " (expected " SIZE_FORMAT ")", - XFILESYSTEM_HUGETLBFS, _block_size, XGranuleSize); - return; - } - - // Successfully initialized - _initialized = true; -} - -int XPhysicalMemoryBacking::create_mem_fd(const char* name) const { - assert(XGranuleSize == 2 * M, "Granule size must match MFD_HUGE_2MB"); - - // Create file name - char filename[PATH_MAX]; - snprintf(filename, sizeof(filename), "%s%s", name, XLargePages::is_explicit() ? ".hugetlb" : ""); - - // Create file - const int extra_flags = XLargePages::is_explicit() ? (MFD_HUGETLB | MFD_HUGE_2MB) : 0; - const int fd = XSyscall::memfd_create(filename, MFD_CLOEXEC | extra_flags); - if (fd == -1) { - XErrno err; - log_debug_p(gc, init)("Failed to create memfd file (%s)", - (XLargePages::is_explicit() && (err == EINVAL || err == ENODEV)) ? - "Hugepages (2M) not available" : err.to_string()); - return -1; - } - - log_info_p(gc, init)("Heap Backing File: /memfd:%s", filename); - - return fd; -} - -int XPhysicalMemoryBacking::create_file_fd(const char* name) const { - const char* const filesystem = XLargePages::is_explicit() - ? XFILESYSTEM_HUGETLBFS - : XFILESYSTEM_TMPFS; - const char** const preferred_mountpoints = XLargePages::is_explicit() - ? z_preferred_hugetlbfs_mountpoints - : z_preferred_tmpfs_mountpoints; - - // Find mountpoint - XMountPoint mountpoint(filesystem, preferred_mountpoints); - if (mountpoint.get() == nullptr) { - log_error_p(gc)("Use -XX:AllocateHeapAt to specify the path to a %s filesystem", filesystem); - return -1; - } - - // Try to create an anonymous file using the O_TMPFILE flag. Note that this - // flag requires kernel >= 3.11. 
If this fails we fall back to open/unlink. - const int fd_anon = os::open(mountpoint.get(), O_TMPFILE|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR); - if (fd_anon == -1) { - XErrno err; - log_debug_p(gc, init)("Failed to create anonymous file in %s (%s)", mountpoint.get(), - (err == EINVAL ? "Not supported" : err.to_string())); - } else { - // Get inode number for anonymous file - struct stat stat_buf; - if (fstat(fd_anon, &stat_buf) == -1) { - XErrno err; - log_error_pd(gc)("Failed to determine inode number for anonymous file (%s)", err.to_string()); - return -1; - } - - log_info_p(gc, init)("Heap Backing File: %s/#" UINT64_FORMAT, mountpoint.get(), (uint64_t)stat_buf.st_ino); - - return fd_anon; - } - - log_debug_p(gc, init)("Falling back to open/unlink"); - - // Create file name - char filename[PATH_MAX]; - snprintf(filename, sizeof(filename), "%s/%s.%d", mountpoint.get(), name, os::current_process_id()); - - // Create file - const int fd = os::open(filename, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR); - if (fd == -1) { - XErrno err; - log_error_p(gc)("Failed to create file %s (%s)", filename, err.to_string()); - return -1; - } - - // Unlink file - if (unlink(filename) == -1) { - XErrno err; - log_error_p(gc)("Failed to unlink file %s (%s)", filename, err.to_string()); - return -1; - } - - log_info_p(gc, init)("Heap Backing File: %s", filename); - - return fd; -} - -int XPhysicalMemoryBacking::create_fd(const char* name) const { - if (AllocateHeapAt == nullptr) { - // If the path is not explicitly specified, then we first try to create a memfd file - // instead of looking for a tmpfd/hugetlbfs mount point. Note that memfd_create() might - // not be supported at all (requires kernel >= 3.17), or it might not support large - // pages (requires kernel >= 4.14). If memfd_create() fails, then we try to create a - // file on an accessible tmpfs or hugetlbfs mount point. - const int fd = create_mem_fd(name); - if (fd != -1) { - return fd; - } - - log_debug_p(gc)("Falling back to searching for an accessible mount point"); - } - - return create_file_fd(name); -} - -bool XPhysicalMemoryBacking::is_initialized() const { - return _initialized; -} - -void XPhysicalMemoryBacking::warn_available_space(size_t max_capacity) const { - // Note that the available space on a tmpfs or a hugetlbfs filesystem - // will be zero if no size limit was specified when it was mounted. - if (_available == 0) { - // No size limit set, skip check - log_info_p(gc, init)("Available space on backing filesystem: N/A"); - return; - } - - log_info_p(gc, init)("Available space on backing filesystem: " SIZE_FORMAT "M", _available / M); - - // Warn if the filesystem doesn't currently have enough space available to hold - // the max heap size. The max heap size will be capped if we later hit this limit - // when trying to expand the heap. - if (_available < max_capacity) { - log_warning_p(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****"); - log_warning_p(gc)("Not enough space available on the backing filesystem to hold the current max Java heap"); - log_warning_p(gc)("size (" SIZE_FORMAT "M). Please adjust the size of the backing filesystem accordingly " - "(available", max_capacity / M); - log_warning_p(gc)("space is currently " SIZE_FORMAT "M). 
Continuing execution with the current filesystem " - "size could", _available / M); - log_warning_p(gc)("lead to a premature OutOfMemoryError being thrown, due to failure to commit memory."); - } -} - -void XPhysicalMemoryBacking::warn_max_map_count(size_t max_capacity) const { - const char* const filename = XFILENAME_PROC_MAX_MAP_COUNT; - FILE* const file = os::fopen(filename, "r"); - if (file == nullptr) { - // Failed to open file, skip check - log_debug_p(gc, init)("Failed to open %s", filename); - return; - } - - size_t actual_max_map_count = 0; - const int result = fscanf(file, SIZE_FORMAT, &actual_max_map_count); - fclose(file); - if (result != 1) { - // Failed to read file, skip check - log_debug_p(gc, init)("Failed to read %s", filename); - return; - } - - // The required max map count is impossible to calculate exactly since subsystems - // other than ZGC are also creating memory mappings, and we have no control over that. - // However, ZGC tends to create the most mappings and dominate the total count. - // In the worst cases, ZGC will map each granule three times, i.e. once per heap view. - // We speculate that we need another 20% to allow for non-ZGC subsystems to map memory. - const size_t required_max_map_count = (max_capacity / XGranuleSize) * 3 * 1.2; - if (actual_max_map_count < required_max_map_count) { - log_warning_p(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****"); - log_warning_p(gc)("The system limit on number of memory mappings per process might be too low for the given"); - log_warning_p(gc)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at", - max_capacity / M, filename); - log_warning_p(gc)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing execution " - "with the current", required_max_map_count, actual_max_map_count); - log_warning_p(gc)("limit could lead to a premature OutOfMemoryError being thrown, due to failure to map memory."); - } -} - -void XPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const { - // Warn if available space is too low - warn_available_space(max_capacity); - - // Warn if max map count is too low - warn_max_map_count(max_capacity); -} - -bool XPhysicalMemoryBacking::is_tmpfs() const { - return _filesystem == TMPFS_MAGIC; -} - -bool XPhysicalMemoryBacking::is_hugetlbfs() const { - return _filesystem == HUGETLBFS_MAGIC; -} - -bool XPhysicalMemoryBacking::tmpfs_supports_transparent_huge_pages() const { - // If the shmem_enabled file exists and is readable then we - // know the kernel supports transparent huge pages for tmpfs. - return access(XFILENAME_SHMEM_ENABLED, R_OK) == 0; -} - -XErrno XPhysicalMemoryBacking::fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const { - // On hugetlbfs, mapping a file segment will fail immediately, without - // the need to touch the mapped pages first, if there aren't enough huge - // pages available to back the mapping. - void* const addr = mmap(nullptr, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset); - if (addr == MAP_FAILED) { - // Failed - return errno; - } - - // Once mapped, the huge pages are only reserved. We need to touch them - // to associate them with the file segment. Note that we can not punch - // hole in file segments which only have reserved pages. - if (touch) { - char* const start = (char*)addr; - char* const end = start + length; - os::pretouch_memory(start, end, _block_size); - } - - // Unmap again. 
From now on, the huge pages that were mapped are allocated - // to this file. There's no risk of getting a SIGBUS when mapping and - // touching these pages again. - if (munmap(addr, length) == -1) { - // Failed - return errno; - } - - // Success - return 0; -} - -static bool safe_touch_mapping(void* addr, size_t length, size_t page_size) { - char* const start = (char*)addr; - char* const end = start + length; - - // Touching a mapping that can't be backed by memory will generate a - // SIGBUS. By using SafeFetch32 any SIGBUS will be safely caught and - // handled. On tmpfs, doing a fetch (rather than a store) is enough - // to cause backing pages to be allocated (there's no zero-page to - // worry about). - for (char *p = start; p < end; p += page_size) { - if (SafeFetch32((int*)p, -1) == -1) { - // Failed - return false; - } - } - - // Success - return true; -} - -XErrno XPhysicalMemoryBacking::fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const { - // On tmpfs, we need to touch the mapped pages to figure out - // if there are enough pages available to back the mapping. - void* const addr = mmap(nullptr, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset); - if (addr == MAP_FAILED) { - // Failed - return errno; - } - - // Advise mapping to use transparent huge pages - os::realign_memory((char*)addr, length, XGranuleSize); - - // Touch the mapping (safely) to make sure it's backed by memory - const bool backed = safe_touch_mapping(addr, length, _block_size); - - // Unmap again. If successfully touched, the backing memory will - // be allocated to this file. There's no risk of getting a SIGBUS - // when mapping and touching these pages again. - if (munmap(addr, length) == -1) { - // Failed - return errno; - } - - // Success - return backed ? 0 : ENOMEM; -} - -XErrno XPhysicalMemoryBacking::fallocate_compat_pwrite(size_t offset, size_t length) const { - uint8_t data = 0; - - // Allocate backing memory by writing to each block - for (size_t pos = offset; pos < offset + length; pos += _block_size) { - if (pwrite(_fd, &data, sizeof(data), pos) == -1) { - // Failed - return errno; - } - } - - // Success - return 0; -} - -XErrno XPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t length) const { - // fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs - // since Linux 4.3. When fallocate(2) is not supported we emulate it using - // mmap/munmap (for hugetlbfs and tmpfs with transparent huge pages) or pwrite - // (for tmpfs without transparent huge pages and other filesystem types). - if (XLargePages::is_explicit()) { - return fallocate_compat_mmap_hugetlbfs(offset, length, false /* touch */); - } else if (XLargePages::is_transparent()) { - return fallocate_compat_mmap_tmpfs(offset, length); - } else { - return fallocate_compat_pwrite(offset, length); - } -} - -XErrno XPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t length) const { - const int mode = 0; // Allocate - const int res = XSyscall::fallocate(_fd, mode, offset, length); - if (res == -1) { - // Failed - return errno; - } - - // Success - return 0; -} - -XErrno XPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length) const { - // Using compat mode is more efficient when allocating space on hugetlbfs. - // Note that allocating huge pages this way will only reserve them, and not - // associate them with segments of the file. We must guarantee that we at - // some point touch these segments, otherwise we can not punch hole in them. 
- // Also note that we need to use compat mode when using transparent huge pages, - // since we need to use madvise(2) on the mapping before the page is allocated. - if (z_fallocate_supported && !XLargePages::is_enabled()) { - const XErrno err = fallocate_fill_hole_syscall(offset, length); - if (!err) { - // Success - return 0; - } - - if (err != ENOSYS && err != EOPNOTSUPP) { - // Failed - return err; - } - - // Not supported - log_debug_p(gc)("Falling back to fallocate() compatibility mode"); - z_fallocate_supported = false; - } - - return fallocate_fill_hole_compat(offset, length); -} - -XErrno XPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length) const { - if (XLargePages::is_explicit()) { - // We can only punch hole in pages that have been touched. Non-touched - // pages are only reserved, and not associated with any specific file - // segment. We don't know which pages have been previously touched, so - // we always touch them here to guarantee that we can punch hole. - const XErrno err = fallocate_compat_mmap_hugetlbfs(offset, length, true /* touch */); - if (err) { - // Failed - return err; - } - } - - const int mode = FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE; - if (XSyscall::fallocate(_fd, mode, offset, length) == -1) { - // Failed - return errno; - } - - // Success - return 0; -} - -XErrno XPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offset, size_t length) const { - // Try first half - const size_t offset0 = offset; - const size_t length0 = align_up(length / 2, _block_size); - const XErrno err0 = fallocate(punch_hole, offset0, length0); - if (err0) { - return err0; - } - - // Try second half - const size_t offset1 = offset0 + length0; - const size_t length1 = length - length0; - const XErrno err1 = fallocate(punch_hole, offset1, length1); - if (err1) { - return err1; - } - - // Success - return 0; -} - -XErrno XPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t length) const { - assert(is_aligned(offset, _block_size), "Invalid offset"); - assert(is_aligned(length, _block_size), "Invalid length"); - - const XErrno err = punch_hole ? fallocate_punch_hole(offset, length) : fallocate_fill_hole(offset, length); - if (err == EINTR && length > _block_size) { - // Calling fallocate(2) with a large length can take a long time to - // complete. When running profilers, such as VTune, this syscall will - // be constantly interrupted by signals. Expanding the file in smaller - // steps avoids this problem. - return split_and_fallocate(punch_hole, offset, length); - } - - return err; -} - -bool XPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const { - log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", - offset / M, (offset + length) / M, length / M); - -retry: - const XErrno err = fallocate(false /* punch_hole */, offset, length); - if (err) { - if (err == ENOSPC && !is_init_completed() && XLargePages::is_explicit() && z_fallocate_hugetlbfs_attempts-- > 0) { - // If we fail to allocate during initialization, due to lack of space on - // the hugetlbfs filesystem, then we wait and retry a few times before - // giving up. Otherwise there is a risk that running JVMs back-to-back - // will fail, since there is a delay between process termination and the - // huge pages owned by that process being returned to the huge page pool - // and made available for new allocations. 
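On Linux the commit and uncommit paths ultimately become fallocate(2) calls on the heap backing file: mode 0 fills a hole (allocates backing pages), and FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE releases them again. A small Linux-only sketch of that pair against a memfd-backed file; it uses the glibc memfd_create() wrapper (glibc >= 2.27) rather than the raw syscall the deleted code goes through, and the file name is an arbitrary example:

    #include <cstdio>
    #include <fcntl.h>      // fallocate(), FALLOC_FL_* (glibc, _GNU_SOURCE)
    #include <sys/mman.h>   // memfd_create() wrapper (glibc >= 2.27)
    #include <unistd.h>

    int main() {
      const off_t granule = 2 * 1024 * 1024;

      // Anonymous, unlinked backing file. The deleted code issues the raw
      // SYS_memfd_create syscall instead so it also works on older glibc.
      const int fd = memfd_create("java_heap_demo", MFD_CLOEXEC);
      if (fd == -1) { std::perror("memfd_create"); return 1; }
      if (ftruncate(fd, 16 * granule) == -1) { std::perror("ftruncate"); return 1; }

      // "Commit": allocate backing pages for granule #3 (mode 0 fills the hole).
      if (fallocate(fd, 0, 3 * granule, granule) == -1) {
        std::perror("fallocate fill");
        return 1;
      }

      // "Uncommit": punch the hole again while keeping the file size unchanged.
      if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 3 * granule, granule) == -1) {
        std::perror("fallocate punch");
        return 1;
      }

      close(fd);
      return 0;
    }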
- log_debug_p(gc, init)("Failed to commit memory (%s), retrying", err.to_string()); - - // Wait and retry in one second, in the hope that huge pages will be - // available by then. - sleep(1); - goto retry; - } - - // Failed - log_error_p(gc)("Failed to commit memory (%s)", err.to_string()); - return false; - } - - // Success - return true; -} - -static int offset_to_node(size_t offset) { - const GrowableArray* mapping = os::Linux::numa_nindex_to_node(); - const size_t nindex = (offset >> XGranuleSizeShift) % mapping->length(); - return mapping->at((int)nindex); -} - -size_t XPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t length) const { - size_t committed = 0; - - // Commit one granule at a time, so that each granule - // can be allocated from a different preferred node. - while (committed < length) { - const size_t granule_offset = offset + committed; - - // Setup NUMA policy to allocate memory from a preferred node - os::Linux::numa_set_preferred(offset_to_node(granule_offset)); - - if (!commit_inner(granule_offset, XGranuleSize)) { - // Failed - break; - } - - committed += XGranuleSize; - } - - // Restore NUMA policy - os::Linux::numa_set_preferred(-1); - - return committed; -} - -size_t XPhysicalMemoryBacking::commit_default(size_t offset, size_t length) const { - // Try to commit the whole region - if (commit_inner(offset, length)) { - // Success - return length; - } - - // Failed, try to commit as much as possible - size_t start = offset; - size_t end = offset + length; - - for (;;) { - length = align_down((end - start) / 2, XGranuleSize); - if (length < XGranuleSize) { - // Done, don't commit more - return start - offset; - } - - if (commit_inner(start, length)) { - // Success, try commit more - start += length; - } else { - // Failed, try commit less - end -= length; - } - } -} - -size_t XPhysicalMemoryBacking::commit(size_t offset, size_t length) const { - if (XNUMA::is_enabled() && !XLargePages::is_explicit()) { - // To get granule-level NUMA interleaving when using non-large pages, - // we must explicitly interleave the memory at commit/fallocate time. - return commit_numa_interleaved(offset, length); - } - - return commit_default(offset, length); -} - -size_t XPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const { - log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", - offset / M, (offset + length) / M, length / M); - - const XErrno err = fallocate(true /* punch_hole */, offset, length); - if (err) { - log_error(gc)("Failed to uncommit memory (%s)", err.to_string()); - return 0; - } - - return length; -} - -void XPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const { - const void* const res = mmap((void*)addr, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _fd, offset); - if (res == MAP_FAILED) { - XErrno err; - fatal("Failed to map memory (%s)", err.to_string()); - } -} - -void XPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const { - // Note that we must keep the address space reservation intact and just detach - // the backing memory. For this reason we map a new anonymous, non-accessible - // and non-reserved page over the mapping instead of actually unmapping. 
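The unmap() comment just above describes detaching the backing memory while keeping the address-space reservation, by mapping an inaccessible, non-reserved anonymous page over the range instead of calling munmap(). A minimal sketch of that map-over idiom (the function name is illustrative; the flags are the ones used by both the BSD and Linux backings):

    #include <cstddef>
    #include <sys/mman.h>

    // Detach any backing memory from [addr, addr + size) while keeping the
    // virtual address range reserved for later re-mapping.
    bool detach_backing(void* addr, size_t size) {
      void* const res = mmap(addr, size, PROT_NONE,
                             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
                             -1, 0);
      return res != MAP_FAILED;
    }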
- const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0); - if (res == MAP_FAILED) { - XErrno err; - fatal("Failed to map memory (%s)", err.to_string()); - } -} diff --git a/src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.hpp b/src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.hpp deleted file mode 100644 index 253a3f87ef427..0000000000000 --- a/src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.hpp +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef OS_LINUX_GC_X_XPHYSICALMEMORYBACKING_LINUX_HPP -#define OS_LINUX_GC_X_XPHYSICALMEMORYBACKING_LINUX_HPP - -class XErrno; - -class XPhysicalMemoryBacking { -private: - int _fd; - size_t _size; - uint64_t _filesystem; - size_t _block_size; - size_t _available; - bool _initialized; - - void warn_available_space(size_t max_capacity) const; - void warn_max_map_count(size_t max_capacity) const; - - int create_mem_fd(const char* name) const; - int create_file_fd(const char* name) const; - int create_fd(const char* name) const; - - bool is_tmpfs() const; - bool is_hugetlbfs() const; - bool tmpfs_supports_transparent_huge_pages() const; - - XErrno fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const; - XErrno fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const; - XErrno fallocate_compat_pwrite(size_t offset, size_t length) const; - XErrno fallocate_fill_hole_compat(size_t offset, size_t length) const; - XErrno fallocate_fill_hole_syscall(size_t offset, size_t length) const; - XErrno fallocate_fill_hole(size_t offset, size_t length) const; - XErrno fallocate_punch_hole(size_t offset, size_t length) const; - XErrno split_and_fallocate(bool punch_hole, size_t offset, size_t length) const; - XErrno fallocate(bool punch_hole, size_t offset, size_t length) const; - - bool commit_inner(size_t offset, size_t length) const; - size_t commit_numa_interleaved(size_t offset, size_t length) const; - size_t commit_default(size_t offset, size_t length) const; - -public: - XPhysicalMemoryBacking(size_t max_capacity); - - bool is_initialized() const; - - void warn_commit_limits(size_t max_capacity) const; - - size_t commit(size_t offset, size_t length) const; - size_t uncommit(size_t offset, size_t length) const; - - void map(uintptr_t addr, size_t size, uintptr_t offset) const; - void unmap(uintptr_t addr, size_t size) const; -}; - -#endif // OS_LINUX_GC_X_XPHYSICALMEMORYBACKING_LINUX_HPP diff --git 
a/src/hotspot/os/linux/gc/x/xSyscall_linux.cpp b/src/hotspot/os/linux/gc/x/xSyscall_linux.cpp deleted file mode 100644 index 6035eaae61bd1..0000000000000 --- a/src/hotspot/os/linux/gc/x/xSyscall_linux.cpp +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xSyscall_linux.hpp" -#include OS_CPU_HEADER(gc/x/xSyscall) - -#include - -int XSyscall::memfd_create(const char *name, unsigned int flags) { - return syscall(SYS_memfd_create, name, flags); -} - -int XSyscall::fallocate(int fd, int mode, size_t offset, size_t length) { - return syscall(SYS_fallocate, fd, mode, offset, length); -} - -long XSyscall::get_mempolicy(int* mode, unsigned long* nodemask, unsigned long maxnode, void* addr, unsigned long flags) { - return syscall(SYS_get_mempolicy, mode, nodemask, maxnode, addr, flags); -} diff --git a/src/hotspot/os/linux/gc/x/xSyscall_linux.hpp b/src/hotspot/os/linux/gc/x/xSyscall_linux.hpp deleted file mode 100644 index f16d2b2ffdcc6..0000000000000 --- a/src/hotspot/os/linux/gc/x/xSyscall_linux.hpp +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
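XSyscall above calls the kernel directly through syscall(2) so that ZGC does not depend on libnuma or a recent glibc; XNUMA::memory_id earlier in this patch uses get_mempolicy with MPOL_F_NODE | MPOL_F_ADDR to ask which NUMA node backs a given address. A Linux-only sketch of that query, defining the flag values locally the same way the deleted header does in case <numaif.h> is unavailable:

    #include <cstdio>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef MPOL_F_NODE
    #define MPOL_F_NODE (1 << 0)
    #endif
    #ifndef MPOL_F_ADDR
    #define MPOL_F_ADDR (1 << 1)
    #endif

    // Returns the NUMA node backing the page that contains 'addr' (allocating
    // the page as if it had been read, if necessary), or -1 on failure.
    static int numa_node_of(void* addr) {
      int node = -1;
      if (syscall(SYS_get_mempolicy, &node, nullptr, 0UL, addr, MPOL_F_NODE | MPOL_F_ADDR) == -1) {
        return -1;
      }
      return node;
    }

    int main() {
      int probe = 42;
      std::printf("node of &probe: %d\n", numa_node_of(&probe));
      return 0;
    }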
- */ - -#ifndef OS_LINUX_GC_X_XSYSCALL_LINUX_HPP -#define OS_LINUX_GC_X_XSYSCALL_LINUX_HPP - -#include "memory/allStatic.hpp" -#include "utilities/globalDefinitions.hpp" - -// Flags for get_mempolicy() -#ifndef MPOL_F_NODE -#define MPOL_F_NODE (1<<0) -#endif -#ifndef MPOL_F_ADDR -#define MPOL_F_ADDR (1<<1) -#endif - -class XSyscall : public AllStatic { -public: - static int memfd_create(const char* name, unsigned int flags); - static int fallocate(int fd, int mode, size_t offset, size_t length); - static long get_mempolicy(int* mode, unsigned long* nodemask, unsigned long maxnode, void* addr, unsigned long flags); -}; - -#endif // OS_LINUX_GC_X_XSYSCALL_LINUX_HPP diff --git a/src/hotspot/os/posix/gc/x/xArguments_posix.cpp b/src/hotspot/os/posix/gc/x/xArguments_posix.cpp deleted file mode 100644 index 6df0a9bd07460..0000000000000 --- a/src/hotspot/os/posix/gc/x/xArguments_posix.cpp +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xArguments.hpp" - -bool XArguments::is_os_supported() { - return true; -} diff --git a/src/hotspot/os/posix/gc/x/xInitialize_posix.cpp b/src/hotspot/os/posix/gc/x/xInitialize_posix.cpp deleted file mode 100644 index acf71e9890178..0000000000000 --- a/src/hotspot/os/posix/gc/x/xInitialize_posix.cpp +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/x/xInitialize.hpp" - -void XInitialize::pd_initialize() { - // Does nothing -} diff --git a/src/hotspot/os/posix/gc/x/xUtils_posix.cpp b/src/hotspot/os/posix/gc/x/xUtils_posix.cpp deleted file mode 100644 index eee3e5cfbe60d..0000000000000 --- a/src/hotspot/os/posix/gc/x/xUtils_posix.cpp +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xUtils.hpp" -#include "utilities/debug.hpp" -#include "utilities/globalDefinitions.hpp" - -#include - -uintptr_t XUtils::alloc_aligned(size_t alignment, size_t size) { - void* res = nullptr; - - // Use raw posix_memalign as long as we have no wrapper for it - ALLOW_C_FUNCTION(::posix_memalign, int rc = posix_memalign(&res, alignment, size);) - if (rc != 0) { - fatal("posix_memalign() failed"); - } - - memset(res, 0, size); - - return (uintptr_t)res; -} diff --git a/src/hotspot/os/posix/gc/x/xVirtualMemory_posix.cpp b/src/hotspot/os/posix/gc/x/xVirtualMemory_posix.cpp deleted file mode 100644 index e2422eb0978fc..0000000000000 --- a/src/hotspot/os/posix/gc/x/xVirtualMemory_posix.cpp +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/x/xAddress.inline.hpp" -#include "gc/x/xVirtualMemory.hpp" -#include "logging/log.hpp" - -#include -#include - -void XVirtualMemoryManager::pd_initialize_before_reserve() { - // Does nothing -} - -void XVirtualMemoryManager::pd_initialize_after_reserve() { - // Does nothing -} - -bool XVirtualMemoryManager::pd_reserve(uintptr_t addr, size_t size) { - const uintptr_t res = (uintptr_t)mmap((void*)addr, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0); - if (res == (uintptr_t)MAP_FAILED) { - // Failed to reserve memory - return false; - } - - if (res != addr) { - // Failed to reserve memory at the requested address - munmap((void*)res, size); - return false; - } - - // Success - return true; -} - -void XVirtualMemoryManager::pd_unreserve(uintptr_t addr, size_t size) { - const int res = munmap((void*)addr, size); - assert(res == 0, "Failed to unmap memory"); -} diff --git a/src/hotspot/os/windows/gc/x/xArguments_windows.cpp b/src/hotspot/os/windows/gc/x/xArguments_windows.cpp deleted file mode 100644 index fc5f7eccb911f..0000000000000 --- a/src/hotspot/os/windows/gc/x/xArguments_windows.cpp +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xArguments.hpp" -#include "gc/x/xSyscall_windows.hpp" - -bool XArguments::is_os_supported() { - return XSyscall::is_supported(); -} diff --git a/src/hotspot/os/windows/gc/x/xInitialize_windows.cpp b/src/hotspot/os/windows/gc/x/xInitialize_windows.cpp deleted file mode 100644 index 99f6432803326..0000000000000 --- a/src/hotspot/os/windows/gc/x/xInitialize_windows.cpp +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xInitialize.hpp" -#include "gc/x/xSyscall_windows.hpp" - -void XInitialize::pd_initialize() { - XSyscall::initialize(); -} diff --git a/src/hotspot/os/windows/gc/x/xLargePages_windows.cpp b/src/hotspot/os/windows/gc/x/xLargePages_windows.cpp deleted file mode 100644 index 20b3c4911fc68..0000000000000 --- a/src/hotspot/os/windows/gc/x/xLargePages_windows.cpp +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/shared/gcLogPrecious.hpp" -#include "gc/x/xLargePages.hpp" -#include "gc/x/xSyscall_windows.hpp" -#include "runtime/globals.hpp" - -void XLargePages::pd_initialize() { - if (UseLargePages) { - if (XSyscall::is_large_pages_supported()) { - _state = Explicit; - return; - } - log_info_p(gc, init)("Shared large pages not supported on this OS version"); - } - - _state = Disabled; -} diff --git a/src/hotspot/os/windows/gc/x/xMapper_windows.cpp b/src/hotspot/os/windows/gc/x/xMapper_windows.cpp deleted file mode 100644 index e69b6ec56e293..0000000000000 --- a/src/hotspot/os/windows/gc/x/xMapper_windows.cpp +++ /dev/null @@ -1,310 +0,0 @@ -/* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xMapper_windows.hpp" -#include "gc/x/xSyscall_windows.hpp" -#include "logging/log.hpp" -#include "utilities/debug.hpp" - -#include <windows.h> - -// Memory reservation, commit, views, and placeholders. -// -// To be able to up-front reserve address space for the heap views, and later -// multi-map the heap views to the same physical memory, without ever losing the -// reservation of the reserved address space, we use "placeholders". -// -// These placeholders block out the address space from being used by other parts -// of the process. To commit memory in this address space, the placeholder must -// be replaced by anonymous memory, or replaced by mapping a view against a -// paging file mapping. We use the latter to support multi-mapping. -// -// We want to be able to dynamically commit and uncommit the physical memory of -// the heap (and also unmap ZPages), in granules of ZGranuleSize bytes. There is -// no way to grow and shrink the committed memory of a paging file mapping. -// Therefore, we create multiple granule-sized page file mappings. The memory is -// committed by creating a page file mapping, mapping a view against it, committing -// the memory, and unmapping the view. The memory will stay committed until all views are -// unmapped, and the paging file mapping handle is closed. -// -// When replacing a placeholder address space reservation with a mapped view -// against a paging file mapping, the virtual address space must exactly match -// an existing placeholder's address and size. Therefore we only deal with -// granule-sized placeholders at this layer. Higher layers that keep track of -// reserved available address space can (and will) coalesce placeholders, but -// they will be split before being used. - -#define fatal_error(msg, addr, size) \ - fatal(msg ": " PTR_FORMAT " " SIZE_FORMAT "M (%d)", \ - (addr), (size) / M, GetLastError()) - -uintptr_t XMapper::reserve(uintptr_t addr, size_t size) { - void* const res = XSyscall::VirtualAlloc2( - GetCurrentProcess(), // Process - (void*)addr, // BaseAddress - size, // Size - MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, // AllocationType - PAGE_NOACCESS, // PageProtection - nullptr, // ExtendedParameters - 0 // ParameterCount - ); - - // Caller responsible for error handling - return (uintptr_t)res; -} - -void XMapper::unreserve(uintptr_t addr, size_t size) { - const bool res = XSyscall::VirtualFreeEx( - GetCurrentProcess(), // hProcess - (void*)addr, // lpAddress - size, // dwSize - MEM_RELEASE // dwFreeType - ); - - if (!res) { - fatal_error("Failed to unreserve memory", addr, size); - } -} - -HANDLE XMapper::create_paging_file_mapping(size_t size) { - // Create mapping with SEC_RESERVE instead of SEC_COMMIT. - // - // We use MapViewOfFile3 for two different reasons: - // 1) When committing memory for the created paging file - // 2) When mapping a view of the memory created in (1) - // - // The non-platform code is only set up to deal with out-of-memory - // errors in (1). By using SEC_RESERVE, we prevent MapViewOfFile3 - // from failing because of "commit limit" checks. To actually commit - // memory in (1), a call to VirtualAlloc2 is done.
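[Editor's illustration, not part of the deleted file or of this patch.] The commit dance described in the comment above is spread across several helpers below (create_paging_file_mapping, commit_paging_file_mapping, commit). As a rough sketch against the plain Win32 API, with the XSyscall indirection and most error handling left out, committing one granule's worth of physical memory looks roughly like this; the function name commit_one_granule is made up for the example:

// Editor's sketch only; mirrors the flow implemented by the helpers below.
#include <windows.h>

static HANDLE commit_one_granule(size_t granule_size) {
  // Paging-file-backed section created with SEC_RESERVE, so that mapping a
  // view does not count against the commit limit yet.
  HANDLE section = CreateFileMappingW(INVALID_HANDLE_VALUE, nullptr,
                                      PAGE_READWRITE | SEC_RESERVE,
                                      (DWORD)(granule_size >> 32),
                                      (DWORD)(granule_size & 0xFFFFFFFF),
                                      nullptr);
  if (section == nullptr) return nullptr;

  // Map a temporary view, commit its pages, then drop the view again. The
  // physical memory stays committed for as long as the section handle is
  // open; the section is later mapped into the reserved (placeholder) heap
  // address ranges.
  void* view = MapViewOfFile(section, FILE_MAP_WRITE, 0, 0, granule_size);
  if (view == nullptr ||
      VirtualAlloc(view, granule_size, MEM_COMMIT, PAGE_READWRITE) != view) {
    if (view != nullptr) UnmapViewOfFile(view);
    CloseHandle(section);
    return nullptr;
  }
  UnmapViewOfFile(view);
  return section;  // closing this handle (with no views left) releases the memory
}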
- - HANDLE const res = XSyscall::CreateFileMappingW( - INVALID_HANDLE_VALUE, // hFile - nullptr, // lpFileMappingAttribute - PAGE_READWRITE | SEC_RESERVE, // flProtect - size >> 32, // dwMaximumSizeHigh - size & 0xFFFFFFFF, // dwMaximumSizeLow - nullptr // lpName - ); - - // Caller responsible for error handling - return res; -} - -bool XMapper::commit_paging_file_mapping(HANDLE file_handle, uintptr_t file_offset, size_t size) { - const uintptr_t addr = map_view_no_placeholder(file_handle, file_offset, size); - if (addr == 0) { - log_error(gc)("Failed to map view of paging file mapping (%d)", GetLastError()); - return false; - } - - const uintptr_t res = commit(addr, size); - if (res != addr) { - log_error(gc)("Failed to commit memory (%d)", GetLastError()); - } - - unmap_view_no_placeholder(addr, size); - - return res == addr; -} - -uintptr_t XMapper::map_view_no_placeholder(HANDLE file_handle, uintptr_t file_offset, size_t size) { - void* const res = XSyscall::MapViewOfFile3( - file_handle, // FileMapping - GetCurrentProcess(), // ProcessHandle - nullptr, // BaseAddress - file_offset, // Offset - size, // ViewSize - 0, // AllocationType - PAGE_NOACCESS, // PageProtection - nullptr, // ExtendedParameters - 0 // ParameterCount - ); - - // Caller responsible for error handling - return (uintptr_t)res; -} - -void XMapper::unmap_view_no_placeholder(uintptr_t addr, size_t size) { - const bool res = XSyscall::UnmapViewOfFile2( - GetCurrentProcess(), // ProcessHandle - (void*)addr, // BaseAddress - 0 // UnmapFlags - ); - - if (!res) { - fatal_error("Failed to unmap memory", addr, size); - } -} - -uintptr_t XMapper::commit(uintptr_t addr, size_t size) { - void* const res = XSyscall::VirtualAlloc2( - GetCurrentProcess(), // Process - (void*)addr, // BaseAddress - size, // Size - MEM_COMMIT, // AllocationType - PAGE_NOACCESS, // PageProtection - nullptr, // ExtendedParameters - 0 // ParameterCount - ); - - // Caller responsible for error handling - return (uintptr_t)res; -} - -HANDLE XMapper::create_and_commit_paging_file_mapping(size_t size) { - HANDLE const file_handle = create_paging_file_mapping(size); - if (file_handle == 0) { - log_error(gc)("Failed to create paging file mapping (%d)", GetLastError()); - return 0; - } - - const bool res = commit_paging_file_mapping(file_handle, 0 /* file_offset */, size); - if (!res) { - close_paging_file_mapping(file_handle); - return 0; - } - - return file_handle; -} - -void XMapper::close_paging_file_mapping(HANDLE file_handle) { - const bool res = CloseHandle( - file_handle // hObject - ); - - if (!res) { - fatal("Failed to close paging file handle (%d)", GetLastError()); - } -} - -HANDLE XMapper::create_shared_awe_section() { - MEM_EXTENDED_PARAMETER parameter = { 0 }; - parameter.Type = MemSectionExtendedParameterUserPhysicalFlags; - parameter.ULong64 = 0; - - HANDLE section = XSyscall::CreateFileMapping2( - INVALID_HANDLE_VALUE, // File - nullptr, // SecurityAttributes - SECTION_MAP_READ | SECTION_MAP_WRITE, // DesiredAccess - PAGE_READWRITE, // PageProtection - SEC_RESERVE | SEC_LARGE_PAGES, // AllocationAttributes - 0, // MaximumSize - nullptr, // Name - &parameter, // ExtendedParameters - 1 // ParameterCount - ); - - if (section == nullptr) { - fatal("Could not create shared AWE section (%d)", GetLastError()); - } - - return section; -} - -uintptr_t XMapper::reserve_for_shared_awe(HANDLE awe_section, uintptr_t addr, size_t size) { - MEM_EXTENDED_PARAMETER parameter = { 0 }; - parameter.Type = MemExtendedParameterUserPhysicalHandle; - parameter.Handle =
awe_section; - - void* const res = XSyscall::VirtualAlloc2( - GetCurrentProcess(), // Process - (void*)addr, // BaseAddress - size, // Size - MEM_RESERVE | MEM_PHYSICAL, // AllocationType - PAGE_READWRITE, // PageProtection - &parameter, // ExtendedParameters - 1 // ParameterCount - ); - - // Caller responsible for error handling - return (uintptr_t)res; -} - -void XMapper::unreserve_for_shared_awe(uintptr_t addr, size_t size) { - bool res = VirtualFree( - (void*)addr, // lpAddress - 0, // dwSize - MEM_RELEASE // dwFreeType - ); - - if (!res) { - fatal("Failed to unreserve memory: " PTR_FORMAT " " SIZE_FORMAT "M (%d)", - addr, size / M, GetLastError()); - } -} - -void XMapper::split_placeholder(uintptr_t addr, size_t size) { - const bool res = VirtualFree( - (void*)addr, // lpAddress - size, // dwSize - MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER // dwFreeType - ); - - if (!res) { - fatal_error("Failed to split placeholder", addr, size); - } -} - -void XMapper::coalesce_placeholders(uintptr_t addr, size_t size) { - const bool res = VirtualFree( - (void*)addr, // lpAddress - size, // dwSize - MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS // dwFreeType - ); - - if (!res) { - fatal_error("Failed to coalesce placeholders", addr, size); - } -} - -void XMapper::map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_offset, uintptr_t addr, size_t size) { - void* const res = XSyscall::MapViewOfFile3( - file_handle, // FileMapping - GetCurrentProcess(), // ProcessHandle - (void*)addr, // BaseAddress - file_offset, // Offset - size, // ViewSize - MEM_REPLACE_PLACEHOLDER, // AllocationType - PAGE_READWRITE, // PageProtection - nullptr, // ExtendedParameters - 0 // ParameterCount - ); - - if (res == nullptr) { - fatal_error("Failed to map memory", addr, size); - } -} - -void XMapper::unmap_view_preserve_placeholder(uintptr_t addr, size_t size) { - const bool res = XSyscall::UnmapViewOfFile2( - GetCurrentProcess(), // ProcessHandle - (void*)addr, // BaseAddress - MEM_PRESERVE_PLACEHOLDER // UnmapFlags - ); - - if (!res) { - fatal_error("Failed to unmap memory", addr, size); - } -} diff --git a/src/hotspot/os/windows/gc/x/xMapper_windows.hpp b/src/hotspot/os/windows/gc/x/xMapper_windows.hpp deleted file mode 100644 index 0f266d3fab7c6..0000000000000 --- a/src/hotspot/os/windows/gc/x/xMapper_windows.hpp +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions.
- */ - -#ifndef OS_WINDOWS_GC_X_XMAPPER_WINDOWS_HPP -#define OS_WINDOWS_GC_X_XMAPPER_WINDOWS_HPP - -#include "memory/allStatic.hpp" -#include "utilities/globalDefinitions.hpp" - -#include - -class XMapper : public AllStatic { -private: - // Create paging file mapping - static HANDLE create_paging_file_mapping(size_t size); - - // Commit paging file mapping - static bool commit_paging_file_mapping(HANDLE file_handle, uintptr_t file_offset, size_t size); - - // Map a view anywhere without a placeholder - static uintptr_t map_view_no_placeholder(HANDLE file_handle, uintptr_t file_offset, size_t size); - - // Unmap a view without preserving a placeholder - static void unmap_view_no_placeholder(uintptr_t addr, size_t size); - - // Commit memory covering the given virtual address range - static uintptr_t commit(uintptr_t addr, size_t size); - -public: - // Reserve memory with a placeholder - static uintptr_t reserve(uintptr_t addr, size_t size); - - // Unreserve memory - static void unreserve(uintptr_t addr, size_t size); - - // Create and commit paging file mapping - static HANDLE create_and_commit_paging_file_mapping(size_t size); - - // Close paging file mapping - static void close_paging_file_mapping(HANDLE file_handle); - - // Create a shared AWE section - static HANDLE create_shared_awe_section(); - - // Reserve memory attached to the shared AWE section - static uintptr_t reserve_for_shared_awe(HANDLE awe_section, uintptr_t addr, size_t size); - - // Unreserve memory attached to a shared AWE section - static void unreserve_for_shared_awe(uintptr_t addr, size_t size); - - // Split a placeholder - // - // A view can only replace an entire placeholder, so placeholders need to be - // split and coalesced to be the exact size of the new views. - // [addr, addr + size) needs to be a proper sub-placeholder of an existing - // placeholder. - static void split_placeholder(uintptr_t addr, size_t size); - - // Coalesce a placeholder - // - // [addr, addr + size) is the new placeholder. A sub-placeholder needs to - // exist within that range. - static void coalesce_placeholders(uintptr_t addr, size_t size); - - // Map a view of the file handle and replace the placeholder covering the - // given virtual address range - static void map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_offset, uintptr_t addr, size_t size); - - // Unmap the view and reinstate a placeholder covering the given virtual - // address range - static void unmap_view_preserve_placeholder(uintptr_t addr, size_t size); -}; - -#endif // OS_WINDOWS_GC_X_XMAPPER_WINDOWS_HPP diff --git a/src/hotspot/os/windows/gc/x/xNUMA_windows.cpp b/src/hotspot/os/windows/gc/x/xNUMA_windows.cpp deleted file mode 100644 index 47a84df962e92..0000000000000 --- a/src/hotspot/os/windows/gc/x/xNUMA_windows.cpp +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xNUMA.hpp" - -void XNUMA::pd_initialize() { - _enabled = false; -} - -uint32_t XNUMA::count() { - return 1; -} - -uint32_t XNUMA::id() { - return 0; -} - -uint32_t XNUMA::memory_id(uintptr_t addr) { - // NUMA support not enabled, assume everything belongs to node zero - return 0; -} diff --git a/src/hotspot/os/windows/gc/x/xPhysicalMemoryBacking_windows.cpp b/src/hotspot/os/windows/gc/x/xPhysicalMemoryBacking_windows.cpp deleted file mode 100644 index 92d47dfb7c84c..0000000000000 --- a/src/hotspot/os/windows/gc/x/xPhysicalMemoryBacking_windows.cpp +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xGranuleMap.inline.hpp" -#include "gc/x/xLargePages.inline.hpp" -#include "gc/x/xMapper_windows.hpp" -#include "gc/x/xPhysicalMemoryBacking_windows.hpp" -#include "logging/log.hpp" -#include "runtime/globals.hpp" -#include "utilities/debug.hpp" - -class XPhysicalMemoryBackingImpl : public CHeapObj<mtGC> { -public: - virtual size_t commit(size_t offset, size_t size) = 0; - virtual size_t uncommit(size_t offset, size_t size) = 0; - virtual void map(uintptr_t addr, size_t size, size_t offset) const = 0; - virtual void unmap(uintptr_t addr, size_t size) const = 0; -}; - -// Implements small pages (paged) support using placeholder reservation. -// -// The backing commits and uncommits physical memory, which can be -// multi-mapped into the virtual address space. To support fine-grained -// committing and uncommitting, each XGranuleSize'd chunk is mapped to -// a separate paging file mapping.
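[Editor's illustration, not part of the patch.] "Multi-mapped" above means that one paging-file-backed section can be mapped at several virtual addresses at the same time, with every view aliasing the same physical memory; the mapper comment earlier in this patch relies on exactly that to back the different heap views with one set of pages. A minimal standalone sketch, using only documented Win32 calls (the main() wrapper exists only for the example):

// Editor's sketch: two views of one section alias the same physical memory.
#include <windows.h>
#include <cassert>

int main() {
  const SIZE_T size = 64 * 1024;
  HANDLE section = CreateFileMappingW(INVALID_HANDLE_VALUE, nullptr,
                                      PAGE_READWRITE, 0, (DWORD)size, nullptr);
  assert(section != nullptr);

  // Two simultaneous views of the same section, at two different addresses.
  char* v1 = (char*)MapViewOfFile(section, FILE_MAP_WRITE, 0, 0, size);
  char* v2 = (char*)MapViewOfFile(section, FILE_MAP_WRITE, 0, 0, size);
  assert(v1 != nullptr && v2 != nullptr && v1 != v2);

  v1[0] = 42;           // a store through the first view ...
  assert(v2[0] == 42);  // ... is visible through the second

  UnmapViewOfFile(v1);
  UnmapViewOfFile(v2);
  CloseHandle(section);
  return 0;
}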
- -class XPhysicalMemoryBackingSmallPages : public XPhysicalMemoryBackingImpl { -private: - XGranuleMap<HANDLE> _handles; - - HANDLE get_handle(uintptr_t offset) const { - HANDLE const handle = _handles.get(offset); - assert(handle != 0, "Should be set"); - return handle; - } - - void put_handle(uintptr_t offset, HANDLE handle) { - assert(handle != INVALID_HANDLE_VALUE, "Invalid handle"); - assert(_handles.get(offset) == 0, "Should be cleared"); - _handles.put(offset, handle); - } - - void clear_handle(uintptr_t offset) { - assert(_handles.get(offset) != 0, "Should be set"); - _handles.put(offset, 0); - } - -public: - XPhysicalMemoryBackingSmallPages(size_t max_capacity) : - XPhysicalMemoryBackingImpl(), - _handles(max_capacity) {} - - size_t commit(size_t offset, size_t size) { - for (size_t i = 0; i < size; i += XGranuleSize) { - HANDLE const handle = XMapper::create_and_commit_paging_file_mapping(XGranuleSize); - if (handle == 0) { - return i; - } - - put_handle(offset + i, handle); - } - - return size; - } - - size_t uncommit(size_t offset, size_t size) { - for (size_t i = 0; i < size; i += XGranuleSize) { - HANDLE const handle = get_handle(offset + i); - clear_handle(offset + i); - XMapper::close_paging_file_mapping(handle); - } - - return size; - } - - void map(uintptr_t addr, size_t size, size_t offset) const { - assert(is_aligned(offset, XGranuleSize), "Misaligned"); - assert(is_aligned(addr, XGranuleSize), "Misaligned"); - assert(is_aligned(size, XGranuleSize), "Misaligned"); - - for (size_t i = 0; i < size; i += XGranuleSize) { - HANDLE const handle = get_handle(offset + i); - XMapper::map_view_replace_placeholder(handle, 0 /* offset */, addr + i, XGranuleSize); - } - } - - void unmap(uintptr_t addr, size_t size) const { - assert(is_aligned(addr, XGranuleSize), "Misaligned"); - assert(is_aligned(size, XGranuleSize), "Misaligned"); - - for (size_t i = 0; i < size; i += XGranuleSize) { - XMapper::unmap_view_preserve_placeholder(addr + i, XGranuleSize); - } - } -}; - -// Implements Large Pages (locked) support using shared AWE physical memory. -// -// Shared AWE physical memory also works with small pages, but it has -// a few drawbacks that make it a no-go to use it at this point: -// -// 1) It seems to use 8 bytes of committed memory per *reserved* memory. -// Given our scheme to use a large address space range this turns out to -// use too much memory. -// -// 2) It requires memory locking privileges, even for small pages. This -// has always been a requirement for large pages, and would be an extra -// restriction for usage with small pages. -// -// Note: The large pages size is tied to our XGranuleSize.
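[Editor's illustration, not part of the patch.] Condensing the large-pages path that the class below and XMapper implement: one shared AWE section is created up front, heap address space is reserved against it with MEM_RESERVE | MEM_PHYSICAL, and physical large pages are then attached with MapUserPhysicalPages. The sketch assumes a Windows SDK recent enough to declare CreateFileMapping2 and VirtualAlloc2 directly (the deleted code looks them up at runtime via XSyscall instead) and the memory-locking privilege mentioned above; awe_sketch is a made-up name and error handling is omitted:

// Editor's sketch of the shared-AWE flow used by the large-pages backing.
#include <windows.h>

static void awe_sketch(void* addr, size_t granule) {
  // 1) One shared AWE section for the whole heap.
  MEM_EXTENDED_PARAMETER sp = { 0 };
  sp.Type = MemSectionExtendedParameterUserPhysicalFlags;
  sp.ULong64 = 0;
  HANDLE section = CreateFileMapping2(INVALID_HANDLE_VALUE, nullptr,
                                      SECTION_MAP_READ | SECTION_MAP_WRITE,
                                      PAGE_READWRITE,
                                      SEC_RESERVE | SEC_LARGE_PAGES,
                                      0, nullptr, &sp, 1);

  // 2) Reserve address space bound to that section (MEM_PHYSICAL).
  MEM_EXTENDED_PARAMETER rp = { 0 };
  rp.Type = MemExtendedParameterUserPhysicalHandle;
  rp.Handle = section;
  void* base = VirtualAlloc2(GetCurrentProcess(), addr, granule,
                             MEM_RESERVE | MEM_PHYSICAL, PAGE_READWRITE,
                             &rp, 1);

  // 3) Allocate physical large pages against the section and map them in;
  //    the section handle is passed where a process handle normally goes,
  //    exactly as the backing below does. A null page array detaches again.
  ULONG_PTR npages = granule / GetLargePageMinimum();
  ULONG_PTR* pfns = new ULONG_PTR[npages];
  AllocateUserPhysicalPages(section, &npages, pfns);
  MapUserPhysicalPages(base, npages, pfns);
}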
- -extern HANDLE XAWESection; - -class XPhysicalMemoryBackingLargePages : public XPhysicalMemoryBackingImpl { -private: - ULONG_PTR* const _page_array; - - static ULONG_PTR* alloc_page_array(size_t max_capacity) { - const size_t npages = max_capacity / XGranuleSize; - const size_t array_size = npages * sizeof(ULONG_PTR); - - return (ULONG_PTR*)os::malloc(array_size, mtGC); - } - -public: - XPhysicalMemoryBackingLargePages(size_t max_capacity) : - XPhysicalMemoryBackingImpl(), - _page_array(alloc_page_array(max_capacity)) {} - - size_t commit(size_t offset, size_t size) { - const size_t index = offset >> XGranuleSizeShift; - const size_t npages = size >> XGranuleSizeShift; - - size_t npages_res = npages; - const bool res = AllocateUserPhysicalPages(XAWESection, &npages_res, &_page_array[index]); - if (!res) { - fatal("Failed to allocate physical memory " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)", - size / M, offset, GetLastError()); - } else { - log_debug(gc)("Allocated physical memory: " SIZE_FORMAT "M @ " PTR_FORMAT, size / M, offset); - } - - // AllocateUserPhysicalPages might not be able to allocate the requested amount of memory. - // The allocated number of pages are written in npages_res. - return npages_res << XGranuleSizeShift; - } - - size_t uncommit(size_t offset, size_t size) { - const size_t index = offset >> XGranuleSizeShift; - const size_t npages = size >> XGranuleSizeShift; - - size_t npages_res = npages; - const bool res = FreeUserPhysicalPages(XAWESection, &npages_res, &_page_array[index]); - if (!res) { - fatal("Failed to uncommit physical memory " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)", - size, offset, GetLastError()); - } - - return npages_res << XGranuleSizeShift; - } - - void map(uintptr_t addr, size_t size, size_t offset) const { - const size_t npages = size >> XGranuleSizeShift; - const size_t index = offset >> XGranuleSizeShift; - - const bool res = MapUserPhysicalPages((char*)addr, npages, &_page_array[index]); - if (!res) { - fatal("Failed to map view " PTR_FORMAT " " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)", - addr, size / M, offset, GetLastError()); - } - } - - void unmap(uintptr_t addr, size_t size) const { - const size_t npages = size >> XGranuleSizeShift; - - const bool res = MapUserPhysicalPages((char*)addr, npages, nullptr); - if (!res) { - fatal("Failed to unmap view " PTR_FORMAT " " SIZE_FORMAT "M (%d)", - addr, size / M, GetLastError()); - } - } -}; - -static XPhysicalMemoryBackingImpl* select_impl(size_t max_capacity) { - if (XLargePages::is_enabled()) { - return new XPhysicalMemoryBackingLargePages(max_capacity); - } - - return new XPhysicalMemoryBackingSmallPages(max_capacity); -} - -XPhysicalMemoryBacking::XPhysicalMemoryBacking(size_t max_capacity) : - _impl(select_impl(max_capacity)) {} - -bool XPhysicalMemoryBacking::is_initialized() const { - return true; -} - -void XPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const { - // Does nothing -} - -size_t XPhysicalMemoryBacking::commit(size_t offset, size_t length) { - log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", - offset / M, (offset + length) / M, length / M); - - return _impl->commit(offset, length); -} - -size_t XPhysicalMemoryBacking::uncommit(size_t offset, size_t length) { - log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", - offset / M, (offset + length) / M, length / M); - - return _impl->uncommit(offset, length); -} - -void XPhysicalMemoryBacking::map(uintptr_t addr, size_t size, 
size_t offset) const { - assert(is_aligned(offset, XGranuleSize), "Misaligned: " PTR_FORMAT, offset); - assert(is_aligned(addr, XGranuleSize), "Misaligned: " PTR_FORMAT, addr); - assert(is_aligned(size, XGranuleSize), "Misaligned: " PTR_FORMAT, size); - - _impl->map(addr, size, offset); -} - -void XPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const { - assert(is_aligned(addr, XGranuleSize), "Misaligned"); - assert(is_aligned(size, XGranuleSize), "Misaligned"); - - _impl->unmap(addr, size); -} diff --git a/src/hotspot/os/windows/gc/x/xPhysicalMemoryBacking_windows.hpp b/src/hotspot/os/windows/gc/x/xPhysicalMemoryBacking_windows.hpp deleted file mode 100644 index d6e123f21e51a..0000000000000 --- a/src/hotspot/os/windows/gc/x/xPhysicalMemoryBacking_windows.hpp +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef OS_WINDOWS_GC_X_XPHYSICALMEMORYBACKING_WINDOWS_HPP -#define OS_WINDOWS_GC_X_XPHYSICALMEMORYBACKING_WINDOWS_HPP - -#include "utilities/globalDefinitions.hpp" - -#include - -class XPhysicalMemoryBackingImpl; - -class XPhysicalMemoryBacking { -private: - XPhysicalMemoryBackingImpl* _impl; - -public: - XPhysicalMemoryBacking(size_t max_capacity); - - bool is_initialized() const; - - void warn_commit_limits(size_t max_capacity) const; - - size_t commit(size_t offset, size_t length); - size_t uncommit(size_t offset, size_t length); - - void map(uintptr_t addr, size_t size, size_t offset) const; - void unmap(uintptr_t addr, size_t size) const; -}; - -#endif // OS_WINDOWS_GC_X_XPHYSICALMEMORYBACKING_WINDOWS_HPP diff --git a/src/hotspot/os/windows/gc/x/xSyscall_windows.cpp b/src/hotspot/os/windows/gc/x/xSyscall_windows.cpp deleted file mode 100644 index f22966a54898d..0000000000000 --- a/src/hotspot/os/windows/gc/x/xSyscall_windows.cpp +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/shared/gcLogPrecious.hpp" -#include "gc/x/xSyscall_windows.hpp" -#include "runtime/java.hpp" -#include "runtime/os.hpp" - -XSyscall::CreateFileMappingWFn XSyscall::CreateFileMappingW; -XSyscall::CreateFileMapping2Fn XSyscall::CreateFileMapping2; -XSyscall::VirtualAlloc2Fn XSyscall::VirtualAlloc2; -XSyscall::VirtualFreeExFn XSyscall::VirtualFreeEx; -XSyscall::MapViewOfFile3Fn XSyscall::MapViewOfFile3; -XSyscall::UnmapViewOfFile2Fn XSyscall::UnmapViewOfFile2; - -static void* lookup_kernelbase_library() { - const char* const name = "KernelBase"; - char ebuf[1024]; - void* const handle = os::dll_load(name, ebuf, sizeof(ebuf)); - if (handle == nullptr) { - log_error_p(gc)("Failed to load library: %s", name); - } - return handle; -} - -static void* lookup_kernelbase_symbol(const char* name) { - static void* const handle = lookup_kernelbase_library(); - if (handle == nullptr) { - return nullptr; - } - return os::dll_lookup(handle, name); -} - -static bool has_kernelbase_symbol(const char* name) { - return lookup_kernelbase_symbol(name) != nullptr; -} - -template <typename Fn> -static void install_kernelbase_symbol(Fn*& fn, const char* name) { - fn = reinterpret_cast<Fn*>(lookup_kernelbase_symbol(name)); -} - -template <typename Fn> -static void install_kernelbase_1803_symbol_or_exit(Fn*& fn, const char* name) { - install_kernelbase_symbol(fn, name); - if (fn == nullptr) { - log_error_p(gc)("Failed to lookup symbol: %s", name); - vm_exit_during_initialization("ZGC requires Windows version 1803 or later"); - } -} - -void XSyscall::initialize() { - // Required - install_kernelbase_1803_symbol_or_exit(CreateFileMappingW, "CreateFileMappingW"); - install_kernelbase_1803_symbol_or_exit(VirtualAlloc2, "VirtualAlloc2"); - install_kernelbase_1803_symbol_or_exit(VirtualFreeEx, "VirtualFreeEx"); - install_kernelbase_1803_symbol_or_exit(MapViewOfFile3, "MapViewOfFile3"); - install_kernelbase_1803_symbol_or_exit(UnmapViewOfFile2, "UnmapViewOfFile2"); - - // Optional - for large pages support - install_kernelbase_symbol(CreateFileMapping2, "CreateFileMapping2"); -} - -bool XSyscall::is_supported() { - // Available in Windows version 1803 and later - return has_kernelbase_symbol("VirtualAlloc2"); -} - -bool XSyscall::is_large_pages_supported() { - // Available in Windows version 1809 and later - return has_kernelbase_symbol("CreateFileMapping2"); -} diff --git a/src/hotspot/os/windows/gc/x/xSyscall_windows.hpp b/src/hotspot/os/windows/gc/x/xSyscall_windows.hpp deleted file mode 100644 index 89ba2573b10cc..0000000000000 --- a/src/hotspot/os/windows/gc/x/xSyscall_windows.hpp +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation.
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef OS_WINDOWS_GC_X_XSYSCALL_WINDOWS_HPP -#define OS_WINDOWS_GC_X_XSYSCALL_WINDOWS_HPP - -#include "utilities/globalDefinitions.hpp" - -#include -#include - -class XSyscall { -private: - typedef HANDLE (*CreateFileMappingWFn)(HANDLE, LPSECURITY_ATTRIBUTES, DWORD, DWORD, DWORD, LPCWSTR); - typedef HANDLE (*CreateFileMapping2Fn)(HANDLE, LPSECURITY_ATTRIBUTES, ULONG, ULONG, ULONG, ULONG64, PCWSTR, PMEM_EXTENDED_PARAMETER, ULONG); - typedef PVOID (*VirtualAlloc2Fn)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, MEM_EXTENDED_PARAMETER*, ULONG); - typedef BOOL (*VirtualFreeExFn)(HANDLE, LPVOID, SIZE_T, DWORD); - typedef PVOID (*MapViewOfFile3Fn)(HANDLE, HANDLE, PVOID, ULONG64, SIZE_T, ULONG, ULONG, MEM_EXTENDED_PARAMETER*, ULONG); - typedef BOOL (*UnmapViewOfFile2Fn)(HANDLE, PVOID, ULONG); - -public: - static CreateFileMappingWFn CreateFileMappingW; - static CreateFileMapping2Fn CreateFileMapping2; - static VirtualAlloc2Fn VirtualAlloc2; - static VirtualFreeExFn VirtualFreeEx; - static MapViewOfFile3Fn MapViewOfFile3; - static UnmapViewOfFile2Fn UnmapViewOfFile2; - - static void initialize(); - - static bool is_supported(); - static bool is_large_pages_supported(); -}; - -#endif // OS_WINDOWS_GC_X_XSYSCALL_WINDOWS_HPP diff --git a/src/hotspot/os/windows/gc/x/xUtils_windows.cpp b/src/hotspot/os/windows/gc/x/xUtils_windows.cpp deleted file mode 100644 index 788da80834ab3..0000000000000 --- a/src/hotspot/os/windows/gc/x/xUtils_windows.cpp +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/x/xUtils.hpp" -#include "utilities/debug.hpp" - -#include - -uintptr_t XUtils::alloc_aligned(size_t alignment, size_t size) { - void* const res = _aligned_malloc(size, alignment); - - if (res == nullptr) { - fatal("_aligned_malloc failed"); - } - - memset(res, 0, size); - - return (uintptr_t)res; -} diff --git a/src/hotspot/os/windows/gc/x/xVirtualMemory_windows.cpp b/src/hotspot/os/windows/gc/x/xVirtualMemory_windows.cpp deleted file mode 100644 index a54f1e3cbaefc..0000000000000 --- a/src/hotspot/os/windows/gc/x/xVirtualMemory_windows.cpp +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xAddress.inline.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xLargePages.inline.hpp" -#include "gc/x/xMapper_windows.hpp" -#include "gc/x/xSyscall_windows.hpp" -#include "gc/x/xVirtualMemory.inline.hpp" -#include "utilities/align.hpp" -#include "utilities/debug.hpp" - -class XVirtualMemoryManagerImpl : public CHeapObj { -public: - virtual void initialize_before_reserve() {} - virtual void initialize_after_reserve(XMemoryManager* manager) {} - virtual bool reserve(uintptr_t addr, size_t size) = 0; - virtual void unreserve(uintptr_t addr, size_t size) = 0; -}; - -// Implements small pages (paged) support using placeholder reservation. 
-class XVirtualMemoryManagerSmallPages : public XVirtualMemoryManagerImpl { -private: - class PlaceholderCallbacks : public AllStatic { - public: - static void split_placeholder(uintptr_t start, size_t size) { - XMapper::split_placeholder(XAddress::marked0(start), size); - XMapper::split_placeholder(XAddress::marked1(start), size); - XMapper::split_placeholder(XAddress::remapped(start), size); - } - - static void coalesce_placeholders(uintptr_t start, size_t size) { - XMapper::coalesce_placeholders(XAddress::marked0(start), size); - XMapper::coalesce_placeholders(XAddress::marked1(start), size); - XMapper::coalesce_placeholders(XAddress::remapped(start), size); - } - - static void split_into_placeholder_granules(uintptr_t start, size_t size) { - for (uintptr_t addr = start; addr < start + size; addr += XGranuleSize) { - split_placeholder(addr, XGranuleSize); - } - } - - static void coalesce_into_one_placeholder(uintptr_t start, size_t size) { - assert(is_aligned(size, XGranuleSize), "Must be granule aligned"); - - if (size > XGranuleSize) { - coalesce_placeholders(start, size); - } - } - - static void create_callback(const XMemory* area) { - assert(is_aligned(area->size(), XGranuleSize), "Must be granule aligned"); - coalesce_into_one_placeholder(area->start(), area->size()); - } - - static void destroy_callback(const XMemory* area) { - assert(is_aligned(area->size(), XGranuleSize), "Must be granule aligned"); - // Don't try split the last granule - VirtualFree will fail - split_into_placeholder_granules(area->start(), area->size() - XGranuleSize); - } - - static void shrink_from_front_callback(const XMemory* area, size_t size) { - assert(is_aligned(size, XGranuleSize), "Must be granule aligned"); - split_into_placeholder_granules(area->start(), size); - } - - static void shrink_from_back_callback(const XMemory* area, size_t size) { - assert(is_aligned(size, XGranuleSize), "Must be granule aligned"); - // Don't try split the last granule - VirtualFree will fail - split_into_placeholder_granules(area->end() - size, size - XGranuleSize); - } - - static void grow_from_front_callback(const XMemory* area, size_t size) { - assert(is_aligned(area->size(), XGranuleSize), "Must be granule aligned"); - coalesce_into_one_placeholder(area->start() - size, area->size() + size); - } - - static void grow_from_back_callback(const XMemory* area, size_t size) { - assert(is_aligned(area->size(), XGranuleSize), "Must be granule aligned"); - coalesce_into_one_placeholder(area->start(), area->size() + size); - } - - static void register_with(XMemoryManager* manager) { - // Each reserved virtual memory address area registered in _manager is - // exactly covered by a single placeholder. Callbacks are installed so - // that whenever a memory area changes, the corresponding placeholder - // is adjusted. - // - // The create and grow callbacks are called when virtual memory is - // returned to the memory manager. The new memory area is then covered - // by a new single placeholder. - // - // The destroy and shrink callbacks are called when virtual memory is - // allocated from the memory manager. The memory area is then is split - // into granule-sized placeholders. - // - // See comment in zMapper_windows.cpp explaining why placeholders are - // split into XGranuleSize sized placeholders. 
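[Editor's illustration, not part of the patch.] A concrete way to read the callback scheme described above: suppose a three-granule area is covered by one placeholder and the manager hands out its middle granule. The middle granule is first carved out as its own placeholder, a view then replaces exactly that placeholder, and once the view is unmapped again the pieces can be coalesced back into a single placeholder. In raw Win32 terms, using the same flags the XMapper wrappers use (direct calls to MapViewOfFile3 and UnmapViewOfFile2 assume a recent SDK; G, base and section are stand-ins for the example):

// Editor's sketch of the split / map / unmap / coalesce cycle for one granule.
#include <windows.h>

static void placeholder_cycle(char* base, size_t G, HANDLE section) {
  // Carve the middle granule out of the [base, base + 3G) placeholder; this
  // single call splits it into three placeholders.
  VirtualFree(base + G, G, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);

  // A view may now replace exactly that granule-sized placeholder.
  MapViewOfFile3(section, GetCurrentProcess(), base + G, 0, G,
                 MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0);

  // Unmapping with MEM_PRESERVE_PLACEHOLDER turns it back into a placeholder...
  UnmapViewOfFile2(GetCurrentProcess(), base + G, MEM_PRESERVE_PLACEHOLDER);

  // ...so the three neighbours can be coalesced into one placeholder again,
  // mirroring the create/grow callbacks above.
  VirtualFree(base, 3 * G, MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS);
}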
- - XMemoryManager::Callbacks callbacks; - - callbacks._create = &create_callback; - callbacks._destroy = &destroy_callback; - callbacks._shrink_from_front = &shrink_from_front_callback; - callbacks._shrink_from_back = &shrink_from_back_callback; - callbacks._grow_from_front = &grow_from_front_callback; - callbacks._grow_from_back = &grow_from_back_callback; - - manager->register_callbacks(callbacks); - } - }; - - virtual void initialize_after_reserve(XMemoryManager* manager) { - PlaceholderCallbacks::register_with(manager); - } - - virtual bool reserve(uintptr_t addr, size_t size) { - const uintptr_t res = XMapper::reserve(addr, size); - - assert(res == addr || res == 0, "Should not reserve other memory than requested"); - return res == addr; - } - - virtual void unreserve(uintptr_t addr, size_t size) { - XMapper::unreserve(addr, size); - } -}; - -// Implements Large Pages (locked) support using shared AWE physical memory. - -// XPhysicalMemory layer needs access to the section -HANDLE XAWESection; - -class XVirtualMemoryManagerLargePages : public XVirtualMemoryManagerImpl { -private: - virtual void initialize_before_reserve() { - XAWESection = XMapper::create_shared_awe_section(); - } - - virtual bool reserve(uintptr_t addr, size_t size) { - const uintptr_t res = XMapper::reserve_for_shared_awe(XAWESection, addr, size); - - assert(res == addr || res == 0, "Should not reserve other memory than requested"); - return res == addr; - } - - virtual void unreserve(uintptr_t addr, size_t size) { - XMapper::unreserve_for_shared_awe(addr, size); - } -}; - -static XVirtualMemoryManagerImpl* _impl = nullptr; - -void XVirtualMemoryManager::pd_initialize_before_reserve() { - if (XLargePages::is_enabled()) { - _impl = new XVirtualMemoryManagerLargePages(); - } else { - _impl = new XVirtualMemoryManagerSmallPages(); - } - _impl->initialize_before_reserve(); -} - -void XVirtualMemoryManager::pd_initialize_after_reserve() { - _impl->initialize_after_reserve(&_manager); -} - -bool XVirtualMemoryManager::pd_reserve(uintptr_t addr, size_t size) { - return _impl->reserve(addr, size); -} - -void XVirtualMemoryManager::pd_unreserve(uintptr_t addr, size_t size) { - _impl->unreserve(addr, size); -} diff --git a/src/hotspot/os_cpu/linux_aarch64/gc/x/xSyscall_linux_aarch64.hpp b/src/hotspot/os_cpu/linux_aarch64/gc/x/xSyscall_linux_aarch64.hpp deleted file mode 100644 index b4c49f477a677..0000000000000 --- a/src/hotspot/os_cpu/linux_aarch64/gc/x/xSyscall_linux_aarch64.hpp +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef OS_CPU_LINUX_AARCH64_GC_X_XSYSCALL_LINUX_AARCH64_HPP -#define OS_CPU_LINUX_AARCH64_GC_X_XSYSCALL_LINUX_AARCH64_HPP - -#include <sys/syscall.h> - -// -// Support for building on older Linux systems -// - -#ifndef SYS_memfd_create -#define SYS_memfd_create 279 -#endif -#ifndef SYS_fallocate -#define SYS_fallocate 47 -#endif - -#endif // OS_CPU_LINUX_AARCH64_GC_X_XSYSCALL_LINUX_AARCH64_HPP diff --git a/src/hotspot/os_cpu/linux_ppc/gc/x/xSyscall_linux_ppc.hpp b/src/hotspot/os_cpu/linux_ppc/gc/x/xSyscall_linux_ppc.hpp deleted file mode 100644 index 22d51cd58f542..0000000000000 --- a/src/hotspot/os_cpu/linux_ppc/gc/x/xSyscall_linux_ppc.hpp +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2021 SAP SE. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef OS_CPU_LINUX_PPC_GC_X_XSYSCALL_LINUX_PPC_HPP -#define OS_CPU_LINUX_PPC_GC_X_XSYSCALL_LINUX_PPC_HPP - -#include <sys/syscall.h> - -// -// Support for building on older Linux systems -// - - -#ifndef SYS_memfd_create -#define SYS_memfd_create 360 -#endif -#ifndef SYS_fallocate -#define SYS_fallocate 309 -#endif - -#endif // OS_CPU_LINUX_PPC_GC_X_XSYSCALL_LINUX_PPC_HPP diff --git a/src/hotspot/os_cpu/linux_riscv/gc/x/xSyscall_linux_riscv.hpp b/src/hotspot/os_cpu/linux_riscv/gc/x/xSyscall_linux_riscv.hpp deleted file mode 100644 index bfd49b0bf4e4d..0000000000000 --- a/src/hotspot/os_cpu/linux_riscv/gc/x/xSyscall_linux_riscv.hpp +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code).
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef OS_CPU_LINUX_RISCV_GC_X_XSYSCALL_LINUX_RISCV_HPP -#define OS_CPU_LINUX_RISCV_GC_X_XSYSCALL_LINUX_RISCV_HPP - -#include <sys/syscall.h> - -// -// Support for building on older Linux systems -// - -#ifndef SYS_memfd_create -#define SYS_memfd_create 279 -#endif -#ifndef SYS_fallocate -#define SYS_fallocate 47 -#endif - -#endif // OS_CPU_LINUX_RISCV_GC_X_XSYSCALL_LINUX_RISCV_HPP diff --git a/src/hotspot/os_cpu/linux_x86/gc/x/xSyscall_linux_x86.hpp b/src/hotspot/os_cpu/linux_x86/gc/x/xSyscall_linux_x86.hpp deleted file mode 100644 index 2709b373b2812..0000000000000 --- a/src/hotspot/os_cpu/linux_x86/gc/x/xSyscall_linux_x86.hpp +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions.
- */
-
-#ifndef OS_CPU_LINUX_X86_GC_X_XSYSCALL_LINUX_X86_HPP
-#define OS_CPU_LINUX_X86_GC_X_XSYSCALL_LINUX_X86_HPP
-
-#include <sys/syscall.h>
-
-//
-// Support for building on older Linux systems
-//
-
-#ifndef SYS_memfd_create
-#define SYS_memfd_create 319
-#endif
-#ifndef SYS_fallocate
-#define SYS_fallocate 285
-#endif
-
-#endif // OS_CPU_LINUX_X86_GC_X_XSYSCALL_LINUX_X86_HPP
diff --git a/src/hotspot/share/gc/shared/barrierSetConfig.hpp b/src/hotspot/share/gc/shared/barrierSetConfig.hpp
index 76681aa898687..368312af06b21 100644
--- a/src/hotspot/share/gc/shared/barrierSetConfig.hpp
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.hpp
@@ -33,7 +33,6 @@
   EPSILONGC_ONLY(f(EpsilonBarrierSet)) \
   G1GC_ONLY(f(G1BarrierSet)) \
   SHENANDOAHGC_ONLY(f(ShenandoahBarrierSet)) \
-  ZGC_ONLY(f(XBarrierSet)) \
   ZGC_ONLY(f(ZBarrierSet))

 #define FOR_EACH_ABSTRACT_BARRIER_SET_DO(f) \
diff --git a/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp b/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp
index 9523428821b7e..001b5b00372bc 100644
--- a/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp
@@ -40,7 +40,6 @@
 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
 #endif
 #if INCLUDE_ZGC
-#include "gc/x/xBarrierSet.inline.hpp"
 #include "gc/z/zBarrierSet.inline.hpp"
 #endif

diff --git a/src/hotspot/share/gc/shared/gcConfig.cpp b/src/hotspot/share/gc/shared/gcConfig.cpp
index 506b368d6cf05..8eb265b54d939 100644
--- a/src/hotspot/share/gc/shared/gcConfig.cpp
+++ b/src/hotspot/share/gc/shared/gcConfig.cpp
@@ -44,7 +44,7 @@
 #include "gc/shenandoah/shenandoahArguments.hpp"
 #endif
 #if INCLUDE_ZGC
-#include "gc/z/shared/zSharedArguments.hpp"
+#include "gc/z/zArguments.hpp"
 #endif

 struct IncludedGC {
@@ -62,7 +62,7 @@ struct IncludedGC {
   PARALLELGC_ONLY(static ParallelArguments parallelArguments;)
   SERIALGC_ONLY(static SerialArguments serialArguments;)
   SHENANDOAHGC_ONLY(static ShenandoahArguments shenandoahArguments;)
-  ZGC_ONLY(static ZSharedArguments zArguments;)
+  ZGC_ONLY(static ZArguments zArguments;)

 // Table of included GCs, for translating between command
 // line flag, CollectedHeap::Name and GCArguments instance.
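
The gcConfig.cpp hunk above is typical of the change as a whole: every site that previously routed ZGC through the shared x/z layer, or branched on ZGenerational, is left with a single generational-only path. A minimal stand-alone sketch of that simplification follows; UseZGC, ZGenerational and the "ZGC Minor"/"ZGC Major" names are taken from this patch, while describe_collectors() and the printed format are invented purely for illustration and are not HotSpot code.

  // Illustrative sketch only, not HotSpot code.
  #include <cstdio>

  static bool UseZGC = true;  // real HotSpot flag, still present after the patch

  static void describe_collectors() {
    if (UseZGC) {
      // Before this patch: if (ZGenerational) report ZMinor/ZMajor, else NA/Z.
      // After it, generational ZGC is the only mode, so no second flag check is needed.
      std::printf("young: ZGC Minor, old: ZGC Major\n");
    }
  }

  int main() {
    describe_collectors();
    return 0;
  }
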
diff --git a/src/hotspot/share/gc/shared/gcConfiguration.cpp b/src/hotspot/share/gc/shared/gcConfiguration.cpp index 2e8d3eb2a515a..824e119e69649 100644 --- a/src/hotspot/share/gc/shared/gcConfiguration.cpp +++ b/src/hotspot/share/gc/shared/gcConfiguration.cpp @@ -43,11 +43,7 @@ GCName GCConfiguration::young_collector() const { } if (UseZGC) { - if (ZGenerational) { - return ZMinor; - } else { - return NA; - } + return ZMinor; } if (UseShenandoahGC) { @@ -66,12 +62,8 @@ GCName GCConfiguration::old_collector() const { return ParallelOld; } - if (UseZGC) { - if (ZGenerational) { - return ZMajor; - } else { - return Z; - } +if (UseZGC) { + return ZMajor; } if (UseShenandoahGC) { diff --git a/src/hotspot/share/gc/shared/gcName.hpp b/src/hotspot/share/gc/shared/gcName.hpp index 3d2dd350ac10e..b9b87c231ca91 100644 --- a/src/hotspot/share/gc/shared/gcName.hpp +++ b/src/hotspot/share/gc/shared/gcName.hpp @@ -37,7 +37,6 @@ enum GCName { G1Full, ZMinor, ZMajor, - Z, // Support for the legacy, single-gen mode Shenandoah, NA, GCNameEndSentinel @@ -56,7 +55,6 @@ class GCNameHelper { case G1Full: return "G1Full"; case ZMinor: return "ZGC Minor"; case ZMajor: return "ZGC Major"; - case Z: return "Z"; case Shenandoah: return "Shenandoah"; case NA: return "N/A"; default: ShouldNotReachHere(); return nullptr; diff --git a/src/hotspot/share/gc/shared/gc_globals.hpp b/src/hotspot/share/gc/shared/gc_globals.hpp index 34bc638c9baca..9086c25ee48fb 100644 --- a/src/hotspot/share/gc/shared/gc_globals.hpp +++ b/src/hotspot/share/gc/shared/gc_globals.hpp @@ -43,7 +43,7 @@ #include "gc/shenandoah/shenandoah_globals.hpp" #endif #if INCLUDE_ZGC -#include "gc/z/shared/z_shared_globals.hpp" +#include "gc/z/z_globals.hpp" #endif #define GC_FLAGS(develop, \ @@ -93,7 +93,7 @@ range, \ constraint)) \ \ - ZGC_ONLY(GC_Z_SHARED_FLAGS( \ + ZGC_ONLY(GC_Z_FLAGS( \ develop, \ develop_pd, \ product, \ @@ -118,9 +118,6 @@ product(bool, UseZGC, false, \ "Use the Z garbage collector") \ \ - product(bool, ZGenerational, true, \ - "Use the generational version of ZGC") \ - \ product(bool, UseShenandoahGC, false, \ "Use the Shenandoah garbage collector") \ \ diff --git a/src/hotspot/share/gc/shared/vmStructs_gc.hpp b/src/hotspot/share/gc/shared/vmStructs_gc.hpp index f2850f75e2441..bba9c9e099fc9 100644 --- a/src/hotspot/share/gc/shared/vmStructs_gc.hpp +++ b/src/hotspot/share/gc/shared/vmStructs_gc.hpp @@ -46,7 +46,7 @@ #include "gc/shenandoah/vmStructs_shenandoah.hpp" #endif #if INCLUDE_ZGC -#include "gc/z/shared/vmStructs_z_shared.hpp" +#include "gc/z/vmStructs_z.hpp" #endif #define VM_STRUCTS_GC(nonstatic_field, \ @@ -69,7 +69,7 @@ SHENANDOAHGC_ONLY(VM_STRUCTS_SHENANDOAH(nonstatic_field, \ volatile_nonstatic_field, \ static_field)) \ - ZGC_ONLY(VM_STRUCTS_Z_SHARED(nonstatic_field, \ + ZGC_ONLY(VM_STRUCTS_Z(nonstatic_field, \ volatile_nonstatic_field, \ static_field)) \ \ @@ -121,7 +121,7 @@ SHENANDOAHGC_ONLY(VM_TYPES_SHENANDOAH(declare_type, \ declare_toplevel_type, \ declare_integer_type)) \ - ZGC_ONLY(VM_TYPES_Z_SHARED(declare_type, \ + ZGC_ONLY(VM_TYPES_Z(declare_type, \ declare_toplevel_type, \ declare_integer_type)) \ \ @@ -175,7 +175,7 @@ declare_constant_with_value)) \ SHENANDOAHGC_ONLY(VM_INT_CONSTANTS_SHENANDOAH(declare_constant, \ declare_constant_with_value)) \ - ZGC_ONLY(VM_INT_CONSTANTS_Z_SHARED(declare_constant, \ + ZGC_ONLY(VM_INT_CONSTANTS_Z(declare_constant, \ declare_constant_with_value)) \ \ /********************************************/ \ @@ -199,6 +199,6 @@ declare_constant(CollectedHeap::G1) \ #define 
VM_LONG_CONSTANTS_GC(declare_constant) \ - ZGC_ONLY(VM_LONG_CONSTANTS_Z_SHARED(declare_constant)) + ZGC_ONLY(VM_LONG_CONSTANTS_Z(declare_constant)) #endif // SHARE_GC_SHARED_VMSTRUCTS_GC_HPP diff --git a/src/hotspot/share/gc/x/c1/xBarrierSetC1.cpp b/src/hotspot/share/gc/x/c1/xBarrierSetC1.cpp deleted file mode 100644 index 6f64392cefced..0000000000000 --- a/src/hotspot/share/gc/x/c1/xBarrierSetC1.cpp +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "c1/c1_LIR.hpp" -#include "c1/c1_LIRGenerator.hpp" -#include "c1/c1_CodeStubs.hpp" -#include "gc/x/c1/xBarrierSetC1.hpp" -#include "gc/x/xBarrierSet.hpp" -#include "gc/x/xBarrierSetAssembler.hpp" -#include "gc/x/xThreadLocalData.hpp" -#include "utilities/macros.hpp" - -XLoadBarrierStubC1::XLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address runtime_stub) : - _decorators(access.decorators()), - _ref_addr(access.resolved_addr()), - _ref(ref), - _tmp(LIR_OprFact::illegalOpr), - _runtime_stub(runtime_stub) { - - assert(_ref_addr->is_address(), "Must be an address"); - assert(_ref->is_register(), "Must be a register"); - - // Allocate tmp register if needed - if (_ref_addr->as_address_ptr()->index()->is_valid() || - _ref_addr->as_address_ptr()->disp() != 0) { - // Has index or displacement, need tmp register to load address into - _tmp = access.gen()->new_pointer_register(); - } - - FrameMap* f = Compilation::current()->frame_map(); - f->update_reserved_argument_area_size(2 * BytesPerWord); -} - -DecoratorSet XLoadBarrierStubC1::decorators() const { - return _decorators; -} - -LIR_Opr XLoadBarrierStubC1::ref() const { - return _ref; -} - -LIR_Opr XLoadBarrierStubC1::ref_addr() const { - return _ref_addr; -} - -LIR_Opr XLoadBarrierStubC1::tmp() const { - return _tmp; -} - -address XLoadBarrierStubC1::runtime_stub() const { - return _runtime_stub; -} - -void XLoadBarrierStubC1::visit(LIR_OpVisitState* visitor) { - visitor->do_slow_case(); - visitor->do_input(_ref_addr); - visitor->do_output(_ref); - if (_tmp->is_valid()) { - visitor->do_temp(_tmp); - } -} - -void XLoadBarrierStubC1::emit_code(LIR_Assembler* ce) { - XBarrierSet::assembler()->generate_c1_load_barrier_stub(ce, this); -} - -#ifndef PRODUCT -void XLoadBarrierStubC1::print_name(outputStream* out) const { - out->print("XLoadBarrierStubC1"); -} -#endif // PRODUCT - -class LIR_OpXLoadBarrierTest : public LIR_Op { -private: - LIR_Opr _opr; - -public: - LIR_OpXLoadBarrierTest(LIR_Opr opr) : - 
LIR_Op(lir_xloadbarrier_test, LIR_OprFact::illegalOpr, nullptr), - _opr(opr) {} - - virtual void visit(LIR_OpVisitState* state) { - state->do_input(_opr); - } - - virtual void emit_code(LIR_Assembler* ce) { - XBarrierSet::assembler()->generate_c1_load_barrier_test(ce, _opr); - } - - virtual void print_instr(outputStream* out) const { - _opr->print(out); - out->print(" "); - } - -#ifndef PRODUCT - virtual const char* name() const { - return "lir_z_load_barrier_test"; - } -#endif // PRODUCT -}; - -static bool barrier_needed(LIRAccess& access) { - return XBarrierSet::barrier_needed(access.decorators(), access.type()); -} - -XBarrierSetC1::XBarrierSetC1() : - _load_barrier_on_oop_field_preloaded_runtime_stub(nullptr), - _load_barrier_on_weak_oop_field_preloaded_runtime_stub(nullptr) {} - -address XBarrierSetC1::load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const { - assert((decorators & ON_PHANTOM_OOP_REF) == 0, "Unsupported decorator"); - //assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Unsupported decorator"); - - if ((decorators & ON_WEAK_OOP_REF) != 0) { - return _load_barrier_on_weak_oop_field_preloaded_runtime_stub; - } else { - return _load_barrier_on_oop_field_preloaded_runtime_stub; - } -} - -#ifdef ASSERT -#define __ access.gen()->lir(__FILE__, __LINE__)-> -#else -#define __ access.gen()->lir()-> -#endif - -void XBarrierSetC1::load_barrier(LIRAccess& access, LIR_Opr result) const { - // Fast path - __ append(new LIR_OpXLoadBarrierTest(result)); - - // Slow path - const address runtime_stub = load_barrier_on_oop_field_preloaded_runtime_stub(access.decorators()); - CodeStub* const stub = new XLoadBarrierStubC1(access, result, runtime_stub); - __ branch(lir_cond_notEqual, stub); - __ branch_destination(stub->continuation()); -} - -LIR_Opr XBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) { - // We must resolve in register when patching. This is to avoid - // having a patch area in the load barrier stub, since the call - // into the runtime to patch will not have the proper oop map. 
- const bool patch_before_barrier = barrier_needed(access) && (access.decorators() & C1_NEEDS_PATCHING) != 0; - return BarrierSetC1::resolve_address(access, resolve_in_register || patch_before_barrier); -} - -#undef __ - -void XBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) { - BarrierSetC1::load_at_resolved(access, result); - - if (barrier_needed(access)) { - load_barrier(access, result); - } -} - -static void pre_load_barrier(LIRAccess& access) { - DecoratorSet decorators = access.decorators(); - - // Downgrade access to MO_UNORDERED - decorators = (decorators & ~MO_DECORATOR_MASK) | MO_UNORDERED; - - // Remove ACCESS_WRITE - decorators = (decorators & ~ACCESS_WRITE); - - // Generate synthetic load at - access.gen()->access_load_at(decorators, - access.type(), - access.base().item(), - access.offset().opr(), - access.gen()->new_register(access.type()), - nullptr /* patch_emit_info */, - nullptr /* load_emit_info */); -} - -LIR_Opr XBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) { - if (barrier_needed(access)) { - pre_load_barrier(access); - } - - return BarrierSetC1::atomic_xchg_at_resolved(access, value); -} - -LIR_Opr XBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) { - if (barrier_needed(access)) { - pre_load_barrier(access); - } - - return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value); -} - -class XLoadBarrierRuntimeStubCodeGenClosure : public StubAssemblerCodeGenClosure { -private: - const DecoratorSet _decorators; - -public: - XLoadBarrierRuntimeStubCodeGenClosure(DecoratorSet decorators) : - _decorators(decorators) {} - - virtual OopMapSet* generate_code(StubAssembler* sasm) { - XBarrierSet::assembler()->generate_c1_load_barrier_runtime_stub(sasm, _decorators); - return nullptr; - } -}; - -static address generate_c1_runtime_stub(BufferBlob* blob, DecoratorSet decorators, const char* name) { - XLoadBarrierRuntimeStubCodeGenClosure cl(decorators); - CodeBlob* const code_blob = Runtime1::generate_blob(blob, C1StubId::NO_STUBID /* stub_id */, name, false /* expect_oop_map*/, &cl); - return code_blob->code_begin(); -} - -void XBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* blob) { - _load_barrier_on_oop_field_preloaded_runtime_stub = - generate_c1_runtime_stub(blob, ON_STRONG_OOP_REF, "load_barrier_on_oop_field_preloaded_runtime_stub"); - _load_barrier_on_weak_oop_field_preloaded_runtime_stub = - generate_c1_runtime_stub(blob, ON_WEAK_OOP_REF, "load_barrier_on_weak_oop_field_preloaded_runtime_stub"); -} diff --git a/src/hotspot/share/gc/x/c1/xBarrierSetC1.hpp b/src/hotspot/share/gc/x/c1/xBarrierSetC1.hpp deleted file mode 100644 index 26c2e142cdf80..0000000000000 --- a/src/hotspot/share/gc/x/c1/xBarrierSetC1.hpp +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_C1_XBARRIERSETC1_HPP -#define SHARE_GC_X_C1_XBARRIERSETC1_HPP - -#include "c1/c1_CodeStubs.hpp" -#include "c1/c1_IR.hpp" -#include "c1/c1_LIR.hpp" -#include "gc/shared/c1/barrierSetC1.hpp" -#include "oops/accessDecorators.hpp" - -class XLoadBarrierStubC1 : public CodeStub { -private: - DecoratorSet _decorators; - LIR_Opr _ref_addr; - LIR_Opr _ref; - LIR_Opr _tmp; - address _runtime_stub; - -public: - XLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address runtime_stub); - - DecoratorSet decorators() const; - LIR_Opr ref() const; - LIR_Opr ref_addr() const; - LIR_Opr tmp() const; - address runtime_stub() const; - - virtual void emit_code(LIR_Assembler* ce); - virtual void visit(LIR_OpVisitState* visitor); - -#ifndef PRODUCT - virtual void print_name(outputStream* out) const; -#endif // PRODUCT -}; - -class XBarrierSetC1 : public BarrierSetC1 { -private: - address _load_barrier_on_oop_field_preloaded_runtime_stub; - address _load_barrier_on_weak_oop_field_preloaded_runtime_stub; - - address load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const; - void load_barrier(LIRAccess& access, LIR_Opr result) const; - -protected: - virtual LIR_Opr resolve_address(LIRAccess& access, bool resolve_in_register); - virtual void load_at_resolved(LIRAccess& access, LIR_Opr result); - virtual LIR_Opr atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value); - virtual LIR_Opr atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value); - -public: - XBarrierSetC1(); - - virtual void generate_c1_runtime_stubs(BufferBlob* blob); -}; - -#endif // SHARE_GC_X_C1_XBARRIERSETC1_HPP diff --git a/src/hotspot/share/gc/x/c2/xBarrierSetC2.cpp b/src/hotspot/share/gc/x/c2/xBarrierSetC2.cpp deleted file mode 100644 index d006b37e7d208..0000000000000 --- a/src/hotspot/share/gc/x/c2/xBarrierSetC2.cpp +++ /dev/null @@ -1,583 +0,0 @@ -/* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "classfile/javaClasses.hpp" -#include "gc/x/c2/xBarrierSetC2.hpp" -#include "gc/x/xBarrierSet.hpp" -#include "gc/x/xBarrierSetAssembler.hpp" -#include "gc/x/xBarrierSetRuntime.hpp" -#include "opto/arraycopynode.hpp" -#include "opto/addnode.hpp" -#include "opto/block.hpp" -#include "opto/compile.hpp" -#include "opto/graphKit.hpp" -#include "opto/machnode.hpp" -#include "opto/macro.hpp" -#include "opto/memnode.hpp" -#include "opto/node.hpp" -#include "opto/output.hpp" -#include "opto/regalloc.hpp" -#include "opto/rootnode.hpp" -#include "opto/runtime.hpp" -#include "opto/type.hpp" -#include "utilities/growableArray.hpp" -#include "utilities/macros.hpp" - -class XBarrierSetC2State : public ArenaObj { -private: - GrowableArray* _stubs; - Node_Array _live; - -public: - XBarrierSetC2State(Arena* arena) : - _stubs(new (arena) GrowableArray(arena, 8, 0, nullptr)), - _live(arena) {} - - GrowableArray* stubs() { - return _stubs; - } - - RegMask* live(const Node* node) { - if (!node->is_Mach()) { - // Don't need liveness for non-MachNodes - return nullptr; - } - - const MachNode* const mach = node->as_Mach(); - if (mach->barrier_data() == XLoadBarrierElided) { - // Don't need liveness data for nodes without barriers - return nullptr; - } - - RegMask* live = (RegMask*)_live[node->_idx]; - if (live == nullptr) { - live = new (Compile::current()->comp_arena()->AmallocWords(sizeof(RegMask))) RegMask(); - _live.map(node->_idx, (Node*)live); - } - - return live; - } -}; - -static XBarrierSetC2State* barrier_set_state() { - return reinterpret_cast(Compile::current()->barrier_set_state()); -} - -XLoadBarrierStubC2* XLoadBarrierStubC2::create(const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) { - XLoadBarrierStubC2* const stub = new (Compile::current()->comp_arena()) XLoadBarrierStubC2(node, ref_addr, ref, tmp, barrier_data); - if (!Compile::current()->output()->in_scratch_emit_size()) { - barrier_set_state()->stubs()->append(stub); - } - - return stub; -} - -XLoadBarrierStubC2::XLoadBarrierStubC2(const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) : - _node(node), - _ref_addr(ref_addr), - _ref(ref), - _tmp(tmp), - _barrier_data(barrier_data), - _entry(), - _continuation() { - assert_different_registers(ref, ref_addr.base()); - assert_different_registers(ref, ref_addr.index()); -} - -Address XLoadBarrierStubC2::ref_addr() const { - return _ref_addr; -} - -Register XLoadBarrierStubC2::ref() const { - return _ref; -} - -Register XLoadBarrierStubC2::tmp() const { - return _tmp; -} - -address XLoadBarrierStubC2::slow_path() const { - DecoratorSet decorators = DECORATORS_NONE; - if (_barrier_data & XLoadBarrierStrong) { - decorators |= ON_STRONG_OOP_REF; - } - if (_barrier_data & XLoadBarrierWeak) { - decorators |= ON_WEAK_OOP_REF; - } - if (_barrier_data & XLoadBarrierPhantom) { - decorators |= ON_PHANTOM_OOP_REF; - } - if (_barrier_data & XLoadBarrierNoKeepalive) { - decorators |= AS_NO_KEEPALIVE; - } - return XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators); -} - -RegMask& XLoadBarrierStubC2::live() const { - RegMask* mask = barrier_set_state()->live(_node); - assert(mask != nullptr, "must be mach-node with barrier"); - return *mask; -} - -Label* XLoadBarrierStubC2::entry() { - // The _entry will never be bound when in_scratch_emit_size() is true. - // However, we still need to return a label that is not bound now, but - // will eventually be bound. 
Any label will do, as it will only act as - // a placeholder, so we return the _continuation label. - return Compile::current()->output()->in_scratch_emit_size() ? &_continuation : &_entry; -} - -Label* XLoadBarrierStubC2::continuation() { - return &_continuation; -} - -void* XBarrierSetC2::create_barrier_state(Arena* comp_arena) const { - return new (comp_arena) XBarrierSetC2State(comp_arena); -} - -void XBarrierSetC2::late_barrier_analysis() const { - analyze_dominating_barriers(); - compute_liveness_at_stubs(); -} - -void XBarrierSetC2::emit_stubs(CodeBuffer& cb) const { - MacroAssembler masm(&cb); - GrowableArray* const stubs = barrier_set_state()->stubs(); - - for (int i = 0; i < stubs->length(); i++) { - // Make sure there is enough space in the code buffer - if (cb.insts()->maybe_expand_to_ensure_remaining(PhaseOutput::MAX_inst_size) && cb.blob() == nullptr) { - ciEnv::current()->record_failure("CodeCache is full"); - return; - } - - XBarrierSet::assembler()->generate_c2_load_barrier_stub(&masm, stubs->at(i)); - } - - masm.flush(); -} - -int XBarrierSetC2::estimate_stub_size() const { - Compile* const C = Compile::current(); - BufferBlob* const blob = C->output()->scratch_buffer_blob(); - GrowableArray* const stubs = barrier_set_state()->stubs(); - int size = 0; - - for (int i = 0; i < stubs->length(); i++) { - CodeBuffer cb(blob->content_begin(), (address)C->output()->scratch_locs_memory() - blob->content_begin()); - MacroAssembler masm(&cb); - XBarrierSet::assembler()->generate_c2_load_barrier_stub(&masm, stubs->at(i)); - size += cb.insts_size(); - } - - return size; -} - -static void set_barrier_data(C2Access& access) { - if (XBarrierSet::barrier_needed(access.decorators(), access.type())) { - uint8_t barrier_data = 0; - - if (access.decorators() & ON_PHANTOM_OOP_REF) { - barrier_data |= XLoadBarrierPhantom; - } else if (access.decorators() & ON_WEAK_OOP_REF) { - barrier_data |= XLoadBarrierWeak; - } else { - barrier_data |= XLoadBarrierStrong; - } - - if (access.decorators() & AS_NO_KEEPALIVE) { - barrier_data |= XLoadBarrierNoKeepalive; - } - - access.set_barrier_data(barrier_data); - } -} - -Node* XBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const { - set_barrier_data(access); - return BarrierSetC2::load_at_resolved(access, val_type); -} - -Node* XBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val, - Node* new_val, const Type* val_type) const { - set_barrier_data(access); - return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type); -} - -Node* XBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val, - Node* new_val, const Type* value_type) const { - set_barrier_data(access); - return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type); -} - -Node* XBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const { - set_barrier_data(access); - return BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type); -} - -bool XBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, - bool is_clone, bool is_clone_instance, - ArrayCopyPhase phase) const { - if (phase == ArrayCopyPhase::Parsing) { - return false; - } - if (phase == ArrayCopyPhase::Optimization) { - return is_clone_instance; - } - // else ArrayCopyPhase::Expansion - return type == T_OBJECT || type == T_ARRAY; -} - -// This TypeFunc assumes a 64bit system 
-static const TypeFunc* clone_type() { - // Create input type (domain) - const Type** domain_fields = TypeTuple::fields(4); - domain_fields[TypeFunc::Parms + 0] = TypeInstPtr::NOTNULL; // src - domain_fields[TypeFunc::Parms + 1] = TypeInstPtr::NOTNULL; // dst - domain_fields[TypeFunc::Parms + 2] = TypeLong::LONG; // size lower - domain_fields[TypeFunc::Parms + 3] = Type::HALF; // size upper - const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + 4, domain_fields); - - // Create result type (range) - const Type** range_fields = TypeTuple::fields(0); - const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 0, range_fields); - - return TypeFunc::make(domain, range); -} - -#define XTOP LP64_ONLY(COMMA phase->top()) - -void XBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const { - Node* const src = ac->in(ArrayCopyNode::Src); - const TypeAryPtr* ary_ptr = src->get_ptr_type()->isa_aryptr(); - - if (ac->is_clone_array() && ary_ptr != nullptr) { - BasicType bt = ary_ptr->elem()->array_element_basic_type(); - if (is_reference_type(bt)) { - // Clone object array - bt = T_OBJECT; - } else { - // Clone primitive array - bt = T_LONG; - } - - Node* ctrl = ac->in(TypeFunc::Control); - Node* mem = ac->in(TypeFunc::Memory); - Node* src = ac->in(ArrayCopyNode::Src); - Node* src_offset = ac->in(ArrayCopyNode::SrcPos); - Node* dest = ac->in(ArrayCopyNode::Dest); - Node* dest_offset = ac->in(ArrayCopyNode::DestPos); - Node* length = ac->in(ArrayCopyNode::Length); - - if (bt == T_OBJECT) { - // BarrierSetC2::clone sets the offsets via BarrierSetC2::arraycopy_payload_base_offset - // which 8-byte aligns them to allow for word size copies. Make sure the offsets point - // to the first element in the array when cloning object arrays. Otherwise, load - // barriers are applied to parts of the header. Also adjust the length accordingly. - assert(src_offset == dest_offset, "should be equal"); - jlong offset = src_offset->get_long(); - if (offset != arrayOopDesc::base_offset_in_bytes(T_OBJECT)) { - assert(!UseCompressedClassPointers, "should only happen without compressed class pointers"); - assert((arrayOopDesc::base_offset_in_bytes(T_OBJECT) - offset) == BytesPerLong, "unexpected offset"); - length = phase->transform_later(new SubLNode(length, phase->longcon(1))); // Size is in longs - src_offset = phase->longcon(arrayOopDesc::base_offset_in_bytes(T_OBJECT)); - dest_offset = src_offset; - } - } - Node* payload_src = phase->basic_plus_adr(src, src_offset); - Node* payload_dst = phase->basic_plus_adr(dest, dest_offset); - - const char* copyfunc_name = "arraycopy"; - address copyfunc_addr = phase->basictype2arraycopy(bt, nullptr, nullptr, true, copyfunc_name, true); - - const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM; - const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type(); - - Node* call = phase->make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, payload_src, payload_dst, length XTOP); - phase->transform_later(call); - - phase->igvn().replace_node(ac, call); - return; - } - - // Clone instance - Node* const ctrl = ac->in(TypeFunc::Control); - Node* const mem = ac->in(TypeFunc::Memory); - Node* const dst = ac->in(ArrayCopyNode::Dest); - Node* const size = ac->in(ArrayCopyNode::Length); - - assert(size->bottom_type()->is_long(), "Should be long"); - - // The native clone we are calling here expects the instance size in words - // Add header/offset size to payload size to get instance size. 
- Node* const base_offset = phase->longcon(arraycopy_payload_base_offset(ac->is_clone_array()) >> LogBytesPerLong); - Node* const full_size = phase->transform_later(new AddLNode(size, base_offset)); - - Node* const call = phase->make_leaf_call(ctrl, - mem, - clone_type(), - XBarrierSetRuntime::clone_addr(), - "XBarrierSetRuntime::clone", - TypeRawPtr::BOTTOM, - src, - dst, - full_size, - phase->top()); - phase->transform_later(call); - phase->igvn().replace_node(ac, call); -} - -#undef XTOP - -// == Dominating barrier elision == - -static bool block_has_safepoint(const Block* block, uint from, uint to) { - for (uint i = from; i < to; i++) { - if (block->get_node(i)->is_MachSafePoint()) { - // Safepoint found - return true; - } - } - - // Safepoint not found - return false; -} - -static bool block_has_safepoint(const Block* block) { - return block_has_safepoint(block, 0, block->number_of_nodes()); -} - -static uint block_index(const Block* block, const Node* node) { - for (uint j = 0; j < block->number_of_nodes(); ++j) { - if (block->get_node(j) == node) { - return j; - } - } - ShouldNotReachHere(); - return 0; -} - -void XBarrierSetC2::analyze_dominating_barriers() const { - ResourceMark rm; - Compile* const C = Compile::current(); - PhaseCFG* const cfg = C->cfg(); - Block_List worklist; - Node_List mem_ops; - Node_List barrier_loads; - - // Step 1 - Find accesses, and track them in lists - for (uint i = 0; i < cfg->number_of_blocks(); ++i) { - const Block* const block = cfg->get_block(i); - for (uint j = 0; j < block->number_of_nodes(); ++j) { - const Node* const node = block->get_node(j); - if (!node->is_Mach()) { - continue; - } - - MachNode* const mach = node->as_Mach(); - switch (mach->ideal_Opcode()) { - case Op_LoadP: - if ((mach->barrier_data() & XLoadBarrierStrong) != 0) { - barrier_loads.push(mach); - } - if ((mach->barrier_data() & (XLoadBarrierStrong | XLoadBarrierNoKeepalive)) == - XLoadBarrierStrong) { - mem_ops.push(mach); - } - break; - case Op_CompareAndExchangeP: - case Op_CompareAndSwapP: - case Op_GetAndSetP: - if ((mach->barrier_data() & XLoadBarrierStrong) != 0) { - barrier_loads.push(mach); - } - case Op_StoreP: - mem_ops.push(mach); - break; - - default: - break; - } - } - } - - // Step 2 - Find dominating accesses for each load - for (uint i = 0; i < barrier_loads.size(); i++) { - MachNode* const load = barrier_loads.at(i)->as_Mach(); - const TypePtr* load_adr_type = nullptr; - intptr_t load_offset = 0; - const Node* const load_obj = load->get_base_and_disp(load_offset, load_adr_type); - Block* const load_block = cfg->get_block_for_node(load); - const uint load_index = block_index(load_block, load); - - for (uint j = 0; j < mem_ops.size(); j++) { - MachNode* mem = mem_ops.at(j)->as_Mach(); - const TypePtr* mem_adr_type = nullptr; - intptr_t mem_offset = 0; - const Node* mem_obj = mem->get_base_and_disp(mem_offset, mem_adr_type); - Block* mem_block = cfg->get_block_for_node(mem); - uint mem_index = block_index(mem_block, mem); - - if (load_obj == NodeSentinel || mem_obj == NodeSentinel || - load_obj == nullptr || mem_obj == nullptr || - load_offset < 0 || mem_offset < 0) { - continue; - } - - if (mem_obj != load_obj || mem_offset != load_offset) { - // Not the same addresses, not a candidate - continue; - } - - if (load_block == mem_block) { - // Earlier accesses in the same block - if (mem_index < load_index && !block_has_safepoint(mem_block, mem_index + 1, load_index)) { - load->set_barrier_data(XLoadBarrierElided); - } - } else if 
(mem_block->dominates(load_block)) { - // Dominating block? Look around for safepoints - ResourceMark rm; - Block_List stack; - VectorSet visited; - stack.push(load_block); - bool safepoint_found = block_has_safepoint(load_block); - while (!safepoint_found && stack.size() > 0) { - Block* block = stack.pop(); - if (visited.test_set(block->_pre_order)) { - continue; - } - if (block_has_safepoint(block)) { - safepoint_found = true; - break; - } - if (block == mem_block) { - continue; - } - - // Push predecessor blocks - for (uint p = 1; p < block->num_preds(); ++p) { - Block* pred = cfg->get_block_for_node(block->pred(p)); - stack.push(pred); - } - } - - if (!safepoint_found) { - load->set_barrier_data(XLoadBarrierElided); - } - } - } - } -} - -// == Reduced spilling optimization == - -void XBarrierSetC2::compute_liveness_at_stubs() const { - ResourceMark rm; - Compile* const C = Compile::current(); - Arena* const A = Thread::current()->resource_area(); - PhaseCFG* const cfg = C->cfg(); - PhaseRegAlloc* const regalloc = C->regalloc(); - RegMask* const live = NEW_ARENA_ARRAY(A, RegMask, cfg->number_of_blocks() * sizeof(RegMask)); - XBarrierSetAssembler* const bs = XBarrierSet::assembler(); - Block_List worklist; - - for (uint i = 0; i < cfg->number_of_blocks(); ++i) { - new ((void*)(live + i)) RegMask(); - worklist.push(cfg->get_block(i)); - } - - while (worklist.size() > 0) { - const Block* const block = worklist.pop(); - RegMask& old_live = live[block->_pre_order]; - RegMask new_live; - - // Initialize to union of successors - for (uint i = 0; i < block->_num_succs; i++) { - const uint succ_id = block->_succs[i]->_pre_order; - new_live.OR(live[succ_id]); - } - - // Walk block backwards, computing liveness - for (int i = block->number_of_nodes() - 1; i >= 0; --i) { - const Node* const node = block->get_node(i); - - // Remove def bits - const OptoReg::Name first = bs->refine_register(node, regalloc->get_reg_first(node)); - const OptoReg::Name second = bs->refine_register(node, regalloc->get_reg_second(node)); - if (first != OptoReg::Bad) { - new_live.Remove(first); - } - if (second != OptoReg::Bad) { - new_live.Remove(second); - } - - // Add use bits - for (uint j = 1; j < node->req(); ++j) { - const Node* const use = node->in(j); - const OptoReg::Name first = bs->refine_register(use, regalloc->get_reg_first(use)); - const OptoReg::Name second = bs->refine_register(use, regalloc->get_reg_second(use)); - if (first != OptoReg::Bad) { - new_live.Insert(first); - } - if (second != OptoReg::Bad) { - new_live.Insert(second); - } - } - - // If this node tracks liveness, update it - RegMask* const regs = barrier_set_state()->live(node); - if (regs != nullptr) { - regs->OR(new_live); - } - } - - // Now at block top, see if we have any changes - new_live.SUBTRACT(old_live); - if (new_live.is_NotEmpty()) { - // Liveness has refined, update and propagate to prior blocks - old_live.OR(new_live); - for (uint i = 1; i < block->num_preds(); ++i) { - Block* const pred = cfg->get_block_for_node(block->pred(i)); - worklist.push(pred); - } - } - } -} - -#ifndef PRODUCT -void XBarrierSetC2::dump_barrier_data(const MachNode* mach, outputStream* st) const { - if ((mach->barrier_data() & XLoadBarrierStrong) != 0) { - st->print("strong "); - } - if ((mach->barrier_data() & XLoadBarrierWeak) != 0) { - st->print("weak "); - } - if ((mach->barrier_data() & XLoadBarrierPhantom) != 0) { - st->print("phantom "); - } - if ((mach->barrier_data() & XLoadBarrierNoKeepalive) != 0) { - st->print("nokeepalive "); - } -} -#endif // 
!PRODUCT diff --git a/src/hotspot/share/gc/x/c2/xBarrierSetC2.hpp b/src/hotspot/share/gc/x/c2/xBarrierSetC2.hpp deleted file mode 100644 index 91835338fd73c..0000000000000 --- a/src/hotspot/share/gc/x/c2/xBarrierSetC2.hpp +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_C2_XBARRIERSETC2_HPP -#define SHARE_GC_X_C2_XBARRIERSETC2_HPP - -#include "gc/shared/c2/barrierSetC2.hpp" -#include "memory/allocation.hpp" -#include "opto/node.hpp" -#include "utilities/growableArray.hpp" - -const uint8_t XLoadBarrierElided = 0; -const uint8_t XLoadBarrierStrong = 1; -const uint8_t XLoadBarrierWeak = 2; -const uint8_t XLoadBarrierPhantom = 4; -const uint8_t XLoadBarrierNoKeepalive = 8; - -class XLoadBarrierStubC2 : public ArenaObj { -private: - const MachNode* _node; - const Address _ref_addr; - const Register _ref; - const Register _tmp; - const uint8_t _barrier_data; - Label _entry; - Label _continuation; - - XLoadBarrierStubC2(const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data); - -public: - static XLoadBarrierStubC2* create(const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data); - - Address ref_addr() const; - Register ref() const; - Register tmp() const; - address slow_path() const; - RegMask& live() const; - Label* entry(); - Label* continuation(); -}; - -class XBarrierSetC2 : public BarrierSetC2 { -private: - void compute_liveness_at_stubs() const; - void analyze_dominating_barriers() const; - -protected: - virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const; - virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, - Node* expected_val, - Node* new_val, - const Type* val_type) const; - virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, - Node* expected_val, - Node* new_val, - const Type* value_type) const; - virtual Node* atomic_xchg_at_resolved(C2AtomicParseAccess& access, - Node* new_val, - const Type* val_type) const; - -public: - virtual void* create_barrier_state(Arena* comp_arena) const; - virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, - BasicType type, - bool is_clone, - bool is_clone_instance, - ArrayCopyPhase phase) const; - virtual void clone_at_expansion(PhaseMacroExpand* phase, - ArrayCopyNode* ac) const; - - virtual void late_barrier_analysis() const; - virtual int estimate_stub_size() const; - virtual void emit_stubs(CodeBuffer& cb) const; - -#ifndef PRODUCT - 
virtual void dump_barrier_data(const MachNode* mach, outputStream* st) const; -#endif -}; - -#endif // SHARE_GC_X_C2_XBARRIERSETC2_HPP diff --git a/src/hotspot/share/gc/x/vmStructs_x.cpp b/src/hotspot/share/gc/x/vmStructs_x.cpp deleted file mode 100644 index 4c7d63f41b403..0000000000000 --- a/src/hotspot/share/gc/x/vmStructs_x.cpp +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/vmStructs_x.hpp" - -XGlobalsForVMStructs::XGlobalsForVMStructs() : - _XGlobalPhase(&XGlobalPhase), - _XGlobalSeqNum(&XGlobalSeqNum), - _XAddressOffsetMask(&XAddressOffsetMask), - _XAddressMetadataMask(&XAddressMetadataMask), - _XAddressMetadataFinalizable(&XAddressMetadataFinalizable), - _XAddressGoodMask(&XAddressGoodMask), - _XAddressBadMask(&XAddressBadMask), - _XAddressWeakBadMask(&XAddressWeakBadMask), - _XObjectAlignmentSmallShift(&XObjectAlignmentSmallShift), - _XObjectAlignmentSmall(&XObjectAlignmentSmall) { -} - -XGlobalsForVMStructs XGlobalsForVMStructs::_instance; -XGlobalsForVMStructs* XGlobalsForVMStructs::_instance_p = &XGlobalsForVMStructs::_instance; diff --git a/src/hotspot/share/gc/x/vmStructs_x.hpp b/src/hotspot/share/gc/x/vmStructs_x.hpp deleted file mode 100644 index b911c21be2343..0000000000000 --- a/src/hotspot/share/gc/x/vmStructs_x.hpp +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_VMSTRUCTS_X_HPP -#define SHARE_GC_X_VMSTRUCTS_X_HPP - -#include "gc/x/xAttachedArray.hpp" -#include "gc/x/xCollectedHeap.hpp" -#include "gc/x/xForwarding.hpp" -#include "gc/x/xGranuleMap.hpp" -#include "gc/x/xHeap.hpp" -#include "gc/x/xPageAllocator.hpp" -#include "utilities/macros.hpp" - -// Expose some ZGC globals to the SA agent. -class XGlobalsForVMStructs { - static XGlobalsForVMStructs _instance; - -public: - static XGlobalsForVMStructs* _instance_p; - - XGlobalsForVMStructs(); - - uint32_t* _XGlobalPhase; - - uint32_t* _XGlobalSeqNum; - - uintptr_t* _XAddressOffsetMask; - uintptr_t* _XAddressMetadataMask; - uintptr_t* _XAddressMetadataFinalizable; - uintptr_t* _XAddressGoodMask; - uintptr_t* _XAddressBadMask; - uintptr_t* _XAddressWeakBadMask; - - const int* _XObjectAlignmentSmallShift; - const int* _XObjectAlignmentSmall; -}; - -typedef XGranuleMap XGranuleMapForPageTable; -typedef XGranuleMap XGranuleMapForForwarding; -typedef XAttachedArray XAttachedArrayForForwarding; - -#define VM_STRUCTS_X(nonstatic_field, volatile_nonstatic_field, static_field) \ - static_field(XGlobalsForVMStructs, _instance_p, XGlobalsForVMStructs*) \ - nonstatic_field(XGlobalsForVMStructs, _XGlobalPhase, uint32_t*) \ - nonstatic_field(XGlobalsForVMStructs, _XGlobalSeqNum, uint32_t*) \ - nonstatic_field(XGlobalsForVMStructs, _XAddressOffsetMask, uintptr_t*) \ - nonstatic_field(XGlobalsForVMStructs, _XAddressMetadataMask, uintptr_t*) \ - nonstatic_field(XGlobalsForVMStructs, _XAddressMetadataFinalizable, uintptr_t*) \ - nonstatic_field(XGlobalsForVMStructs, _XAddressGoodMask, uintptr_t*) \ - nonstatic_field(XGlobalsForVMStructs, _XAddressBadMask, uintptr_t*) \ - nonstatic_field(XGlobalsForVMStructs, _XAddressWeakBadMask, uintptr_t*) \ - nonstatic_field(XGlobalsForVMStructs, _XObjectAlignmentSmallShift, const int*) \ - nonstatic_field(XGlobalsForVMStructs, _XObjectAlignmentSmall, const int*) \ - \ - nonstatic_field(XCollectedHeap, _heap, XHeap) \ - \ - nonstatic_field(XHeap, _page_allocator, XPageAllocator) \ - nonstatic_field(XHeap, _page_table, XPageTable) \ - nonstatic_field(XHeap, _forwarding_table, XForwardingTable) \ - nonstatic_field(XHeap, _relocate, XRelocate) \ - \ - nonstatic_field(XPage, _type, const uint8_t) \ - nonstatic_field(XPage, _seqnum, uint32_t) \ - nonstatic_field(XPage, _virtual, const XVirtualMemory) \ - volatile_nonstatic_field(XPage, _top, uintptr_t) \ - \ - nonstatic_field(XPageAllocator, _max_capacity, const size_t) \ - volatile_nonstatic_field(XPageAllocator, _capacity, size_t) \ - volatile_nonstatic_field(XPageAllocator, _used, size_t) \ - \ - nonstatic_field(XPageTable, _map, XGranuleMapForPageTable) \ - \ - nonstatic_field(XGranuleMapForPageTable, _map, XPage** const) \ - nonstatic_field(XGranuleMapForForwarding, _map, XForwarding** const) \ - \ - nonstatic_field(XForwardingTable, _map, XGranuleMapForForwarding) \ - \ - nonstatic_field(XVirtualMemory, _start, const uintptr_t) \ - nonstatic_field(XVirtualMemory, _end, const uintptr_t) \ - \ - nonstatic_field(XForwarding, _virtual, const XVirtualMemory) \ - nonstatic_field(XForwarding, _object_alignment_shift, const size_t) \ - volatile_nonstatic_field(XForwarding, _ref_count, int) \ - nonstatic_field(XForwarding, _entries, const XAttachedArrayForForwarding) \ - nonstatic_field(XForwardingEntry, _entry, uint64_t) \ - nonstatic_field(XAttachedArrayForForwarding, _length, const size_t) - -#define VM_INT_CONSTANTS_X(declare_constant, declare_constant_with_value) \ - declare_constant(XPhaseRelocate) \ - 
declare_constant(XPageTypeSmall) \ - declare_constant(XPageTypeMedium) \ - declare_constant(XPageTypeLarge) \ - declare_constant(XObjectAlignmentMediumShift) \ - declare_constant(XObjectAlignmentLargeShift) - -#define VM_LONG_CONSTANTS_X(declare_constant) \ - declare_constant(XGranuleSizeShift) \ - declare_constant(XPageSizeSmallShift) \ - declare_constant(XPageSizeMediumShift) \ - declare_constant(XAddressOffsetShift) \ - declare_constant(XAddressOffsetBits) \ - declare_constant(XAddressOffsetMask) \ - declare_constant(XAddressOffsetMax) - -#define VM_TYPES_X(declare_type, declare_toplevel_type, declare_integer_type) \ - declare_toplevel_type(XGlobalsForVMStructs) \ - declare_type(XCollectedHeap, CollectedHeap) \ - declare_toplevel_type(XHeap) \ - declare_toplevel_type(XRelocate) \ - declare_toplevel_type(XPage) \ - declare_toplevel_type(XPageAllocator) \ - declare_toplevel_type(XPageTable) \ - declare_toplevel_type(XAttachedArrayForForwarding) \ - declare_toplevel_type(XGranuleMapForPageTable) \ - declare_toplevel_type(XGranuleMapForForwarding) \ - declare_toplevel_type(XVirtualMemory) \ - declare_toplevel_type(XForwardingTable) \ - declare_toplevel_type(XForwarding) \ - declare_toplevel_type(XForwardingEntry) \ - declare_toplevel_type(XPhysicalMemoryManager) - -#endif // SHARE_GC_X_VMSTRUCTS_X_HPP diff --git a/src/hotspot/share/gc/x/xAbort.cpp b/src/hotspot/share/gc/x/xAbort.cpp deleted file mode 100644 index 11b8d840d22f5..0000000000000 --- a/src/hotspot/share/gc/x/xAbort.cpp +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xAbort.hpp" -#include "runtime/atomic.hpp" - -volatile bool XAbort::_should_abort = false; - -void XAbort::abort() { - Atomic::release_store_fence(&_should_abort, true); -} diff --git a/src/hotspot/share/gc/x/xAbort.hpp b/src/hotspot/share/gc/x/xAbort.hpp deleted file mode 100644 index 808a350584bc5..0000000000000 --- a/src/hotspot/share/gc/x/xAbort.hpp +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XABORT_HPP -#define SHARE_GC_X_XABORT_HPP - -#include "memory/allStatic.hpp" - -class XAbort : public AllStatic { -private: - static volatile bool _should_abort; - -public: - static bool should_abort(); - static void abort(); -}; - -#endif // SHARE_GC_X_XABORT_HPP diff --git a/src/hotspot/share/gc/x/xAbort.inline.hpp b/src/hotspot/share/gc/x/xAbort.inline.hpp deleted file mode 100644 index 8ef1219330a93..0000000000000 --- a/src/hotspot/share/gc/x/xAbort.inline.hpp +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XABORT_INLINE_HPP -#define SHARE_GC_X_XABORT_INLINE_HPP - -#include "gc/x/xAbort.hpp" - -#include "runtime/atomic.hpp" - -inline bool XAbort::should_abort() { - return Atomic::load_acquire(&_should_abort); -} - -#endif // SHARE_GC_X_XABORT_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xAddress.cpp b/src/hotspot/share/gc/x/xAddress.cpp deleted file mode 100644 index 33dffc662f161..0000000000000 --- a/src/hotspot/share/gc/x/xAddress.cpp +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xAddress.hpp" -#include "gc/x/xGlobals.hpp" - -void XAddress::set_good_mask(uintptr_t mask) { - XAddressGoodMask = mask; - XAddressBadMask = XAddressGoodMask ^ XAddressMetadataMask; - XAddressWeakBadMask = (XAddressGoodMask | XAddressMetadataRemapped | XAddressMetadataFinalizable) ^ XAddressMetadataMask; -} - -void XAddress::initialize() { - XAddressOffsetBits = XPlatformAddressOffsetBits(); - XAddressOffsetMask = (((uintptr_t)1 << XAddressOffsetBits) - 1) << XAddressOffsetShift; - XAddressOffsetMax = (uintptr_t)1 << XAddressOffsetBits; - - XAddressMetadataShift = XPlatformAddressMetadataShift(); - XAddressMetadataMask = (((uintptr_t)1 << XAddressMetadataBits) - 1) << XAddressMetadataShift; - - XAddressMetadataMarked0 = (uintptr_t)1 << (XAddressMetadataShift + 0); - XAddressMetadataMarked1 = (uintptr_t)1 << (XAddressMetadataShift + 1); - XAddressMetadataRemapped = (uintptr_t)1 << (XAddressMetadataShift + 2); - XAddressMetadataFinalizable = (uintptr_t)1 << (XAddressMetadataShift + 3); - - XAddressMetadataMarked = XAddressMetadataMarked0; - set_good_mask(XAddressMetadataRemapped); -} - -void XAddress::flip_to_marked() { - XAddressMetadataMarked ^= (XAddressMetadataMarked0 | XAddressMetadataMarked1); - set_good_mask(XAddressMetadataMarked); -} - -void XAddress::flip_to_remapped() { - set_good_mask(XAddressMetadataRemapped); -} diff --git a/src/hotspot/share/gc/x/xAddress.hpp b/src/hotspot/share/gc/x/xAddress.hpp deleted file mode 100644 index ff9d548f1af0c..0000000000000 --- a/src/hotspot/share/gc/x/xAddress.hpp +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XADDRESS_HPP -#define SHARE_GC_X_XADDRESS_HPP - -#include "memory/allStatic.hpp" -#include "utilities/globalDefinitions.hpp" - -class XAddress : public AllStatic { - friend class XAddressTest; - -private: - static void set_good_mask(uintptr_t mask); - -public: - static void initialize(); - - static void flip_to_marked(); - static void flip_to_remapped(); - - static bool is_null(uintptr_t value); - static bool is_bad(uintptr_t value); - static bool is_good(uintptr_t value); - static bool is_good_or_null(uintptr_t value); - static bool is_weak_bad(uintptr_t value); - static bool is_weak_good(uintptr_t value); - static bool is_weak_good_or_null(uintptr_t value); - static bool is_marked(uintptr_t value); - static bool is_marked_or_null(uintptr_t value); - static bool is_finalizable(uintptr_t value); - static bool is_finalizable_good(uintptr_t value); - static bool is_remapped(uintptr_t value); - static bool is_in(uintptr_t value); - - static uintptr_t offset(uintptr_t value); - static uintptr_t good(uintptr_t value); - static uintptr_t good_or_null(uintptr_t value); - static uintptr_t finalizable_good(uintptr_t value); - static uintptr_t marked(uintptr_t value); - static uintptr_t marked0(uintptr_t value); - static uintptr_t marked1(uintptr_t value); - static uintptr_t remapped(uintptr_t value); - static uintptr_t remapped_or_null(uintptr_t value); -}; - -#endif // SHARE_GC_X_XADDRESS_HPP diff --git a/src/hotspot/share/gc/x/xAddress.inline.hpp b/src/hotspot/share/gc/x/xAddress.inline.hpp deleted file mode 100644 index 046ee10af00af..0000000000000 --- a/src/hotspot/share/gc/x/xAddress.inline.hpp +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XADDRESS_INLINE_HPP -#define SHARE_GC_X_XADDRESS_INLINE_HPP - -#include "gc/x/xAddress.hpp" - -#include "gc/x/xGlobals.hpp" -#include "utilities/globalDefinitions.hpp" -#include "utilities/macros.hpp" -#include "utilities/powerOfTwo.hpp" - -inline bool XAddress::is_null(uintptr_t value) { - return value == 0; -} - -inline bool XAddress::is_bad(uintptr_t value) { - return value & XAddressBadMask; -} - -inline bool XAddress::is_good(uintptr_t value) { - return !is_bad(value) && !is_null(value); -} - -inline bool XAddress::is_good_or_null(uintptr_t value) { - // Checking if an address is "not bad" is an optimized version of - // checking if it's "good or null", which eliminates an explicit - // null check. 
However, the implicit null check only checks that - // the mask bits are zero, not that the entire address is zero. - // This means that an address without mask bits would pass through - // the barrier as if it was null. This should be harmless as such - // addresses should ever be passed through the barrier. - const bool result = !is_bad(value); - assert((is_good(value) || is_null(value)) == result, "Bad address"); - return result; -} - -inline bool XAddress::is_weak_bad(uintptr_t value) { - return value & XAddressWeakBadMask; -} - -inline bool XAddress::is_weak_good(uintptr_t value) { - return !is_weak_bad(value) && !is_null(value); -} - -inline bool XAddress::is_weak_good_or_null(uintptr_t value) { - return !is_weak_bad(value); -} - -inline bool XAddress::is_marked(uintptr_t value) { - return value & XAddressMetadataMarked; -} - -inline bool XAddress::is_marked_or_null(uintptr_t value) { - return is_marked(value) || is_null(value); -} - -inline bool XAddress::is_finalizable(uintptr_t value) { - return value & XAddressMetadataFinalizable; -} - -inline bool XAddress::is_finalizable_good(uintptr_t value) { - return is_finalizable(value) && is_good(value ^ XAddressMetadataFinalizable); -} - -inline bool XAddress::is_remapped(uintptr_t value) { - return value & XAddressMetadataRemapped; -} - -inline bool XAddress::is_in(uintptr_t value) { - // Check that exactly one non-offset bit is set - if (!is_power_of_2(value & ~XAddressOffsetMask)) { - return false; - } - - // Check that one of the non-finalizable metadata is set - return value & (XAddressMetadataMask & ~XAddressMetadataFinalizable); -} - -inline uintptr_t XAddress::offset(uintptr_t value) { - return value & XAddressOffsetMask; -} - -inline uintptr_t XAddress::good(uintptr_t value) { - return offset(value) | XAddressGoodMask; -} - -inline uintptr_t XAddress::good_or_null(uintptr_t value) { - return is_null(value) ? 0 : good(value); -} - -inline uintptr_t XAddress::finalizable_good(uintptr_t value) { - return offset(value) | XAddressMetadataFinalizable | XAddressGoodMask; -} - -inline uintptr_t XAddress::marked(uintptr_t value) { - return offset(value) | XAddressMetadataMarked; -} - -inline uintptr_t XAddress::marked0(uintptr_t value) { - return offset(value) | XAddressMetadataMarked0; -} - -inline uintptr_t XAddress::marked1(uintptr_t value) { - return offset(value) | XAddressMetadataMarked1; -} - -inline uintptr_t XAddress::remapped(uintptr_t value) { - return offset(value) | XAddressMetadataRemapped; -} - -inline uintptr_t XAddress::remapped_or_null(uintptr_t value) { - return is_null(value) ? 0 : remapped(value); -} - -#endif // SHARE_GC_X_XADDRESS_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xAddressSpaceLimit.cpp b/src/hotspot/share/gc/x/xAddressSpaceLimit.cpp deleted file mode 100644 index 6d3c7a295dfe0..0000000000000 --- a/src/hotspot/share/gc/x/xAddressSpaceLimit.cpp +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
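The predicates in xAddress.inline.hpp above reduce the barrier fast path to a single mask test: a reference is "bad" exactly when it carries any metadata color other than the one currently considered good, and healing an address just means replacing its color bits with the good mask. A minimal standalone model of that test follows; the bit positions are assumptions chosen for the example, not the values the VM derives from XPlatformAddressMetadataShift().

    // Illustrative model only: bit positions here are made up.
    #include <cstdint>
    #include <cstdio>

    namespace demo {
      constexpr uintptr_t kMetadataShift = 44;                                  // assumed
      constexpr uintptr_t kMarked0       = uintptr_t(1) << (kMetadataShift + 0);
      constexpr uintptr_t kRemapped      = uintptr_t(1) << (kMetadataShift + 2);
      constexpr uintptr_t kMetadataMask  = ((uintptr_t(1) << 4) - 1) << kMetadataShift;
      constexpr uintptr_t kOffsetMask    = (uintptr_t(1) << kMetadataShift) - 1;

      // One metadata bit is "good"; every other metadata bit is "bad",
      // mirroring XAddressBadMask = good_mask ^ metadata_mask.
      constexpr uintptr_t kGoodMask = kRemapped;
      constexpr uintptr_t kBadMask  = kGoodMask ^ kMetadataMask;

      inline bool is_bad(uintptr_t v)    { return (v & kBadMask) != 0; }
      inline bool is_good(uintptr_t v)   { return !is_bad(v) && v != 0; }
      inline uintptr_t good(uintptr_t v) { return (v & kOffsetMask) | kGoodMask; }
    }

    int main() {
      const uintptr_t offset = 0x12345678;
      const uintptr_t stale  = offset | demo::kMarked0;  // colored by an earlier phase
      std::printf("stale:  is_bad=%d\n", demo::is_bad(stale));                // 1
      std::printf("healed: is_good=%d\n", demo::is_good(demo::good(stale)));  // 1
      return 0;
    }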
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/shared/gc_globals.hpp" -#include "gc/x/xAddressSpaceLimit.hpp" -#include "gc/x/xGlobals.hpp" -#include "runtime/globals.hpp" -#include "runtime/os.hpp" -#include "utilities/align.hpp" - -static size_t address_space_limit() { - size_t limit = 0; - - if (os::has_allocatable_memory_limit(&limit)) { - return limit; - } - - // No limit - return SIZE_MAX; -} - -size_t XAddressSpaceLimit::mark_stack() { - // Allow mark stacks to occupy 10% of the address space - const size_t limit = address_space_limit() / 10; - return align_up(limit, XMarkStackSpaceExpandSize); -} - -size_t XAddressSpaceLimit::heap_view() { - // Allow all heap views to occupy 50% of the address space - const size_t limit = address_space_limit() / MaxVirtMemFraction / XHeapViews; - return align_up(limit, XGranuleSize); -} diff --git a/src/hotspot/share/gc/x/xAddressSpaceLimit.hpp b/src/hotspot/share/gc/x/xAddressSpaceLimit.hpp deleted file mode 100644 index 9a3fcc27a293d..0000000000000 --- a/src/hotspot/share/gc/x/xAddressSpaceLimit.hpp +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XADDRESSSPACELIMIT_HPP -#define SHARE_GC_X_XADDRESSSPACELIMIT_HPP - -#include "memory/allStatic.hpp" -#include "utilities/globalDefinitions.hpp" - -class XAddressSpaceLimit : public AllStatic { -public: - static size_t mark_stack(); - static size_t heap_view(); -}; - -#endif // SHARE_GC_X_XADDRESSSPACELIMIT_HPP diff --git a/src/hotspot/share/gc/x/xAllocationFlags.hpp b/src/hotspot/share/gc/x/xAllocationFlags.hpp deleted file mode 100644 index 307d68c65ac78..0000000000000 --- a/src/hotspot/share/gc/x/xAllocationFlags.hpp +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
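XAddressSpaceLimit, removed above, only does budget arithmetic: take whatever address space the OS reports as allocatable, carve out a fixed fraction for mark stacks or heap views, and round the result up to the allocation granule. A toy version of the same calculation, with an assumed 128 TB limit and a hypothetical 2M granule standing in for os::has_allocatable_memory_limit() and XGranuleSize:

    #include <cstddef>
    #include <cstdio>

    // alignment must be a power of two, as it is for the real granule sizes.
    constexpr size_t align_up(size_t value, size_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t address_space  = size_t(1) << 47;   // assumed allocatable VA
      const size_t granule        = 2u * 1024 * 1024;  // hypothetical granule size
      const size_t mark_stack_cap = align_up(address_space / 10, granule);  // "10% of VA"
      std::printf("mark stack budget: %zu bytes\n", mark_stack_cap);
      return 0;
    }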
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XALLOCATIONFLAGS_HPP -#define SHARE_GC_X_XALLOCATIONFLAGS_HPP - -#include "gc/x/xBitField.hpp" -#include "memory/allocation.hpp" - -// -// Allocation flags layout -// ----------------------- -// -// 7 2 1 0 -// +-----+-+-+-+ -// |00000|1|1|1| -// +-----+-+-+-+ -// | | | | -// | | | * 0-0 Non-Blocking Flag (1-bit) -// | | | -// | | * 1-1 Worker Relocation Flag (1-bit) -// | | -// | * 2-2 Low Address Flag (1-bit) -// | -// * 7-3 Unused (5-bits) -// - -class XAllocationFlags { -private: - typedef XBitField field_non_blocking; - typedef XBitField field_worker_relocation; - typedef XBitField field_low_address; - - uint8_t _flags; - -public: - XAllocationFlags() : - _flags(0) {} - - void set_non_blocking() { - _flags |= field_non_blocking::encode(true); - } - - void set_worker_relocation() { - _flags |= field_worker_relocation::encode(true); - } - - void set_low_address() { - _flags |= field_low_address::encode(true); - } - - bool non_blocking() const { - return field_non_blocking::decode(_flags); - } - - bool worker_relocation() const { - return field_worker_relocation::decode(_flags); - } - - bool low_address() const { - return field_low_address::decode(_flags); - } -}; - -#endif // SHARE_GC_X_XALLOCATIONFLAGS_HPP diff --git a/src/hotspot/share/gc/x/xArguments.cpp b/src/hotspot/share/gc/x/xArguments.cpp deleted file mode 100644 index 13cb302d14aa9..0000000000000 --- a/src/hotspot/share/gc/x/xArguments.cpp +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
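The XAllocationFlags layout diagram above packs three booleans into one byte; the XBitField typedefs it uses are parameterized on the storage type, shift and width (presumably something like XBitField<uint8_t, bool, 0, 1>, though the exact parameters are an assumption here). A hand-rolled sketch of the same encode/decode idea:

    // Hand-rolled stand-in for XBitField; the shifts match the layout comment.
    #include <cstdint>

    template <int Shift>
    struct BoolField {
      static constexpr uint8_t encode(bool v)       { return uint8_t(v ? 1u : 0u) << Shift; }
      static constexpr bool    decode(uint8_t bits) { return ((bits >> Shift) & 1u) != 0; }
    };

    class AllocationFlags {
      using NonBlocking      = BoolField<0>;  // bit 0
      using WorkerRelocation = BoolField<1>;  // bit 1
      using LowAddress       = BoolField<2>;  // bit 2

      uint8_t _flags = 0;

    public:
      void set_non_blocking()      { _flags |= NonBlocking::encode(true); }
      void set_worker_relocation() { _flags |= WorkerRelocation::encode(true); }
      void set_low_address()       { _flags |= LowAddress::encode(true); }

      bool non_blocking() const      { return NonBlocking::decode(_flags); }
      bool worker_relocation() const { return WorkerRelocation::decode(_flags); }
      bool low_address() const       { return LowAddress::decode(_flags); }
    };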
- */ - -#include "precompiled.hpp" -#include "gc/x/xAddressSpaceLimit.hpp" -#include "gc/x/xArguments.hpp" -#include "gc/x/xCollectedHeap.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xHeuristics.hpp" -#include "gc/shared/gcArguments.hpp" -#include "runtime/globals.hpp" -#include "runtime/globals_extension.hpp" -#include "runtime/java.hpp" - -void XArguments::initialize_alignments() { - SpaceAlignment = XGranuleSize; - HeapAlignment = SpaceAlignment; -} - -void XArguments::initialize_heap_flags_and_sizes() { - // Nothing extra to do -} - -void XArguments::initialize() { - warning("Non-generational ZGC is deprecated."); - - // Check mark stack size - const size_t mark_stack_space_limit = XAddressSpaceLimit::mark_stack(); - if (ZMarkStackSpaceLimit > mark_stack_space_limit) { - if (!FLAG_IS_DEFAULT(ZMarkStackSpaceLimit)) { - vm_exit_during_initialization("ZMarkStackSpaceLimit too large for limited address space"); - } - FLAG_SET_DEFAULT(ZMarkStackSpaceLimit, mark_stack_space_limit); - } - - // Enable NUMA by default - if (FLAG_IS_DEFAULT(UseNUMA)) { - FLAG_SET_DEFAULT(UseNUMA, true); - } - - if (FLAG_IS_DEFAULT(ZFragmentationLimit)) { - FLAG_SET_DEFAULT(ZFragmentationLimit, 25.0); - } - - // Select number of parallel threads - if (FLAG_IS_DEFAULT(ParallelGCThreads)) { - FLAG_SET_DEFAULT(ParallelGCThreads, XHeuristics::nparallel_workers()); - } - - if (ParallelGCThreads == 0) { - vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ParallelGCThreads=0"); - } - - // Select number of concurrent threads - if (FLAG_IS_DEFAULT(ConcGCThreads)) { - FLAG_SET_DEFAULT(ConcGCThreads, XHeuristics::nconcurrent_workers()); - } - - if (ConcGCThreads == 0) { - vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ConcGCThreads=0"); - } - - // Large page size must match granule size - if (!FLAG_IS_DEFAULT(LargePageSizeInBytes) && LargePageSizeInBytes != XGranuleSize) { - vm_exit_during_initialization(err_msg("Incompatible -XX:LargePageSizeInBytes, only " - SIZE_FORMAT "M large pages are supported by ZGC", - XGranuleSize / M)); - } - - // The heuristics used when UseDynamicNumberOfGCThreads is - // enabled defaults to using a ZAllocationSpikeTolerance of 1. 
- if (UseDynamicNumberOfGCThreads && FLAG_IS_DEFAULT(ZAllocationSpikeTolerance)) { - FLAG_SET_DEFAULT(ZAllocationSpikeTolerance, 1); - } - -#ifdef COMPILER2 - // Enable loop strip mining by default - if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) { - FLAG_SET_DEFAULT(UseCountedLoopSafepoints, true); - if (FLAG_IS_DEFAULT(LoopStripMiningIter)) { - FLAG_SET_DEFAULT(LoopStripMiningIter, 1000); - } - } -#endif - - // CompressedOops not supported - FLAG_SET_DEFAULT(UseCompressedOops, false); - - // Verification before startup and after exit not (yet) supported - FLAG_SET_DEFAULT(VerifyDuringStartup, false); - FLAG_SET_DEFAULT(VerifyBeforeExit, false); - - if (VerifyBeforeGC || VerifyDuringGC || VerifyAfterGC) { - FLAG_SET_DEFAULT(ZVerifyRoots, true); - FLAG_SET_DEFAULT(ZVerifyObjects, true); - } -} - -size_t XArguments::heap_virtual_to_physical_ratio() { - return XHeapViews * XVirtualToPhysicalRatio; -} - -CollectedHeap* XArguments::create_heap() { - return new XCollectedHeap(); -} - -bool XArguments::is_supported() { - return is_os_supported(); -} diff --git a/src/hotspot/share/gc/x/xArguments.hpp b/src/hotspot/share/gc/x/xArguments.hpp deleted file mode 100644 index 196dd994cad9b..0000000000000 --- a/src/hotspot/share/gc/x/xArguments.hpp +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XARGUMENTS_HPP -#define SHARE_GC_X_XARGUMENTS_HPP - -#include "gc/shared/gcArguments.hpp" - -class CollectedHeap; - -class XArguments : AllStatic { -public: - static void initialize_alignments(); - static void initialize_heap_flags_and_sizes(); - static void initialize(); - static size_t heap_virtual_to_physical_ratio(); - static CollectedHeap* create_heap(); - - static bool is_supported(); - - static bool is_os_supported(); -}; - -#endif // SHARE_GC_X_XARGUMENTS_HPP diff --git a/src/hotspot/share/gc/x/xArray.hpp b/src/hotspot/share/gc/x/xArray.hpp deleted file mode 100644 index b0b4b5bd81ea6..0000000000000 --- a/src/hotspot/share/gc/x/xArray.hpp +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XARRAY_HPP -#define SHARE_GC_X_XARRAY_HPP - -#include "memory/allocation.hpp" -#include "utilities/growableArray.hpp" - -template using XArray = GrowableArrayCHeap; - -template -class XArrayIteratorImpl : public StackObj { -private: - const T* _next; - const T* const _end; - - bool next_serial(T* elem); - bool next_parallel(T* elem); - -public: - XArrayIteratorImpl(const T* array, size_t length); - XArrayIteratorImpl(const XArray* array); - - bool next(T* elem); -}; - -template using XArrayIterator = XArrayIteratorImpl; -template using XArrayParallelIterator = XArrayIteratorImpl; - -#endif // SHARE_GC_X_XARRAY_HPP diff --git a/src/hotspot/share/gc/x/xArray.inline.hpp b/src/hotspot/share/gc/x/xArray.inline.hpp deleted file mode 100644 index 721e3130095cd..0000000000000 --- a/src/hotspot/share/gc/x/xArray.inline.hpp +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XARRAY_INLINE_HPP -#define SHARE_GC_X_XARRAY_INLINE_HPP - -#include "gc/x/xArray.hpp" - -#include "runtime/atomic.hpp" - -template -inline bool XArrayIteratorImpl::next_serial(T* elem) { - if (_next == _end) { - return false; - } - - *elem = *_next; - _next++; - - return true; -} - -template -inline bool XArrayIteratorImpl::next_parallel(T* elem) { - const T* old_next = Atomic::load(&_next); - - for (;;) { - if (old_next == _end) { - return false; - } - - const T* const new_next = old_next + 1; - const T* const prev_next = Atomic::cmpxchg(&_next, old_next, new_next); - if (prev_next == old_next) { - *elem = *old_next; - return true; - } - - old_next = prev_next; - } -} - -template -inline XArrayIteratorImpl::XArrayIteratorImpl(const T* array, size_t length) : - _next(array), - _end(array + length) {} - -template -inline XArrayIteratorImpl::XArrayIteratorImpl(const XArray* array) : - XArrayIteratorImpl(array->is_empty() ? nullptr : array->adr_at(0), array->length()) {} - -template -inline bool XArrayIteratorImpl::next(T* elem) { - if (Parallel) { - return next_parallel(elem); - } else { - return next_serial(elem); - } -} - -#endif // SHARE_GC_X_XARRAY_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xAttachedArray.hpp b/src/hotspot/share/gc/x/xAttachedArray.hpp deleted file mode 100644 index f039f602aab38..0000000000000 --- a/src/hotspot/share/gc/x/xAttachedArray.hpp +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XATTACHEDARRAY_HPP -#define SHARE_GC_X_XATTACHEDARRAY_HPP - -#include "utilities/globalDefinitions.hpp" - -class VMStructs; - -template -class XAttachedArray { - friend class ::VMStructs; - -private: - const size_t _length; - - static size_t object_size(); - static size_t array_size(size_t length); - -public: - template - static void* alloc(Allocator* allocator, size_t length); - - static void* alloc(size_t length); - static void free(ObjectT* obj); - - XAttachedArray(size_t length); - - size_t length() const; - ArrayT* operator()(const ObjectT* obj) const; -}; - -#endif // SHARE_GC_X_XATTACHEDARRAY_HPP diff --git a/src/hotspot/share/gc/x/xAttachedArray.inline.hpp b/src/hotspot/share/gc/x/xAttachedArray.inline.hpp deleted file mode 100644 index ba10de9967384..0000000000000 --- a/src/hotspot/share/gc/x/xAttachedArray.inline.hpp +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. 
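XArrayIteratorImpl::next_parallel() above lets several GC workers drain a single array by claiming elements with a compare-and-swap on the shared cursor. The same scheme, sketched against std::atomic instead of HotSpot's Atomic:: wrappers:

    #include <atomic>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    template <typename T>
    class ParallelIterator {
      std::atomic<const T*> _next;
      const T* const        _end;

    public:
      ParallelIterator(const T* first, size_t length) : _next(first), _end(first + length) {}

      // Each successful compare-exchange claims exactly one element, so
      // concurrent callers never see duplicates and never skip entries.
      bool next(T* elem) {
        const T* old_next = _next.load(std::memory_order_relaxed);
        for (;;) {
          if (old_next == _end) {
            return false;
          }
          if (_next.compare_exchange_weak(old_next, old_next + 1, std::memory_order_relaxed)) {
            *elem = *old_next;
            return true;
          }
          // On failure old_next was reloaded with the value another thread
          // installed; just retry from there.
        }
      }
    };

    int main() {
      std::vector<int> values = {1, 2, 3};
      ParallelIterator<int> it(values.data(), values.size());
      for (int v; it.next(&v); ) {
        std::printf("%d\n", v);
      }
      return 0;
    }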
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XATTACHEDARRAY_INLINE_HPP -#define SHARE_GC_X_XATTACHEDARRAY_INLINE_HPP - -#include "gc/x/xAttachedArray.hpp" - -#include "memory/allocation.hpp" -#include "utilities/align.hpp" - -template -inline size_t XAttachedArray::object_size() { - return align_up(sizeof(ObjectT), sizeof(ArrayT)); -} - -template -inline size_t XAttachedArray::array_size(size_t length) { - return sizeof(ArrayT) * length; -} - -template -template -inline void* XAttachedArray::alloc(Allocator* allocator, size_t length) { - // Allocate memory for object and array - const size_t size = object_size() + array_size(length); - void* const addr = allocator->alloc(size); - - // Placement new array - void* const array_addr = reinterpret_cast(addr) + object_size(); - ::new (array_addr) ArrayT[length]; - - // Return pointer to object - return addr; -} - -template -inline void* XAttachedArray::alloc(size_t length) { - struct Allocator { - void* alloc(size_t size) const { - return AllocateHeap(size, mtGC); - } - } allocator; - return alloc(&allocator, length); -} - -template -inline void XAttachedArray::free(ObjectT* obj) { - FreeHeap(obj); -} - -template -inline XAttachedArray::XAttachedArray(size_t length) : - _length(length) {} - -template -inline size_t XAttachedArray::length() const { - return _length; -} - -template -inline ArrayT* XAttachedArray::operator()(const ObjectT* obj) const { - return reinterpret_cast(reinterpret_cast(obj) + object_size()); -} - -#endif // SHARE_GC_X_XATTACHEDARRAY_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xBarrier.cpp b/src/hotspot/share/gc/x/xBarrier.cpp deleted file mode 100644 index 726950092b246..0000000000000 --- a/src/hotspot/share/gc/x/xBarrier.cpp +++ /dev/null @@ -1,275 +0,0 @@ -/* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
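The attached-array pattern above places an object and its variable-length array in one allocation: the array lives directly behind the padded object, and operator() recovers it by offsetting past object_size(). A compact sketch of the same layout, using malloc and per-element placement-new rather than HotSpot's allocators:

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>
    #include <new>

    template <typename ObjectT, typename ArrayT>
    struct AttachedArray {
      size_t _length;

      explicit AttachedArray(size_t length) : _length(length) {}

      // Pad the object so the trailing array starts at a suitable offset.
      static size_t object_size() {
        return (sizeof(ObjectT) + alignof(ArrayT) - 1) & ~(alignof(ArrayT) - 1);
      }

      // One allocation for both parts; the caller placement-news ObjectT at addr.
      static void* alloc(size_t length) {
        void* const addr = std::malloc(object_size() + sizeof(ArrayT) * length);
        char* const array_addr = static_cast<char*>(addr) + object_size();
        for (size_t i = 0; i < length; i++) {
          ::new (array_addr + i * sizeof(ArrayT)) ArrayT();
        }
        return addr;
      }

      // Recover the array from the object, like operator() above.
      ArrayT* array(const ObjectT* obj) const {
        return reinterpret_cast<ArrayT*>(reinterpret_cast<uintptr_t>(obj) + object_size());
      }
    };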
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "classfile/javaClasses.hpp" -#include "gc/x/xBarrier.inline.hpp" -#include "gc/x/xHeap.inline.hpp" -#include "gc/x/xOop.inline.hpp" -#include "gc/x/xThread.inline.hpp" -#include "memory/iterator.inline.hpp" -#include "oops/oop.inline.hpp" -#include "runtime/safepoint.hpp" -#include "utilities/debug.hpp" - -template -bool XBarrier::should_mark_through(uintptr_t addr) { - // Finalizable marked oops can still exists on the heap after marking - // has completed, in which case we just want to convert this into a - // good oop and not push it on the mark stack. - if (!during_mark()) { - assert(XAddress::is_marked(addr), "Should be marked"); - assert(XAddress::is_finalizable(addr), "Should be finalizable"); - return false; - } - - // During marking, we mark through already marked oops to avoid having - // some large part of the object graph hidden behind a pushed, but not - // yet flushed, entry on a mutator mark stack. Always marking through - // allows the GC workers to proceed through the object graph even if a - // mutator touched an oop first, which in turn will reduce the risk of - // having to flush mark stacks multiple times to terminate marking. - // - // However, when doing finalizable marking we don't always want to mark - // through. First, marking through an already strongly marked oop would - // be wasteful, since we will then proceed to do finalizable marking on - // an object which is, or will be, marked strongly. Second, marking - // through an already finalizable marked oop would also be wasteful, - // since such oops can never end up on a mutator mark stack and can - // therefore not hide some part of the object graph from GC workers. - if (finalizable) { - return !XAddress::is_marked(addr); - } - - // Mark through - return true; -} - -template -uintptr_t XBarrier::mark(uintptr_t addr) { - uintptr_t good_addr; - - if (XAddress::is_marked(addr)) { - // Already marked, but try to mark though anyway - good_addr = XAddress::good(addr); - } else if (XAddress::is_remapped(addr)) { - // Already remapped, but also needs to be marked - good_addr = XAddress::good(addr); - } else { - // Needs to be both remapped and marked - good_addr = remap(addr); - } - - // Mark - if (should_mark_through(addr)) { - XHeap::heap()->mark_object(good_addr); - } - - if (finalizable) { - // Make the oop finalizable marked/good, instead of normal marked/good. - // This is needed because an object might first becomes finalizable - // marked by the GC, and then loaded by a mutator thread. In this case, - // the mutator thread must be able to tell that the object needs to be - // strongly marked. The finalizable bit in the oop exists to make sure - // that a load of a finalizable marked oop will fall into the barrier - // slow path so that we can mark the object as strongly reachable. 
- return XAddress::finalizable_good(good_addr); - } - - return good_addr; -} - -uintptr_t XBarrier::remap(uintptr_t addr) { - assert(!XAddress::is_good(addr), "Should not be good"); - assert(!XAddress::is_weak_good(addr), "Should not be weak good"); - return XHeap::heap()->remap_object(addr); -} - -uintptr_t XBarrier::relocate(uintptr_t addr) { - assert(!XAddress::is_good(addr), "Should not be good"); - assert(!XAddress::is_weak_good(addr), "Should not be weak good"); - return XHeap::heap()->relocate_object(addr); -} - -uintptr_t XBarrier::relocate_or_mark(uintptr_t addr) { - return during_relocate() ? relocate(addr) : mark(addr); -} - -uintptr_t XBarrier::relocate_or_mark_no_follow(uintptr_t addr) { - return during_relocate() ? relocate(addr) : mark(addr); -} - -uintptr_t XBarrier::relocate_or_remap(uintptr_t addr) { - return during_relocate() ? relocate(addr) : remap(addr); -} - -// -// Load barrier -// -uintptr_t XBarrier::load_barrier_on_oop_slow_path(uintptr_t addr) { - return relocate_or_mark(addr); -} - -uintptr_t XBarrier::load_barrier_on_invisible_root_oop_slow_path(uintptr_t addr) { - return relocate_or_mark_no_follow(addr); -} - -void XBarrier::load_barrier_on_oop_fields(oop o) { - assert(XAddress::is_good(XOop::to_address(o)), "Should be good"); - XLoadBarrierOopClosure cl; - o->oop_iterate(&cl); -} - -// -// Weak load barrier -// -uintptr_t XBarrier::weak_load_barrier_on_oop_slow_path(uintptr_t addr) { - return XAddress::is_weak_good(addr) ? XAddress::good(addr) : relocate_or_remap(addr); -} - -uintptr_t XBarrier::weak_load_barrier_on_weak_oop_slow_path(uintptr_t addr) { - const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr); - if (XHeap::heap()->is_object_strongly_live(good_addr)) { - return good_addr; - } - - // Not strongly live - return 0; -} - -uintptr_t XBarrier::weak_load_barrier_on_phantom_oop_slow_path(uintptr_t addr) { - const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr); - if (XHeap::heap()->is_object_live(good_addr)) { - return good_addr; - } - - // Not live - return 0; -} - -// -// Keep alive barrier -// -uintptr_t XBarrier::keep_alive_barrier_on_oop_slow_path(uintptr_t addr) { - assert(during_mark(), "Invalid phase"); - - // Mark - return mark(addr); -} - -uintptr_t XBarrier::keep_alive_barrier_on_weak_oop_slow_path(uintptr_t addr) { - assert(XResurrection::is_blocked(), "This operation is only valid when resurrection is blocked"); - const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr); - assert(XHeap::heap()->is_object_strongly_live(good_addr), "Should be live"); - return good_addr; -} - -uintptr_t XBarrier::keep_alive_barrier_on_phantom_oop_slow_path(uintptr_t addr) { - assert(XResurrection::is_blocked(), "This operation is only valid when resurrection is blocked"); - const uintptr_t good_addr = weak_load_barrier_on_oop_slow_path(addr); - assert(XHeap::heap()->is_object_live(good_addr), "Should be live"); - return good_addr; -} - -// -// Mark barrier -// -uintptr_t XBarrier::mark_barrier_on_oop_slow_path(uintptr_t addr) { - assert(during_mark(), "Invalid phase"); - assert(XThread::is_worker(), "Invalid thread"); - - // Mark - return mark(addr); -} - -uintptr_t XBarrier::mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr) { - assert(during_mark(), "Invalid phase"); - assert(XThread::is_worker(), "Invalid thread"); - - // Mark - return mark(addr); -} - -// -// Narrow oop variants, never used. 
-// -oop XBarrier::load_barrier_on_oop_field(volatile narrowOop* p) { - ShouldNotReachHere(); - return nullptr; -} - -oop XBarrier::load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o) { - ShouldNotReachHere(); - return nullptr; -} - -void XBarrier::load_barrier_on_oop_array(volatile narrowOop* p, size_t length) { - ShouldNotReachHere(); -} - -oop XBarrier::load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o) { - ShouldNotReachHere(); - return nullptr; -} - -oop XBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o) { - ShouldNotReachHere(); - return nullptr; -} - -oop XBarrier::weak_load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o) { - ShouldNotReachHere(); - return nullptr; -} - -oop XBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o) { - ShouldNotReachHere(); - return nullptr; -} - -oop XBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o) { - ShouldNotReachHere(); - return nullptr; -} - -#ifdef ASSERT - -// ON_WEAK barriers should only ever be applied to j.l.r.Reference.referents. -void XBarrier::verify_on_weak(volatile oop* referent_addr) { - if (referent_addr != nullptr) { - uintptr_t base = (uintptr_t)referent_addr - java_lang_ref_Reference::referent_offset(); - oop obj = cast_to_oop(base); - assert(oopDesc::is_oop(obj), "Verification failed for: ref " PTR_FORMAT " obj: " PTR_FORMAT, (uintptr_t)referent_addr, base); - assert(java_lang_ref_Reference::is_referent_field(obj, java_lang_ref_Reference::referent_offset()), "Sanity"); - } -} - -#endif - -void XLoadBarrierOopClosure::do_oop(oop* p) { - XBarrier::load_barrier_on_oop_field(p); -} - -void XLoadBarrierOopClosure::do_oop(narrowOop* p) { - ShouldNotReachHere(); -} diff --git a/src/hotspot/share/gc/x/xBarrier.hpp b/src/hotspot/share/gc/x/xBarrier.hpp deleted file mode 100644 index e2ef210d7d25b..0000000000000 --- a/src/hotspot/share/gc/x/xBarrier.hpp +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XBARRIER_HPP -#define SHARE_GC_X_XBARRIER_HPP - -#include "memory/allStatic.hpp" -#include "memory/iterator.hpp" -#include "oops/oop.hpp" - -typedef bool (*XBarrierFastPath)(uintptr_t); -typedef uintptr_t (*XBarrierSlowPath)(uintptr_t); - -class XBarrier : public AllStatic { -private: - static const bool GCThread = true; - static const bool AnyThread = false; - - static const bool Follow = true; - static const bool DontFollow = false; - - static const bool Strong = false; - static const bool Finalizable = true; - - static const bool Publish = true; - static const bool Overflow = false; - - template static void self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_addr); - - template static oop barrier(volatile oop* p, oop o); - template static oop weak_barrier(volatile oop* p, oop o); - template static void root_barrier(oop* p, oop o); - - static bool is_good_or_null_fast_path(uintptr_t addr); - static bool is_weak_good_or_null_fast_path(uintptr_t addr); - static bool is_marked_or_null_fast_path(uintptr_t addr); - - static bool during_mark(); - static bool during_relocate(); - template static bool should_mark_through(uintptr_t addr); - template static uintptr_t mark(uintptr_t addr); - static uintptr_t remap(uintptr_t addr); - static uintptr_t relocate(uintptr_t addr); - static uintptr_t relocate_or_mark(uintptr_t addr); - static uintptr_t relocate_or_mark_no_follow(uintptr_t addr); - static uintptr_t relocate_or_remap(uintptr_t addr); - - static uintptr_t load_barrier_on_oop_slow_path(uintptr_t addr); - static uintptr_t load_barrier_on_invisible_root_oop_slow_path(uintptr_t addr); - - static uintptr_t weak_load_barrier_on_oop_slow_path(uintptr_t addr); - static uintptr_t weak_load_barrier_on_weak_oop_slow_path(uintptr_t addr); - static uintptr_t weak_load_barrier_on_phantom_oop_slow_path(uintptr_t addr); - - static uintptr_t keep_alive_barrier_on_oop_slow_path(uintptr_t addr); - static uintptr_t keep_alive_barrier_on_weak_oop_slow_path(uintptr_t addr); - static uintptr_t keep_alive_barrier_on_phantom_oop_slow_path(uintptr_t addr); - - static uintptr_t mark_barrier_on_oop_slow_path(uintptr_t addr); - static uintptr_t mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr); - - static void verify_on_weak(volatile oop* referent_addr) NOT_DEBUG_RETURN; - -public: - // Load barrier - static oop load_barrier_on_oop(oop o); - static oop load_barrier_on_oop_field(volatile oop* p); - static oop load_barrier_on_oop_field_preloaded(volatile oop* p, oop o); - static void load_barrier_on_oop_array(volatile oop* p, size_t length); - static void load_barrier_on_oop_fields(oop o); - static oop load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o); - static oop load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o); - static void load_barrier_on_root_oop_field(oop* p); - static void load_barrier_on_invisible_root_oop_field(oop* p); - - // Weak load barrier - static oop weak_load_barrier_on_oop_field(volatile oop* p); - static oop weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o); - static oop weak_load_barrier_on_weak_oop(oop o); - static oop weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o); - static oop weak_load_barrier_on_phantom_oop(oop o); - static oop weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o); - - // Is alive barrier - static bool is_alive_barrier_on_weak_oop(oop o); - static bool is_alive_barrier_on_phantom_oop(oop o); - - // Keep alive barrier - static void 
keep_alive_barrier_on_oop(oop o); - static void keep_alive_barrier_on_weak_oop_field(volatile oop* p); - static void keep_alive_barrier_on_phantom_oop_field(volatile oop* p); - static void keep_alive_barrier_on_phantom_root_oop_field(oop* p); - - // Mark barrier - static void mark_barrier_on_oop_field(volatile oop* p, bool finalizable); - static void mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable); - - // Narrow oop variants, never used. - static oop load_barrier_on_oop_field(volatile narrowOop* p); - static oop load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o); - static void load_barrier_on_oop_array(volatile narrowOop* p, size_t length); - static oop load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o); - static oop load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o); - static oop weak_load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o); - static oop weak_load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o); - static oop weak_load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p, oop o); -}; - -class XLoadBarrierOopClosure : public BasicOopIterateClosure { -public: - virtual void do_oop(oop* p); - virtual void do_oop(narrowOop* p); -}; - -#endif // SHARE_GC_X_XBARRIER_HPP diff --git a/src/hotspot/share/gc/x/xBarrier.inline.hpp b/src/hotspot/share/gc/x/xBarrier.inline.hpp deleted file mode 100644 index 2319bda4d74b1..0000000000000 --- a/src/hotspot/share/gc/x/xBarrier.inline.hpp +++ /dev/null @@ -1,394 +0,0 @@ -/* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XBARRIER_INLINE_HPP -#define SHARE_GC_X_XBARRIER_INLINE_HPP - -#include "gc/x/xBarrier.hpp" - -#include "code/codeCache.hpp" -#include "gc/x/xAddress.inline.hpp" -#include "gc/x/xOop.inline.hpp" -#include "gc/x/xResurrection.inline.hpp" -#include "oops/oop.hpp" -#include "runtime/atomic.hpp" -#include "runtime/continuation.hpp" - -// A self heal must always "upgrade" the address metadata bits in -// accordance with the metadata bits state machine, which has the -// valid state transitions as described below (where N is the GC -// cycle). -// -// Note the subtleness of overlapping GC cycles. Specifically that -// oops are colored Remapped(N) starting at relocation N and ending -// at marking N + 1. 
-// -// +--- Mark Start -// | +--- Mark End -// | | +--- Relocate Start -// | | | +--- Relocate End -// | | | | -// Marked |---N---|--N+1--|--N+2--|---- -// Finalizable |---N---|--N+1--|--N+2--|---- -// Remapped ----|---N---|--N+1--|--N+2--| -// -// VALID STATE TRANSITIONS -// -// Marked(N) -> Remapped(N) -// -> Marked(N + 1) -// -> Finalizable(N + 1) -// -// Finalizable(N) -> Marked(N) -// -> Remapped(N) -// -> Marked(N + 1) -// -> Finalizable(N + 1) -// -// Remapped(N) -> Marked(N + 1) -// -> Finalizable(N + 1) -// -// PHASE VIEW -// -// XPhaseMark -// Load & Mark -// Marked(N) <- Marked(N - 1) -// <- Finalizable(N - 1) -// <- Remapped(N - 1) -// <- Finalizable(N) -// -// Mark(Finalizable) -// Finalizable(N) <- Marked(N - 1) -// <- Finalizable(N - 1) -// <- Remapped(N - 1) -// -// Load(AS_NO_KEEPALIVE) -// Remapped(N - 1) <- Marked(N - 1) -// <- Finalizable(N - 1) -// -// XPhaseMarkCompleted (Resurrection blocked) -// Load & Load(ON_WEAK/PHANTOM_OOP_REF | AS_NO_KEEPALIVE) & KeepAlive -// Marked(N) <- Marked(N - 1) -// <- Finalizable(N - 1) -// <- Remapped(N - 1) -// <- Finalizable(N) -// -// Load(ON_STRONG_OOP_REF | AS_NO_KEEPALIVE) -// Remapped(N - 1) <- Marked(N - 1) -// <- Finalizable(N - 1) -// -// XPhaseMarkCompleted (Resurrection unblocked) -// Load -// Marked(N) <- Finalizable(N) -// -// XPhaseRelocate -// Load & Load(AS_NO_KEEPALIVE) -// Remapped(N) <- Marked(N) -// <- Finalizable(N) - -template -inline void XBarrier::self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_addr) { - if (heal_addr == 0) { - // Never heal with null since it interacts badly with reference processing. - // A mutator clearing an oop would be similar to calling Reference.clear(), - // which would make the reference non-discoverable or silently dropped - // by the reference processor. - return; - } - - assert(!fast_path(addr), "Invalid self heal"); - assert(fast_path(heal_addr), "Invalid self heal"); - - for (;;) { - // Heal - const uintptr_t prev_addr = Atomic::cmpxchg((volatile uintptr_t*)p, addr, heal_addr, memory_order_relaxed); - if (prev_addr == addr) { - // Success - return; - } - - if (fast_path(prev_addr)) { - // Must not self heal - return; - } - - // The oop location was healed by another barrier, but still needs upgrading. - // Re-apply healing to make sure the oop is not left with weaker (remapped or - // finalizable) metadata bits than what this barrier tried to apply. - assert(XAddress::offset(prev_addr) == XAddress::offset(heal_addr), "Invalid offset"); - addr = prev_addr; - } -} - -template -inline oop XBarrier::barrier(volatile oop* p, oop o) { - const uintptr_t addr = XOop::to_address(o); - - // Fast path - if (fast_path(addr)) { - return XOop::from_address(addr); - } - - // Slow path - const uintptr_t good_addr = slow_path(addr); - - if (p != nullptr) { - self_heal(p, addr, good_addr); - } - - return XOop::from_address(good_addr); -} - -template -inline oop XBarrier::weak_barrier(volatile oop* p, oop o) { - const uintptr_t addr = XOop::to_address(o); - - // Fast path - if (fast_path(addr)) { - // Return the good address instead of the weak good address - // to ensure that the currently active heap view is used. - return XOop::from_address(XAddress::good_or_null(addr)); - } - - // Slow path - const uintptr_t good_addr = slow_path(addr); - - if (p != nullptr) { - // The slow path returns a good/marked address or null, but we never mark - // oops in a weak load barrier so we always heal with the remapped address. 
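self_heal() above publishes the healed address back into the field with a CAS loop so that racing mutators converge on the strongest color without ever downgrading a value another thread already installed. A standalone sketch of that loop over std::atomic, where the color test is the same single-mask check as in the fast path and the mask value is left as a parameter:

    #include <atomic>
    #include <cstdint>

    inline bool is_good(uintptr_t addr, uintptr_t bad_mask) {
      return (addr & bad_mask) == 0;  // same shape as the barrier fast path
    }

    // 'seen' is the stale value the barrier loaded; 'healed' is the good
    // address the slow path produced; bad_mask is whatever this barrier checks.
    inline void self_heal(std::atomic<uintptr_t>* field,
                          uintptr_t seen,
                          uintptr_t healed,
                          uintptr_t bad_mask) {
      if (healed == 0) {
        return;  // never heal with null (see the reference-processing note above)
      }
      while (!field->compare_exchange_strong(seen, healed, std::memory_order_relaxed)) {
        // CAS failed: 'seen' now holds the value a racing thread stored.
        if (is_good(seen, bad_mask)) {
          return;  // already healed at least as strongly; must not overwrite
        }
        // Otherwise keep retrying so the weaker color cannot stick.
      }
    }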
- self_heal(p, addr, XAddress::remapped_or_null(good_addr)); - } - - return XOop::from_address(good_addr); -} - -template -inline void XBarrier::root_barrier(oop* p, oop o) { - const uintptr_t addr = XOop::to_address(o); - - // Fast path - if (fast_path(addr)) { - return; - } - - // Slow path - const uintptr_t good_addr = slow_path(addr); - - // Non-atomic healing helps speed up root scanning. This is safe to do - // since we are always healing roots in a safepoint, or under a lock, - // which ensures we are never racing with mutators modifying roots while - // we are healing them. It's also safe in case multiple GC threads try - // to heal the same root if it is aligned, since they would always heal - // the root in the same way and it does not matter in which order it - // happens. For misaligned oops, there needs to be mutual exclusion. - *p = XOop::from_address(good_addr); -} - -inline bool XBarrier::is_good_or_null_fast_path(uintptr_t addr) { - return XAddress::is_good_or_null(addr); -} - -inline bool XBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) { - return XAddress::is_weak_good_or_null(addr); -} - -inline bool XBarrier::is_marked_or_null_fast_path(uintptr_t addr) { - return XAddress::is_marked_or_null(addr); -} - -inline bool XBarrier::during_mark() { - return XGlobalPhase == XPhaseMark; -} - -inline bool XBarrier::during_relocate() { - return XGlobalPhase == XPhaseRelocate; -} - -// -// Load barrier -// -inline oop XBarrier::load_barrier_on_oop(oop o) { - return load_barrier_on_oop_field_preloaded((oop*)nullptr, o); -} - -inline oop XBarrier::load_barrier_on_oop_field(volatile oop* p) { - const oop o = Atomic::load(p); - return load_barrier_on_oop_field_preloaded(p, o); -} - -inline oop XBarrier::load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) { - return barrier(p, o); -} - -inline void XBarrier::load_barrier_on_oop_array(volatile oop* p, size_t length) { - for (volatile const oop* const end = p + length; p < end; p++) { - load_barrier_on_oop_field(p); - } -} - -inline oop XBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) { - verify_on_weak(p); - - if (XResurrection::is_blocked()) { - return barrier(p, o); - } - - return load_barrier_on_oop_field_preloaded(p, o); -} - -inline oop XBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) { - if (XResurrection::is_blocked()) { - return barrier(p, o); - } - - return load_barrier_on_oop_field_preloaded(p, o); -} - -inline void XBarrier::load_barrier_on_root_oop_field(oop* p) { - const oop o = *p; - root_barrier(p, o); -} - -inline void XBarrier::load_barrier_on_invisible_root_oop_field(oop* p) { - const oop o = *p; - root_barrier(p, o); -} - -// -// Weak load barrier -// -inline oop XBarrier::weak_load_barrier_on_oop_field(volatile oop* p) { - assert(!XResurrection::is_blocked(), "Should not be called during resurrection blocked phase"); - const oop o = Atomic::load(p); - return weak_load_barrier_on_oop_field_preloaded(p, o); -} - -inline oop XBarrier::weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) { - return weak_barrier(p, o); -} - -inline oop XBarrier::weak_load_barrier_on_weak_oop(oop o) { - return weak_load_barrier_on_weak_oop_field_preloaded((oop*)nullptr, o); -} - -inline oop XBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) { - verify_on_weak(p); - - if (XResurrection::is_blocked()) { - return barrier(p, o); - } - - return weak_load_barrier_on_oop_field_preloaded(p, o); -} - -inline oop 
XBarrier::weak_load_barrier_on_phantom_oop(oop o) { - return weak_load_barrier_on_phantom_oop_field_preloaded((oop*)nullptr, o); -} - -inline oop XBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) { - if (XResurrection::is_blocked()) { - return barrier(p, o); - } - - return weak_load_barrier_on_oop_field_preloaded(p, o); -} - -// -// Is alive barrier -// -inline bool XBarrier::is_alive_barrier_on_weak_oop(oop o) { - // Check if oop is logically non-null. This operation - // is only valid when resurrection is blocked. - assert(XResurrection::is_blocked(), "Invalid phase"); - return weak_load_barrier_on_weak_oop(o) != nullptr; -} - -inline bool XBarrier::is_alive_barrier_on_phantom_oop(oop o) { - // Check if oop is logically non-null. This operation - // is only valid when resurrection is blocked. - assert(XResurrection::is_blocked(), "Invalid phase"); - return weak_load_barrier_on_phantom_oop(o) != nullptr; -} - -// -// Keep alive barrier -// -inline void XBarrier::keep_alive_barrier_on_weak_oop_field(volatile oop* p) { - assert(XResurrection::is_blocked(), "This operation is only valid when resurrection is blocked"); - const oop o = Atomic::load(p); - barrier(p, o); -} - -inline void XBarrier::keep_alive_barrier_on_phantom_oop_field(volatile oop* p) { - assert(XResurrection::is_blocked(), "This operation is only valid when resurrection is blocked"); - const oop o = Atomic::load(p); - barrier(p, o); -} - -inline void XBarrier::keep_alive_barrier_on_phantom_root_oop_field(oop* p) { - // The keep alive operation is only valid when resurrection is blocked. - // - // Except with Loom, where we intentionally trigger arms nmethods after - // unlinking, to get a sense of what nmethods are alive. This will trigger - // the keep alive barriers, but the oops are healed and the slow-paths - // will not trigger. We have stronger checks in the slow-paths. - assert(XResurrection::is_blocked() || (CodeCache::contains((void*)p)), - "This operation is only valid when resurrection is blocked"); - const oop o = *p; - root_barrier(p, o); -} - -inline void XBarrier::keep_alive_barrier_on_oop(oop o) { - const uintptr_t addr = XOop::to_address(o); - assert(XAddress::is_good(addr), "Invalid address"); - - if (during_mark()) { - keep_alive_barrier_on_oop_slow_path(addr); - } -} - -// -// Mark barrier -// -inline void XBarrier::mark_barrier_on_oop_field(volatile oop* p, bool finalizable) { - const oop o = Atomic::load(p); - - if (finalizable) { - barrier(p, o); - } else { - const uintptr_t addr = XOop::to_address(o); - if (XAddress::is_good(addr)) { - // Mark through good oop - mark_barrier_on_oop_slow_path(addr); - } else { - // Mark through bad oop - barrier(p, o); - } - } -} - -inline void XBarrier::mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable) { - for (volatile const oop* const end = p + length; p < end; p++) { - mark_barrier_on_oop_field(p, finalizable); - } -} - -#endif // SHARE_GC_X_XBARRIER_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xBarrierSet.cpp b/src/hotspot/share/gc/x/xBarrierSet.cpp deleted file mode 100644 index cee53e8c3fa00..0000000000000 --- a/src/hotspot/share/gc/x/xBarrierSet.cpp +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xBarrierSet.hpp" -#include "gc/x/xBarrierSetAssembler.hpp" -#include "gc/x/xBarrierSetNMethod.hpp" -#include "gc/x/xBarrierSetStackChunk.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xHeap.inline.hpp" -#include "gc/x/xStackWatermark.hpp" -#include "gc/x/xThreadLocalData.hpp" -#include "runtime/javaThread.hpp" -#include "utilities/macros.hpp" -#ifdef COMPILER1 -#include "gc/x/c1/xBarrierSetC1.hpp" -#endif -#ifdef COMPILER2 -#include "gc/x/c2/xBarrierSetC2.hpp" -#endif - -class XBarrierSetC1; -class XBarrierSetC2; - -XBarrierSet::XBarrierSet() : - BarrierSet(make_barrier_set_assembler(), - make_barrier_set_c1(), - make_barrier_set_c2(), - new XBarrierSetNMethod(), - new XBarrierSetStackChunk(), - BarrierSet::FakeRtti(BarrierSet::XBarrierSet)) {} - -XBarrierSetAssembler* XBarrierSet::assembler() { - BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler(); - return reinterpret_cast(bsa); -} - -bool XBarrierSet::barrier_needed(DecoratorSet decorators, BasicType type) { - assert((decorators & AS_RAW) == 0, "Unexpected decorator"); - //assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Unexpected decorator"); - - if (is_reference_type(type)) { - assert((decorators & (IN_HEAP | IN_NATIVE)) != 0, "Where is reference?"); - // Barrier needed even when IN_NATIVE, to allow concurrent scanning. - return true; - } - - // Barrier not needed - return false; -} - -void XBarrierSet::on_thread_create(Thread* thread) { - // Create thread local data - XThreadLocalData::create(thread); -} - -void XBarrierSet::on_thread_destroy(Thread* thread) { - // Destroy thread local data - XThreadLocalData::destroy(thread); -} - -void XBarrierSet::on_thread_attach(Thread* thread) { - // Set thread local address bad mask - XThreadLocalData::set_address_bad_mask(thread, XAddressBadMask); - if (thread->is_Java_thread()) { - JavaThread* const jt = JavaThread::cast(thread); - StackWatermark* const watermark = new XStackWatermark(jt); - StackWatermarkSet::add_watermark(jt, watermark); - } -} - -void XBarrierSet::on_thread_detach(Thread* thread) { - // Flush and free any remaining mark stacks - XHeap::heap()->mark_flush_and_free(thread); -} - -void XBarrierSet::print_on(outputStream* st) const { - st->print_cr("XBarrierSet"); -} diff --git a/src/hotspot/share/gc/x/xBarrierSet.hpp b/src/hotspot/share/gc/x/xBarrierSet.hpp deleted file mode 100644 index 3f1eb760033d0..0000000000000 --- a/src/hotspot/share/gc/x/xBarrierSet.hpp +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. 
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XBARRIERSET_HPP -#define SHARE_GC_X_XBARRIERSET_HPP - -#include "gc/shared/barrierSet.hpp" - -class XBarrierSetAssembler; - -class XBarrierSet : public BarrierSet { -public: - XBarrierSet(); - - static XBarrierSetAssembler* assembler(); - static bool barrier_needed(DecoratorSet decorators, BasicType type); - - virtual void on_thread_create(Thread* thread); - virtual void on_thread_destroy(Thread* thread); - virtual void on_thread_attach(Thread* thread); - virtual void on_thread_detach(Thread* thread); - - virtual void print_on(outputStream* st) const; - - template - class AccessBarrier : public BarrierSet::AccessBarrier { - private: - typedef BarrierSet::AccessBarrier Raw; - - template - static void verify_decorators_present(); - - template - static void verify_decorators_absent(); - - static oop* field_addr(oop base, ptrdiff_t offset); - - template - static oop load_barrier_on_oop_field_preloaded(T* addr, oop o); - - template - static oop load_barrier_on_unknown_oop_field_preloaded(oop base, ptrdiff_t offset, T* addr, oop o); - - public: - // - // In heap - // - template - static oop oop_load_in_heap(T* addr); - static oop oop_load_in_heap_at(oop base, ptrdiff_t offset); - - template - static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value); - static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value); - - template - static oop oop_atomic_xchg_in_heap(T* addr, oop new_value); - static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value); - - template - static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw, - arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw, - size_t length); - - static void clone_in_heap(oop src, oop dst, size_t size); - - // - // Not in heap - // - template - static oop oop_load_not_in_heap(T* addr); - - template - static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value); - - template - static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value); - }; -}; - -template<> struct BarrierSet::GetName { - static const BarrierSet::Name value = BarrierSet::XBarrierSet; -}; - -template<> struct BarrierSet::GetType { - typedef ::XBarrierSet type; -}; - -#endif // SHARE_GC_X_XBARRIERSET_HPP diff --git a/src/hotspot/share/gc/x/xBarrierSet.inline.hpp b/src/hotspot/share/gc/x/xBarrierSet.inline.hpp deleted file mode 100644 index 3d92c38647d0b..0000000000000 --- a/src/hotspot/share/gc/x/xBarrierSet.inline.hpp +++ /dev/null @@ 
-1,242 +0,0 @@ -/* - * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XBARRIERSET_INLINE_HPP -#define SHARE_GC_X_XBARRIERSET_INLINE_HPP - -#include "gc/x/xBarrierSet.hpp" - -#include "gc/shared/accessBarrierSupport.inline.hpp" -#include "gc/x/xBarrier.inline.hpp" -#include "utilities/debug.hpp" - -template -template -inline void XBarrierSet::AccessBarrier::verify_decorators_present() { - if ((decorators & expected) == 0) { - fatal("Using unsupported access decorators"); - } -} - -template -template -inline void XBarrierSet::AccessBarrier::verify_decorators_absent() { - if ((decorators & expected) != 0) { - fatal("Using unsupported access decorators"); - } -} - -template -inline oop* XBarrierSet::AccessBarrier::field_addr(oop base, ptrdiff_t offset) { - assert(base != nullptr, "Invalid base"); - return reinterpret_cast(reinterpret_cast((void*)base) + offset); -} - -template -template -inline oop XBarrierSet::AccessBarrier::load_barrier_on_oop_field_preloaded(T* addr, oop o) { - verify_decorators_absent(); - - if (HasDecorator::value) { - if (HasDecorator::value) { - return XBarrier::weak_load_barrier_on_oop_field_preloaded(addr, o); - } else if (HasDecorator::value) { - return XBarrier::weak_load_barrier_on_weak_oop_field_preloaded(addr, o); - } else { - assert((HasDecorator::value), "Must be"); - return XBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(addr, o); - } - } else { - if (HasDecorator::value) { - return XBarrier::load_barrier_on_oop_field_preloaded(addr, o); - } else if (HasDecorator::value) { - return XBarrier::load_barrier_on_weak_oop_field_preloaded(addr, o); - } else { - assert((HasDecorator::value), "Must be"); - return XBarrier::load_barrier_on_phantom_oop_field_preloaded(addr, o); - } - } -} - -template -template -inline oop XBarrierSet::AccessBarrier::load_barrier_on_unknown_oop_field_preloaded(oop base, ptrdiff_t offset, T* addr, oop o) { - verify_decorators_present(); - - const DecoratorSet decorators_known_strength = - AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength(base, offset); - - if (HasDecorator::value) { - if (decorators_known_strength & ON_STRONG_OOP_REF) { - return XBarrier::weak_load_barrier_on_oop_field_preloaded(addr, o); - } else if (decorators_known_strength & ON_WEAK_OOP_REF) { - return XBarrier::weak_load_barrier_on_weak_oop_field_preloaded(addr, o); - } else { - assert(decorators_known_strength & ON_PHANTOM_OOP_REF, "Must be"); - return XBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(addr, 
o); - } - } else { - if (decorators_known_strength & ON_STRONG_OOP_REF) { - return XBarrier::load_barrier_on_oop_field_preloaded(addr, o); - } else if (decorators_known_strength & ON_WEAK_OOP_REF) { - return XBarrier::load_barrier_on_weak_oop_field_preloaded(addr, o); - } else { - assert(decorators_known_strength & ON_PHANTOM_OOP_REF, "Must be"); - return XBarrier::load_barrier_on_phantom_oop_field_preloaded(addr, o); - } - } -} - -// -// In heap -// -template -template -inline oop XBarrierSet::AccessBarrier::oop_load_in_heap(T* addr) { - verify_decorators_absent(); - - const oop o = Raw::oop_load_in_heap(addr); - return load_barrier_on_oop_field_preloaded(addr, o); -} - -template -inline oop XBarrierSet::AccessBarrier::oop_load_in_heap_at(oop base, ptrdiff_t offset) { - oop* const addr = field_addr(base, offset); - const oop o = Raw::oop_load_in_heap(addr); - - if (HasDecorator::value) { - return load_barrier_on_unknown_oop_field_preloaded(base, offset, addr, o); - } - - return load_barrier_on_oop_field_preloaded(addr, o); -} - -template -template -inline oop XBarrierSet::AccessBarrier::oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) { - verify_decorators_present(); - verify_decorators_absent(); - - XBarrier::load_barrier_on_oop_field(addr); - return Raw::oop_atomic_cmpxchg_in_heap(addr, compare_value, new_value); -} - -template -inline oop XBarrierSet::AccessBarrier::oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) { - verify_decorators_present(); - verify_decorators_absent(); - - // Through Unsafe.CompareAndExchangeObject()/CompareAndSetObject() we can receive - // calls with ON_UNKNOWN_OOP_REF set. However, we treat these as ON_STRONG_OOP_REF, - // with the motivation that if you're doing Unsafe operations on a Reference.referent - // field, then you're on your own anyway. 
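// Illustrative sketch only: a self-contained toy model of the "apply the load
// barrier, then do a plain CAS" ordering used by the oop_atomic_cmpxchg_in_heap*
// paths here. The single kGoodColor bit and the heal()/cas_field() names are
// stand-ins, not HotSpot code; real ZGC keeps multi-bit metadata high in the
// pointer and heals via XBarrier::load_barrier_on_oop_field().
#include <atomic>
#include <cassert>
#include <cstdint>

static constexpr uintptr_t kGoodColor = 0x1;  // pretend "good" metadata bit

static uintptr_t with_color(uintptr_t p, uintptr_t c) {
  return (p & ~static_cast<uintptr_t>(1)) | c;
}

// Stand-in for the load barrier: rewrite the field to the currently-good color.
static void heal(std::atomic<uintptr_t>& field) {
  uintptr_t old_value = field.load();
  field.compare_exchange_strong(old_value, with_color(old_value, kGoodColor));
}

// Barrier first, then a raw CAS: the caller's compare value is good-colored,
// so the CAS can only match once the field has been healed to the same color.
static bool cas_field(std::atomic<uintptr_t>& field, uintptr_t expected, uintptr_t desired) {
  heal(field);
  return field.compare_exchange_strong(expected, desired);
}

int main() {
  std::atomic<uintptr_t> field{with_color(0x1000, 0)};  // field still carries a stale color
  uintptr_t expected = with_color(0x1000, kGoodColor);  // caller holds the healed pointer
  assert(cas_field(field, expected, with_color(0x2000, kGoodColor)));
  return 0;
}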
- XBarrier::load_barrier_on_oop_field(field_addr(base, offset)); - return Raw::oop_atomic_cmpxchg_in_heap_at(base, offset, compare_value, new_value); -} - -template -template -inline oop XBarrierSet::AccessBarrier::oop_atomic_xchg_in_heap(T* addr, oop new_value) { - verify_decorators_present(); - verify_decorators_absent(); - - const oop o = Raw::oop_atomic_xchg_in_heap(addr, new_value); - return XBarrier::load_barrier_on_oop(o); -} - -template -inline oop XBarrierSet::AccessBarrier::oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) { - verify_decorators_present(); - verify_decorators_absent(); - - const oop o = Raw::oop_atomic_xchg_in_heap_at(base, offset, new_value); - return XBarrier::load_barrier_on_oop(o); -} - -template -template -inline bool XBarrierSet::AccessBarrier::oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw, - arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw, - size_t length) { - T* src = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw); - T* dst = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw); - - if (!HasDecorator::value) { - // No check cast, bulk barrier and bulk copy - XBarrier::load_barrier_on_oop_array(src, length); - return Raw::oop_arraycopy_in_heap(nullptr, 0, src, nullptr, 0, dst, length); - } - - // Check cast and copy each elements - Klass* const dst_klass = objArrayOop(dst_obj)->element_klass(); - for (const T* const end = src + length; src < end; src++, dst++) { - const oop elem = XBarrier::load_barrier_on_oop_field(src); - if (!oopDesc::is_instanceof_or_null(elem, dst_klass)) { - // Check cast failed - return false; - } - - // Cast is safe, since we know it's never a narrowOop - *(oop*)dst = elem; - } - - return true; -} - -template -inline void XBarrierSet::AccessBarrier::clone_in_heap(oop src, oop dst, size_t size) { - XBarrier::load_barrier_on_oop_fields(src); - Raw::clone_in_heap(src, dst, size); -} - -// -// Not in heap -// -template -template -inline oop XBarrierSet::AccessBarrier::oop_load_not_in_heap(T* addr) { - verify_decorators_absent(); - - const oop o = Raw::oop_load_not_in_heap(addr); - return load_barrier_on_oop_field_preloaded(addr, o); -} - -template -template -inline oop XBarrierSet::AccessBarrier::oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) { - verify_decorators_present(); - verify_decorators_absent(); - - return Raw::oop_atomic_cmpxchg_not_in_heap(addr, compare_value, new_value); -} - -template -template -inline oop XBarrierSet::AccessBarrier::oop_atomic_xchg_not_in_heap(T* addr, oop new_value) { - verify_decorators_present(); - verify_decorators_absent(); - - return Raw::oop_atomic_xchg_not_in_heap(addr, new_value); -} - -#endif // SHARE_GC_X_XBARRIERSET_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xBarrierSetAssembler.cpp b/src/hotspot/share/gc/x/xBarrierSetAssembler.cpp deleted file mode 100644 index d00c12ed291e4..0000000000000 --- a/src/hotspot/share/gc/x/xBarrierSetAssembler.cpp +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xBarrierSetAssembler.hpp" -#include "gc/x/xThreadLocalData.hpp" -#include "runtime/javaThread.hpp" - -Address XBarrierSetAssemblerBase::address_bad_mask_from_thread(Register thread) { - return Address(thread, XThreadLocalData::address_bad_mask_offset()); -} - -Address XBarrierSetAssemblerBase::address_bad_mask_from_jni_env(Register env) { - return Address(env, XThreadLocalData::address_bad_mask_offset() - JavaThread::jni_environment_offset()); -} diff --git a/src/hotspot/share/gc/x/xBarrierSetAssembler.hpp b/src/hotspot/share/gc/x/xBarrierSetAssembler.hpp deleted file mode 100644 index 2f733465bfb97..0000000000000 --- a/src/hotspot/share/gc/x/xBarrierSetAssembler.hpp +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XBARRIERSETASSEMBLER_HPP -#define SHARE_GC_X_XBARRIERSETASSEMBLER_HPP - -#include "gc/shared/barrierSetAssembler.hpp" -#include "utilities/macros.hpp" - -class XBarrierSetAssemblerBase : public BarrierSetAssembler { -public: - static Address address_bad_mask_from_thread(Register thread); - static Address address_bad_mask_from_jni_env(Register env); -}; - -// Needs to be included after definition of XBarrierSetAssemblerBase -#include CPU_HEADER(gc/x/xBarrierSetAssembler) - -#endif // SHARE_GC_X_XBARRIERSETASSEMBLER_HPP diff --git a/src/hotspot/share/gc/x/xBarrierSetNMethod.cpp b/src/hotspot/share/gc/x/xBarrierSetNMethod.cpp deleted file mode 100644 index 3dc76463028b8..0000000000000 --- a/src/hotspot/share/gc/x/xBarrierSetNMethod.cpp +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "code/nmethod.hpp" -#include "gc/x/xBarrierSetNMethod.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xLock.inline.hpp" -#include "gc/x/xNMethod.hpp" -#include "gc/x/xThreadLocalData.hpp" -#include "logging/log.hpp" -#include "runtime/threadWXSetters.inline.hpp" - -bool XBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) { - if (!is_armed(nm)) { - // Some other thread got here first and healed the oops - // and disarmed the nmethod. No need to continue. - return true; - } - - XLocker locker(XNMethod::lock_for_nmethod(nm)); - log_trace(nmethod, barrier)("Entered critical zone for %p", nm); - - if (!is_armed(nm)) { - // Some other thread managed to complete while we were - // waiting for lock. No need to continue. - return true; - } - - MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current())); - - if (nm->is_unloading()) { - // We don't need to take the lock when unlinking nmethods from - // the Method, because it is only concurrently unlinked by - // the entry barrier, which acquires the per nmethod lock. - nm->unlink_from_method(); - - // We can end up calling nmethods that are unloading - // since we clear compiled ICs lazily. Returning false - // will re-resovle the call and update the compiled IC. - return false; - } - - // Heal oops - XNMethod::nmethod_oops_barrier(nm); - - - // CodeCache unloading support - nm->mark_as_maybe_on_stack(); - - // Disarm - disarm(nm); - - return true; -} - -int* XBarrierSetNMethod::disarmed_guard_value_address() const { - return (int*)XAddressBadMaskHighOrderBitsAddr; -} - -ByteSize XBarrierSetNMethod::thread_disarmed_guard_value_offset() const { - return XThreadLocalData::nmethod_disarmed_offset(); -} diff --git a/src/hotspot/share/gc/x/xBarrierSetNMethod.hpp b/src/hotspot/share/gc/x/xBarrierSetNMethod.hpp deleted file mode 100644 index db1ee8c4e8f11..0000000000000 --- a/src/hotspot/share/gc/x/xBarrierSetNMethod.hpp +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XBARRIERSETNMETHOD_HPP -#define SHARE_GC_X_XBARRIERSETNMETHOD_HPP - -#include "gc/shared/barrierSetNMethod.hpp" -#include "memory/allocation.hpp" - -class nmethod; - -class XBarrierSetNMethod : public BarrierSetNMethod { -protected: - virtual bool nmethod_entry_barrier(nmethod* nm); - -public: - virtual ByteSize thread_disarmed_guard_value_offset() const; - virtual int* disarmed_guard_value_address() const; -}; - -#endif // SHARE_GC_X_XBARRIERSETNMETHOD_HPP diff --git a/src/hotspot/share/gc/x/xBarrierSetRuntime.cpp b/src/hotspot/share/gc/x/xBarrierSetRuntime.cpp deleted file mode 100644 index d87df24b9d8e6..0000000000000 --- a/src/hotspot/share/gc/x/xBarrierSetRuntime.cpp +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/x/xBarrier.inline.hpp" -#include "gc/x/xBarrierSetRuntime.hpp" -#include "oops/access.hpp" -#include "runtime/interfaceSupport.inline.hpp" - -JRT_LEAF(oopDesc*, XBarrierSetRuntime::load_barrier_on_oop_field_preloaded(oopDesc* o, oop* p)) - return XBarrier::load_barrier_on_oop_field_preloaded(p, o); -JRT_END - -JRT_LEAF(oopDesc*, XBarrierSetRuntime::weak_load_barrier_on_oop_field_preloaded(oopDesc* o, oop* p)) - return XBarrier::weak_load_barrier_on_oop_field_preloaded(p, o); -JRT_END - -JRT_LEAF(oopDesc*, XBarrierSetRuntime::weak_load_barrier_on_weak_oop_field_preloaded(oopDesc* o, oop* p)) - return XBarrier::weak_load_barrier_on_weak_oop_field_preloaded(p, o); -JRT_END - -JRT_LEAF(oopDesc*, XBarrierSetRuntime::weak_load_barrier_on_phantom_oop_field_preloaded(oopDesc* o, oop* p)) - return XBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(p, o); -JRT_END - -JRT_LEAF(oopDesc*, XBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded(oopDesc* o, oop* p)) - return XBarrier::load_barrier_on_weak_oop_field_preloaded(p, o); -JRT_END - -JRT_LEAF(oopDesc*, XBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded(oopDesc* o, oop* p)) - return XBarrier::load_barrier_on_phantom_oop_field_preloaded(p, o); -JRT_END - -JRT_LEAF(void, XBarrierSetRuntime::load_barrier_on_oop_array(oop* p, size_t length)) - XBarrier::load_barrier_on_oop_array(p, length); -JRT_END - -JRT_LEAF(void, XBarrierSetRuntime::clone(oopDesc* src, oopDesc* dst, size_t size)) - HeapAccess<>::clone(src, dst, size); -JRT_END - -address XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(DecoratorSet decorators) { - if (decorators & ON_PHANTOM_OOP_REF) { - if (decorators & AS_NO_KEEPALIVE) { - return weak_load_barrier_on_phantom_oop_field_preloaded_addr(); - } else { - return load_barrier_on_phantom_oop_field_preloaded_addr(); - } - } else if (decorators & ON_WEAK_OOP_REF) { - if (decorators & AS_NO_KEEPALIVE) { - return weak_load_barrier_on_weak_oop_field_preloaded_addr(); - } else { - return load_barrier_on_weak_oop_field_preloaded_addr(); - } - } else { - if (decorators & AS_NO_KEEPALIVE) { - return weak_load_barrier_on_oop_field_preloaded_addr(); - } else { - return load_barrier_on_oop_field_preloaded_addr(); - } - } -} - -address XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr() { - return reinterpret_cast
<address>(load_barrier_on_oop_field_preloaded); -} - -address XBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr() { - return reinterpret_cast
<address>(load_barrier_on_weak_oop_field_preloaded); -} - -address XBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr() { - return reinterpret_cast
<address>(load_barrier_on_phantom_oop_field_preloaded); -} - -address XBarrierSetRuntime::weak_load_barrier_on_oop_field_preloaded_addr() { - return reinterpret_cast
<address>(weak_load_barrier_on_oop_field_preloaded); -} - -address XBarrierSetRuntime::weak_load_barrier_on_weak_oop_field_preloaded_addr() { - return reinterpret_cast
<address>(weak_load_barrier_on_weak_oop_field_preloaded); -} - -address XBarrierSetRuntime::weak_load_barrier_on_phantom_oop_field_preloaded_addr() { - return reinterpret_cast
<address>(weak_load_barrier_on_phantom_oop_field_preloaded); -} - -address XBarrierSetRuntime::load_barrier_on_oop_array_addr() { - return reinterpret_cast
<address>(load_barrier_on_oop_array); -} - -address XBarrierSetRuntime::clone_addr() { - return reinterpret_cast<address>
(clone); -} diff --git a/src/hotspot/share/gc/x/xBarrierSetRuntime.hpp b/src/hotspot/share/gc/x/xBarrierSetRuntime.hpp deleted file mode 100644 index 6302f1ce36dc0..0000000000000 --- a/src/hotspot/share/gc/x/xBarrierSetRuntime.hpp +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XBARRIERSETRUNTIME_HPP -#define SHARE_GC_X_XBARRIERSETRUNTIME_HPP - -#include "memory/allStatic.hpp" -#include "oops/accessDecorators.hpp" -#include "utilities/globalDefinitions.hpp" - -class oopDesc; - -class XBarrierSetRuntime : public AllStatic { -private: - static oopDesc* load_barrier_on_oop_field_preloaded(oopDesc* o, oop* p); - static oopDesc* load_barrier_on_weak_oop_field_preloaded(oopDesc* o, oop* p); - static oopDesc* load_barrier_on_phantom_oop_field_preloaded(oopDesc* o, oop* p); - static oopDesc* weak_load_barrier_on_oop_field_preloaded(oopDesc* o, oop* p); - static oopDesc* weak_load_barrier_on_weak_oop_field_preloaded(oopDesc* o, oop* p); - static oopDesc* weak_load_barrier_on_phantom_oop_field_preloaded(oopDesc* o, oop* p); - static void load_barrier_on_oop_array(oop* p, size_t length); - static void clone(oopDesc* src, oopDesc* dst, size_t size); - -public: - static address load_barrier_on_oop_field_preloaded_addr(DecoratorSet decorators); - static address load_barrier_on_oop_field_preloaded_addr(); - static address load_barrier_on_weak_oop_field_preloaded_addr(); - static address load_barrier_on_phantom_oop_field_preloaded_addr(); - static address weak_load_barrier_on_oop_field_preloaded_addr(); - static address weak_load_barrier_on_weak_oop_field_preloaded_addr(); - static address weak_load_barrier_on_phantom_oop_field_preloaded_addr(); - static address load_barrier_on_oop_array_addr(); - static address clone_addr(); -}; - -#endif // SHARE_GC_X_XBARRIERSETRUNTIME_HPP diff --git a/src/hotspot/share/gc/x/xBarrierSetStackChunk.cpp b/src/hotspot/share/gc/x/xBarrierSetStackChunk.cpp deleted file mode 100644 index 1670a00434fb0..0000000000000 --- a/src/hotspot/share/gc/x/xBarrierSetStackChunk.cpp +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "gc/x/xBarrier.inline.hpp" -#include "gc/x/xBarrierSetStackChunk.hpp" -#include "runtime/atomic.hpp" -#include "utilities/debug.hpp" - -void XBarrierSetStackChunk::encode_gc_mode(stackChunkOop chunk, OopIterator* iterator) { - // Do nothing -} - -void XBarrierSetStackChunk::decode_gc_mode(stackChunkOop chunk, OopIterator* iterator) { - // Do nothing -} - -oop XBarrierSetStackChunk::load_oop(stackChunkOop chunk, oop* addr) { - oop obj = Atomic::load(addr); - return XBarrier::load_barrier_on_oop_field_preloaded((volatile oop*)nullptr, obj); -} - -oop XBarrierSetStackChunk::load_oop(stackChunkOop chunk, narrowOop* addr) { - ShouldNotReachHere(); - return nullptr; -} diff --git a/src/hotspot/share/gc/x/xBarrierSetStackChunk.hpp b/src/hotspot/share/gc/x/xBarrierSetStackChunk.hpp deleted file mode 100644 index 36180db7b8c4a..0000000000000 --- a/src/hotspot/share/gc/x/xBarrierSetStackChunk.hpp +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#ifndef SHARE_GC_X_XBARRIERSETSTACKCHUNK_HPP -#define SHARE_GC_X_XBARRIERSETSTACKCHUNK_HPP - -#include "gc/shared/barrierSetStackChunk.hpp" -#include "memory/iterator.hpp" -#include "oops/oopsHierarchy.hpp" -#include "utilities/globalDefinitions.hpp" - -class OopClosure; - -class XBarrierSetStackChunk : public BarrierSetStackChunk { -public: - virtual void encode_gc_mode(stackChunkOop chunk, OopIterator* iterator) override; - virtual void decode_gc_mode(stackChunkOop chunk, OopIterator* iterator) override; - - virtual oop load_oop(stackChunkOop chunk, oop* addr) override; - virtual oop load_oop(stackChunkOop chunk, narrowOop* addr) override; -}; - -#endif // SHARE_GC_X_XBARRIERSETSTACKCHUNK_HPP diff --git a/src/hotspot/share/gc/x/xBitField.hpp b/src/hotspot/share/gc/x/xBitField.hpp deleted file mode 100644 index f11d7cf7ef7a4..0000000000000 --- a/src/hotspot/share/gc/x/xBitField.hpp +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XBITFIELD_HPP -#define SHARE_GC_X_XBITFIELD_HPP - -#include "memory/allStatic.hpp" -#include "utilities/debug.hpp" -#include "utilities/globalDefinitions.hpp" - -// -// Example -// ------- -// -// typedef XBitField field_word_aligned_size; -// typedef XBitField field_length; -// -// -// 6 3 3 -// 3 2 1 2 10 -// +-----------------------------------+---------------------------------+--+ -// |11111111 11111111 11111111 11111111|11111111 11111111 11111111 111111|11| -// +-----------------------------------+---------------------------------+--+ -// | | | -// | 31-2 field_length (30-bits) * | -// | | -// | 1-0 field_word_aligned_size (2-bits) * -// | -// * 63-32 Unused (32-bits) -// -// -// field_word_aligned_size::encode(16) = 2 -// field_length::encode(2342) = 9368 -// -// field_word_aligned_size::decode(9368 | 2) = 16 -// field_length::decode(9368 | 2) = 2342 -// - -template -class XBitField : public AllStatic { -private: - static const int ContainerBits = sizeof(ContainerType) * BitsPerByte; - - static_assert(FieldBits < ContainerBits, "Field too large"); - static_assert(FieldShift + FieldBits <= ContainerBits, "Field too large"); - static_assert(ValueShift + FieldBits <= ContainerBits, "Field too large"); - - static const ContainerType FieldMask = (((ContainerType)1 << FieldBits) - 1); - -public: - static ValueType decode(ContainerType container) { - return (ValueType)(((container >> FieldShift) & FieldMask) << ValueShift); - } - - static ContainerType encode(ValueType value) { - assert(((ContainerType)value & (FieldMask << ValueShift)) == (ContainerType)value, "Invalid value"); - return ((ContainerType)value >> ValueShift) << FieldShift; - } -}; - -#endif // SHARE_GC_X_XBITFIELD_HPP diff --git a/src/hotspot/share/gc/x/xBitMap.hpp b/src/hotspot/share/gc/x/xBitMap.hpp deleted file mode 100644 index c96f63b4c8985..0000000000000 --- a/src/hotspot/share/gc/x/xBitMap.hpp +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XBITMAP_HPP -#define SHARE_GC_X_XBITMAP_HPP - -#include "utilities/bitMap.hpp" - -class XBitMap : public CHeapBitMap { -private: - static bm_word_t bit_mask_pair(idx_t bit); - - bool par_set_bit_pair_finalizable(idx_t bit, bool& inc_live); - bool par_set_bit_pair_strong(idx_t bit, bool& inc_live); - -public: - XBitMap(idx_t size_in_bits); - - bool par_set_bit_pair(idx_t bit, bool finalizable, bool& inc_live); -}; - -#endif // SHARE_GC_X_XBITMAP_HPP diff --git a/src/hotspot/share/gc/x/xBitMap.inline.hpp b/src/hotspot/share/gc/x/xBitMap.inline.hpp deleted file mode 100644 index e35f59eeb880e..0000000000000 --- a/src/hotspot/share/gc/x/xBitMap.inline.hpp +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XBITMAP_INLINE_HPP -#define SHARE_GC_X_XBITMAP_INLINE_HPP - -#include "gc/x/xBitMap.hpp" - -#include "runtime/atomic.hpp" -#include "utilities/bitMap.inline.hpp" -#include "utilities/debug.hpp" - -inline XBitMap::XBitMap(idx_t size_in_bits) : - CHeapBitMap(size_in_bits, mtGC, false /* clear */) {} - -inline BitMap::bm_word_t XBitMap::bit_mask_pair(idx_t bit) { - assert(bit_in_word(bit) < BitsPerWord - 1, "Invalid bit index"); - return (bm_word_t)3 << bit_in_word(bit); -} - -inline bool XBitMap::par_set_bit_pair_finalizable(idx_t bit, bool& inc_live) { - inc_live = par_set_bit(bit); - return inc_live; -} - -inline bool XBitMap::par_set_bit_pair_strong(idx_t bit, bool& inc_live) { - verify_index(bit); - volatile bm_word_t* const addr = word_addr(bit); - const bm_word_t pair_mask = bit_mask_pair(bit); - bm_word_t old_val = *addr; - - do { - const bm_word_t new_val = old_val | pair_mask; - if (new_val == old_val) { - // Someone else beat us to it - inc_live = false; - return false; - } - const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val); - if (cur_val == old_val) { - // Success - const bm_word_t marked_mask = bit_mask(bit); - inc_live = !(old_val & marked_mask); - return true; - } - - // The value changed, retry - old_val = cur_val; - } while (true); -} - -inline bool XBitMap::par_set_bit_pair(idx_t bit, bool finalizable, bool& inc_live) { - if (finalizable) { - return par_set_bit_pair_finalizable(bit, inc_live); - } else { - return par_set_bit_pair_strong(bit, inc_live); - } -} - -#endif // SHARE_GC_X_XBITMAP_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xBreakpoint.cpp b/src/hotspot/share/gc/x/xBreakpoint.cpp deleted file mode 100644 index e053ceaedb956..0000000000000 --- a/src/hotspot/share/gc/x/xBreakpoint.cpp +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/shared/concurrentGCBreakpoints.hpp" -#include "gc/x/xBreakpoint.hpp" -#include "runtime/mutexLocker.hpp" -#include "utilities/debug.hpp" - -bool XBreakpoint::_start_gc = false; - -void XBreakpoint::start_gc() { - MonitorLocker ml(ConcurrentGCBreakpoints::monitor()); - assert(ConcurrentGCBreakpoints::is_controlled(), "Invalid state"); - assert(!_start_gc, "Invalid state"); - _start_gc = true; - ml.notify_all(); -} - -void XBreakpoint::at_before_gc() { - MonitorLocker ml(ConcurrentGCBreakpoints::monitor(), Mutex::_no_safepoint_check_flag); - while (ConcurrentGCBreakpoints::is_controlled() && !_start_gc) { - ml.wait(); - } - _start_gc = false; - ConcurrentGCBreakpoints::notify_idle_to_active(); -} - -void XBreakpoint::at_after_gc() { - ConcurrentGCBreakpoints::notify_active_to_idle(); -} - -void XBreakpoint::at_after_marking_started() { - ConcurrentGCBreakpoints::at("AFTER MARKING STARTED"); -} - -void XBreakpoint::at_before_marking_completed() { - ConcurrentGCBreakpoints::at("BEFORE MARKING COMPLETED"); -} - -void XBreakpoint::at_after_reference_processing_started() { - ConcurrentGCBreakpoints::at("AFTER CONCURRENT REFERENCE PROCESSING STARTED"); -} diff --git a/src/hotspot/share/gc/x/xBreakpoint.hpp b/src/hotspot/share/gc/x/xBreakpoint.hpp deleted file mode 100644 index 0c0b9d3c90f69..0000000000000 --- a/src/hotspot/share/gc/x/xBreakpoint.hpp +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XBREAKPOINT_HPP -#define SHARE_GC_X_XBREAKPOINT_HPP - -#include "memory/allStatic.hpp" - -class XBreakpoint : public AllStatic { -private: - static bool _start_gc; - -public: - static void start_gc(); - - static void at_before_gc(); - static void at_after_gc(); - static void at_after_marking_started(); - static void at_before_marking_completed(); - static void at_after_reference_processing_started(); -}; - -#endif // SHARE_GC_X_XBREAKPOINT_HPP diff --git a/src/hotspot/share/gc/x/xCPU.cpp b/src/hotspot/share/gc/x/xCPU.cpp deleted file mode 100644 index d21d32aeb352d..0000000000000 --- a/src/hotspot/share/gc/x/xCPU.cpp +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/shared/gcLogPrecious.hpp" -#include "gc/x/xCPU.inline.hpp" -#include "memory/padded.inline.hpp" -#include "runtime/javaThread.hpp" -#include "runtime/os.hpp" -#include "utilities/debug.hpp" - -#define XCPU_UNKNOWN_AFFINITY ((Thread*)-1) -#define XCPU_UNKNOWN_SELF ((Thread*)-2) - -PaddedEnd* XCPU::_affinity = nullptr; -THREAD_LOCAL Thread* XCPU::_self = XCPU_UNKNOWN_SELF; -THREAD_LOCAL uint32_t XCPU::_cpu = 0; - -void XCPU::initialize() { - assert(_affinity == nullptr, "Already initialized"); - const uint32_t ncpus = count(); - - _affinity = PaddedArray::create_unfreeable(ncpus); - - for (uint32_t i = 0; i < ncpus; i++) { - _affinity[i]._thread = XCPU_UNKNOWN_AFFINITY; - } - - log_info_p(gc, init)("CPUs: %u total, %u available", - os::processor_count(), - os::initial_active_processor_count()); -} - -uint32_t XCPU::id_slow() { - // Set current thread - if (_self == XCPU_UNKNOWN_SELF) { - _self = Thread::current(); - } - - // Set current CPU - _cpu = os::processor_id(); - - // Update affinity table - _affinity[_cpu]._thread = _self; - - return _cpu; -} diff --git a/src/hotspot/share/gc/x/xCPU.hpp b/src/hotspot/share/gc/x/xCPU.hpp deleted file mode 100644 index fd931956c4bdc..0000000000000 --- a/src/hotspot/share/gc/x/xCPU.hpp +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XCPU_HPP -#define SHARE_GC_X_XCPU_HPP - -#include "memory/allStatic.hpp" -#include "memory/padded.hpp" -#include "utilities/globalDefinitions.hpp" - -class Thread; - -class XCPU : public AllStatic { -private: - struct XCPUAffinity { - Thread* _thread; - }; - - static PaddedEnd* _affinity; - static THREAD_LOCAL Thread* _self; - static THREAD_LOCAL uint32_t _cpu; - - static uint32_t id_slow(); - -public: - static void initialize(); - - static uint32_t count(); - static uint32_t id(); -}; - -#endif // SHARE_GC_X_XCPU_HPP diff --git a/src/hotspot/share/gc/x/xCPU.inline.hpp b/src/hotspot/share/gc/x/xCPU.inline.hpp deleted file mode 100644 index 3cf5bfa96e00e..0000000000000 --- a/src/hotspot/share/gc/x/xCPU.inline.hpp +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XCPU_INLINE_HPP -#define SHARE_GC_X_XCPU_INLINE_HPP - -#include "gc/x/xCPU.hpp" - -#include "runtime/os.hpp" -#include "utilities/debug.hpp" - -inline uint32_t XCPU::count() { - return os::processor_count(); -} - -inline uint32_t XCPU::id() { - assert(_affinity != nullptr, "Not initialized"); - - // Fast path - if (_affinity[_cpu]._thread == _self) { - return _cpu; - } - - // Slow path - return id_slow(); -} - -#endif // SHARE_GC_X_XCPU_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xCollectedHeap.cpp b/src/hotspot/share/gc/x/xCollectedHeap.cpp deleted file mode 100644 index d03b6312a67dd..0000000000000 --- a/src/hotspot/share/gc/x/xCollectedHeap.cpp +++ /dev/null @@ -1,344 +0,0 @@ -/* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "classfile/classLoaderData.hpp" -#include "gc/shared/gcHeapSummary.hpp" -#include "gc/shared/gcLocker.inline.hpp" -#include "gc/shared/suspendibleThreadSet.hpp" -#include "gc/x/xCollectedHeap.hpp" -#include "gc/x/xDirector.hpp" -#include "gc/x/xDriver.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xHeap.inline.hpp" -#include "gc/x/xNMethod.hpp" -#include "gc/x/xObjArrayAllocator.hpp" -#include "gc/x/xOop.inline.hpp" -#include "gc/x/xServiceability.hpp" -#include "gc/x/xStat.hpp" -#include "gc/x/xUtils.inline.hpp" -#include "memory/classLoaderMetaspace.hpp" -#include "memory/iterator.hpp" -#include "memory/metaspaceCriticalAllocation.hpp" -#include "memory/universe.hpp" -#include "oops/stackChunkOop.hpp" -#include "runtime/continuationJavaClasses.hpp" -#include "runtime/stackWatermarkSet.hpp" -#include "utilities/align.hpp" - -XCollectedHeap* XCollectedHeap::heap() { - return named_heap(CollectedHeap::Z); -} - -XCollectedHeap::XCollectedHeap() : - _barrier_set(), - _initialize(&_barrier_set), - _heap(), - _driver(new XDriver()), - _director(new XDirector(_driver)), - _stat(new XStat()), - _runtime_workers() {} - -CollectedHeap::Name XCollectedHeap::kind() const { - return CollectedHeap::Z; -} - -const char* XCollectedHeap::name() const { - return XName; -} - -jint XCollectedHeap::initialize() { - if (!_heap.is_initialized()) { - return JNI_ENOMEM; - } - - Universe::calculate_verify_data((HeapWord*)0, (HeapWord*)UINTPTR_MAX); - - return JNI_OK; -} - -void XCollectedHeap::initialize_serviceability() { - _heap.serviceability_initialize(); -} - -class XStopConcurrentGCThreadClosure : public ThreadClosure { -public: - virtual void do_thread(Thread* thread) { - if (thread->is_ConcurrentGC_thread()) { - ConcurrentGCThread::cast(thread)->stop(); - } - } -}; - -void XCollectedHeap::stop() { - XStopConcurrentGCThreadClosure cl; - gc_threads_do(&cl); -} - -size_t XCollectedHeap::max_capacity() const { - return _heap.max_capacity(); -} - -size_t XCollectedHeap::capacity() const { - return _heap.capacity(); -} - -size_t XCollectedHeap::used() const { - return _heap.used(); -} - -size_t XCollectedHeap::unused() const { - return _heap.unused(); -} - -bool XCollectedHeap::is_maximal_no_gc() const { - // Not supported - ShouldNotReachHere(); - return false; -} - -bool XCollectedHeap::is_in(const void* p) const { - return _heap.is_in((uintptr_t)p); -} - -bool XCollectedHeap::requires_barriers(stackChunkOop obj) const { - uintptr_t* cont_addr = obj->field_addr(jdk_internal_vm_StackChunk::cont_offset()); - - if (!_heap.is_allocating(cast_from_oop(obj))) { - // An object that isn't allocating, is visible from GC tracing. Such - // stack chunks require barriers. - return true; - } - - if (!XAddress::is_good_or_null(*cont_addr)) { - // If a chunk is allocated after a GC started, but before relocate start - // we can have an allocating chunk that isn't deeply good. That means that - // the contained oops might be bad and require GC barriers. - return true; - } - - // The chunk is allocating and its pointers are good. 
This chunk needs no - // GC barriers - return false; -} - -HeapWord* XCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) { - const size_t size_in_bytes = XUtils::words_to_bytes(align_object_size(requested_size)); - const uintptr_t addr = _heap.alloc_tlab(size_in_bytes); - - if (addr != 0) { - *actual_size = requested_size; - } - - return (HeapWord*)addr; -} - -oop XCollectedHeap::array_allocate(Klass* klass, size_t size, int length, bool do_zero, TRAPS) { - XObjArrayAllocator allocator(klass, size, length, do_zero, THREAD); - return allocator.allocate(); -} - -HeapWord* XCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) { - const size_t size_in_bytes = XUtils::words_to_bytes(align_object_size(size)); - return (HeapWord*)_heap.alloc_object(size_in_bytes); -} - -MetaWord* XCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data, - size_t size, - Metaspace::MetadataType mdtype) { - // Start asynchronous GC - collect(GCCause::_metadata_GC_threshold); - - // Expand and retry allocation - MetaWord* const result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype); - if (result != nullptr) { - return result; - } - - // As a last resort, try a critical allocation, riding on a synchronous full GC - return MetaspaceCriticalAllocation::allocate(loader_data, size, mdtype); -} - -void XCollectedHeap::collect(GCCause::Cause cause) { - _driver->collect(cause); -} - -void XCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { - // These collection requests are ignored since ZGC can't run a synchronous - // GC cycle from within the VM thread. This is considered benign, since the - // only GC causes coming in here should be heap dumper and heap inspector. - // If the heap dumper or heap inspector explicitly requests a gc and the - // caller is not the VM thread a synchronous GC cycle is performed from the - // caller thread in the prologue. 
- assert(Thread::current()->is_VM_thread(), "Should be the VM thread"); - guarantee(cause == GCCause::_heap_dump || - cause == GCCause::_heap_inspection, "Invalid cause"); -} - -void XCollectedHeap::do_full_collection(bool clear_all_soft_refs) { - // Not supported - ShouldNotReachHere(); -} - -size_t XCollectedHeap::tlab_capacity(Thread* ignored) const { - return _heap.tlab_capacity(); -} - -size_t XCollectedHeap::tlab_used(Thread* ignored) const { - return _heap.tlab_used(); -} - -size_t XCollectedHeap::max_tlab_size() const { - return _heap.max_tlab_size(); -} - -size_t XCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { - return _heap.unsafe_max_tlab_alloc(); -} - -MemoryUsage XCollectedHeap::memory_usage() { - return _heap.serviceability_memory_pool()->get_memory_usage(); -} - -GrowableArray XCollectedHeap::memory_managers() { - GrowableArray memory_managers(2); - memory_managers.append(_heap.serviceability_cycle_memory_manager()); - memory_managers.append(_heap.serviceability_pause_memory_manager()); - return memory_managers; -} - -GrowableArray XCollectedHeap::memory_pools() { - GrowableArray memory_pools(1); - memory_pools.append(_heap.serviceability_memory_pool()); - return memory_pools; -} - -void XCollectedHeap::object_iterate(ObjectClosure* cl) { - _heap.object_iterate(cl, true /* visit_weaks */); -} - -ParallelObjectIteratorImpl* XCollectedHeap::parallel_object_iterator(uint nworkers) { - return _heap.parallel_object_iterator(nworkers, true /* visit_weaks */); -} - -void XCollectedHeap::keep_alive(oop obj) { - _heap.keep_alive(obj); -} - -void XCollectedHeap::register_nmethod(nmethod* nm) { - XNMethod::register_nmethod(nm); -} - -void XCollectedHeap::unregister_nmethod(nmethod* nm) { - XNMethod::unregister_nmethod(nm); -} - -void XCollectedHeap::verify_nmethod(nmethod* nm) { - // Does nothing -} - -WorkerThreads* XCollectedHeap::safepoint_workers() { - return _runtime_workers.workers(); -} - -void XCollectedHeap::gc_threads_do(ThreadClosure* tc) const { - tc->do_thread(_director); - tc->do_thread(_driver); - tc->do_thread(_stat); - _heap.threads_do(tc); - _runtime_workers.threads_do(tc); -} - -VirtualSpaceSummary XCollectedHeap::create_heap_space_summary() { - return VirtualSpaceSummary((HeapWord*)0, (HeapWord*)capacity(), (HeapWord*)max_capacity()); -} - -void XCollectedHeap::safepoint_synchronize_begin() { - StackWatermarkSet::safepoint_synchronize_begin(); - SuspendibleThreadSet::synchronize(); -} - -void XCollectedHeap::safepoint_synchronize_end() { - SuspendibleThreadSet::desynchronize(); -} - -void XCollectedHeap::pin_object(JavaThread* thread, oop obj) { - GCLocker::lock_critical(thread); -} - -void XCollectedHeap::unpin_object(JavaThread* thread, oop obj) { - GCLocker::unlock_critical(thread); -} - -void XCollectedHeap::prepare_for_verify() { - // Does nothing -} - -void XCollectedHeap::print_on(outputStream* st) const { - _heap.print_on(st); -} - -void XCollectedHeap::print_on_error(outputStream* st) const { - st->print_cr("ZGC Globals:"); - st->print_cr(" GlobalPhase: %u (%s)", XGlobalPhase, XGlobalPhaseToString()); - st->print_cr(" GlobalSeqNum: %u", XGlobalSeqNum); - st->print_cr(" Offset Max: " SIZE_FORMAT "%s (" PTR_FORMAT ")", - byte_size_in_exact_unit(XAddressOffsetMax), - exact_unit_for_byte_size(XAddressOffsetMax), - XAddressOffsetMax); - st->print_cr(" Page Size Small: " SIZE_FORMAT "M", XPageSizeSmall / M); - st->print_cr(" Page Size Medium: " SIZE_FORMAT "M", XPageSizeMedium / M); - st->cr(); - st->print_cr("ZGC Metadata Bits:"); - st->print_cr(" 
Good: " PTR_FORMAT, XAddressGoodMask); - st->print_cr(" Bad: " PTR_FORMAT, XAddressBadMask); - st->print_cr(" WeakBad: " PTR_FORMAT, XAddressWeakBadMask); - st->print_cr(" Marked: " PTR_FORMAT, XAddressMetadataMarked); - st->print_cr(" Remapped: " PTR_FORMAT, XAddressMetadataRemapped); - st->cr(); - CollectedHeap::print_on_error(st); -} - -void XCollectedHeap::print_extended_on(outputStream* st) const { - _heap.print_extended_on(st); -} - -void XCollectedHeap::print_tracing_info() const { - // Does nothing -} - -bool XCollectedHeap::print_location(outputStream* st, void* addr) const { - return _heap.print_location(st, (uintptr_t)addr); -} - -void XCollectedHeap::verify(VerifyOption option /* ignored */) { - _heap.verify(); -} - -bool XCollectedHeap::is_oop(oop object) const { - return _heap.is_oop(XOop::to_address(object)); -} - -bool XCollectedHeap::supports_concurrent_gc_breakpoints() const { - return true; -} diff --git a/src/hotspot/share/gc/x/xCollectedHeap.hpp b/src/hotspot/share/gc/x/xCollectedHeap.hpp deleted file mode 100644 index 250c882d15e2a..0000000000000 --- a/src/hotspot/share/gc/x/xCollectedHeap.hpp +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XCOLLECTEDHEAP_HPP -#define SHARE_GC_X_XCOLLECTEDHEAP_HPP - -#include "gc/shared/collectedHeap.hpp" -#include "gc/shared/softRefPolicy.hpp" -#include "gc/x/xBarrierSet.hpp" -#include "gc/x/xHeap.hpp" -#include "gc/x/xInitialize.hpp" -#include "gc/x/xRuntimeWorkers.hpp" -#include "memory/metaspace.hpp" -#include "services/memoryUsage.hpp" - -class VMStructs; -class XDirector; -class XDriver; -class XStat; - -class XCollectedHeap : public CollectedHeap { - friend class ::VMStructs; - -private: - XBarrierSet _barrier_set; - XInitialize _initialize; - XHeap _heap; - XDriver* _driver; - XDirector* _director; - XStat* _stat; - XRuntimeWorkers _runtime_workers; - - HeapWord* allocate_new_tlab(size_t min_size, - size_t requested_size, - size_t* actual_size) override; - -public: - static XCollectedHeap* heap(); - - XCollectedHeap(); - Name kind() const override; - const char* name() const override; - jint initialize() override; - void initialize_serviceability() override; - void stop() override; - - size_t max_capacity() const override; - size_t capacity() const override; - size_t used() const override; - size_t unused() const override; - - bool is_maximal_no_gc() const override; - bool is_in(const void* p) const override; - bool requires_barriers(stackChunkOop obj) const override; - - oop array_allocate(Klass* klass, size_t size, int length, bool do_zero, TRAPS) override; - HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) override; - MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data, - size_t size, - Metaspace::MetadataType mdtype) override; - void collect(GCCause::Cause cause) override; - void collect_as_vm_thread(GCCause::Cause cause) override; - void do_full_collection(bool clear_all_soft_refs) override; - - size_t tlab_capacity(Thread* thr) const override; - size_t tlab_used(Thread* thr) const override; - size_t max_tlab_size() const override; - size_t unsafe_max_tlab_alloc(Thread* thr) const override; - - MemoryUsage memory_usage() override; - GrowableArray memory_managers() override; - GrowableArray memory_pools() override; - - void object_iterate(ObjectClosure* cl) override; - ParallelObjectIteratorImpl* parallel_object_iterator(uint nworkers) override; - - void keep_alive(oop obj) override; - - void register_nmethod(nmethod* nm) override; - void unregister_nmethod(nmethod* nm) override; - void verify_nmethod(nmethod* nmethod) override; - - WorkerThreads* safepoint_workers() override; - - void gc_threads_do(ThreadClosure* tc) const override; - - VirtualSpaceSummary create_heap_space_summary() override; - - void safepoint_synchronize_begin() override; - void safepoint_synchronize_end() override; - - void pin_object(JavaThread* thread, oop obj) override; - void unpin_object(JavaThread* thread, oop obj) override; - - void print_on(outputStream* st) const override; - void print_on_error(outputStream* st) const override; - void print_extended_on(outputStream* st) const override; - void print_tracing_info() const override; - bool print_location(outputStream* st, void* addr) const override; - - void prepare_for_verify() override; - void verify(VerifyOption option /* ignored */) override; - bool is_oop(oop object) const override; - bool supports_concurrent_gc_breakpoints() const override; -}; - -#endif // SHARE_GC_X_XCOLLECTEDHEAP_HPP diff --git a/src/hotspot/share/gc/x/xDebug.gdb b/src/hotspot/share/gc/x/xDebug.gdb deleted file mode 100644 index 2dbf578b07bc3..0000000000000 --- a/src/hotspot/share/gc/x/xDebug.gdb +++ 
/dev/null @@ -1,148 +0,0 @@ -# -# GDB functions for debugging the Z Garbage Collector -# - -printf "Loading zDebug.gdb\n" - -# Print Klass* -define zpk - printf "Klass: %s\n", (char*)((Klass*)($arg0))->_name->_body -end - -# Print oop -define zpo - set $obj = (oopDesc*)($arg0) - - printf "Oop: 0x%016llx\tState: ", (uintptr_t)$obj - if ((uintptr_t)$obj & (uintptr_t)XAddressGoodMask) - printf "Good " - if ((uintptr_t)$obj & (uintptr_t)XAddressMetadataRemapped) - printf "(Remapped)" - else - if ((uintptr_t)$obj & (uintptr_t)XAddressMetadataMarked) - printf "(Marked)" - else - printf "(Unknown)" - end - end - else - printf "Bad " - if ((uintptr_t)XAddressGoodMask & (uintptr_t)XAddressMetadataMarked) - # Should be marked - if ((uintptr_t)$obj & (uintptr_t)XAddressMetadataRemapped) - printf "(Not Marked, Remapped)" - else - printf "(Not Marked, Not Remapped)" - end - else - if ((uintptr_t)XAddressGoodMask & (uintptr_t)XAddressMetadataRemapped) - # Should be remapped - if ((uintptr_t)$obj & (uintptr_t)XAddressMetadataMarked) - printf "(Marked, Not Remapped)" - else - printf "(Not Marked, Not Remapped)" - end - else - # Unknown - printf "(Unknown)" - end - end - end - printf "\t Page: %llu\n", ((uintptr_t)$obj & XAddressOffsetMask) >> XGranuleSizeShift - x/16gx $obj - if (UseCompressedClassPointers) - set $klass = (Klass*)(void*)((uintptr_t)CompressedKlassPointers::_base +((uintptr_t)$obj->_metadata->_compressed_klass << CompressedKlassPointers::_shift)) - else - set $klass = $obj->_metadata->_klass - end - printf "Mark: 0x%016llx\tKlass: %s\n", (uintptr_t)$obj->_mark, (char*)$klass->_name->_body -end - -# Print heap page by page table index -define zpp - set $page = (XPage*)((uintptr_t)XHeap::_heap._page_table._map._map[($arg0)] & ~1) - printf "Page %p\n", $page - print *$page -end - -# Print page_table -define zpt - printf "Pagetable (first 128 slots)\n" - x/128gx XHeap::_heap._page_table._map._map -end - -# Print live map -define __zmarked - set $livemap = $arg0 - set $bit = $arg1 - set $size = $livemap._bitmap._size - set $segment = $size / XLiveMap::nsegments - set $segment_bit = 1 << $segment - - printf "Segment is " - if !($livemap._segment_live_bits & $segment_bit) - printf "NOT " - end - printf "live (segment %d)\n", $segment - - if $bit >= $size - print "Error: Bit %z out of bounds (bitmap size %z)\n", $bit, $size - else - set $word_index = $bit / 64 - set $bit_index = $bit % 64 - set $word = $livemap._bitmap._map[$word_index] - set $live_bit = $word & (1 << $bit_index) - - printf "Object is " - if $live_bit == 0 - printf "NOT " - end - printf "live (word index %d, bit index %d)\n", $word_index, $bit_index - end -end - -define zmarked - set $addr = $arg0 - set $obj = ((uintptr_t)$addr & XAddressOffsetMask) - set $page_index = $obj >> XGranuleSizeShift - set $page_entry = (uintptr_t)XHeap::_heap._page_table._map._map[$page_index] - set $page = (XPage*)($page_entry & ~1) - set $page_start = (uintptr_t)$page._virtual._start - set $page_end = (uintptr_t)$page._virtual._end - set $page_seqnum = $page._livemap._seqnum - set $global_seqnum = XGlobalSeqNum - - if $obj < $page_start || $obj >= $page_end - printf "Error: %p not in page %p (start %p, end %p)\n", $obj, $page, $page_start, $page_end - else - printf "Page is " - if $page_seqnum != $global_seqnum - printf "NOT " - end - printf "live (page %p, page seqnum %d, global seqnum %d)\n", $page, $page_seqnum, $global_seqnum - - #if $page_seqnum == $global_seqnum - set $offset = $obj - $page_start - set $bit = $offset / 8 - __zmarked 
$page._livemap $bit - #end - end -end - -# Print heap information -define zph - printf "Heap\n" - printf " GlobalPhase: %u\n", XGlobalPhase - printf " GlobalSeqNum: %u\n", XGlobalSeqNum - printf " Offset Max: %-15llu (0x%llx)\n", XAddressOffsetMax, XAddressOffsetMax - printf " Page Size Small: %-15llu (0x%llx)\n", XPageSizeSmall, XPageSizeSmall - printf " Page Size Medium: %-15llu (0x%llx)\n", XPageSizeMedium, XPageSizeMedium - printf "Metadata Bits\n" - printf " Good: 0x%016llx\n", XAddressGoodMask - printf " Bad: 0x%016llx\n", XAddressBadMask - printf " WeakBad: 0x%016llx\n", XAddressWeakBadMask - printf " Marked: 0x%016llx\n", XAddressMetadataMarked - printf " Remapped: 0x%016llx\n", XAddressMetadataRemapped -end - -# End of file diff --git a/src/hotspot/share/gc/x/xDirector.cpp b/src/hotspot/share/gc/x/xDirector.cpp deleted file mode 100644 index e1c69bd05b7de..0000000000000 --- a/src/hotspot/share/gc/x/xDirector.cpp +++ /dev/null @@ -1,406 +0,0 @@ -/* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/shared/gc_globals.hpp" -#include "gc/x/xDirector.hpp" -#include "gc/x/xDriver.hpp" -#include "gc/x/xHeap.inline.hpp" -#include "gc/x/xHeuristics.hpp" -#include "gc/x/xStat.hpp" -#include "logging/log.hpp" - -constexpr double one_in_1000 = 3.290527; -constexpr double sample_interval = 1.0 / XStatAllocRate::sample_hz; - -XDirector::XDirector(XDriver* driver) : - _driver(driver), - _metronome(XStatAllocRate::sample_hz) { - set_name("XDirector"); - create_and_start(); -} - -static void sample_allocation_rate() { - // Sample allocation rate. This is needed by rule_allocation_rate() - // below to estimate the time we have until we run out of memory. - const double bytes_per_second = XStatAllocRate::sample_and_reset(); - - log_debug(gc, alloc)("Allocation Rate: %.1fMB/s, Predicted: %.1fMB/s, Avg: %.1f(+/-%.1f)MB/s", - bytes_per_second / M, - XStatAllocRate::predict() / M, - XStatAllocRate::avg() / M, - XStatAllocRate::sd() / M); -} - -static XDriverRequest rule_allocation_stall() { - // Perform GC if we've observed at least one allocation stall since - // the last GC started. 
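
The heuristics in this file all follow the same shape: each rule inspects current heap and allocation statistics and either returns a GC cause or declines with no-gc, and make_gc_decision() further down runs the rules in priority order until one fires (the allocation-stall check itself continues directly below). A minimal standalone sketch of that pattern, using invented rule names and a simplified cause enum rather than the HotSpot types:

    #include <cstdio>

    // Simplified stand-ins for GCCause and the sampled heap state; these are
    // not the HotSpot types.
    enum class Cause { None, AllocationStall, Warmup };

    struct HeapState {
      bool   alloc_stalled;
      double used_fraction;
    };

    // A rule looks at the current state and either requests a GC or declines.
    using Rule = Cause (*)(const HeapState&);

    static Cause rule_allocation_stall(const HeapState& s) {
      return s.alloc_stalled ? Cause::AllocationStall : Cause::None;
    }

    static Cause rule_warmup(const HeapState& s) {
      return s.used_fraction >= 0.10 ? Cause::Warmup : Cause::None;
    }

    // The first rule that fires decides the GC cause, mirroring the shape of
    // make_gc_decision() later in this file.
    static Cause decide(const HeapState& s) {
      const Rule rules[] = { rule_allocation_stall, rule_warmup };
      for (Rule rule : rules) {
        const Cause cause = rule(s);
        if (cause != Cause::None) {
          return cause;
        }
      }
      return Cause::None;
    }

    int main() {
      const HeapState state{ /*alloc_stalled=*/ false, /*used_fraction=*/ 0.25 };
      std::printf("decision: %d\n", static_cast<int>(decide(state)));  // 2 == Warmup
      return 0;
    }
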
- if (!XHeap::heap()->has_alloc_stalled()) { - return GCCause::_no_gc; - } - - log_debug(gc, director)("Rule: Allocation Stall Observed"); - - return GCCause::_z_allocation_stall; -} - -static XDriverRequest rule_warmup() { - if (XStatCycle::is_warm()) { - // Rule disabled - return GCCause::_no_gc; - } - - // Perform GC if heap usage passes 10/20/30% and no other GC has been - // performed yet. This allows us to get some early samples of the GC - // duration, which is needed by the other rules. - const size_t soft_max_capacity = XHeap::heap()->soft_max_capacity(); - const size_t used = XHeap::heap()->used(); - const double used_threshold_percent = (XStatCycle::nwarmup_cycles() + 1) * 0.1; - const size_t used_threshold = soft_max_capacity * used_threshold_percent; - - log_debug(gc, director)("Rule: Warmup %.0f%%, Used: " SIZE_FORMAT "MB, UsedThreshold: " SIZE_FORMAT "MB", - used_threshold_percent * 100, used / M, used_threshold / M); - - if (used < used_threshold) { - return GCCause::_no_gc; - } - - return GCCause::_z_warmup; -} - -static XDriverRequest rule_timer() { - if (ZCollectionInterval <= 0) { - // Rule disabled - return GCCause::_no_gc; - } - - // Perform GC if timer has expired. - const double time_since_last_gc = XStatCycle::time_since_last(); - const double time_until_gc = ZCollectionInterval - time_since_last_gc; - - log_debug(gc, director)("Rule: Timer, Interval: %.3fs, TimeUntilGC: %.3fs", - ZCollectionInterval, time_until_gc); - - if (time_until_gc > 0) { - return GCCause::_no_gc; - } - - return GCCause::_z_timer; -} - -static double estimated_gc_workers(double serial_gc_time, double parallelizable_gc_time, double time_until_deadline) { - const double parallelizable_time_until_deadline = MAX2(time_until_deadline - serial_gc_time, 0.001); - return parallelizable_gc_time / parallelizable_time_until_deadline; -} - -static uint discrete_gc_workers(double gc_workers) { - return clamp(ceil(gc_workers), 1, ConcGCThreads); -} - -static double select_gc_workers(double serial_gc_time, double parallelizable_gc_time, double alloc_rate_sd_percent, double time_until_oom) { - // Use all workers until we're warm - if (!XStatCycle::is_warm()) { - const double not_warm_gc_workers = ConcGCThreads; - log_debug(gc, director)("Select GC Workers (Not Warm), GCWorkers: %.3f", not_warm_gc_workers); - return not_warm_gc_workers; - } - - // Calculate number of GC workers needed to avoid a long GC cycle and to avoid OOM. 
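
estimated_gc_workers() above is simply the parallelizable GC time divided by whatever is left of the deadline once the serial part is subtracted, and discrete_gc_workers() clamps the ceiling of that into [1, ConcGCThreads]; the calculation that uses them continues below. A small self-contained sketch of the same arithmetic with made-up sample numbers (the real inputs come from XStatCycle):

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    // Same shape as estimated_gc_workers(): how many workers are needed to fit
    // the parallelizable GC work into what remains of the deadline after the
    // serial part has been subtracted.
    static double estimated_gc_workers(double serial_gc_time,
                                       double parallelizable_gc_time,
                                       double time_until_deadline) {
      const double parallelizable_deadline = std::max(time_until_deadline - serial_gc_time, 0.001);
      return parallelizable_gc_time / parallelizable_deadline;
    }

    // Same shape as discrete_gc_workers(): round up and clamp into [1, max].
    static unsigned discrete_gc_workers(double gc_workers, unsigned conc_gc_threads) {
      const double clamped = std::min(std::max(std::ceil(gc_workers), 1.0),
                                      static_cast<double>(conc_gc_threads));
      return static_cast<unsigned>(clamped);
    }

    int main() {
      // Made-up sample numbers: 0.05s serial time, 2.4s of parallelizable work,
      // 6 seconds until the estimated OOM, 8 concurrent GC threads configured.
      const double workers = estimated_gc_workers(0.05, 2.4, 6.0);
      std::printf("estimated: %.3f, discrete: %u\n", workers, discrete_gc_workers(workers, 8));
      // 2.4 / (6.0 - 0.05) ~= 0.40, which rounds up to a single worker.
      return 0;
    }
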
-  const double avoid_long_gc_workers = estimated_gc_workers(serial_gc_time, parallelizable_gc_time, 10 /* seconds */);
-  const double avoid_oom_gc_workers = estimated_gc_workers(serial_gc_time, parallelizable_gc_time, time_until_oom);
-
-  const double gc_workers = MAX2(avoid_long_gc_workers, avoid_oom_gc_workers);
-  const uint actual_gc_workers = discrete_gc_workers(gc_workers);
-  const uint last_gc_workers = XStatCycle::last_active_workers();
-
-  // More than 15% deviation from the average is considered unsteady
-  if (alloc_rate_sd_percent >= 0.15) {
-    const double half_gc_workers = ConcGCThreads / 2.0;
-    const double unsteady_gc_workers = MAX3(gc_workers, last_gc_workers, half_gc_workers);
-    log_debug(gc, director)("Select GC Workers (Unsteady), "
-                            "AvoidLongGCWorkers: %.3f, AvoidOOMGCWorkers: %.3f, LastGCWorkers: %.3f, HalfGCWorkers: %.3f, GCWorkers: %.3f",
-                            avoid_long_gc_workers, avoid_oom_gc_workers, (double)last_gc_workers, half_gc_workers, unsteady_gc_workers);
-    return unsteady_gc_workers;
-  }
-
-  if (actual_gc_workers < last_gc_workers) {
-    // Before decreasing number of GC workers compared to the previous GC cycle, check if the
-    // next GC cycle will need to increase it again. If so, use the same number of GC workers
-    // that will be needed in the next cycle.
-    const double gc_duration_delta = (parallelizable_gc_time / actual_gc_workers) - (parallelizable_gc_time / last_gc_workers);
-    const double additional_time_for_allocations = XStatCycle::time_since_last() - gc_duration_delta - sample_interval;
-    const double next_time_until_oom = time_until_oom + additional_time_for_allocations;
-    const double next_avoid_oom_gc_workers = estimated_gc_workers(serial_gc_time, parallelizable_gc_time, next_time_until_oom);
-
-    // Add 0.5 to increase friction and avoid lowering too eagerly
-    const double next_gc_workers = next_avoid_oom_gc_workers + 0.5;
-    const double try_lowering_gc_workers = clamp(next_gc_workers, actual_gc_workers, last_gc_workers);
-
-    log_debug(gc, director)("Select GC Workers (Try Lowering), "
-                            "AvoidLongGCWorkers: %.3f, AvoidOOMGCWorkers: %.3f, NextAvoidOOMGCWorkers: %.3f, LastGCWorkers: %.3f, GCWorkers: %.3f",
-                            avoid_long_gc_workers, avoid_oom_gc_workers, next_avoid_oom_gc_workers, (double)last_gc_workers, try_lowering_gc_workers);
-    return try_lowering_gc_workers;
-  }
-
-  log_debug(gc, director)("Select GC Workers (Normal), "
-                          "AvoidLongGCWorkers: %.3f, AvoidOOMGCWorkers: %.3f, LastGCWorkers: %.3f, GCWorkers: %.3f",
-                          avoid_long_gc_workers, avoid_oom_gc_workers, (double)last_gc_workers, gc_workers);
-  return gc_workers;
-}
-
-static XDriverRequest rule_allocation_rate_dynamic() {
-  if (!XStatCycle::is_time_trustable()) {
-    // Rule disabled
-    return GCCause::_no_gc;
-  }
-
-  // Calculate amount of free memory available. Note that we take the
-  // relocation headroom into account to avoid in-place relocation.
-  const size_t soft_max_capacity = XHeap::heap()->soft_max_capacity();
-  const size_t used = XHeap::heap()->used();
-  const size_t free_including_headroom = soft_max_capacity - MIN2(soft_max_capacity, used);
-  const size_t free = free_including_headroom - MIN2(free_including_headroom, XHeuristics::relocation_headroom());
-
-  // Calculate time until OOM given the max allocation rate and the amount
-  // of free memory. The allocation rate is a moving average and we multiply
-  // that with an allocation spike tolerance factor to guard against unforeseen
-  // phase changes in the allocate rate.
We then add ~3.3 sigma to account for - // the allocation rate variance, which means the probability is 1 in 1000 - // that a sample is outside of the confidence interval. - const double alloc_rate_predict = XStatAllocRate::predict(); - const double alloc_rate_avg = XStatAllocRate::avg(); - const double alloc_rate_sd = XStatAllocRate::sd(); - const double alloc_rate_sd_percent = alloc_rate_sd / (alloc_rate_avg + 1.0); - const double alloc_rate = (MAX2(alloc_rate_predict, alloc_rate_avg) * ZAllocationSpikeTolerance) + (alloc_rate_sd * one_in_1000) + 1.0; - const double time_until_oom = (free / alloc_rate) / (1.0 + alloc_rate_sd_percent); - - // Calculate max serial/parallel times of a GC cycle. The times are - // moving averages, we add ~3.3 sigma to account for the variance. - const double serial_gc_time = XStatCycle::serial_time().davg() + (XStatCycle::serial_time().dsd() * one_in_1000); - const double parallelizable_gc_time = XStatCycle::parallelizable_time().davg() + (XStatCycle::parallelizable_time().dsd() * one_in_1000); - - // Calculate number of GC workers needed to avoid OOM. - const double gc_workers = select_gc_workers(serial_gc_time, parallelizable_gc_time, alloc_rate_sd_percent, time_until_oom); - - // Convert to a discrete number of GC workers within limits. - const uint actual_gc_workers = discrete_gc_workers(gc_workers); - - // Calculate GC duration given number of GC workers needed. - const double actual_gc_duration = serial_gc_time + (parallelizable_gc_time / actual_gc_workers); - const uint last_gc_workers = XStatCycle::last_active_workers(); - - // Calculate time until GC given the time until OOM and GC duration. - // We also subtract the sample interval, so that we don't overshoot the - // target time and end up starting the GC too late in the next interval. - const double time_until_gc = time_until_oom - actual_gc_duration - sample_interval; - - log_debug(gc, director)("Rule: Allocation Rate (Dynamic GC Workers), " - "MaxAllocRate: %.1fMB/s (+/-%.1f%%), Free: " SIZE_FORMAT "MB, GCCPUTime: %.3f, " - "GCDuration: %.3fs, TimeUntilOOM: %.3fs, TimeUntilGC: %.3fs, GCWorkers: %u -> %u", - alloc_rate / M, - alloc_rate_sd_percent * 100, - free / M, - serial_gc_time + parallelizable_gc_time, - serial_gc_time + (parallelizable_gc_time / actual_gc_workers), - time_until_oom, - time_until_gc, - last_gc_workers, - actual_gc_workers); - - if (actual_gc_workers <= last_gc_workers && time_until_gc > 0) { - return XDriverRequest(GCCause::_no_gc, actual_gc_workers); - } - - return XDriverRequest(GCCause::_z_allocation_rate, actual_gc_workers); -} - -static XDriverRequest rule_allocation_rate_static() { - if (!XStatCycle::is_time_trustable()) { - // Rule disabled - return GCCause::_no_gc; - } - - // Perform GC if the estimated max allocation rate indicates that we - // will run out of memory. The estimated max allocation rate is based - // on the moving average of the sampled allocation rate plus a safety - // margin based on variations in the allocation rate and unforeseen - // allocation spikes. - - // Calculate amount of free memory available. Note that we take the - // relocation headroom into account to avoid in-place relocation. 
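
Both allocation-rate rules reduce to the same arithmetic: pad the measured allocation rate for spikes and variance, divide the free memory by it to get a time until OOM, subtract the expected GC duration and one sample interval, and request a GC once the result reaches zero. A standalone sketch of that calculation with made-up numbers (the real inputs come from XStatAllocRate, XStatCycle and XHeuristics; the free-memory computation it starts from is the one that continues below):

    #include <algorithm>
    #include <cstdio>

    int main() {
      // Made-up sample inputs (MB and seconds); the real values come from
      // XStatAllocRate, XStatCycle and XHeuristics.
      const double free_mb            = 2048.0;   // free memory, headroom already subtracted
      const double alloc_rate_avg     = 300.0;    // moving average, MB/s
      const double alloc_rate_predict = 350.0;    // predicted rate, MB/s
      const double alloc_rate_sd      = 40.0;     // standard deviation, MB/s
      const double spike_tolerance    = 2.0;      // stands in for ZAllocationSpikeTolerance
      const double one_in_1000        = 3.290527; // ~3.3 sigma
      const double serial_gc_time     = 0.05;
      const double parallel_gc_time   = 2.4;
      const double gc_workers         = 4.0;
      const double sample_interval    = 0.1;      // 1 / sample_hz

      // Pad the allocation rate for spikes and variance, then see how long the
      // free memory lasts and how much of that a GC cycle would consume.
      const double sd_percent     = alloc_rate_sd / (alloc_rate_avg + 1.0);
      const double alloc_rate     = std::max(alloc_rate_predict, alloc_rate_avg) * spike_tolerance
                                  + alloc_rate_sd * one_in_1000 + 1.0;
      const double time_until_oom = (free_mb / alloc_rate) / (1.0 + sd_percent);
      const double gc_duration    = serial_gc_time + parallel_gc_time / gc_workers;
      const double time_until_gc  = time_until_oom - gc_duration - sample_interval;

      std::printf("time until OOM: %.2fs, GC duration: %.2fs, time until GC: %.2fs\n",
                  time_until_oom, gc_duration, time_until_gc);
      // A GC is requested once time_until_gc drops to zero or below.
      return 0;
    }
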
-  const size_t soft_max_capacity = XHeap::heap()->soft_max_capacity();
-  const size_t used = XHeap::heap()->used();
-  const size_t free_including_headroom = soft_max_capacity - MIN2(soft_max_capacity, used);
-  const size_t free = free_including_headroom - MIN2(free_including_headroom, XHeuristics::relocation_headroom());
-
-  // Calculate time until OOM given the max allocation rate and the amount
-  // of free memory. The allocation rate is a moving average and we multiply
-  // that with an allocation spike tolerance factor to guard against unforeseen
-  // phase changes in the allocate rate. We then add ~3.3 sigma to account for
-  // the allocation rate variance, which means the probability is 1 in 1000
-  // that a sample is outside of the confidence interval.
-  const double max_alloc_rate = (XStatAllocRate::avg() * ZAllocationSpikeTolerance) + (XStatAllocRate::sd() * one_in_1000);
-  const double time_until_oom = free / (max_alloc_rate + 1.0); // Plus 1.0B/s to avoid division by zero
-
-  // Calculate max serial/parallel times of a GC cycle. The times are
-  // moving averages, we add ~3.3 sigma to account for the variance.
-  const double serial_gc_time = XStatCycle::serial_time().davg() + (XStatCycle::serial_time().dsd() * one_in_1000);
-  const double parallelizable_gc_time = XStatCycle::parallelizable_time().davg() + (XStatCycle::parallelizable_time().dsd() * one_in_1000);
-
-  // Calculate GC duration given number of GC workers needed.
-  const double gc_duration = serial_gc_time + (parallelizable_gc_time / ConcGCThreads);
-
-  // Calculate time until GC given the time until OOM and max duration of GC.
-  // We also deduct the sample interval, so that we don't overshoot the target
-  // time and end up starting the GC too late in the next interval.
-  const double time_until_gc = time_until_oom - gc_duration - sample_interval;
-
-  log_debug(gc, director)("Rule: Allocation Rate (Static GC Workers), MaxAllocRate: %.1fMB/s, Free: " SIZE_FORMAT "MB, GCDuration: %.3fs, TimeUntilGC: %.3fs",
-                          max_alloc_rate / M, free / M, gc_duration, time_until_gc);
-
-  if (time_until_gc > 0) {
-    return GCCause::_no_gc;
-  }
-
-  return GCCause::_z_allocation_rate;
-}
-
-static XDriverRequest rule_allocation_rate() {
-  if (UseDynamicNumberOfGCThreads) {
-    return rule_allocation_rate_dynamic();
-  } else {
-    return rule_allocation_rate_static();
-  }
-}
-
-static XDriverRequest rule_high_usage() {
-  // Perform GC if the amount of free memory is 5% or less. This is a preventive
-  // measure in the case where the application has a very low allocation rate,
-  // such that the allocation rate rule doesn't trigger, but the amount of free
-  // memory is still slowly but surely heading towards zero. In this situation,
-  // we start a GC cycle to avoid a potential allocation stall later.
-
-  // Calculate amount of free memory available. Note that we take the
-  // relocation headroom into account to avoid in-place relocation.
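
The high-usage rule that follows is the simplest of the set: it only compares free memory, after subtracting the relocation headroom, against a fixed 5% threshold. A small standalone sketch of that check with made-up numbers:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Same shape as the check below: free memory (minus relocation headroom) as a
    // percentage of the soft max capacity, with a GC triggered at 5% or less.
    static bool high_usage(std::size_t soft_max_capacity, std::size_t used, std::size_t headroom) {
      const std::size_t free_including_headroom = soft_max_capacity - std::min(soft_max_capacity, used);
      const std::size_t free = free_including_headroom - std::min(free_including_headroom, headroom);
      const double free_percent = 100.0 * static_cast<double>(free) / static_cast<double>(soft_max_capacity);
      std::printf("free: %zuM (%.1f%%)\n", free / (1024 * 1024), free_percent);
      return free_percent <= 5.0;
    }

    int main() {
      const std::size_t M = 1024 * 1024;
      // Made-up numbers: 4G soft max capacity, ~3.9G used, 64M relocation headroom.
      std::printf("high usage GC needed: %s\n", high_usage(4096 * M, 3994 * M, 64 * M) ? "yes" : "no");
      return 0;
    }
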
- const size_t soft_max_capacity = XHeap::heap()->soft_max_capacity(); - const size_t used = XHeap::heap()->used(); - const size_t free_including_headroom = soft_max_capacity - MIN2(soft_max_capacity, used); - const size_t free = free_including_headroom - MIN2(free_including_headroom, XHeuristics::relocation_headroom()); - const double free_percent = percent_of(free, soft_max_capacity); - - log_debug(gc, director)("Rule: High Usage, Free: " SIZE_FORMAT "MB(%.1f%%)", - free / M, free_percent); - - if (free_percent > 5.0) { - return GCCause::_no_gc; - } - - return GCCause::_z_high_usage; -} - -static XDriverRequest rule_proactive() { - if (!ZProactive || !XStatCycle::is_warm()) { - // Rule disabled - return GCCause::_no_gc; - } - - // Perform GC if the impact of doing so, in terms of application throughput - // reduction, is considered acceptable. This rule allows us to keep the heap - // size down and allow reference processing to happen even when we have a lot - // of free space on the heap. - - // Only consider doing a proactive GC if the heap usage has grown by at least - // 10% of the max capacity since the previous GC, or more than 5 minutes has - // passed since the previous GC. This helps avoid superfluous GCs when running - // applications with very low allocation rate. - const size_t used_after_last_gc = XStatHeap::used_at_relocate_end(); - const size_t used_increase_threshold = XHeap::heap()->soft_max_capacity() * 0.10; // 10% - const size_t used_threshold = used_after_last_gc + used_increase_threshold; - const size_t used = XHeap::heap()->used(); - const double time_since_last_gc = XStatCycle::time_since_last(); - const double time_since_last_gc_threshold = 5 * 60; // 5 minutes - if (used < used_threshold && time_since_last_gc < time_since_last_gc_threshold) { - // Don't even consider doing a proactive GC - log_debug(gc, director)("Rule: Proactive, UsedUntilEnabled: " SIZE_FORMAT "MB, TimeUntilEnabled: %.3fs", - (used_threshold - used) / M, - time_since_last_gc_threshold - time_since_last_gc); - return GCCause::_no_gc; - } - - const double assumed_throughput_drop_during_gc = 0.50; // 50% - const double acceptable_throughput_drop = 0.01; // 1% - const double serial_gc_time = XStatCycle::serial_time().davg() + (XStatCycle::serial_time().dsd() * one_in_1000); - const double parallelizable_gc_time = XStatCycle::parallelizable_time().davg() + (XStatCycle::parallelizable_time().dsd() * one_in_1000); - const double gc_duration = serial_gc_time + (parallelizable_gc_time / ConcGCThreads); - const double acceptable_gc_interval = gc_duration * ((assumed_throughput_drop_during_gc / acceptable_throughput_drop) - 1.0); - const double time_until_gc = acceptable_gc_interval - time_since_last_gc; - - log_debug(gc, director)("Rule: Proactive, AcceptableGCInterval: %.3fs, TimeSinceLastGC: %.3fs, TimeUntilGC: %.3fs", - acceptable_gc_interval, time_since_last_gc, time_until_gc); - - if (time_until_gc > 0) { - return GCCause::_no_gc; - } - - return GCCause::_z_proactive; -} - -static XDriverRequest make_gc_decision() { - // List of rules - using XDirectorRule = XDriverRequest (*)(); - const XDirectorRule rules[] = { - rule_allocation_stall, - rule_warmup, - rule_timer, - rule_allocation_rate, - rule_high_usage, - rule_proactive, - }; - - // Execute rules - for (size_t i = 0; i < ARRAY_SIZE(rules); i++) { - const XDriverRequest request = rules[i](); - if (request.cause() != GCCause::_no_gc) { - return request; - } - } - - return GCCause::_no_gc; -} - -void XDirector::run_service() { - // Main loop - 
while (_metronome.wait_for_tick()) { - sample_allocation_rate(); - if (!_driver->is_busy()) { - const XDriverRequest request = make_gc_decision(); - if (request.cause() != GCCause::_no_gc) { - _driver->collect(request); - } - } - } -} - -void XDirector::stop_service() { - _metronome.stop(); -} diff --git a/src/hotspot/share/gc/x/xDirector.hpp b/src/hotspot/share/gc/x/xDirector.hpp deleted file mode 100644 index eacce20e8c9c6..0000000000000 --- a/src/hotspot/share/gc/x/xDirector.hpp +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XDIRECTOR_HPP -#define SHARE_GC_X_XDIRECTOR_HPP - -#include "gc/shared/concurrentGCThread.hpp" -#include "gc/x/xMetronome.hpp" - -class XDriver; - -class XDirector : public ConcurrentGCThread { -private: - XDriver* const _driver; - XMetronome _metronome; - -protected: - virtual void run_service(); - virtual void stop_service(); - -public: - XDirector(XDriver* driver); -}; - -#endif // SHARE_GC_X_XDIRECTOR_HPP diff --git a/src/hotspot/share/gc/x/xDriver.cpp b/src/hotspot/share/gc/x/xDriver.cpp deleted file mode 100644 index 3e6fd03134e12..0000000000000 --- a/src/hotspot/share/gc/x/xDriver.cpp +++ /dev/null @@ -1,518 +0,0 @@ -/* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/shared/gcId.hpp" -#include "gc/shared/gcLocker.hpp" -#include "gc/shared/gcVMOperations.hpp" -#include "gc/shared/isGCActiveMark.hpp" -#include "gc/x/xAbort.inline.hpp" -#include "gc/x/xBreakpoint.hpp" -#include "gc/x/xCollectedHeap.hpp" -#include "gc/x/xDriver.hpp" -#include "gc/x/xHeap.inline.hpp" -#include "gc/x/xMessagePort.inline.hpp" -#include "gc/x/xServiceability.hpp" -#include "gc/x/xStat.hpp" -#include "gc/x/xVerify.hpp" -#include "interpreter/oopMapCache.hpp" -#include "logging/log.hpp" -#include "memory/universe.hpp" -#include "runtime/threads.hpp" -#include "runtime/vmOperations.hpp" -#include "runtime/vmThread.hpp" - -static const XStatPhaseCycle XPhaseCycle("Garbage Collection Cycle"); -static const XStatPhasePause XPhasePauseMarkStart("Pause Mark Start"); -static const XStatPhaseConcurrent XPhaseConcurrentMark("Concurrent Mark"); -static const XStatPhaseConcurrent XPhaseConcurrentMarkContinue("Concurrent Mark Continue"); -static const XStatPhaseConcurrent XPhaseConcurrentMarkFree("Concurrent Mark Free"); -static const XStatPhasePause XPhasePauseMarkEnd("Pause Mark End"); -static const XStatPhaseConcurrent XPhaseConcurrentProcessNonStrongReferences("Concurrent Process Non-Strong References"); -static const XStatPhaseConcurrent XPhaseConcurrentResetRelocationSet("Concurrent Reset Relocation Set"); -static const XStatPhaseConcurrent XPhaseConcurrentSelectRelocationSet("Concurrent Select Relocation Set"); -static const XStatPhasePause XPhasePauseRelocateStart("Pause Relocate Start"); -static const XStatPhaseConcurrent XPhaseConcurrentRelocated("Concurrent Relocate"); -static const XStatCriticalPhase XCriticalPhaseGCLockerStall("GC Locker Stall", false /* verbose */); -static const XStatSampler XSamplerJavaThreads("System", "Java Threads", XStatUnitThreads); - -XDriverRequest::XDriverRequest() : - XDriverRequest(GCCause::_no_gc) {} - -XDriverRequest::XDriverRequest(GCCause::Cause cause) : - XDriverRequest(cause, ConcGCThreads) {} - -XDriverRequest::XDriverRequest(GCCause::Cause cause, uint nworkers) : - _cause(cause), - _nworkers(nworkers) {} - -bool XDriverRequest::operator==(const XDriverRequest& other) const { - return _cause == other._cause; -} - -GCCause::Cause XDriverRequest::cause() const { - return _cause; -} - -uint XDriverRequest::nworkers() const { - return _nworkers; -} - -class VM_XOperation : public VM_Operation { -private: - const uint _gc_id; - bool _gc_locked; - bool _success; - -public: - VM_XOperation() : - _gc_id(GCId::current()), - _gc_locked(false), - _success(false) {} - - virtual bool needs_inactive_gc_locker() const { - // An inactive GC locker is needed in operations where we change the bad - // mask or move objects. Changing the bad mask will invalidate all oops, - // which makes it conceptually the same thing as moving all objects. 
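
The GC-locker handling hinted at in the comment above works as a bail-out-and-retry protocol: a pause operation refuses to run while the locker is held (doit() below records this in _gc_locked), and the driver waits for the locker to be released before re-submitting the operation (see XDriver::pause() later in this file). A simplified standalone sketch of that protocol, using invented stand-in classes rather than the HotSpot GCLocker and VM operation machinery:

    #include <cstdio>

    // Simplified stand-ins, not the HotSpot GCLocker or VM operation classes: a
    // locker that may be held by threads in JNI critical regions, and a pause
    // operation that refuses to run while the locker is active.
    struct FakeGCLocker {
      int critical_count = 0;
      bool is_active() const { return critical_count > 0; }
    };

    struct FakePauseOperation {
      bool gc_locked = false;

      bool execute(FakeGCLocker& locker) {
        if (locker.is_active()) {
          // Mirrors the bail-out in doit(): record that we were blocked and let
          // the caller retry once the locker has been released.
          gc_locked = true;
          return false;
        }
        std::puts("pause executed");
        return true;
      }
    };

    int main() {
      FakeGCLocker locker;
      locker.critical_count = 1;  // a thread is inside a JNI critical region

      FakePauseOperation op;
      while (!op.execute(locker)) {
        std::puts("GC locker active, waiting before retrying the pause");
        locker.critical_count = 0;  // pretend the critical region was exited
        op = FakePauseOperation();  // re-submit a fresh operation
      }
      return 0;
    }
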
- return false; - } - - virtual bool skip_thread_oop_barriers() const { - return true; - } - - virtual bool do_operation() = 0; - - virtual bool doit_prologue() { - Heap_lock->lock(); - return true; - } - - virtual void doit() { - // Abort if GC locker state is incompatible - if (needs_inactive_gc_locker() && GCLocker::check_active_before_gc()) { - _gc_locked = true; - return; - } - - // Setup GC id and active marker - GCIdMark gc_id_mark(_gc_id); - IsSTWGCActiveMark gc_active_mark; - - // Verify before operation - XVerify::before_zoperation(); - - // Execute operation - _success = do_operation(); - - // Update statistics - XStatSample(XSamplerJavaThreads, Threads::number_of_threads()); - } - - virtual void doit_epilogue() { - Heap_lock->unlock(); - - // GC thread root traversal likely used OopMapCache a lot, which - // might have created lots of old entries. Trigger the cleanup now. - OopMapCache::try_trigger_cleanup(); - } - - bool gc_locked() const { - return _gc_locked; - } - - bool success() const { - return _success; - } -}; - -class VM_XMarkStart : public VM_XOperation { -public: - virtual VMOp_Type type() const { - return VMOp_XMarkStart; - } - - virtual bool needs_inactive_gc_locker() const { - return true; - } - - virtual bool do_operation() { - XStatTimer timer(XPhasePauseMarkStart); - XServiceabilityPauseTracer tracer; - - XCollectedHeap::heap()->increment_total_collections(true /* full */); - - XHeap::heap()->mark_start(); - return true; - } -}; - -class VM_XMarkEnd : public VM_XOperation { -public: - virtual VMOp_Type type() const { - return VMOp_XMarkEnd; - } - - virtual bool do_operation() { - XStatTimer timer(XPhasePauseMarkEnd); - XServiceabilityPauseTracer tracer; - return XHeap::heap()->mark_end(); - } -}; - -class VM_XRelocateStart : public VM_XOperation { -public: - virtual VMOp_Type type() const { - return VMOp_XRelocateStart; - } - - virtual bool needs_inactive_gc_locker() const { - return true; - } - - virtual bool do_operation() { - XStatTimer timer(XPhasePauseRelocateStart); - XServiceabilityPauseTracer tracer; - XHeap::heap()->relocate_start(); - return true; - } -}; - -class VM_XVerify : public VM_Operation { -public: - virtual VMOp_Type type() const { - return VMOp_XVerify; - } - - virtual bool skip_thread_oop_barriers() const { - return true; - } - - virtual void doit() { - XVerify::after_weak_processing(); - } -}; - -XDriver::XDriver() : - _gc_cycle_port(), - _gc_locker_port() { - set_name("XDriver"); - create_and_start(); -} - -bool XDriver::is_busy() const { - return _gc_cycle_port.is_busy(); -} - -void XDriver::collect(const XDriverRequest& request) { - switch (request.cause()) { - case GCCause::_heap_dump: - case GCCause::_heap_inspection: - case GCCause::_wb_young_gc: - case GCCause::_wb_full_gc: - case GCCause::_dcmd_gc_run: - case GCCause::_java_lang_system_gc: - case GCCause::_full_gc_alot: - case GCCause::_scavenge_alot: - case GCCause::_jvmti_force_gc: - case GCCause::_metadata_GC_clear_soft_refs: - case GCCause::_codecache_GC_aggressive: - // Start synchronous GC - _gc_cycle_port.send_sync(request); - break; - - case GCCause::_z_timer: - case GCCause::_z_warmup: - case GCCause::_z_allocation_rate: - case GCCause::_z_allocation_stall: - case GCCause::_z_proactive: - case GCCause::_z_high_usage: - case GCCause::_codecache_GC_threshold: - case GCCause::_metadata_GC_threshold: - // Start asynchronous GC - _gc_cycle_port.send_async(request); - break; - - case GCCause::_gc_locker: - // Restart VM operation previously blocked by the GC locker - 
_gc_locker_port.signal(); - break; - - case GCCause::_wb_breakpoint: - XBreakpoint::start_gc(); - _gc_cycle_port.send_async(request); - break; - - default: - // Other causes not supported - fatal("Unsupported GC cause (%s)", GCCause::to_string(request.cause())); - break; - } -} - -template -bool XDriver::pause() { - for (;;) { - T op; - VMThread::execute(&op); - if (op.gc_locked()) { - // Wait for GC to become unlocked and restart the VM operation - XStatTimer timer(XCriticalPhaseGCLockerStall); - _gc_locker_port.wait(); - continue; - } - - // Notify VM operation completed - _gc_locker_port.ack(); - - return op.success(); - } -} - -void XDriver::pause_mark_start() { - pause(); -} - -void XDriver::concurrent_mark() { - XStatTimer timer(XPhaseConcurrentMark); - XBreakpoint::at_after_marking_started(); - XHeap::heap()->mark(true /* initial */); - XBreakpoint::at_before_marking_completed(); -} - -bool XDriver::pause_mark_end() { - return pause(); -} - -void XDriver::concurrent_mark_continue() { - XStatTimer timer(XPhaseConcurrentMarkContinue); - XHeap::heap()->mark(false /* initial */); -} - -void XDriver::concurrent_mark_free() { - XStatTimer timer(XPhaseConcurrentMarkFree); - XHeap::heap()->mark_free(); -} - -void XDriver::concurrent_process_non_strong_references() { - XStatTimer timer(XPhaseConcurrentProcessNonStrongReferences); - XBreakpoint::at_after_reference_processing_started(); - XHeap::heap()->process_non_strong_references(); -} - -void XDriver::concurrent_reset_relocation_set() { - XStatTimer timer(XPhaseConcurrentResetRelocationSet); - XHeap::heap()->reset_relocation_set(); -} - -void XDriver::pause_verify() { - if (ZVerifyRoots || ZVerifyObjects) { - VM_XVerify op; - VMThread::execute(&op); - } -} - -void XDriver::concurrent_select_relocation_set() { - XStatTimer timer(XPhaseConcurrentSelectRelocationSet); - XHeap::heap()->select_relocation_set(); -} - -void XDriver::pause_relocate_start() { - pause(); -} - -void XDriver::concurrent_relocate() { - XStatTimer timer(XPhaseConcurrentRelocated); - XHeap::heap()->relocate(); -} - -void XDriver::check_out_of_memory() { - XHeap::heap()->check_out_of_memory(); -} - -static bool should_clear_soft_references(const XDriverRequest& request) { - // Clear soft references if implied by the GC cause - if (request.cause() == GCCause::_wb_full_gc || - request.cause() == GCCause::_metadata_GC_clear_soft_refs || - request.cause() == GCCause::_z_allocation_stall) { - // Clear - return true; - } - - // Don't clear - return false; -} - -static uint select_active_worker_threads_dynamic(const XDriverRequest& request) { - // Use requested number of worker threads - return request.nworkers(); -} - -static uint select_active_worker_threads_static(const XDriverRequest& request) { - const GCCause::Cause cause = request.cause(); - const uint nworkers = request.nworkers(); - - // Boost number of worker threads if implied by the GC cause - if (cause == GCCause::_wb_full_gc || - cause == GCCause::_java_lang_system_gc || - cause == GCCause::_metadata_GC_clear_soft_refs || - cause == GCCause::_z_allocation_stall) { - // Boost - const uint boosted_nworkers = MAX2(nworkers, ParallelGCThreads); - return boosted_nworkers; - } - - // Use requested number of worker threads - return nworkers; -} - -static uint select_active_worker_threads(const XDriverRequest& request) { - if (UseDynamicNumberOfGCThreads) { - return select_active_worker_threads_dynamic(request); - } else { - return select_active_worker_threads_static(request); - } -} - -class XDriverGCScope : public 
StackObj { -private: - GCIdMark _gc_id; - GCCause::Cause _gc_cause; - GCCauseSetter _gc_cause_setter; - XStatTimer _timer; - XServiceabilityCycleTracer _tracer; - -public: - XDriverGCScope(const XDriverRequest& request) : - _gc_id(), - _gc_cause(request.cause()), - _gc_cause_setter(XCollectedHeap::heap(), _gc_cause), - _timer(XPhaseCycle), - _tracer() { - // Update statistics - XStatCycle::at_start(); - - // Set up soft reference policy - const bool clear = should_clear_soft_references(request); - XHeap::heap()->set_soft_reference_policy(clear); - - // Select number of worker threads to use - const uint nworkers = select_active_worker_threads(request); - XHeap::heap()->set_active_workers(nworkers); - } - - ~XDriverGCScope() { - // Update statistics - XStatCycle::at_end(_gc_cause, XHeap::heap()->active_workers()); - - // Update data used by soft reference policy - Universe::heap()->update_capacity_and_used_at_gc(); - - // Signal that we have completed a visit to all live objects - Universe::heap()->record_whole_heap_examined_timestamp(); - } -}; - -// Macro to execute a termination check after a concurrent phase. Note -// that it's important that the termination check comes after the call -// to the function f, since we can't abort between pause_relocate_start() -// and concurrent_relocate(). We need to let concurrent_relocate() call -// abort_page() on the remaining entries in the relocation set. -#define concurrent(f) \ - do { \ - concurrent_##f(); \ - if (should_terminate()) { \ - return; \ - } \ - } while (false) - -void XDriver::gc(const XDriverRequest& request) { - XDriverGCScope scope(request); - - // Phase 1: Pause Mark Start - pause_mark_start(); - - // Phase 2: Concurrent Mark - concurrent(mark); - - // Phase 3: Pause Mark End - while (!pause_mark_end()) { - // Phase 3.5: Concurrent Mark Continue - concurrent(mark_continue); - } - - // Phase 4: Concurrent Mark Free - concurrent(mark_free); - - // Phase 5: Concurrent Process Non-Strong References - concurrent(process_non_strong_references); - - // Phase 6: Concurrent Reset Relocation Set - concurrent(reset_relocation_set); - - // Phase 7: Pause Verify - pause_verify(); - - // Phase 8: Concurrent Select Relocation Set - concurrent(select_relocation_set); - - // Phase 9: Pause Relocate Start - pause_relocate_start(); - - // Phase 10: Concurrent Relocate - concurrent(relocate); -} - -void XDriver::run_service() { - // Main loop - while (!should_terminate()) { - // Wait for GC request - const XDriverRequest request = _gc_cycle_port.receive(); - if (request.cause() == GCCause::_no_gc) { - continue; - } - - XBreakpoint::at_before_gc(); - - // Run GC - gc(request); - - if (should_terminate()) { - // Abort - break; - } - - // Notify GC completed - _gc_cycle_port.ack(); - - // Check for out of memory condition - check_out_of_memory(); - - XBreakpoint::at_after_gc(); - } -} - -void XDriver::stop_service() { - XAbort::abort(); - _gc_cycle_port.send_async(GCCause::_no_gc); -} diff --git a/src/hotspot/share/gc/x/xDriver.hpp b/src/hotspot/share/gc/x/xDriver.hpp deleted file mode 100644 index 3803b699b85df..0000000000000 --- a/src/hotspot/share/gc/x/xDriver.hpp +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XDRIVER_HPP -#define SHARE_GC_X_XDRIVER_HPP - -#include "gc/shared/concurrentGCThread.hpp" -#include "gc/shared/gcCause.hpp" -#include "gc/x/xMessagePort.hpp" - -class VM_XOperation; - -class XDriverRequest { -private: - GCCause::Cause _cause; - uint _nworkers; - -public: - XDriverRequest(); - XDriverRequest(GCCause::Cause cause); - XDriverRequest(GCCause::Cause cause, uint nworkers); - - bool operator==(const XDriverRequest& other) const; - - GCCause::Cause cause() const; - uint nworkers() const; -}; - -class XDriver : public ConcurrentGCThread { -private: - XMessagePort _gc_cycle_port; - XRendezvousPort _gc_locker_port; - - template bool pause(); - - void pause_mark_start(); - void concurrent_mark(); - bool pause_mark_end(); - void concurrent_mark_continue(); - void concurrent_mark_free(); - void concurrent_process_non_strong_references(); - void concurrent_reset_relocation_set(); - void pause_verify(); - void concurrent_select_relocation_set(); - void pause_relocate_start(); - void concurrent_relocate(); - - void check_out_of_memory(); - - void gc(const XDriverRequest& request); - -protected: - virtual void run_service(); - virtual void stop_service(); - -public: - XDriver(); - - bool is_busy() const; - - void collect(const XDriverRequest& request); -}; - -#endif // SHARE_GC_X_XDRIVER_HPP diff --git a/src/hotspot/share/gc/x/xErrno.cpp b/src/hotspot/share/gc/x/xErrno.cpp deleted file mode 100644 index 64951bc47ab15..0000000000000 --- a/src/hotspot/share/gc/x/xErrno.cpp +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/x/xErrno.hpp" -#include "runtime/os.hpp" - -#include -#include - -XErrno::XErrno() : - _error(errno) {} - -XErrno::XErrno(int error) : - _error(error) {} - -XErrno::operator bool() const { - return _error != 0; -} - -bool XErrno::operator==(int error) const { - return _error == error; -} - -bool XErrno::operator!=(int error) const { - return _error != error; -} - -const char* XErrno::to_string() const { - return os::strerror(_error); -} diff --git a/src/hotspot/share/gc/x/xErrno.hpp b/src/hotspot/share/gc/x/xErrno.hpp deleted file mode 100644 index eb72d43da3f8f..0000000000000 --- a/src/hotspot/share/gc/x/xErrno.hpp +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XERRNO_HPP -#define SHARE_GC_X_XERRNO_HPP - -#include "memory/allocation.hpp" - -class XErrno : public StackObj { -private: - const int _error; - -public: - XErrno(); - XErrno(int error); - - operator bool() const; - bool operator==(int error) const; - bool operator!=(int error) const; - const char* to_string() const; -}; - -#endif // SHARE_GC_X_XERRNO_HPP diff --git a/src/hotspot/share/gc/x/xForwarding.cpp b/src/hotspot/share/gc/x/xForwarding.cpp deleted file mode 100644 index aa0cd4dff0bcd..0000000000000 --- a/src/hotspot/share/gc/x/xForwarding.cpp +++ /dev/null @@ -1,210 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */
-
-#include "precompiled.hpp"
-#include "gc/x/xAddress.inline.hpp"
-#include "gc/x/xForwarding.inline.hpp"
-#include "gc/x/xStat.hpp"
-#include "gc/x/xUtils.inline.hpp"
-#include "utilities/align.hpp"
-
-//
-// Reference count states:
-//
-// * If the reference count is zero, it will never change again.
-//
-// * If the reference count is positive, it can be both retained
-//   (increased) and released (decreased).
-//
-// * If the reference count is negative, it can only be released
-//   (increased). A negative reference count means that one or more
-//   threads are waiting for one or more other threads to release
-//   their references.
-//
-// The reference lock is used for waiting until the reference
-// count has become zero (released) or negative one (claimed).
-//
-
-static const XStatCriticalPhase XCriticalPhaseRelocationStall("Relocation Stall");
-
-bool XForwarding::retain_page() {
-  for (;;) {
-    const int32_t ref_count = Atomic::load_acquire(&_ref_count);
-
-    if (ref_count == 0) {
-      // Released
-      return false;
-    }
-
-    if (ref_count < 0) {
-      // Claimed
-      const bool success = wait_page_released();
-      assert(success, "Should always succeed");
-      return false;
-    }
-
-    if (Atomic::cmpxchg(&_ref_count, ref_count, ref_count + 1) == ref_count) {
-      // Retained
-      return true;
-    }
-  }
-}
-
-XPage* XForwarding::claim_page() {
-  for (;;) {
-    const int32_t ref_count = Atomic::load(&_ref_count);
-    assert(ref_count > 0, "Invalid state");
-
-    // Invert reference count
-    if (Atomic::cmpxchg(&_ref_count, ref_count, -ref_count) != ref_count) {
-      continue;
-    }
-
-    // If the previous reference count was 1, then we just changed it to -1,
-    // and we have now claimed the page. Otherwise we wait until it is claimed.
-    if (ref_count != 1) {
-      XLocker locker(&_ref_lock);
-      while (Atomic::load_acquire(&_ref_count) != -1) {
-        _ref_lock.wait();
-      }
-    }
-
-    return _page;
-  }
-}
-
-void XForwarding::release_page() {
-  for (;;) {
-    const int32_t ref_count = Atomic::load(&_ref_count);
-    assert(ref_count != 0, "Invalid state");
-
-    if (ref_count > 0) {
-      // Decrement reference count
-      if (Atomic::cmpxchg(&_ref_count, ref_count, ref_count - 1) != ref_count) {
-        continue;
-      }
-
-      // If the previous reference count was 1, then we just decremented
-      // it to 0 and we should signal that the page is now released.
-      if (ref_count == 1) {
-        // Notify released
-        XLocker locker(&_ref_lock);
-        _ref_lock.notify_all();
-      }
-    } else {
-      // Increment reference count
-      if (Atomic::cmpxchg(&_ref_count, ref_count, ref_count + 1) != ref_count) {
-        continue;
-      }
-
-      // If the previous reference count was -2 or -1, then we just incremented it
-      // to -1 or 0, and we should signal that the page is now claimed or released.
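
The notification described just above is the other half of the reference-count protocol documented at the top of this file: positive counts are retained and released, zero is final, and negative counts mean one or more threads are waiting to claim the page; the notify itself follows directly below. A standalone sketch of the retain/release half of that protocol, using std::atomic and a condition variable instead of XConditionLock, with the negative "claimed" state left out for brevity:

    #include <atomic>
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>

    // Sketch of the retain/release half of the protocol, not the HotSpot code.
    class RefCounted {
    private:
      std::atomic<int>        _ref_count{1};
      std::mutex              _lock;
      std::condition_variable _released;

    public:
      bool retain() {
        int count = _ref_count.load(std::memory_order_acquire);
        while (count > 0) {
          // Only bump the count while it is still positive; once it has reached
          // zero it must never become positive again.
          if (_ref_count.compare_exchange_weak(count, count + 1, std::memory_order_acq_rel)) {
            return true;
          }
        }
        return false;  // already released
      }

      void release() {
        if (_ref_count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
          // Last reference dropped: wake anyone waiting in wait_released().
          std::lock_guard<std::mutex> guard(_lock);
          _released.notify_all();
        }
      }

      void wait_released() {
        std::unique_lock<std::mutex> guard(_lock);
        _released.wait(guard, [this] { return _ref_count.load(std::memory_order_acquire) == 0; });
      }
    };

    int main() {
      RefCounted page;
      std::printf("retain: %s\n", page.retain() ? "ok" : "already released");  // ok
      page.release();        // drops the extra reference
      page.release();        // drops the initial reference, count reaches zero
      std::printf("retain: %s\n", page.retain() ? "ok" : "already released");  // already released
      page.wait_released();  // returns immediately, count is already zero
      return 0;
    }
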
- if (ref_count == -2 || ref_count == -1) { - // Notify claimed or released - XLocker locker(&_ref_lock); - _ref_lock.notify_all(); - } - } - - return; - } -} - -bool XForwarding::wait_page_released() const { - if (Atomic::load_acquire(&_ref_count) != 0) { - XStatTimer timer(XCriticalPhaseRelocationStall); - XLocker locker(&_ref_lock); - while (Atomic::load_acquire(&_ref_count) != 0) { - if (_ref_abort) { - return false; - } - - _ref_lock.wait(); - } - } - - return true; -} - -XPage* XForwarding::detach_page() { - // Wait until released - if (Atomic::load_acquire(&_ref_count) != 0) { - XLocker locker(&_ref_lock); - while (Atomic::load_acquire(&_ref_count) != 0) { - _ref_lock.wait(); - } - } - - // Detach and return page - XPage* const page = _page; - _page = nullptr; - return page; -} - -void XForwarding::abort_page() { - XLocker locker(&_ref_lock); - assert(Atomic::load(&_ref_count) > 0, "Invalid state"); - assert(!_ref_abort, "Invalid state"); - _ref_abort = true; - _ref_lock.notify_all(); -} - -void XForwarding::verify() const { - guarantee(_ref_count != 0, "Invalid reference count"); - guarantee(_page != nullptr, "Invalid page"); - - uint32_t live_objects = 0; - size_t live_bytes = 0; - - for (XForwardingCursor i = 0; i < _entries.length(); i++) { - const XForwardingEntry entry = at(&i); - if (!entry.populated()) { - // Skip empty entries - continue; - } - - // Check from index - guarantee(entry.from_index() < _page->object_max_count(), "Invalid from index"); - - // Check for duplicates - for (XForwardingCursor j = i + 1; j < _entries.length(); j++) { - const XForwardingEntry other = at(&j); - if (!other.populated()) { - // Skip empty entries - continue; - } - - guarantee(entry.from_index() != other.from_index(), "Duplicate from"); - guarantee(entry.to_offset() != other.to_offset(), "Duplicate to"); - } - - const uintptr_t to_addr = XAddress::good(entry.to_offset()); - const size_t size = XUtils::object_size(to_addr); - const size_t aligned_size = align_up(size, _page->object_alignment()); - live_bytes += aligned_size; - live_objects++; - } - - // Verify number of live objects and bytes - _page->verify_live(live_objects, live_bytes); -} diff --git a/src/hotspot/share/gc/x/xForwarding.hpp b/src/hotspot/share/gc/x/xForwarding.hpp deleted file mode 100644 index a6185e23ced27..0000000000000 --- a/src/hotspot/share/gc/x/xForwarding.hpp +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XFORWARDING_HPP -#define SHARE_GC_X_XFORWARDING_HPP - -#include "gc/x/xAttachedArray.hpp" -#include "gc/x/xForwardingEntry.hpp" -#include "gc/x/xLock.hpp" -#include "gc/x/xVirtualMemory.hpp" - -class ObjectClosure; -class VMStructs; -class XForwardingAllocator; -class XPage; - -typedef size_t XForwardingCursor; - -class XForwarding { - friend class ::VMStructs; - friend class XForwardingTest; - -private: - typedef XAttachedArray AttachedArray; - - const XVirtualMemory _virtual; - const size_t _object_alignment_shift; - const AttachedArray _entries; - XPage* _page; - mutable XConditionLock _ref_lock; - volatile int32_t _ref_count; - bool _ref_abort; - bool _in_place; - - XForwardingEntry* entries() const; - XForwardingEntry at(XForwardingCursor* cursor) const; - XForwardingEntry first(uintptr_t from_index, XForwardingCursor* cursor) const; - XForwardingEntry next(XForwardingCursor* cursor) const; - - XForwarding(XPage* page, size_t nentries); - -public: - static uint32_t nentries(const XPage* page); - static XForwarding* alloc(XForwardingAllocator* allocator, XPage* page); - - uint8_t type() const; - uintptr_t start() const; - size_t size() const; - size_t object_alignment_shift() const; - void object_iterate(ObjectClosure *cl); - - bool retain_page(); - XPage* claim_page(); - void release_page(); - bool wait_page_released() const; - XPage* detach_page(); - void abort_page(); - - void set_in_place(); - bool in_place() const; - - XForwardingEntry find(uintptr_t from_index, XForwardingCursor* cursor) const; - uintptr_t insert(uintptr_t from_index, uintptr_t to_offset, XForwardingCursor* cursor); - - void verify() const; -}; - -#endif // SHARE_GC_X_XFORWARDING_HPP diff --git a/src/hotspot/share/gc/x/xForwarding.inline.hpp b/src/hotspot/share/gc/x/xForwarding.inline.hpp deleted file mode 100644 index 257109f3de926..0000000000000 --- a/src/hotspot/share/gc/x/xForwarding.inline.hpp +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XFORWARDING_INLINE_HPP -#define SHARE_GC_X_XFORWARDING_INLINE_HPP - -#include "gc/x/xForwarding.hpp" - -#include "gc/x/xAttachedArray.inline.hpp" -#include "gc/x/xForwardingAllocator.inline.hpp" -#include "gc/x/xHash.inline.hpp" -#include "gc/x/xHeap.hpp" -#include "gc/x/xLock.inline.hpp" -#include "gc/x/xPage.inline.hpp" -#include "gc/x/xVirtualMemory.inline.hpp" -#include "runtime/atomic.hpp" -#include "utilities/debug.hpp" -#include "utilities/powerOfTwo.hpp" - -inline uint32_t XForwarding::nentries(const XPage* page) { - // The number returned by the function is used to size the hash table of - // forwarding entries for this page. This hash table uses linear probing. - // The size of the table must be a power of two to allow for quick and - // inexpensive indexing/masking. The table is also sized to have a load - // factor of 50%, i.e. sized to have double the number of entries actually - // inserted, to allow for good lookup/insert performance. - return round_up_power_of_2(page->live_objects() * 2); -} - -inline XForwarding* XForwarding::alloc(XForwardingAllocator* allocator, XPage* page) { - const size_t nentries = XForwarding::nentries(page); - void* const addr = AttachedArray::alloc(allocator, nentries); - return ::new (addr) XForwarding(page, nentries); -} - -inline XForwarding::XForwarding(XPage* page, size_t nentries) : - _virtual(page->virtual_memory()), - _object_alignment_shift(page->object_alignment_shift()), - _entries(nentries), - _page(page), - _ref_lock(), - _ref_count(1), - _ref_abort(false), - _in_place(false) {} - -inline uint8_t XForwarding::type() const { - return _page->type(); -} - -inline uintptr_t XForwarding::start() const { - return _virtual.start(); -} - -inline size_t XForwarding::size() const { - return _virtual.size(); -} - -inline size_t XForwarding::object_alignment_shift() const { - return _object_alignment_shift; -} - -inline void XForwarding::object_iterate(ObjectClosure *cl) { - return _page->object_iterate(cl); -} - -inline void XForwarding::set_in_place() { - _in_place = true; -} - -inline bool XForwarding::in_place() const { - return _in_place; -} - -inline XForwardingEntry* XForwarding::entries() const { - return _entries(this); -} - -inline XForwardingEntry XForwarding::at(XForwardingCursor* cursor) const { - // Load acquire for correctness with regards to - // accesses to the contents of the forwarded object. - return Atomic::load_acquire(entries() + *cursor); -} - -inline XForwardingEntry XForwarding::first(uintptr_t from_index, XForwardingCursor* cursor) const { - const size_t mask = _entries.length() - 1; - const size_t hash = XHash::uint32_to_uint32((uint32_t)from_index); - *cursor = hash & mask; - return at(cursor); -} - -inline XForwardingEntry XForwarding::next(XForwardingCursor* cursor) const { - const size_t mask = _entries.length() - 1; - *cursor = (*cursor + 1) & mask; - return at(cursor); -} - -inline XForwardingEntry XForwarding::find(uintptr_t from_index, XForwardingCursor* cursor) const { - // Reading entries in the table races with the atomic CAS done for - // insertion into the table. This is safe because each entry is at - // most updated once (from zero to something else). 
- XForwardingEntry entry = first(from_index, cursor); - while (entry.populated()) { - if (entry.from_index() == from_index) { - // Match found, return matching entry - return entry; - } - - entry = next(cursor); - } - - // Match not found, return empty entry - return entry; -} - -inline uintptr_t XForwarding::insert(uintptr_t from_index, uintptr_t to_offset, XForwardingCursor* cursor) { - const XForwardingEntry new_entry(from_index, to_offset); - const XForwardingEntry old_entry; // Empty - - // Make sure that object copy is finished - // before forwarding table installation - OrderAccess::release(); - - for (;;) { - const XForwardingEntry prev_entry = Atomic::cmpxchg(entries() + *cursor, old_entry, new_entry, memory_order_relaxed); - if (!prev_entry.populated()) { - // Success - return to_offset; - } - - // Find next empty or matching entry - XForwardingEntry entry = at(cursor); - while (entry.populated()) { - if (entry.from_index() == from_index) { - // Match found, return already inserted address - return entry.to_offset(); - } - - entry = next(cursor); - } - } -} - -#endif // SHARE_GC_X_XFORWARDING_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xForwardingAllocator.cpp b/src/hotspot/share/gc/x/xForwardingAllocator.cpp deleted file mode 100644 index c8368fde5f5cc..0000000000000 --- a/src/hotspot/share/gc/x/xForwardingAllocator.cpp +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xForwardingAllocator.hpp" -#include "memory/allocation.inline.hpp" - -XForwardingAllocator::XForwardingAllocator() : - _start(nullptr), - _end(nullptr), - _top(nullptr) {} - -XForwardingAllocator::~XForwardingAllocator() { - FREE_C_HEAP_ARRAY(char, _start); -} - -void XForwardingAllocator::reset(size_t size) { - _start = _top = REALLOC_C_HEAP_ARRAY(char, _start, size, mtGC); - _end = _start + size; -} diff --git a/src/hotspot/share/gc/x/xForwardingAllocator.hpp b/src/hotspot/share/gc/x/xForwardingAllocator.hpp deleted file mode 100644 index 75495944e8ae3..0000000000000 --- a/src/hotspot/share/gc/x/xForwardingAllocator.hpp +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
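The XForwarding code removed above sizes each page's forwarding table to a power of two at roughly a 50% load factor, resolves collisions by linear probing, and publishes each entry at most once with a CAS, so lookups may safely race with inserts. The following is a minimal standalone sketch of that pattern, not the HotSpot code: it uses std::atomic and std::bit_ceil, borrows the removed hash-mixing steps as a placeholder, and packs a simplified 32/32-bit entry (with 0 reserved to mean "empty") instead of the real 1/45/18-bit XForwardingEntry. All names here are invented for illustration.

#include <atomic>
#include <bit>       // std::bit_ceil (C++20)
#include <cstdint>
#include <vector>

// Simplified forwarding entry: high 32 bits = from-index, low 32 bits = to-offset.
// The all-zero value is reserved to mean "empty slot".
struct Entry {
  uint64_t bits = 0;
  bool     populated()  const { return bits != 0; }
  uint32_t from_index() const { return uint32_t(bits >> 32); }
  uint32_t to_offset()  const { return uint32_t(bits); }
};

class ForwardingSketch {
  std::vector<std::atomic<uint64_t>> _table;

  // Placeholder mixing function (same steps as the removed XHash mix).
  static size_t mix(uint32_t key) {
    key = ~key + (key << 15);
    key ^= key >> 12;  key += key << 2;
    key ^= key >> 4;   key *= 2057;
    key ^= key >> 16;
    return key;
  }

public:
  // Sized to 2x the live objects, rounded up to a power of two (50% load factor),
  // so the probe loops below always find an empty slot.
  explicit ForwardingSketch(size_t live_objects)
    : _table(std::bit_ceil(live_objects * 2)) {}

  // Linear probing; each slot is written at most once (empty -> populated),
  // so an acquire load is enough to race safely with the publishing CAS.
  Entry find(uint32_t from_index) const {
    const size_t mask = _table.size() - 1;
    for (size_t i = mix(from_index) & mask; ; i = (i + 1) & mask) {
      Entry e{_table[i].load(std::memory_order_acquire)};
      if (!e.populated() || e.from_index() == from_index) {
        return e;  // populated match, or empty slot meaning "not forwarded"
      }
    }
  }

  // Returns the winning to_offset: ours on success, or the earlier value if
  // another thread already inserted a forwarding for the same from_index.
  uint32_t insert(uint32_t from_index, uint32_t to_offset) {
    const uint64_t new_bits = (uint64_t(from_index) << 32) | to_offset;
    const size_t mask = _table.size() - 1;
    for (size_t i = mix(from_index) & mask; ; i = (i + 1) & mask) {
      uint64_t expected = 0;  // empty
      if (_table[i].compare_exchange_strong(expected, new_bits)) {
        return to_offset;     // we published the entry
      }
      Entry e{expected};
      if (e.populated() && e.from_index() == from_index) {
        return e.to_offset(); // lost the race, reuse the winner's copy
      }
      // Slot holds a different from_index: keep probing.
    }
  }
};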
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XFORWARDINGALLOCATOR_HPP -#define SHARE_GC_X_XFORWARDINGALLOCATOR_HPP - -#include "utilities/globalDefinitions.hpp" - -class XForwardingAllocator { -private: - char* _start; - char* _end; - char* _top; - -public: - XForwardingAllocator(); - ~XForwardingAllocator(); - - void reset(size_t size); - size_t size() const; - bool is_full() const; - - void* alloc(size_t size); -}; - -#endif // SHARE_GC_X_XFORWARDINGALLOCATOR_HPP diff --git a/src/hotspot/share/gc/x/xForwardingAllocator.inline.hpp b/src/hotspot/share/gc/x/xForwardingAllocator.inline.hpp deleted file mode 100644 index e70986f52062e..0000000000000 --- a/src/hotspot/share/gc/x/xForwardingAllocator.inline.hpp +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XFORWARDINGALLOCATOR_INLINE_HPP -#define SHARE_GC_X_XFORWARDINGALLOCATOR_INLINE_HPP - -#include "gc/x/xForwardingAllocator.hpp" - -#include "runtime/atomic.hpp" -#include "utilities/debug.hpp" - -inline size_t XForwardingAllocator::size() const { - return _end - _start; -} - -inline bool XForwardingAllocator::is_full() const { - return _top == _end; -} - -inline void* XForwardingAllocator::alloc(size_t size) { - char* const addr = Atomic::fetch_then_add(&_top, size); - assert(addr + size <= _end, "Allocation should never fail"); - return addr; -} - -#endif // SHARE_GC_X_XFORWARDINGALLOCATOR_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xForwardingEntry.hpp b/src/hotspot/share/gc/x/xForwardingEntry.hpp deleted file mode 100644 index 3f8846abbaa2b..0000000000000 --- a/src/hotspot/share/gc/x/xForwardingEntry.hpp +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. 
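XForwardingAllocator above is a bump-pointer arena: reset() sizes the backing buffer up front and alloc() claims disjoint ranges with an atomic fetch-then-add, asserting rather than handling exhaustion. A rough equivalent using std::atomic and std::vector (names and sizes are made up for illustration, not taken from HotSpot):

#include <atomic>
#include <cassert>
#include <cstddef>
#include <vector>

// Arena with a single atomic bump pointer. Like the removed allocator, it
// assumes the arena was sized up front so allocation can never fail.
class BumpAllocatorSketch {
  std::vector<char>   _buffer;
  std::atomic<size_t> _top{0};

public:
  explicit BumpAllocatorSketch(size_t size) : _buffer(size) {}

  bool is_full() const {
    return _top.load(std::memory_order_relaxed) == _buffer.size();
  }

  void* alloc(size_t size) {
    // fetch_add hands out disjoint ranges even under concurrent callers.
    const size_t offset = _top.fetch_add(size, std::memory_order_relaxed);
    assert(offset + size <= _buffer.size() && "arena sized too small");
    return _buffer.data() + offset;
  }
};

int main() {
  BumpAllocatorSketch arena(1024);
  void* a = arena.alloc(128);
  void* b = arena.alloc(128);
  assert(a != b);  // distinct, non-overlapping ranges
  return 0;
}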
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XFORWARDINGENTRY_HPP -#define SHARE_GC_X_XFORWARDINGENTRY_HPP - -#include "gc/x/xBitField.hpp" -#include "memory/allocation.hpp" -#include "metaprogramming/primitiveConversions.hpp" - -#include - -class VMStructs; - -// -// Forwarding entry layout -// ----------------------- -// -// 6 4 4 -// 3 6 5 1 0 -// +--------------------+--------------------------------------------------+-+ -// |11111111 11111111 11|111111 11111111 11111111 11111111 11111111 1111111|1| -// +--------------------+--------------------------------------------------+-+ -// | | | -// | | 0-0 Populated Flag (1-bits) * -// | | -// | * 45-1 To Object Offset (45-bits) -// | -// * 63-46 From Object Index (18-bits) -// - -class XForwardingEntry { - friend struct PrimitiveConversions::Translate; - friend class ::VMStructs; - -private: - typedef XBitField field_populated; - typedef XBitField field_to_offset; - typedef XBitField field_from_index; - - uint64_t _entry; - -public: - XForwardingEntry() : - _entry(0) {} - - XForwardingEntry(size_t from_index, size_t to_offset) : - _entry(field_populated::encode(true) | - field_to_offset::encode(to_offset) | - field_from_index::encode(from_index)) {} - - bool populated() const { - return field_populated::decode(_entry); - } - - size_t to_offset() const { - return field_to_offset::decode(_entry); - } - - size_t from_index() const { - return field_from_index::decode(_entry); - } -}; - -// Needed to allow atomic operations on XForwardingEntry -template <> -struct PrimitiveConversions::Translate : public std::true_type { - typedef XForwardingEntry Value; - typedef uint64_t Decayed; - - static Decayed decay(Value v) { - return v._entry; - } - - static Value recover(Decayed d) { - XForwardingEntry entry; - entry._entry = d; - return entry; - } -}; - -#endif // SHARE_GC_X_XFORWARDINGENTRY_HPP diff --git a/src/hotspot/share/gc/x/xForwardingTable.hpp b/src/hotspot/share/gc/x/xForwardingTable.hpp deleted file mode 100644 index 1f110292be516..0000000000000 --- a/src/hotspot/share/gc/x/xForwardingTable.hpp +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
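The layout comment above packs three fields into one 64-bit word: a populated flag at bit 0, a 45-bit to-offset at bits 1 to 45, and an 18-bit from-index at bits 46 to 63. As a sketch of what those XBitField typedefs amount to, plain shift-and-mask arithmetic (illustration only, not the HotSpot XBitField implementation) looks like this:

#include <cassert>
#include <cstdint>

// Field positions taken from the layout comment above.
constexpr int      kPopulatedShift = 0;
constexpr int      kToOffsetShift  = 1;
constexpr int      kFromIndexShift = 46;
constexpr uint64_t kToOffsetMask   = (uint64_t(1) << 45) - 1;  // 45 bits
constexpr uint64_t kFromIndexMask  = (uint64_t(1) << 18) - 1;  // 18 bits

constexpr uint64_t encode_entry(uint64_t from_index, uint64_t to_offset) {
  return (uint64_t(1) << kPopulatedShift) |
         ((to_offset  & kToOffsetMask)  << kToOffsetShift) |
         ((from_index & kFromIndexMask) << kFromIndexShift);
}

constexpr bool     populated(uint64_t entry)  { return (entry >> kPopulatedShift) & 1; }
constexpr uint64_t to_offset(uint64_t entry)  { return (entry >> kToOffsetShift)  & kToOffsetMask; }
constexpr uint64_t from_index(uint64_t entry) { return (entry >> kFromIndexShift) & kFromIndexMask; }

int main() {
  const uint64_t e = encode_entry(/*from_index*/ 1234, /*to_offset*/ 0x1fffffffffff - 7);
  assert(populated(e));                       // a zero word is the "empty" entry
  assert(from_index(e) == 1234);
  assert(to_offset(e) == 0x1fffffffffff - 7);
  return 0;
}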
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XFORWARDINGTABLE_HPP -#define SHARE_GC_X_XFORWARDINGTABLE_HPP - -#include "gc/x/xGranuleMap.hpp" - -class VMStructs; -class XForwarding; - -class XForwardingTable { - friend class ::VMStructs; - -private: - XGranuleMap _map; - -public: - XForwardingTable(); - - XForwarding* get(uintptr_t addr) const; - - void insert(XForwarding* forwarding); - void remove(XForwarding* forwarding); -}; - -#endif // SHARE_GC_X_XFORWARDINGTABLE_HPP diff --git a/src/hotspot/share/gc/x/xForwardingTable.inline.hpp b/src/hotspot/share/gc/x/xForwardingTable.inline.hpp deleted file mode 100644 index b65b68da4e234..0000000000000 --- a/src/hotspot/share/gc/x/xForwardingTable.inline.hpp +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XFORWARDINGTABLE_INLINE_HPP -#define SHARE_GC_X_XFORWARDINGTABLE_INLINE_HPP - -#include "gc/x/xForwardingTable.hpp" - -#include "gc/x/xAddress.inline.hpp" -#include "gc/x/xForwarding.inline.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xGranuleMap.inline.hpp" -#include "utilities/debug.hpp" - -inline XForwardingTable::XForwardingTable() : - _map(XAddressOffsetMax) {} - -inline XForwarding* XForwardingTable::get(uintptr_t addr) const { - assert(!XAddress::is_null(addr), "Invalid address"); - return _map.get(XAddress::offset(addr)); -} - -inline void XForwardingTable::insert(XForwarding* forwarding) { - const uintptr_t offset = forwarding->start(); - const size_t size = forwarding->size(); - - assert(_map.get(offset) == nullptr, "Invalid entry"); - _map.put(offset, size, forwarding); -} - -inline void XForwardingTable::remove(XForwarding* forwarding) { - const uintptr_t offset = forwarding->start(); - const size_t size = forwarding->size(); - - assert(_map.get(offset) == forwarding, "Invalid entry"); - _map.put(offset, size, nullptr); -} - -#endif // SHARE_GC_X_XFORWARDINGTABLE_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xFuture.hpp b/src/hotspot/share/gc/x/xFuture.hpp deleted file mode 100644 index 931f4b58f123c..0000000000000 --- a/src/hotspot/share/gc/x/xFuture.hpp +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XFUTURE_HPP -#define SHARE_GC_X_XFUTURE_HPP - -#include "memory/allocation.hpp" -#include "runtime/semaphore.hpp" - -template -class XFuture { -private: - Semaphore _sema; - T _value; - -public: - XFuture(); - - void set(T value); - T get(); -}; - -#endif // SHARE_GC_X_XFUTURE_HPP diff --git a/src/hotspot/share/gc/x/xFuture.inline.hpp b/src/hotspot/share/gc/x/xFuture.inline.hpp deleted file mode 100644 index d3dba3b7151d7..0000000000000 --- a/src/hotspot/share/gc/x/xFuture.inline.hpp +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XFUTURE_INLINE_HPP -#define SHARE_GC_X_XFUTURE_INLINE_HPP - -#include "gc/x/xFuture.hpp" - -#include "runtime/javaThread.hpp" -#include "runtime/semaphore.inline.hpp" - -template -inline XFuture::XFuture() : - _value() {} - -template -inline void XFuture::set(T value) { - // Set value - _value = value; - - // Notify waiter - _sema.signal(); -} - -template -inline T XFuture::get() { - // Wait for notification - Thread* const thread = Thread::current(); - if (thread->is_Java_thread()) { - _sema.wait_with_safepoint_check(JavaThread::cast(thread)); - } else { - _sema.wait(); - } - - // Return value - return _value; -} - -#endif // SHARE_GC_X_XFUTURE_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xGlobals.cpp b/src/hotspot/share/gc/x/xGlobals.cpp deleted file mode 100644 index b247565bc011d..0000000000000 --- a/src/hotspot/share/gc/x/xGlobals.cpp +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
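XFuture above is a one-shot rendezvous: set() stores the value and signals a semaphore, and get() blocks on that semaphore (with a safepoint check when the caller is a Java thread) before reading the value. A plain C++ sketch of the same idea, without the JavaThread and safepoint handling (class and variable names are invented), might look like:

#include <cstdio>
#include <semaphore>
#include <thread>

// One-shot future: set() publishes a value once, get() blocks until it is there.
// The semaphore release/acquire pair orders the write of _value before the read.
template <typename T>
class OneShotFuture {
  std::binary_semaphore _sema{0};
  T                     _value{};

public:
  void set(T value) {
    _value = value;   // store the result...
    _sema.release();  // ...then wake the waiter
  }

  T get() {
    _sema.acquire();  // block until set() has run
    return _value;
  }
};

int main() {
  OneShotFuture<int> future;
  std::thread worker([&future] { future.set(42); });
  std::printf("got %d\n", future.get());  // prints "got 42"
  worker.join();
  return 0;
}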
- */ - -#include "precompiled.hpp" -#include "gc/x/xGlobals.hpp" - -uint32_t XGlobalPhase = XPhaseRelocate; -uint32_t XGlobalSeqNum = 1; - -size_t XPageSizeMediumShift; -size_t XPageSizeMedium; - -size_t XObjectSizeLimitMedium; - -const int& XObjectAlignmentSmallShift = LogMinObjAlignmentInBytes; -int XObjectAlignmentMediumShift; - -const int& XObjectAlignmentSmall = MinObjAlignmentInBytes; -int XObjectAlignmentMedium; - -uintptr_t XAddressGoodMask; -uintptr_t XAddressBadMask; -uintptr_t XAddressWeakBadMask; - -static uint32_t* XAddressCalculateBadMaskHighOrderBitsAddr() { - const uintptr_t addr = reinterpret_cast(&XAddressBadMask); - return reinterpret_cast(addr + XAddressBadMaskHighOrderBitsOffset); -} - -uint32_t* XAddressBadMaskHighOrderBitsAddr = XAddressCalculateBadMaskHighOrderBitsAddr(); - -size_t XAddressOffsetBits; -uintptr_t XAddressOffsetMask; -size_t XAddressOffsetMax; - -size_t XAddressMetadataShift; -uintptr_t XAddressMetadataMask; - -uintptr_t XAddressMetadataMarked; -uintptr_t XAddressMetadataMarked0; -uintptr_t XAddressMetadataMarked1; -uintptr_t XAddressMetadataRemapped; -uintptr_t XAddressMetadataFinalizable; - -const char* XGlobalPhaseToString() { - switch (XGlobalPhase) { - case XPhaseMark: - return "Mark"; - - case XPhaseMarkCompleted: - return "MarkCompleted"; - - case XPhaseRelocate: - return "Relocate"; - - default: - return "Unknown"; - } -} diff --git a/src/hotspot/share/gc/x/xGlobals.hpp b/src/hotspot/share/gc/x/xGlobals.hpp deleted file mode 100644 index 662a502a79f86..0000000000000 --- a/src/hotspot/share/gc/x/xGlobals.hpp +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XGLOBALS_HPP -#define SHARE_GC_X_XGLOBALS_HPP - -#include "utilities/globalDefinitions.hpp" -#include "utilities/macros.hpp" -#include CPU_HEADER(gc/x/xGlobals) - -// Collector name -const char* const XName = "The Z Garbage Collector"; - -// Global phase state -extern uint32_t XGlobalPhase; -const uint32_t XPhaseMark = 0; -const uint32_t XPhaseMarkCompleted = 1; -const uint32_t XPhaseRelocate = 2; -const char* XGlobalPhaseToString(); - -// Global sequence number -extern uint32_t XGlobalSeqNum; - -// Granule shift/size -const size_t XGranuleSizeShift = 21; // 2MB -const size_t XGranuleSize = (size_t)1 << XGranuleSizeShift; - -// Number of heap views -const size_t XHeapViews = XPlatformHeapViews; - -// Virtual memory to physical memory ratio -const size_t XVirtualToPhysicalRatio = 16; // 16:1 - -// Page types -const uint8_t XPageTypeSmall = 0; -const uint8_t XPageTypeMedium = 1; -const uint8_t XPageTypeLarge = 2; - -// Page size shifts -const size_t XPageSizeSmallShift = XGranuleSizeShift; -extern size_t XPageSizeMediumShift; - -// Page sizes -const size_t XPageSizeSmall = (size_t)1 << XPageSizeSmallShift; -extern size_t XPageSizeMedium; - -// Object size limits -const size_t XObjectSizeLimitSmall = XPageSizeSmall / 8; // 12.5% max waste -extern size_t XObjectSizeLimitMedium; - -// Object alignment shifts -extern const int& XObjectAlignmentSmallShift; -extern int XObjectAlignmentMediumShift; -const int XObjectAlignmentLargeShift = XGranuleSizeShift; - -// Object alignments -extern const int& XObjectAlignmentSmall; -extern int XObjectAlignmentMedium; -const int XObjectAlignmentLarge = 1 << XObjectAlignmentLargeShift; - -// -// Good/Bad mask states -// -------------------- -// -// GoodMask BadMask WeakGoodMask WeakBadMask -// -------------------------------------------------------------- -// Marked0 001 110 101 010 -// Marked1 010 101 110 001 -// Remapped 100 011 100 011 -// - -// Good/bad masks -extern uintptr_t XAddressGoodMask; -extern uintptr_t XAddressBadMask; -extern uintptr_t XAddressWeakBadMask; - -// The bad mask is 64 bit. Its high order 32 bits contain all possible value combinations -// that this mask will have. Therefore, the memory where the 32 high order bits are stored, -// can be used as a 32 bit GC epoch counter, that has a different bit pattern every time -// the bad mask is flipped. This provides a pointer to said 32 bits. 
-extern uint32_t* XAddressBadMaskHighOrderBitsAddr; -const int XAddressBadMaskHighOrderBitsOffset = LITTLE_ENDIAN_ONLY(4) BIG_ENDIAN_ONLY(0); - -// Pointer part of address -extern size_t XAddressOffsetBits; -const size_t XAddressOffsetShift = 0; -extern uintptr_t XAddressOffsetMask; -extern size_t XAddressOffsetMax; - -// Metadata part of address -const size_t XAddressMetadataBits = 4; -extern size_t XAddressMetadataShift; -extern uintptr_t XAddressMetadataMask; - -// Metadata types -extern uintptr_t XAddressMetadataMarked; -extern uintptr_t XAddressMetadataMarked0; -extern uintptr_t XAddressMetadataMarked1; -extern uintptr_t XAddressMetadataRemapped; -extern uintptr_t XAddressMetadataFinalizable; - -// Cache line size -const size_t XCacheLineSize = XPlatformCacheLineSize; -#define XCACHE_ALIGNED ATTRIBUTE_ALIGNED(XCacheLineSize) - -// Mark stack space -extern uintptr_t XMarkStackSpaceStart; -const size_t XMarkStackSpaceExpandSize = (size_t)1 << 25; // 32M - -// Mark stack and magazine sizes -const size_t XMarkStackSizeShift = 11; // 2K -const size_t XMarkStackSize = (size_t)1 << XMarkStackSizeShift; -const size_t XMarkStackHeaderSize = (size_t)1 << 4; // 16B -const size_t XMarkStackSlots = (XMarkStackSize - XMarkStackHeaderSize) / sizeof(uintptr_t); -const size_t XMarkStackMagazineSize = (size_t)1 << 15; // 32K -const size_t XMarkStackMagazineSlots = (XMarkStackMagazineSize / XMarkStackSize) - 1; - -// Mark stripe size -const size_t XMarkStripeShift = XGranuleSizeShift; - -// Max number of mark stripes -const size_t XMarkStripesMax = 16; // Must be a power of two - -// Mark cache size -const size_t XMarkCacheSize = 1024; // Must be a power of two - -// Partial array minimum size -const size_t XMarkPartialArrayMinSizeShift = 12; // 4K -const size_t XMarkPartialArrayMinSize = (size_t)1 << XMarkPartialArrayMinSizeShift; - -// Max number of proactive/terminate flush attempts -const size_t XMarkProactiveFlushMax = 10; -const size_t XMarkTerminateFlushMax = 3; - -// Try complete mark timeout -const uint64_t XMarkCompleteTimeout = 200; // us - -#endif // SHARE_GC_X_XGLOBALS_HPP diff --git a/src/hotspot/share/gc/x/xGranuleMap.hpp b/src/hotspot/share/gc/x/xGranuleMap.hpp deleted file mode 100644 index a9447e1469c6c..0000000000000 --- a/src/hotspot/share/gc/x/xGranuleMap.hpp +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
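The epoch trick described above depends on all variable bits of the 64-bit bad mask sitting in its upper half, so a 32-bit load at XAddressBadMaskHighOrderBitsOffset (4 on little-endian, 0 on big-endian) yields a value that changes on every flip. A small sketch of that, with an assumed metadata shift of 44 chosen purely for illustration (the real shift depends on platform and heap size):

#include <bit>
#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Illustrative bit positions only; what matters is that all of the mask's
// variable bits sit in the upper 32 bits of the 64-bit word.
constexpr int      kMetadataShift    = 44;
constexpr uint64_t kMetadataMarked0  = uint64_t(1) << (kMetadataShift + 0);
constexpr uint64_t kMetadataMarked1  = uint64_t(1) << (kMetadataShift + 1);
constexpr uint64_t kMetadataRemapped = uint64_t(1) << (kMetadataShift + 2);

// Byte offset of the high-order 32 bits of a uint64_t in memory: 4 on
// little-endian, 0 on big-endian (mirroring XAddressBadMaskHighOrderBitsOffset).
constexpr int kHighOrderBitsOffset =
    (std::endian::native == std::endian::little) ? 4 : 0;

// Read the upper half of the bad mask as a 32-bit "epoch" value.
static uint32_t high_order_bits(const uint64_t* bad_mask) {
  uint32_t bits;
  std::memcpy(&bits, reinterpret_cast<const char*>(bad_mask) + kHighOrderBitsOffset,
              sizeof(bits));
  return bits;
}

int main() {
  uint64_t bad_mask = kMetadataMarked1 | kMetadataRemapped;   // Marked0 is the good state
  std::printf("epoch: 0x%08" PRIx32 "\n", high_order_bits(&bad_mask));

  bad_mask = kMetadataMarked0 | kMetadataRemapped;            // flip: Marked1 is good
  std::printf("epoch: 0x%08" PRIx32 "\n", high_order_bits(&bad_mask));

  bad_mask = kMetadataMarked0 | kMetadataMarked1;             // flip: Remapped is good
  std::printf("epoch: 0x%08" PRIx32 "\n", high_order_bits(&bad_mask));
  return 0;
}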
- */ - -#ifndef SHARE_GC_X_XGRANULEMAP_HPP -#define SHARE_GC_X_XGRANULEMAP_HPP - -#include "gc/x/xArray.hpp" -#include "memory/allocation.hpp" - -class VMStructs; - -template -class XGranuleMap { - friend class ::VMStructs; - template friend class XGranuleMapIterator; - -private: - const size_t _size; - T* const _map; - - size_t index_for_offset(uintptr_t offset) const; - -public: - XGranuleMap(size_t max_offset); - ~XGranuleMap(); - - T get(uintptr_t offset) const; - void put(uintptr_t offset, T value); - void put(uintptr_t offset, size_t size, T value); - - T get_acquire(uintptr_t offset) const; - void release_put(uintptr_t offset, T value); -}; - -template -class XGranuleMapIterator : public XArrayIteratorImpl { -public: - XGranuleMapIterator(const XGranuleMap* granule_map); -}; - -#endif // SHARE_GC_X_XGRANULEMAP_HPP diff --git a/src/hotspot/share/gc/x/xGranuleMap.inline.hpp b/src/hotspot/share/gc/x/xGranuleMap.inline.hpp deleted file mode 100644 index 95ef5ee2b2d5b..0000000000000 --- a/src/hotspot/share/gc/x/xGranuleMap.inline.hpp +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XGRANULEMAP_INLINE_HPP -#define SHARE_GC_X_XGRANULEMAP_INLINE_HPP - -#include "gc/x/xGranuleMap.hpp" - -#include "gc/x/xArray.inline.hpp" -#include "gc/x/xGlobals.hpp" -#include "memory/allocation.inline.hpp" -#include "runtime/atomic.hpp" -#include "utilities/align.hpp" -#include "utilities/debug.hpp" - -template -inline XGranuleMap::XGranuleMap(size_t max_offset) : - _size(max_offset >> XGranuleSizeShift), - _map(MmapArrayAllocator::allocate(_size, mtGC)) { - assert(is_aligned(max_offset, XGranuleSize), "Misaligned"); -} - -template -inline XGranuleMap::~XGranuleMap() { - MmapArrayAllocator::free(_map, _size); -} - -template -inline size_t XGranuleMap::index_for_offset(uintptr_t offset) const { - const size_t index = offset >> XGranuleSizeShift; - assert(index < _size, "Invalid index"); - return index; -} - -template -inline T XGranuleMap::get(uintptr_t offset) const { - const size_t index = index_for_offset(offset); - return _map[index]; -} - -template -inline void XGranuleMap::put(uintptr_t offset, T value) { - const size_t index = index_for_offset(offset); - _map[index] = value; -} - -template -inline void XGranuleMap::put(uintptr_t offset, size_t size, T value) { - assert(is_aligned(size, XGranuleSize), "Misaligned"); - - const size_t start_index = index_for_offset(offset); - const size_t end_index = start_index + (size >> XGranuleSizeShift); - for (size_t index = start_index; index < end_index; index++) { - _map[index] = value; - } -} - -template -inline T XGranuleMap::get_acquire(uintptr_t offset) const { - const size_t index = index_for_offset(offset); - return Atomic::load_acquire(_map + index); -} - -template -inline void XGranuleMap::release_put(uintptr_t offset, T value) { - const size_t index = index_for_offset(offset); - Atomic::release_store(_map + index, value); -} - -template -inline XGranuleMapIterator::XGranuleMapIterator(const XGranuleMap* granule_map) : - XArrayIteratorImpl(granule_map->_map, granule_map->_size) {} - -#endif // SHARE_GC_X_XGRANULEMAP_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xHash.hpp b/src/hotspot/share/gc/x/xHash.hpp deleted file mode 100644 index 253f4d231c1c3..0000000000000 --- a/src/hotspot/share/gc/x/xHash.hpp +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
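The granule map removed above is a flat array with one slot per 2MB granule: an offset becomes an index by shifting right by XGranuleSizeShift, and the ranged put stores the same value for every granule the range covers. A heap-allocated sketch of that structure (std::vector standing in for MmapArrayAllocator, names invented for illustration):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr size_t kGranuleSizeShift = 21;                      // 2MB granules
constexpr size_t kGranuleSize      = size_t(1) << kGranuleSizeShift;

// One value per granule, indexed by (offset >> kGranuleSizeShift).
template <typename T>
class GranuleMapSketch {
  std::vector<T> _map;

  size_t index_for_offset(uintptr_t offset) const {
    const size_t index = offset >> kGranuleSizeShift;
    assert(index < _map.size() && "offset outside the mapped range");
    return index;
  }

public:
  explicit GranuleMapSketch(size_t max_offset)
    : _map(max_offset >> kGranuleSizeShift) {
    assert(max_offset % kGranuleSize == 0 && "max_offset must be granule-aligned");
  }

  T    get(uintptr_t offset) const    { return _map[index_for_offset(offset)]; }
  void put(uintptr_t offset, T value) { _map[index_for_offset(offset)] = value; }

  // Ranged put: cover every granule in [offset, offset + size).
  void put(uintptr_t offset, size_t size, T value) {
    assert(size % kGranuleSize == 0 && "size must be granule-aligned");
    const size_t start = index_for_offset(offset);
    const size_t end   = start + (size >> kGranuleSizeShift);
    for (size_t i = start; i < end; i++) {
      _map[i] = value;
    }
  }
};

int main() {
  GranuleMapSketch<int> map(/*max_offset*/ 64 * kGranuleSize);
  map.put(/*offset*/ 4 * kGranuleSize, /*size*/ 3 * kGranuleSize, /*value*/ 7);
  assert(map.get(5 * kGranuleSize) == 7);        // any offset inside the range hits the value
  assert(map.get(5 * kGranuleSize + 100) == 7);
  assert(map.get(8 * kGranuleSize) == 0);        // outside the range stays default-initialized
  return 0;
}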
- */ - -#ifndef SHARE_GC_X_XHASH_HPP -#define SHARE_GC_X_XHASH_HPP - -#include "memory/allStatic.hpp" -#include "utilities/globalDefinitions.hpp" - -class XHash : public AllStatic { -public: - static uint32_t uint32_to_uint32(uint32_t key); - static uint32_t address_to_uint32(uintptr_t key); -}; - -#endif // SHARE_GC_X_XHASH_HPP diff --git a/src/hotspot/share/gc/x/xHash.inline.hpp b/src/hotspot/share/gc/x/xHash.inline.hpp deleted file mode 100644 index 5ff5f540821e0..0000000000000 --- a/src/hotspot/share/gc/x/xHash.inline.hpp +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -/* - * This file is available under and governed by the GNU General Public - * License version 2 only, as published by the Free Software Foundation. - * However, the following notice accompanied the original version of this - * file: - * - * (C) 2009 by Remo Dentato (rdentato@gmail.com) - * - * - * Redistribution and use in source and binary forms, with or without modification, - * are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR - * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - * http://opensource.org/licenses/bsd-license.php - */ - -#ifndef SHARE_GC_X_XHASH_INLINE_HPP -#define SHARE_GC_X_XHASH_INLINE_HPP - -#include "gc/x/xHash.hpp" - -#include "gc/x/xAddress.inline.hpp" - -inline uint32_t XHash::uint32_to_uint32(uint32_t key) { - key = ~key + (key << 15); - key = key ^ (key >> 12); - key = key + (key << 2); - key = key ^ (key >> 4); - key = key * 2057; - key = key ^ (key >> 16); - return key; -} - -inline uint32_t XHash::address_to_uint32(uintptr_t key) { - return uint32_to_uint32((uint32_t)(key >> 3)); -} - -#endif // SHARE_GC_X_XHASH_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xHeap.cpp b/src/hotspot/share/gc/x/xHeap.cpp deleted file mode 100644 index 3872db785f3bf..0000000000000 --- a/src/hotspot/share/gc/x/xHeap.cpp +++ /dev/null @@ -1,541 +0,0 @@ -/* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "classfile/classLoaderDataGraph.hpp" -#include "gc/shared/gc_globals.hpp" -#include "gc/shared/classUnloadingContext.hpp" -#include "gc/shared/locationPrinter.hpp" -#include "gc/shared/tlab_globals.hpp" -#include "gc/x/xAddress.inline.hpp" -#include "gc/x/xArray.inline.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xHeap.inline.hpp" -#include "gc/x/xHeapIterator.hpp" -#include "gc/x/xHeuristics.hpp" -#include "gc/x/xMark.inline.hpp" -#include "gc/x/xPage.inline.hpp" -#include "gc/x/xPageTable.inline.hpp" -#include "gc/x/xRelocationSet.inline.hpp" -#include "gc/x/xRelocationSetSelector.inline.hpp" -#include "gc/x/xResurrection.hpp" -#include "gc/x/xStat.hpp" -#include "gc/x/xThread.inline.hpp" -#include "gc/x/xVerify.hpp" -#include "gc/x/xWorkers.hpp" -#include "logging/log.hpp" -#include "memory/iterator.hpp" -#include "memory/metaspaceUtils.hpp" -#include "memory/resourceArea.hpp" -#include "prims/jvmtiTagMap.hpp" -#include "runtime/handshake.hpp" -#include "runtime/javaThread.hpp" -#include "runtime/safepoint.hpp" -#include "utilities/debug.hpp" - -static const XStatCounter XCounterUndoPageAllocation("Memory", "Undo Page Allocation", XStatUnitOpsPerSecond); -static const XStatCounter XCounterOutOfMemory("Memory", "Out Of Memory", XStatUnitOpsPerSecond); - -XHeap* XHeap::_heap = nullptr; - -XHeap::XHeap() : - _workers(), - _object_allocator(), - _page_allocator(&_workers, MinHeapSize, InitialHeapSize, MaxHeapSize), - _page_table(), - _forwarding_table(), - _mark(&_workers, &_page_table), - _reference_processor(&_workers), - _weak_roots_processor(&_workers), - _relocate(&_workers), - _relocation_set(&_workers), - _unload(&_workers), - _serviceability(min_capacity(), max_capacity()) { - // Install global heap instance - assert(_heap == nullptr, "Already initialized"); - _heap = this; - - // Update statistics - XStatHeap::set_at_initialize(_page_allocator.stats()); -} - -bool XHeap::is_initialized() const { - return _page_allocator.is_initialized() && _mark.is_initialized(); -} - -size_t XHeap::min_capacity() const { - return _page_allocator.min_capacity(); -} - -size_t XHeap::max_capacity() const { - return _page_allocator.max_capacity(); -} - -size_t XHeap::soft_max_capacity() const { - return _page_allocator.soft_max_capacity(); -} - -size_t XHeap::capacity() const { - return _page_allocator.capacity(); -} - -size_t XHeap::used() const { - return _page_allocator.used(); -} - -size_t XHeap::unused() const { - return _page_allocator.unused(); -} - -size_t XHeap::tlab_capacity() const { - return capacity(); -} - -size_t XHeap::tlab_used() const { - return _object_allocator.used(); -} - -size_t XHeap::max_tlab_size() const { - return XObjectSizeLimitSmall; -} - -size_t XHeap::unsafe_max_tlab_alloc() const { - size_t size = _object_allocator.remaining(); - - if (size < MinTLABSize) { - // The remaining space in the allocator is not enough to - // fit the smallest possible TLAB. This means that the next - // TLAB allocation will force the allocator to get a new - // backing page anyway, which in turn means that we can then - // fit the largest possible TLAB. - size = max_tlab_size(); - } - - return MIN2(size, max_tlab_size()); -} - -bool XHeap::is_in(uintptr_t addr) const { - // An address is considered to be "in the heap" if it points into - // the allocated part of a page, regardless of which heap view is - // used. 
Note that an address with the finalizable metadata bit set - // is not pointing into a heap view, and therefore not considered - // to be "in the heap". - - if (XAddress::is_in(addr)) { - const XPage* const page = _page_table.get(addr); - if (page != nullptr) { - return page->is_in(addr); - } - } - - return false; -} - -uint XHeap::active_workers() const { - return _workers.active_workers(); -} - -void XHeap::set_active_workers(uint nworkers) { - _workers.set_active_workers(nworkers); -} - -void XHeap::threads_do(ThreadClosure* tc) const { - _page_allocator.threads_do(tc); - _workers.threads_do(tc); -} - -void XHeap::out_of_memory() { - ResourceMark rm; - - XStatInc(XCounterOutOfMemory); - log_info(gc)("Out Of Memory (%s)", Thread::current()->name()); -} - -XPage* XHeap::alloc_page(uint8_t type, size_t size, XAllocationFlags flags) { - XPage* const page = _page_allocator.alloc_page(type, size, flags); - if (page != nullptr) { - // Insert page table entry - _page_table.insert(page); - } - - return page; -} - -void XHeap::undo_alloc_page(XPage* page) { - assert(page->is_allocating(), "Invalid page state"); - - XStatInc(XCounterUndoPageAllocation); - log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT, - XThread::id(), XThread::name(), p2i(page), page->size()); - - free_page(page, false /* reclaimed */); -} - -void XHeap::free_page(XPage* page, bool reclaimed) { - // Remove page table entry - _page_table.remove(page); - - // Free page - _page_allocator.free_page(page, reclaimed); -} - -void XHeap::free_pages(const XArray* pages, bool reclaimed) { - // Remove page table entries - XArrayIterator iter(pages); - for (XPage* page; iter.next(&page);) { - _page_table.remove(page); - } - - // Free pages - _page_allocator.free_pages(pages, reclaimed); -} - -void XHeap::flip_to_marked() { - XVerifyViewsFlip flip(&_page_allocator); - XAddress::flip_to_marked(); -} - -void XHeap::flip_to_remapped() { - XVerifyViewsFlip flip(&_page_allocator); - XAddress::flip_to_remapped(); -} - -void XHeap::mark_start() { - assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); - - // Verification - ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_strong); - - if (XHeap::heap()->has_alloc_stalled()) { - // If there are stalled allocations, ensure that regardless of the - // cause of the GC, we have to clear soft references, as we are just - // about to increment the sequence number, and all previous allocations - // will throw if not presented with enough memory. 
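unsafe_max_tlab_alloc() above reports the space remaining in the current allocation page, except when that space cannot fit even a minimum TLAB; in that case the next TLAB allocation will take a fresh backing page anyway, so the maximum TLAB size is reported instead. A toy version of that clamping rule, with invented constants standing in for XObjectSizeLimitSmall and MinTLABSize:

#include <algorithm>
#include <cassert>
#include <cstddef>

// Illustrative values only; the real limits come from the page geometry and flags.
constexpr size_t kMaxTlabSize = 256 * 1024;   // largest TLAB a small page can back
constexpr size_t kMinTlabSize = 2 * 1024;     // smallest TLAB worth handing out

size_t unsafe_max_tlab_alloc(size_t remaining_in_current_page) {
  size_t size = remaining_in_current_page;
  if (size < kMinTlabSize) {
    // Too small to fit even a minimum TLAB: the next TLAB allocation will
    // grab a new backing page, so the full TLAB size is available again.
    size = kMaxTlabSize;
  }
  return std::min(size, kMaxTlabSize);
}

int main() {
  assert(unsafe_max_tlab_alloc(100 * 1024)  == 100 * 1024);   // plenty left: report it
  assert(unsafe_max_tlab_alloc(512)         == kMaxTlabSize); // nearly full page: promise a fresh one
  assert(unsafe_max_tlab_alloc(1024 * 1024) == kMaxTlabSize); // never more than one TLAB's worth
  return 0;
}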
- XHeap::heap()->set_soft_reference_policy(true); - } - - // Flip address view - flip_to_marked(); - - // Retire allocating pages - _object_allocator.retire_pages(); - - // Reset allocated/reclaimed/used statistics - _page_allocator.reset_statistics(); - - // Reset encountered/dropped/enqueued statistics - _reference_processor.reset_statistics(); - - // Enter mark phase - XGlobalPhase = XPhaseMark; - - // Reset marking information - _mark.start(); - - // Update statistics - XStatHeap::set_at_mark_start(_page_allocator.stats()); -} - -void XHeap::mark(bool initial) { - _mark.mark(initial); -} - -void XHeap::mark_flush_and_free(Thread* thread) { - _mark.flush_and_free(thread); -} - -bool XHeap::mark_end() { - assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); - - // Try end marking - if (!_mark.end()) { - // Marking not completed, continue concurrent mark - return false; - } - - // Enter mark completed phase - XGlobalPhase = XPhaseMarkCompleted; - - // Verify after mark - XVerify::after_mark(); - - // Update statistics - XStatHeap::set_at_mark_end(_page_allocator.stats()); - - // Block resurrection of weak/phantom references - XResurrection::block(); - - // Prepare to unload stale metadata and nmethods - _unload.prepare(); - - // Notify JVMTI that some tagmap entry objects may have died. - JvmtiTagMap::set_needs_cleaning(); - - return true; -} - -void XHeap::mark_free() { - _mark.free(); -} - -void XHeap::keep_alive(oop obj) { - XBarrier::keep_alive_barrier_on_oop(obj); -} - -void XHeap::set_soft_reference_policy(bool clear) { - _reference_processor.set_soft_reference_policy(clear); -} - -class XRendezvousClosure : public HandshakeClosure { -public: - XRendezvousClosure() : - HandshakeClosure("XRendezvous") {} - - void do_thread(Thread* thread) {} -}; - -void XHeap::process_non_strong_references() { - // Process Soft/Weak/Final/PhantomReferences - _reference_processor.process_references(); - - // Process weak roots - _weak_roots_processor.process_weak_roots(); - - ClassUnloadingContext ctx(_workers.active_workers(), - true /* unregister_nmethods_during_purge */, - true /* lock_nmethod_free_separately */); - - // Unlink stale metadata and nmethods - _unload.unlink(); - - // Perform a handshake. This is needed 1) to make sure that stale - // metadata and nmethods are no longer observable. And 2), to - // prevent the race where a mutator first loads an oop, which is - // logically null but not yet cleared. Then this oop gets cleared - // by the reference processor and resurrection is unblocked. At - // this point the mutator could see the unblocked state and pass - // this invalid oop through the normal barrier path, which would - // incorrectly try to mark the oop. - XRendezvousClosure cl; - Handshake::execute(&cl); - - // Unblock resurrection of weak/phantom references - XResurrection::unblock(); - - // Purge stale metadata and nmethods that were unlinked - _unload.purge(); - - // Enqueue Soft/Weak/Final/PhantomReferences. Note that this - // must be done after unblocking resurrection. Otherwise the - // Finalizer thread could call Reference.get() on the Finalizers - // that were just enqueued, which would incorrectly return null - // during the resurrection block window, since such referents - // are only Finalizable marked. - _reference_processor.enqueue_references(); - - // Clear old markings claim bits. - // Note: Clearing _claim_strong also clears _claim_finalizable. 
- ClassLoaderDataGraph::clear_claimed_marks(ClassLoaderData::_claim_strong); -} - -void XHeap::free_empty_pages(XRelocationSetSelector* selector, int bulk) { - // Freeing empty pages in bulk is an optimization to avoid grabbing - // the page allocator lock, and trying to satisfy stalled allocations - // too frequently. - if (selector->should_free_empty_pages(bulk)) { - free_pages(selector->empty_pages(), true /* reclaimed */); - selector->clear_empty_pages(); - } -} - -void XHeap::select_relocation_set() { - // Do not allow pages to be deleted - _page_allocator.enable_deferred_delete(); - - // Register relocatable pages with selector - XRelocationSetSelector selector; - XPageTableIterator pt_iter(&_page_table); - for (XPage* page; pt_iter.next(&page);) { - if (!page->is_relocatable()) { - // Not relocatable, don't register - continue; - } - - if (page->is_marked()) { - // Register live page - selector.register_live_page(page); - } else { - // Register empty page - selector.register_empty_page(page); - - // Reclaim empty pages in bulk - free_empty_pages(&selector, 64 /* bulk */); - } - } - - // Reclaim remaining empty pages - free_empty_pages(&selector, 0 /* bulk */); - - // Allow pages to be deleted - _page_allocator.disable_deferred_delete(); - - // Select relocation set - selector.select(); - - // Install relocation set - _relocation_set.install(&selector); - - // Setup forwarding table - XRelocationSetIterator rs_iter(&_relocation_set); - for (XForwarding* forwarding; rs_iter.next(&forwarding);) { - _forwarding_table.insert(forwarding); - } - - // Update statistics - XStatRelocation::set_at_select_relocation_set(selector.stats()); - XStatHeap::set_at_select_relocation_set(selector.stats()); -} - -void XHeap::reset_relocation_set() { - // Reset forwarding table - XRelocationSetIterator iter(&_relocation_set); - for (XForwarding* forwarding; iter.next(&forwarding);) { - _forwarding_table.remove(forwarding); - } - - // Reset relocation set - _relocation_set.reset(); -} - -void XHeap::relocate_start() { - assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); - - // Finish unloading stale metadata and nmethods - _unload.finish(); - - // Flip address view - flip_to_remapped(); - - // Enter relocate phase - XGlobalPhase = XPhaseRelocate; - - // Update statistics - XStatHeap::set_at_relocate_start(_page_allocator.stats()); -} - -void XHeap::relocate() { - // Relocate relocation set - _relocate.relocate(&_relocation_set); - - // Update statistics - XStatHeap::set_at_relocate_end(_page_allocator.stats(), _object_allocator.relocated()); -} - -bool XHeap::is_allocating(uintptr_t addr) const { - const XPage* const page = _page_table.get(addr); - return page->is_allocating(); -} - -void XHeap::object_iterate(ObjectClosure* cl, bool visit_weaks) { - assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); - XHeapIterator iter(1 /* nworkers */, visit_weaks); - iter.object_iterate(cl, 0 /* worker_id */); -} - -ParallelObjectIteratorImpl* XHeap::parallel_object_iterator(uint nworkers, bool visit_weaks) { - assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); - return new XHeapIterator(nworkers, visit_weaks); -} - -void XHeap::pages_do(XPageClosure* cl) { - XPageTableIterator iter(&_page_table); - for (XPage* page; iter.next(&page);) { - cl->do_page(page); - } - _page_allocator.pages_do(cl); -} - -void XHeap::serviceability_initialize() { - _serviceability.initialize(); -} - -GCMemoryManager* XHeap::serviceability_cycle_memory_manager() { - 
return _serviceability.cycle_memory_manager(); -} - -GCMemoryManager* XHeap::serviceability_pause_memory_manager() { - return _serviceability.pause_memory_manager(); -} - -MemoryPool* XHeap::serviceability_memory_pool() { - return _serviceability.memory_pool(); -} - -XServiceabilityCounters* XHeap::serviceability_counters() { - return _serviceability.counters(); -} - -void XHeap::print_on(outputStream* st) const { - st->print_cr(" ZHeap used " SIZE_FORMAT "M, capacity " SIZE_FORMAT "M, max capacity " SIZE_FORMAT "M", - used() / M, - capacity() / M, - max_capacity() / M); - MetaspaceUtils::print_on(st); -} - -void XHeap::print_extended_on(outputStream* st) const { - print_on(st); - st->cr(); - - // Do not allow pages to be deleted - _page_allocator.enable_deferred_delete(); - - // Print all pages - st->print_cr("ZGC Page Table:"); - XPageTableIterator iter(&_page_table); - for (XPage* page; iter.next(&page);) { - page->print_on(st); - } - - // Allow pages to be deleted - _page_allocator.disable_deferred_delete(); -} - -bool XHeap::print_location(outputStream* st, uintptr_t addr) const { - if (LocationPrinter::is_valid_obj((void*)addr)) { - st->print(PTR_FORMAT " is a %s oop: ", addr, XAddress::is_good(addr) ? "good" : "bad"); - XOop::from_address(addr)->print_on(st); - return true; - } - - return false; -} - -void XHeap::verify() { - // Heap verification can only be done between mark end and - // relocate start. This is the only window where all oop are - // good and the whole heap is in a consistent state. - guarantee(XGlobalPhase == XPhaseMarkCompleted, "Invalid phase"); - - XVerify::after_weak_processing(); -} diff --git a/src/hotspot/share/gc/x/xHeap.hpp b/src/hotspot/share/gc/x/xHeap.hpp deleted file mode 100644 index af2c73180d91a..0000000000000 --- a/src/hotspot/share/gc/x/xHeap.hpp +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
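The relocation-set selection above frees empty pages in batches of 64 so the locked page-allocator free path runs once per batch rather than once per page, with a final bulk of 0 to flush the remainder. A small sketch of that batching pattern (integers stand in for XPage pointers; all names are invented):

#include <cstdio>
#include <vector>

// Batch empty pages and hand them back in bulk, so the expensive free path
// (which takes the page allocator lock) runs once per batch instead of once
// per page.
class EmptyPageBatch {
  std::vector<int> _pages;     // page ids stand in for XPage* in this sketch
  int              _flushes = 0;

  void free_pages_in_bulk() {
    // Placeholder for the locked free_pages(..., true /* reclaimed */) call.
    _flushes++;
    _pages.clear();
  }

public:
  void register_empty_page(int page, size_t bulk) {
    _pages.push_back(page);
    if (_pages.size() >= bulk) { // batch full: release it
      free_pages_in_bulk();
    }
  }

  void flush() {                 // end of the page scan: release the remainder
    if (!_pages.empty()) {
      free_pages_in_bulk();
    }
  }

  int flushes() const { return _flushes; }
};

int main() {
  EmptyPageBatch batch;
  for (int page = 0; page < 150; page++) {
    batch.register_empty_page(page, /*bulk*/ 64);
  }
  batch.flush();
  std::printf("freed 150 pages in %d bulk operations\n", batch.flushes());  // prints 3
  return 0;
}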
- */ - -#ifndef SHARE_GC_X_XHEAP_HPP -#define SHARE_GC_X_XHEAP_HPP - -#include "gc/x/xAllocationFlags.hpp" -#include "gc/x/xArray.hpp" -#include "gc/x/xForwardingTable.hpp" -#include "gc/x/xMark.hpp" -#include "gc/x/xObjectAllocator.hpp" -#include "gc/x/xPageAllocator.hpp" -#include "gc/x/xPageTable.hpp" -#include "gc/x/xReferenceProcessor.hpp" -#include "gc/x/xRelocate.hpp" -#include "gc/x/xRelocationSet.hpp" -#include "gc/x/xWeakRootsProcessor.hpp" -#include "gc/x/xServiceability.hpp" -#include "gc/x/xUnload.hpp" -#include "gc/x/xWorkers.hpp" - -class ThreadClosure; -class VMStructs; -class XPage; -class XRelocationSetSelector; - -class XHeap { - friend class ::VMStructs; - -private: - static XHeap* _heap; - - XWorkers _workers; - XObjectAllocator _object_allocator; - XPageAllocator _page_allocator; - XPageTable _page_table; - XForwardingTable _forwarding_table; - XMark _mark; - XReferenceProcessor _reference_processor; - XWeakRootsProcessor _weak_roots_processor; - XRelocate _relocate; - XRelocationSet _relocation_set; - XUnload _unload; - XServiceability _serviceability; - - void flip_to_marked(); - void flip_to_remapped(); - - void free_empty_pages(XRelocationSetSelector* selector, int bulk); - - void out_of_memory(); - -public: - static XHeap* heap(); - - XHeap(); - - bool is_initialized() const; - - // Heap metrics - size_t min_capacity() const; - size_t max_capacity() const; - size_t soft_max_capacity() const; - size_t capacity() const; - size_t used() const; - size_t unused() const; - - size_t tlab_capacity() const; - size_t tlab_used() const; - size_t max_tlab_size() const; - size_t unsafe_max_tlab_alloc() const; - - bool is_in(uintptr_t addr) const; - - // Threads - uint active_workers() const; - void set_active_workers(uint nworkers); - void threads_do(ThreadClosure* tc) const; - - // Reference processing - ReferenceDiscoverer* reference_discoverer(); - void set_soft_reference_policy(bool clear); - - // Non-strong reference processing - void process_non_strong_references(); - - // Page allocation - XPage* alloc_page(uint8_t type, size_t size, XAllocationFlags flags); - void undo_alloc_page(XPage* page); - void free_page(XPage* page, bool reclaimed); - void free_pages(const XArray* pages, bool reclaimed); - - // Object allocation - uintptr_t alloc_tlab(size_t size); - uintptr_t alloc_object(size_t size); - uintptr_t alloc_object_for_relocation(size_t size); - void undo_alloc_object_for_relocation(uintptr_t addr, size_t size); - bool has_alloc_stalled() const; - void check_out_of_memory(); - - // Marking - bool is_object_live(uintptr_t addr) const; - bool is_object_strongly_live(uintptr_t addr) const; - template void mark_object(uintptr_t addr); - void mark_start(); - void mark(bool initial); - void mark_flush_and_free(Thread* thread); - bool mark_end(); - void mark_free(); - void keep_alive(oop obj); - - // Relocation set - void select_relocation_set(); - void reset_relocation_set(); - - // Relocation - void relocate_start(); - uintptr_t relocate_object(uintptr_t addr); - uintptr_t remap_object(uintptr_t addr); - void relocate(); - - // Continuations - bool is_allocating(uintptr_t addr) const; - - // Iteration - void object_iterate(ObjectClosure* cl, bool visit_weaks); - ParallelObjectIteratorImpl* parallel_object_iterator(uint nworkers, bool visit_weaks); - void pages_do(XPageClosure* cl); - - // Serviceability - void serviceability_initialize(); - GCMemoryManager* serviceability_cycle_memory_manager(); - GCMemoryManager* serviceability_pause_memory_manager(); - MemoryPool* 
serviceability_memory_pool(); - XServiceabilityCounters* serviceability_counters(); - - // Printing - void print_on(outputStream* st) const; - void print_extended_on(outputStream* st) const; - bool print_location(outputStream* st, uintptr_t addr) const; - - // Verification - bool is_oop(uintptr_t addr) const; - void verify(); -}; - -#endif // SHARE_GC_X_XHEAP_HPP diff --git a/src/hotspot/share/gc/x/xHeap.inline.hpp b/src/hotspot/share/gc/x/xHeap.inline.hpp deleted file mode 100644 index 793a720017704..0000000000000 --- a/src/hotspot/share/gc/x/xHeap.inline.hpp +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XHEAP_INLINE_HPP -#define SHARE_GC_X_XHEAP_INLINE_HPP - -#include "gc/x/xHeap.hpp" - -#include "gc/x/xAddress.inline.hpp" -#include "gc/x/xForwardingTable.inline.hpp" -#include "gc/x/xMark.inline.hpp" -#include "gc/x/xPage.inline.hpp" -#include "gc/x/xPageTable.inline.hpp" -#include "utilities/debug.hpp" - -inline XHeap* XHeap::heap() { - assert(_heap != nullptr, "Not initialized"); - return _heap; -} - -inline ReferenceDiscoverer* XHeap::reference_discoverer() { - return &_reference_processor; -} - -inline bool XHeap::is_object_live(uintptr_t addr) const { - XPage* page = _page_table.get(addr); - return page->is_object_live(addr); -} - -inline bool XHeap::is_object_strongly_live(uintptr_t addr) const { - XPage* page = _page_table.get(addr); - return page->is_object_strongly_live(addr); -} - -template -inline void XHeap::mark_object(uintptr_t addr) { - assert(XGlobalPhase == XPhaseMark, "Mark not allowed"); - _mark.mark_object(addr); -} - -inline uintptr_t XHeap::alloc_tlab(size_t size) { - guarantee(size <= max_tlab_size(), "TLAB too large"); - return _object_allocator.alloc_object(size); -} - -inline uintptr_t XHeap::alloc_object(size_t size) { - uintptr_t addr = _object_allocator.alloc_object(size); - assert(XAddress::is_good_or_null(addr), "Bad address"); - - if (addr == 0) { - out_of_memory(); - } - - return addr; -} - -inline uintptr_t XHeap::alloc_object_for_relocation(size_t size) { - const uintptr_t addr = _object_allocator.alloc_object_for_relocation(&_page_table, size); - assert(XAddress::is_good_or_null(addr), "Bad address"); - return addr; -} - -inline void XHeap::undo_alloc_object_for_relocation(uintptr_t addr, size_t size) { - XPage* const page = _page_table.get(addr); - _object_allocator.undo_alloc_object_for_relocation(page, addr, size); -} - -inline uintptr_t XHeap::relocate_object(uintptr_t addr) { - 
assert(XGlobalPhase == XPhaseRelocate, "Relocate not allowed"); - - XForwarding* const forwarding = _forwarding_table.get(addr); - if (forwarding == nullptr) { - // Not forwarding - return XAddress::good(addr); - } - - // Relocate object - return _relocate.relocate_object(forwarding, XAddress::good(addr)); -} - -inline uintptr_t XHeap::remap_object(uintptr_t addr) { - assert(XGlobalPhase == XPhaseMark || - XGlobalPhase == XPhaseMarkCompleted, "Forward not allowed"); - - XForwarding* const forwarding = _forwarding_table.get(addr); - if (forwarding == nullptr) { - // Not forwarding - return XAddress::good(addr); - } - - // Forward object - return _relocate.forward_object(forwarding, XAddress::good(addr)); -} - -inline bool XHeap::has_alloc_stalled() const { - return _page_allocator.has_alloc_stalled(); -} - -inline void XHeap::check_out_of_memory() { - _page_allocator.check_out_of_memory(); -} - -inline bool XHeap::is_oop(uintptr_t addr) const { - return XAddress::is_good(addr) && is_object_aligned(addr) && is_in(addr); -} - -#endif // SHARE_GC_X_XHEAP_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xHeapIterator.cpp b/src/hotspot/share/gc/x/xHeapIterator.cpp deleted file mode 100644 index 47a6db26f6984..0000000000000 --- a/src/hotspot/share/gc/x/xHeapIterator.cpp +++ /dev/null @@ -1,439 +0,0 @@ -/* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "classfile/classLoaderDataGraph.hpp" -#include "gc/shared/barrierSetNMethod.hpp" -#include "gc/shared/gc_globals.hpp" -#include "gc/shared/taskqueue.inline.hpp" -#include "gc/x/xAddress.inline.hpp" -#include "gc/x/xCollectedHeap.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xGranuleMap.inline.hpp" -#include "gc/x/xHeapIterator.hpp" -#include "gc/x/xLock.inline.hpp" -#include "gc/x/xNMethod.hpp" -#include "gc/x/xOop.inline.hpp" -#include "memory/iterator.inline.hpp" -#include "utilities/bitMap.inline.hpp" - -class XHeapIteratorBitMap : public CHeapObj { -private: - CHeapBitMap _bitmap; - -public: - XHeapIteratorBitMap(size_t size_in_bits) : - _bitmap(size_in_bits, mtGC) {} - - bool try_set_bit(size_t index) { - return _bitmap.par_set_bit(index); - } -}; - -class XHeapIteratorContext { -private: - XHeapIterator* const _iter; - XHeapIteratorQueue* const _queue; - XHeapIteratorArrayQueue* const _array_queue; - const uint _worker_id; - XStatTimerDisable _timer_disable; - -public: - XHeapIteratorContext(XHeapIterator* iter, uint worker_id) : - _iter(iter), - _queue(_iter->_queues.queue(worker_id)), - _array_queue(_iter->_array_queues.queue(worker_id)), - _worker_id(worker_id) {} - - void mark_and_push(oop obj) const { - if (_iter->mark_object(obj)) { - _queue->push(obj); - } - } - - void push_array(const ObjArrayTask& array) const { - _array_queue->push(array); - } - - bool pop(oop& obj) const { - return _queue->pop_overflow(obj) || _queue->pop_local(obj); - } - - bool pop_array(ObjArrayTask& array) const { - return _array_queue->pop_overflow(array) || _array_queue->pop_local(array); - } - - bool steal(oop& obj) const { - return _iter->_queues.steal(_worker_id, obj); - } - - bool steal_array(ObjArrayTask& array) const { - return _iter->_array_queues.steal(_worker_id, array); - } - - bool is_drained() const { - return _queue->is_empty() && _array_queue->is_empty(); - } -}; - -template -class XHeapIteratorRootOopClosure : public OopClosure { -private: - const XHeapIteratorContext& _context; - - oop load_oop(oop* p) { - if (Weak) { - return NativeAccess::oop_load(p); - } - - return NativeAccess::oop_load(p); - } - -public: - XHeapIteratorRootOopClosure(const XHeapIteratorContext& context) : - _context(context) {} - - virtual void do_oop(oop* p) { - const oop obj = load_oop(p); - _context.mark_and_push(obj); - } - - virtual void do_oop(narrowOop* p) { - ShouldNotReachHere(); - } -}; - -template -class XHeapIteratorOopClosure : public OopIterateClosure { -private: - const XHeapIteratorContext& _context; - const oop _base; - - oop load_oop(oop* p) { - assert(XCollectedHeap::heap()->is_in(p), "Should be in heap"); - - if (VisitReferents) { - return HeapAccess::oop_load_at(_base, _base->field_offset(p)); - } - - return HeapAccess::oop_load(p); - } - -public: - XHeapIteratorOopClosure(const XHeapIteratorContext& context, oop base) : - OopIterateClosure(), - _context(context), - _base(base) {} - - virtual ReferenceIterationMode reference_iteration_mode() { - return VisitReferents ? 
DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT; - } - - virtual void do_oop(oop* p) { - const oop obj = load_oop(p); - _context.mark_and_push(obj); - } - - virtual void do_oop(narrowOop* p) { - ShouldNotReachHere(); - } - - virtual bool do_metadata() { - return true; - } - - virtual void do_klass(Klass* k) { - ClassLoaderData* const cld = k->class_loader_data(); - XHeapIteratorOopClosure::do_cld(cld); - } - - virtual void do_cld(ClassLoaderData* cld) { - class NativeAccessClosure : public OopClosure { - private: - const XHeapIteratorContext& _context; - - public: - explicit NativeAccessClosure(const XHeapIteratorContext& context) : - _context(context) {} - - virtual void do_oop(oop* p) { - assert(!XCollectedHeap::heap()->is_in(p), "Should not be in heap"); - const oop obj = NativeAccess::oop_load(p); - _context.mark_and_push(obj); - } - - virtual void do_oop(narrowOop* p) { - ShouldNotReachHere(); - } - }; - - NativeAccessClosure cl(_context); - cld->oops_do(&cl, ClassLoaderData::_claim_other); - } - - // Don't follow loom stack metadata; it's already followed in other ways through CLDs - virtual void do_nmethod(nmethod* nm) {} - virtual void do_method(Method* m) {} -}; - -XHeapIterator::XHeapIterator(uint nworkers, bool visit_weaks) : - _visit_weaks(visit_weaks), - _timer_disable(), - _bitmaps(XAddressOffsetMax), - _bitmaps_lock(), - _queues(nworkers), - _array_queues(nworkers), - _roots(ClassLoaderData::_claim_other), - _weak_roots(), - _terminator(nworkers, &_queues) { - - // Create queues - for (uint i = 0; i < _queues.size(); i++) { - XHeapIteratorQueue* const queue = new XHeapIteratorQueue(); - _queues.register_queue(i, queue); - } - - // Create array queues - for (uint i = 0; i < _array_queues.size(); i++) { - XHeapIteratorArrayQueue* const array_queue = new XHeapIteratorArrayQueue(); - _array_queues.register_queue(i, array_queue); - } -} - -XHeapIterator::~XHeapIterator() { - // Destroy bitmaps - XHeapIteratorBitMapsIterator iter(&_bitmaps); - for (XHeapIteratorBitMap* bitmap; iter.next(&bitmap);) { - delete bitmap; - } - - // Destroy array queues - for (uint i = 0; i < _array_queues.size(); i++) { - delete _array_queues.queue(i); - } - - // Destroy queues - for (uint i = 0; i < _queues.size(); i++) { - delete _queues.queue(i); - } - - // Clear claimed CLD bits - ClassLoaderDataGraph::clear_claimed_marks(ClassLoaderData::_claim_other); -} - -static size_t object_index_max() { - return XGranuleSize >> XObjectAlignmentSmallShift; -} - -static size_t object_index(oop obj) { - const uintptr_t addr = XOop::to_address(obj); - const uintptr_t offset = XAddress::offset(addr); - const uintptr_t mask = XGranuleSize - 1; - return (offset & mask) >> XObjectAlignmentSmallShift; -} - -XHeapIteratorBitMap* XHeapIterator::object_bitmap(oop obj) { - const uintptr_t offset = XAddress::offset(XOop::to_address(obj)); - XHeapIteratorBitMap* bitmap = _bitmaps.get_acquire(offset); - if (bitmap == nullptr) { - XLocker locker(&_bitmaps_lock); - bitmap = _bitmaps.get(offset); - if (bitmap == nullptr) { - // Install new bitmap - bitmap = new XHeapIteratorBitMap(object_index_max()); - _bitmaps.release_put(offset, bitmap); - } - } - - return bitmap; -} - -bool XHeapIterator::mark_object(oop obj) { - if (obj == nullptr) { - return false; - } - - XHeapIteratorBitMap* const bitmap = object_bitmap(obj); - const size_t index = object_index(obj); - return bitmap->try_set_bit(index); -} - -typedef ClaimingCLDToOopClosure XHeapIteratorCLDCLosure; - -class XHeapIteratorNMethodClosure : public NMethodClosure { -private: - 
OopClosure* const _cl; - BarrierSetNMethod* const _bs_nm; - -public: - XHeapIteratorNMethodClosure(OopClosure* cl) : - _cl(cl), - _bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()) {} - - virtual void do_nmethod(nmethod* nm) { - // If ClassUnloading is turned off, all nmethods are considered strong, - // not only those on the call stacks. The heap iteration might happen - // before the concurrent processign of the code cache, make sure that - // all nmethods have been processed before visiting the oops. - _bs_nm->nmethod_entry_barrier(nm); - - XNMethod::nmethod_oops_do(nm, _cl); - } -}; - -class XHeapIteratorThreadClosure : public ThreadClosure { -private: - OopClosure* const _cl; - NMethodClosure* const _nm_cl; - -public: - XHeapIteratorThreadClosure(OopClosure* cl, NMethodClosure* nm_cl) : - _cl(cl), - _nm_cl(nm_cl) {} - - void do_thread(Thread* thread) { - thread->oops_do(_cl, _nm_cl); - } -}; - -void XHeapIterator::push_strong_roots(const XHeapIteratorContext& context) { - XHeapIteratorRootOopClosure cl(context); - XHeapIteratorCLDCLosure cld_cl(&cl); - XHeapIteratorNMethodClosure nm_cl(&cl); - XHeapIteratorThreadClosure thread_cl(&cl, &nm_cl); - - _roots.apply(&cl, - &cld_cl, - &thread_cl, - &nm_cl); -} - -void XHeapIterator::push_weak_roots(const XHeapIteratorContext& context) { - XHeapIteratorRootOopClosure cl(context); - _weak_roots.apply(&cl); -} - -template -void XHeapIterator::push_roots(const XHeapIteratorContext& context) { - push_strong_roots(context); - if (VisitWeaks) { - push_weak_roots(context); - } -} - -template -void XHeapIterator::follow_object(const XHeapIteratorContext& context, oop obj) { - XHeapIteratorOopClosure cl(context, obj); - obj->oop_iterate(&cl); -} - -void XHeapIterator::follow_array(const XHeapIteratorContext& context, oop obj) { - // Follow klass - XHeapIteratorOopClosure cl(context, obj); - cl.do_klass(obj->klass()); - - // Push array chunk - context.push_array(ObjArrayTask(obj, 0 /* index */)); -} - -void XHeapIterator::follow_array_chunk(const XHeapIteratorContext& context, const ObjArrayTask& array) { - const objArrayOop obj = objArrayOop(array.obj()); - const int length = obj->length(); - const int start = array.index(); - const int stride = MIN2(length - start, ObjArrayMarkingStride); - const int end = start + stride; - - // Push remaining array chunk first - if (end < length) { - context.push_array(ObjArrayTask(obj, end)); - } - - // Follow array chunk - XHeapIteratorOopClosure cl(context, obj); - obj->oop_iterate_range(&cl, start, end); -} - -template -void XHeapIterator::visit_and_follow(const XHeapIteratorContext& context, ObjectClosure* cl, oop obj) { - // Visit - cl->do_object(obj); - - // Follow - if (obj->is_objArray()) { - follow_array(context, obj); - } else { - follow_object(context, obj); - } -} - -template -void XHeapIterator::drain(const XHeapIteratorContext& context, ObjectClosure* cl) { - ObjArrayTask array; - oop obj; - - do { - while (context.pop(obj)) { - visit_and_follow(context, cl, obj); - } - - if (context.pop_array(array)) { - follow_array_chunk(context, array); - } - } while (!context.is_drained()); -} - -template -void XHeapIterator::steal(const XHeapIteratorContext& context, ObjectClosure* cl) { - ObjArrayTask array; - oop obj; - - if (context.steal_array(array)) { - follow_array_chunk(context, array); - } else if (context.steal(obj)) { - visit_and_follow(context, cl, obj); - } -} - -template -void XHeapIterator::drain_and_steal(const XHeapIteratorContext& context, ObjectClosure* cl) { - do { - drain(context, cl); 
- steal(context, cl); - } while (!context.is_drained() || !_terminator.offer_termination()); -} - -template -void XHeapIterator::object_iterate_inner(const XHeapIteratorContext& context, ObjectClosure* object_cl) { - push_roots(context); - drain_and_steal(context, object_cl); -} - -void XHeapIterator::object_iterate(ObjectClosure* cl, uint worker_id) { - XHeapIteratorContext context(this, worker_id); - - if (_visit_weaks) { - object_iterate_inner(context, cl); - } else { - object_iterate_inner(context, cl); - } -} diff --git a/src/hotspot/share/gc/x/xHeapIterator.hpp b/src/hotspot/share/gc/x/xHeapIterator.hpp deleted file mode 100644 index 0d990a616f886..0000000000000 --- a/src/hotspot/share/gc/x/xHeapIterator.hpp +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XHEAPITERATOR_HPP -#define SHARE_GC_X_XHEAPITERATOR_HPP - -#include "gc/shared/collectedHeap.hpp" -#include "gc/shared/taskTerminator.hpp" -#include "gc/shared/taskqueue.hpp" -#include "gc/x/xGranuleMap.hpp" -#include "gc/x/xLock.hpp" -#include "gc/x/xRootsIterator.hpp" -#include "gc/x/xStat.hpp" - -class XHeapIteratorBitMap; -class XHeapIteratorContext; - -using XHeapIteratorBitMaps = XGranuleMap; -using XHeapIteratorBitMapsIterator = XGranuleMapIterator; -using XHeapIteratorQueue = OverflowTaskQueue; -using XHeapIteratorQueues = GenericTaskQueueSet; -using XHeapIteratorArrayQueue = OverflowTaskQueue; -using XHeapIteratorArrayQueues = GenericTaskQueueSet; - -class XHeapIterator : public ParallelObjectIteratorImpl { - friend class XHeapIteratorContext; - -private: - const bool _visit_weaks; - XStatTimerDisable _timer_disable; - XHeapIteratorBitMaps _bitmaps; - XLock _bitmaps_lock; - XHeapIteratorQueues _queues; - XHeapIteratorArrayQueues _array_queues; - XRootsIterator _roots; - XWeakRootsIterator _weak_roots; - TaskTerminator _terminator; - - XHeapIteratorBitMap* object_bitmap(oop obj); - - bool mark_object(oop obj); - - void push_strong_roots(const XHeapIteratorContext& context); - void push_weak_roots(const XHeapIteratorContext& context); - - template - void push_roots(const XHeapIteratorContext& context); - - template - void follow_object(const XHeapIteratorContext& context, oop obj); - - void follow_array(const XHeapIteratorContext& context, oop obj); - void follow_array_chunk(const XHeapIteratorContext& context, const ObjArrayTask& array); - - template - void visit_and_follow(const XHeapIteratorContext& context, ObjectClosure* cl, oop obj); - - template - void drain(const XHeapIteratorContext& context, ObjectClosure* cl); - - template - void steal(const XHeapIteratorContext& context, ObjectClosure* cl); - - template - void drain_and_steal(const XHeapIteratorContext& context, ObjectClosure* cl); - - template - void object_iterate_inner(const XHeapIteratorContext& context, ObjectClosure* cl); - -public: - XHeapIterator(uint nworkers, bool visit_weaks); - virtual ~XHeapIterator(); - - virtual void object_iterate(ObjectClosure* cl, uint worker_id); -}; - -#endif // SHARE_GC_X_XHEAPITERATOR_HPP diff --git a/src/hotspot/share/gc/x/xHeuristics.cpp b/src/hotspot/share/gc/x/xHeuristics.cpp deleted file mode 100644 index ec89fa41919da..0000000000000 --- a/src/hotspot/share/gc/x/xHeuristics.cpp +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/shared/gcLogPrecious.hpp" -#include "gc/shared/gc_globals.hpp" -#include "gc/x/xCPU.inline.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xHeuristics.hpp" -#include "runtime/globals.hpp" -#include "runtime/os.hpp" -#include "utilities/globalDefinitions.hpp" -#include "utilities/powerOfTwo.hpp" - -void XHeuristics::set_medium_page_size() { - // Set XPageSizeMedium so that a medium page occupies at most 3.125% of the - // max heap size. XPageSizeMedium is initially set to 0, which means medium - // pages are effectively disabled. It is adjusted only if XPageSizeMedium - // becomes larger than XPageSizeSmall. - const size_t min = XGranuleSize; - const size_t max = XGranuleSize * 16; - const size_t unclamped = MaxHeapSize * 0.03125; - const size_t clamped = clamp(unclamped, min, max); - const size_t size = round_down_power_of_2(clamped); - - if (size > XPageSizeSmall) { - // Enable medium pages - XPageSizeMedium = size; - XPageSizeMediumShift = log2i_exact(XPageSizeMedium); - XObjectSizeLimitMedium = XPageSizeMedium / 8; - XObjectAlignmentMediumShift = (int)XPageSizeMediumShift - 13; - XObjectAlignmentMedium = 1 << XObjectAlignmentMediumShift; - } -} - -size_t XHeuristics::relocation_headroom() { - // Calculate headroom needed to avoid in-place relocation. Each worker will try - // to allocate a small page, and all workers will share a single medium page. - const uint nworkers = UseDynamicNumberOfGCThreads ? ConcGCThreads : MAX2(ConcGCThreads, ParallelGCThreads); - return (nworkers * XPageSizeSmall) + XPageSizeMedium; -} - -bool XHeuristics::use_per_cpu_shared_small_pages() { - // Use per-CPU shared small pages only if these pages occupy at most 3.125% - // of the max heap size. Otherwise fall back to using a single shared small - // page. This is useful when using small heaps on large machines. - const size_t per_cpu_share = (MaxHeapSize * 0.03125) / XCPU::count(); - return per_cpu_share >= XPageSizeSmall; -} - -static uint nworkers_based_on_ncpus(double cpu_share_in_percent) { - return ceil(os::initial_active_processor_count() * cpu_share_in_percent / 100.0); -} - -static uint nworkers_based_on_heap_size(double heap_share_in_percent) { - const int nworkers = (MaxHeapSize * (heap_share_in_percent / 100.0)) / XPageSizeSmall; - return MAX2(nworkers, 1); -} - -static uint nworkers(double cpu_share_in_percent) { - // Cap number of workers so that they don't use more than 2% of the max heap - // during relocation. This is useful when using small heaps on large machines. - return MIN2(nworkers_based_on_ncpus(cpu_share_in_percent), - nworkers_based_on_heap_size(2.0)); -} - -uint XHeuristics::nparallel_workers() { - // Use 60% of the CPUs, rounded up. We would like to use as many threads as - // possible to increase parallelism. However, using a thread count that is - // close to the number of processors tends to lead to over-provisioning and - // scheduling latency issues. Using 60% of the active processors appears to - // be a fairly good balance. - return nworkers(60.0); -} - -uint XHeuristics::nconcurrent_workers() { - // The number of concurrent threads we would like to use heavily depends - // on the type of workload we are running. 
Using too many threads will have - // a negative impact on the application throughput, while using too few - // threads will prolong the GC-cycle and we then risk being out-run by the - // application. When in dynamic mode, use up to 25% of the active processors. - // When in non-dynamic mode, use 12.5% of the active processors. - return nworkers(UseDynamicNumberOfGCThreads ? 25.0 : 12.5); -} diff --git a/src/hotspot/share/gc/x/xHeuristics.hpp b/src/hotspot/share/gc/x/xHeuristics.hpp deleted file mode 100644 index 2ca798257b233..0000000000000 --- a/src/hotspot/share/gc/x/xHeuristics.hpp +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XHEURISTICS_HPP -#define SHARE_GC_X_XHEURISTICS_HPP - -#include "memory/allStatic.hpp" - -class XHeuristics : public AllStatic { -public: - static void set_medium_page_size(); - - static size_t relocation_headroom(); - - static bool use_per_cpu_shared_small_pages(); - - static uint nparallel_workers(); - static uint nconcurrent_workers(); -}; - -#endif // SHARE_GC_X_XHEURISTICS_HPP diff --git a/src/hotspot/share/gc/x/xInitialize.cpp b/src/hotspot/share/gc/x/xInitialize.cpp deleted file mode 100644 index 156be17971fce..0000000000000 --- a/src/hotspot/share/gc/x/xInitialize.cpp +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/x/xAddress.hpp" -#include "gc/x/xBarrierSet.hpp" -#include "gc/x/xCPU.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xHeuristics.hpp" -#include "gc/x/xInitialize.hpp" -#include "gc/x/xLargePages.hpp" -#include "gc/x/xNUMA.hpp" -#include "gc/x/xStat.hpp" -#include "gc/x/xThreadLocalAllocBuffer.hpp" -#include "gc/x/xTracer.hpp" -#include "logging/log.hpp" -#include "runtime/vm_version.hpp" - -XInitialize::XInitialize(XBarrierSet* barrier_set) { - log_info(gc, init)("Initializing %s", XName); - log_info(gc, init)("Version: %s (%s)", - VM_Version::vm_release(), - VM_Version::jdk_debug_level()); - log_info(gc, init)("Using deprecated non-generational mode"); - - // Early initialization - XAddress::initialize(); - XNUMA::initialize(); - XCPU::initialize(); - XStatValue::initialize(); - XThreadLocalAllocBuffer::initialize(); - XTracer::initialize(); - XLargePages::initialize(); - XHeuristics::set_medium_page_size(); - XBarrierSet::set_barrier_set(barrier_set); - - pd_initialize(); -} diff --git a/src/hotspot/share/gc/x/xInitialize.hpp b/src/hotspot/share/gc/x/xInitialize.hpp deleted file mode 100644 index 30e7b65293ed6..0000000000000 --- a/src/hotspot/share/gc/x/xInitialize.hpp +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XINITIALIZE_HPP -#define SHARE_GC_X_XINITIALIZE_HPP - -#include "memory/allocation.hpp" - -class XBarrierSet; - -class XInitialize { -private: - void pd_initialize(); - -public: - XInitialize(XBarrierSet* barrier_set); -}; - -#endif // SHARE_GC_X_XINITIALIZE_HPP diff --git a/src/hotspot/share/gc/x/xLargePages.cpp b/src/hotspot/share/gc/x/xLargePages.cpp deleted file mode 100644 index 13da763c6a39c..0000000000000 --- a/src/hotspot/share/gc/x/xLargePages.cpp +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/shared/gcLogPrecious.hpp" -#include "gc/x/xLargePages.hpp" -#include "runtime/os.hpp" - -XLargePages::State XLargePages::_state; - -void XLargePages::initialize() { - pd_initialize(); - - log_info_p(gc, init)("Memory: " JULONG_FORMAT "M", os::physical_memory() / M); - log_info_p(gc, init)("Large Page Support: %s", to_string()); -} - -const char* XLargePages::to_string() { - switch (_state) { - case Explicit: - return "Enabled (Explicit)"; - - case Transparent: - return "Enabled (Transparent)"; - - default: - return "Disabled"; - } -} diff --git a/src/hotspot/share/gc/x/xLargePages.hpp b/src/hotspot/share/gc/x/xLargePages.hpp deleted file mode 100644 index 562e83ffbd088..0000000000000 --- a/src/hotspot/share/gc/x/xLargePages.hpp +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XLARGEPAGES_HPP -#define SHARE_GC_X_XLARGEPAGES_HPP - -#include "memory/allStatic.hpp" - -class XLargePages : public AllStatic { -private: - enum State { - Disabled, - Explicit, - Transparent - }; - - static State _state; - - static void pd_initialize(); - -public: - static void initialize(); - - static bool is_enabled(); - static bool is_explicit(); - static bool is_transparent(); - - static const char* to_string(); -}; - -#endif // SHARE_GC_X_XLARGEPAGES_HPP diff --git a/src/hotspot/share/gc/x/xLargePages.inline.hpp b/src/hotspot/share/gc/x/xLargePages.inline.hpp deleted file mode 100644 index 2f027c3b17605..0000000000000 --- a/src/hotspot/share/gc/x/xLargePages.inline.hpp +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XLARGEPAGES_INLINE_HPP -#define SHARE_GC_X_XLARGEPAGES_INLINE_HPP - -#include "gc/x/xLargePages.hpp" - -inline bool XLargePages::is_enabled() { - return _state != Disabled; -} - -inline bool XLargePages::is_explicit() { - return _state == Explicit; -} - -inline bool XLargePages::is_transparent() { - return _state == Transparent; -} - -#endif // SHARE_GC_X_XLARGEPAGES_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xList.hpp b/src/hotspot/share/gc/x/xList.hpp deleted file mode 100644 index d689704d65388..0000000000000 --- a/src/hotspot/share/gc/x/xList.hpp +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XLIST_HPP -#define SHARE_GC_X_XLIST_HPP - -#include "memory/allocation.hpp" -#include "utilities/globalDefinitions.hpp" - -template class XList; - -// Element in a doubly linked list -template -class XListNode { - friend class XList; - -private: - XListNode* _next; - XListNode* _prev; - - NONCOPYABLE(XListNode); - - void verify_links() const; - void verify_links_linked() const; - void verify_links_unlinked() const; - -public: - XListNode(); - ~XListNode(); -}; - -// Doubly linked list -template -class XList { -private: - XListNode _head; - size_t _size; - - NONCOPYABLE(XList); - - void verify_head() const; - - void insert(XListNode* before, XListNode* node); - - XListNode* cast_to_inner(T* elem) const; - T* cast_to_outer(XListNode* node) const; - -public: - XList(); - - size_t size() const; - bool is_empty() const; - - T* first() const; - T* last() const; - T* next(T* elem) const; - T* prev(T* elem) const; - - void insert_first(T* elem); - void insert_last(T* elem); - void insert_before(T* before, T* elem); - void insert_after(T* after, T* elem); - - void remove(T* elem); - T* remove_first(); - T* remove_last(); -}; - -template -class XListIteratorImpl : public StackObj { -private: - const XList* const _list; - T* _next; - -public: - XListIteratorImpl(const XList* list); - - bool next(T** elem); -}; - -template -class XListRemoveIteratorImpl : public StackObj { -private: - XList* const _list; - -public: - XListRemoveIteratorImpl(XList* list); - - bool next(T** elem); -}; - -template using XListIterator = XListIteratorImpl; -template using XListReverseIterator = XListIteratorImpl; -template using XListRemoveIterator = XListRemoveIteratorImpl; - -#endif // SHARE_GC_X_XLIST_HPP diff --git a/src/hotspot/share/gc/x/xList.inline.hpp b/src/hotspot/share/gc/x/xList.inline.hpp deleted file mode 100644 index 22ca5b820597a..0000000000000 --- a/src/hotspot/share/gc/x/xList.inline.hpp +++ /dev/null @@ -1,238 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XLIST_INLINE_HPP -#define SHARE_GC_X_XLIST_INLINE_HPP - -#include "gc/x/xList.hpp" - -#include "utilities/debug.hpp" - -template -inline XListNode::XListNode() : - _next(this), - _prev(this) {} - -template -inline XListNode::~XListNode() { - verify_links_unlinked(); -} - -template -inline void XListNode::verify_links() const { - assert(_next->_prev == this, "Corrupt list node"); - assert(_prev->_next == this, "Corrupt list node"); -} - -template -inline void XListNode::verify_links_linked() const { - assert(_next != this, "Should be in a list"); - assert(_prev != this, "Should be in a list"); - verify_links(); -} - -template -inline void XListNode::verify_links_unlinked() const { - assert(_next == this, "Should not be in a list"); - assert(_prev == this, "Should not be in a list"); -} - -template -inline void XList::verify_head() const { - _head.verify_links(); -} - -template -inline void XList::insert(XListNode* before, XListNode* node) { - verify_head(); - - before->verify_links(); - node->verify_links_unlinked(); - - node->_prev = before; - node->_next = before->_next; - before->_next = node; - node->_next->_prev = node; - - before->verify_links_linked(); - node->verify_links_linked(); - - _size++; -} - -template -inline XListNode* XList::cast_to_inner(T* elem) const { - return &elem->_node; -} - -template -inline T* XList::cast_to_outer(XListNode* node) const { - return (T*)((uintptr_t)node - offset_of(T, _node)); -} - -template -inline XList::XList() : - _head(), - _size(0) { - verify_head(); -} - -template -inline size_t XList::size() const { - verify_head(); - return _size; -} - -template -inline bool XList::is_empty() const { - return size() == 0; -} - -template -inline T* XList::first() const { - return is_empty() ? nullptr : cast_to_outer(_head._next); -} - -template -inline T* XList::last() const { - return is_empty() ? nullptr : cast_to_outer(_head._prev); -} - -template -inline T* XList::next(T* elem) const { - verify_head(); - - XListNode* const node = cast_to_inner(elem); - node->verify_links_linked(); - - XListNode* const next = node->_next; - next->verify_links_linked(); - - return (next == &_head) ? nullptr : cast_to_outer(next); -} - -template -inline T* XList::prev(T* elem) const { - verify_head(); - - XListNode* const node = cast_to_inner(elem); - node->verify_links_linked(); - - XListNode* const prev = node->_prev; - prev->verify_links_linked(); - - return (prev == &_head) ? 
nullptr : cast_to_outer(prev); -} - -template -inline void XList::insert_first(T* elem) { - insert(&_head, cast_to_inner(elem)); -} - -template -inline void XList::insert_last(T* elem) { - insert(_head._prev, cast_to_inner(elem)); -} - -template -inline void XList::insert_before(T* before, T* elem) { - insert(cast_to_inner(before)->_prev, cast_to_inner(elem)); -} - -template -inline void XList::insert_after(T* after, T* elem) { - insert(cast_to_inner(after), cast_to_inner(elem)); -} - -template -inline void XList::remove(T* elem) { - verify_head(); - - XListNode* const node = cast_to_inner(elem); - node->verify_links_linked(); - - XListNode* const next = node->_next; - XListNode* const prev = node->_prev; - next->verify_links_linked(); - prev->verify_links_linked(); - - node->_next = prev->_next; - node->_prev = next->_prev; - node->verify_links_unlinked(); - - next->_prev = prev; - prev->_next = next; - next->verify_links(); - prev->verify_links(); - - _size--; -} - -template -inline T* XList::remove_first() { - T* elem = first(); - if (elem != nullptr) { - remove(elem); - } - - return elem; -} - -template -inline T* XList::remove_last() { - T* elem = last(); - if (elem != nullptr) { - remove(elem); - } - - return elem; -} - -template -inline XListIteratorImpl::XListIteratorImpl(const XList* list) : - _list(list), - _next(Forward ? list->first() : list->last()) {} - -template -inline bool XListIteratorImpl::next(T** elem) { - if (_next != nullptr) { - *elem = _next; - _next = Forward ? _list->next(_next) : _list->prev(_next); - return true; - } - - // No more elements - return false; -} - -template -inline XListRemoveIteratorImpl::XListRemoveIteratorImpl(XList* list) : - _list(list) {} - -template -inline bool XListRemoveIteratorImpl::next(T** elem) { - *elem = Forward ? _list->remove_first() : _list->remove_last(); - return *elem != nullptr; -} - -#endif // SHARE_GC_X_XLIST_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xLiveMap.cpp b/src/hotspot/share/gc/x/xLiveMap.cpp deleted file mode 100644 index 91ef99754f791..0000000000000 --- a/src/hotspot/share/gc/x/xLiveMap.cpp +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/x/xHeap.inline.hpp" -#include "gc/x/xLiveMap.inline.hpp" -#include "gc/x/xStat.hpp" -#include "gc/x/xThread.inline.hpp" -#include "logging/log.hpp" -#include "runtime/atomic.hpp" -#include "utilities/debug.hpp" -#include "utilities/powerOfTwo.hpp" - -static const XStatCounter XCounterMarkSeqNumResetContention("Contention", "Mark SeqNum Reset Contention", XStatUnitOpsPerSecond); -static const XStatCounter XCounterMarkSegmentResetContention("Contention", "Mark Segment Reset Contention", XStatUnitOpsPerSecond); - -static size_t bitmap_size(uint32_t size, size_t nsegments) { - // We need at least one bit per segment - return MAX2(size, nsegments) * 2; -} - -XLiveMap::XLiveMap(uint32_t size) : - _seqnum(0), - _live_objects(0), - _live_bytes(0), - _segment_live_bits(0), - _segment_claim_bits(0), - _bitmap(bitmap_size(size, nsegments)), - _segment_shift(exact_log2(segment_size())) {} - -void XLiveMap::reset(size_t index) { - const uint32_t seqnum_initializing = (uint32_t)-1; - bool contention = false; - - // Multiple threads can enter here, make sure only one of them - // resets the marking information while the others busy wait. - for (uint32_t seqnum = Atomic::load_acquire(&_seqnum); - seqnum != XGlobalSeqNum; - seqnum = Atomic::load_acquire(&_seqnum)) { - if ((seqnum != seqnum_initializing) && - (Atomic::cmpxchg(&_seqnum, seqnum, seqnum_initializing) == seqnum)) { - // Reset marking information - _live_bytes = 0; - _live_objects = 0; - - // Clear segment claimed/live bits - segment_live_bits().clear(); - segment_claim_bits().clear(); - - assert(_seqnum == seqnum_initializing, "Invalid"); - - // Make sure the newly reset marking information is ordered - // before the update of the page seqnum, such that when the - // up-to-date seqnum is load acquired, the bit maps will not - // contain stale information. 
- Atomic::release_store(&_seqnum, XGlobalSeqNum); - break; - } - - // Mark reset contention - if (!contention) { - // Count contention once - XStatInc(XCounterMarkSeqNumResetContention); - contention = true; - - log_trace(gc)("Mark seqnum reset contention, thread: " PTR_FORMAT " (%s), map: " PTR_FORMAT ", bit: " SIZE_FORMAT, - XThread::id(), XThread::name(), p2i(this), index); - } - } -} - -void XLiveMap::reset_segment(BitMap::idx_t segment) { - bool contention = false; - - if (!claim_segment(segment)) { - // Already claimed, wait for live bit to be set - while (!is_segment_live(segment)) { - // Mark reset contention - if (!contention) { - // Count contention once - XStatInc(XCounterMarkSegmentResetContention); - contention = true; - - log_trace(gc)("Mark segment reset contention, thread: " PTR_FORMAT " (%s), map: " PTR_FORMAT ", segment: " SIZE_FORMAT, - XThread::id(), XThread::name(), p2i(this), segment); - } - } - - // Segment is live - return; - } - - // Segment claimed, clear it - const BitMap::idx_t start_index = segment_start(segment); - const BitMap::idx_t end_index = segment_end(segment); - if (segment_size() / BitsPerWord >= 32) { - _bitmap.clear_large_range(start_index, end_index); - } else { - _bitmap.clear_range(start_index, end_index); - } - - // Set live bit - const bool success = set_segment_live(segment); - assert(success, "Should never fail"); -} - -void XLiveMap::resize(uint32_t size) { - const size_t new_bitmap_size = bitmap_size(size, nsegments); - if (_bitmap.size() != new_bitmap_size) { - _bitmap.reinitialize(new_bitmap_size, false /* clear */); - _segment_shift = exact_log2(segment_size()); - } -} diff --git a/src/hotspot/share/gc/x/xLiveMap.hpp b/src/hotspot/share/gc/x/xLiveMap.hpp deleted file mode 100644 index 7bad774c6c6c9..0000000000000 --- a/src/hotspot/share/gc/x/xLiveMap.hpp +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XLIVEMAP_HPP -#define SHARE_GC_X_XLIVEMAP_HPP - -#include "gc/x/xBitMap.hpp" -#include "memory/allocation.hpp" - -class ObjectClosure; - -class XLiveMap { - friend class XLiveMapTest; - -private: - static const size_t nsegments = 64; - - volatile uint32_t _seqnum; - volatile uint32_t _live_objects; - volatile size_t _live_bytes; - BitMap::bm_word_t _segment_live_bits; - BitMap::bm_word_t _segment_claim_bits; - XBitMap _bitmap; - size_t _segment_shift; - - const BitMapView segment_live_bits() const; - const BitMapView segment_claim_bits() const; - - BitMapView segment_live_bits(); - BitMapView segment_claim_bits(); - - BitMap::idx_t segment_size() const; - - BitMap::idx_t segment_start(BitMap::idx_t segment) const; - BitMap::idx_t segment_end(BitMap::idx_t segment) const; - - bool is_segment_live(BitMap::idx_t segment) const; - bool set_segment_live(BitMap::idx_t segment); - - BitMap::idx_t first_live_segment() const; - BitMap::idx_t next_live_segment(BitMap::idx_t segment) const; - BitMap::idx_t index_to_segment(BitMap::idx_t index) const; - - bool claim_segment(BitMap::idx_t segment); - - void reset(size_t index); - void reset_segment(BitMap::idx_t segment); - - void iterate_segment(ObjectClosure* cl, BitMap::idx_t segment, uintptr_t page_start, size_t page_object_alignment_shift); - -public: - XLiveMap(uint32_t size); - - void reset(); - void resize(uint32_t size); - - bool is_marked() const; - - uint32_t live_objects() const; - size_t live_bytes() const; - - bool get(size_t index) const; - bool set(size_t index, bool finalizable, bool& inc_live); - - void inc_live(uint32_t objects, size_t bytes); - - void iterate(ObjectClosure* cl, uintptr_t page_start, size_t page_object_alignment_shift); -}; - -#endif // SHARE_GC_X_XLIVEMAP_HPP diff --git a/src/hotspot/share/gc/x/xLiveMap.inline.hpp b/src/hotspot/share/gc/x/xLiveMap.inline.hpp deleted file mode 100644 index f836f9ab4c21f..0000000000000 --- a/src/hotspot/share/gc/x/xLiveMap.inline.hpp +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XLIVEMAP_INLINE_HPP -#define SHARE_GC_X_XLIVEMAP_INLINE_HPP - -#include "gc/x/xLiveMap.hpp" - -#include "gc/x/xBitMap.inline.hpp" -#include "gc/x/xMark.hpp" -#include "gc/x/xOop.inline.hpp" -#include "gc/x/xUtils.inline.hpp" -#include "runtime/atomic.hpp" -#include "utilities/bitMap.inline.hpp" -#include "utilities/debug.hpp" - -inline void XLiveMap::reset() { - _seqnum = 0; -} - -inline bool XLiveMap::is_marked() const { - return Atomic::load_acquire(&_seqnum) == XGlobalSeqNum; -} - -inline uint32_t XLiveMap::live_objects() const { - assert(XGlobalPhase != XPhaseMark, "Invalid phase"); - return _live_objects; -} - -inline size_t XLiveMap::live_bytes() const { - assert(XGlobalPhase != XPhaseMark, "Invalid phase"); - return _live_bytes; -} - -inline const BitMapView XLiveMap::segment_live_bits() const { - return BitMapView(const_cast(&_segment_live_bits), nsegments); -} - -inline const BitMapView XLiveMap::segment_claim_bits() const { - return BitMapView(const_cast(&_segment_claim_bits), nsegments); -} - -inline BitMapView XLiveMap::segment_live_bits() { - return BitMapView(&_segment_live_bits, nsegments); -} - -inline BitMapView XLiveMap::segment_claim_bits() { - return BitMapView(&_segment_claim_bits, nsegments); -} - -inline bool XLiveMap::is_segment_live(BitMap::idx_t segment) const { - return segment_live_bits().par_at(segment); -} - -inline bool XLiveMap::set_segment_live(BitMap::idx_t segment) { - return segment_live_bits().par_set_bit(segment, memory_order_release); -} - -inline bool XLiveMap::claim_segment(BitMap::idx_t segment) { - return segment_claim_bits().par_set_bit(segment, memory_order_acq_rel); -} - -inline BitMap::idx_t XLiveMap::first_live_segment() const { - return segment_live_bits().find_first_set_bit(0, nsegments); -} - -inline BitMap::idx_t XLiveMap::next_live_segment(BitMap::idx_t segment) const { - return segment_live_bits().find_first_set_bit(segment + 1, nsegments); -} - -inline BitMap::idx_t XLiveMap::segment_size() const { - return _bitmap.size() / nsegments; -} - -inline BitMap::idx_t XLiveMap::index_to_segment(BitMap::idx_t index) const { - return index >> _segment_shift; -} - -inline bool XLiveMap::get(size_t index) const { - BitMap::idx_t segment = index_to_segment(index); - return is_marked() && // Page is marked - is_segment_live(segment) && // Segment is marked - _bitmap.par_at(index, memory_order_relaxed); // Object is marked -} - -inline bool XLiveMap::set(size_t index, bool finalizable, bool& inc_live) { - if (!is_marked()) { - // First object to be marked during this - // cycle, reset marking information. - reset(index); - } - - const BitMap::idx_t segment = index_to_segment(index); - if (!is_segment_live(segment)) { - // First object to be marked in this segment during - // this cycle, reset segment bitmap. 
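[Editor's note] The two checks in XLiveMap::set() above are the heart of the removed live-map design: bitmaps are never cleared eagerly at the start of a marking cycle. Instead each map remembers the sequence number of the cycle it was last reset for, is_marked() compares that against the global sequence number, and the first thread to mark an object in a stale map (or segment) performs the reset lazily. A minimal single-threaded sketch of that idea follows; all names are invented, and the real code additionally splits the bitmap into 64 segments guarded by atomic live/claim bits so concurrent markers can reset independent segments without racing.

```cpp
// Illustrative sketch only, not HotSpot code.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

static uint32_t g_global_seqnum = 1;   // bumped once per marking cycle

class LazyLiveBitmap {
private:
  uint32_t          _seqnum = 0;       // cycle this bitmap was last reset for
  std::vector<bool> _bits;

  bool is_current() const { return _seqnum == g_global_seqnum; }

public:
  explicit LazyLiveBitmap(size_t nbits) : _bits(nbits, false) {}

  // A bit only counts as set if it was set during the current cycle.
  bool get(size_t index) const {
    return is_current() && _bits[index];
  }

  bool set(size_t index) {
    if (!is_current()) {
      // First object marked in this bitmap during the current cycle:
      // clear stale contents from the previous cycle, then adopt the
      // current sequence number.
      std::fill(_bits.begin(), _bits.end(), false);
      _seqnum = g_global_seqnum;
    }
    if (_bits[index]) {
      return false;                    // already marked
    }
    _bits[index] = true;
    return true;
  }
};

int main() {
  LazyLiveBitmap map(8);
  map.set(3);
  printf("cycle 1, bit 3: %d\n", map.get(3));   // 1
  g_global_seqnum++;                            // start a new cycle
  printf("cycle 2, bit 3: %d\n", map.get(3));   // 0 - stale data is ignored
}
```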
- reset_segment(segment); - } - - return _bitmap.par_set_bit_pair(index, finalizable, inc_live); -} - -inline void XLiveMap::inc_live(uint32_t objects, size_t bytes) { - Atomic::add(&_live_objects, objects); - Atomic::add(&_live_bytes, bytes); -} - -inline BitMap::idx_t XLiveMap::segment_start(BitMap::idx_t segment) const { - return segment_size() * segment; -} - -inline BitMap::idx_t XLiveMap::segment_end(BitMap::idx_t segment) const { - return segment_start(segment) + segment_size(); -} - -inline void XLiveMap::iterate_segment(ObjectClosure* cl, BitMap::idx_t segment, uintptr_t page_start, size_t page_object_alignment_shift) { - assert(is_segment_live(segment), "Must be"); - - const BitMap::idx_t start_index = segment_start(segment); - const BitMap::idx_t end_index = segment_end(segment); - BitMap::idx_t index = _bitmap.find_first_set_bit(start_index, end_index); - - while (index < end_index) { - // Calculate object address - const uintptr_t addr = page_start + ((index / 2) << page_object_alignment_shift); - - // Get the size of the object before calling the closure, which - // might overwrite the object in case we are relocating in-place. - const size_t size = XUtils::object_size(addr); - - // Apply closure - cl->do_object(XOop::from_address(addr)); - - // Find next bit after this object - const uintptr_t next_addr = align_up(addr + size, 1 << page_object_alignment_shift); - const BitMap::idx_t next_index = ((next_addr - page_start) >> page_object_alignment_shift) * 2; - if (next_index >= end_index) { - // End of live map - break; - } - - index = _bitmap.find_first_set_bit(next_index, end_index); - } -} - -inline void XLiveMap::iterate(ObjectClosure* cl, uintptr_t page_start, size_t page_object_alignment_shift) { - if (is_marked()) { - for (BitMap::idx_t segment = first_live_segment(); segment < nsegments; segment = next_live_segment(segment)) { - // For each live segment - iterate_segment(cl, segment, page_start, page_object_alignment_shift); - } - } -} - -#endif // SHARE_GC_X_XLIVEMAP_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xLock.hpp b/src/hotspot/share/gc/x/xLock.hpp deleted file mode 100644 index 2ba612d033cc0..0000000000000 --- a/src/hotspot/share/gc/x/xLock.hpp +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XLOCK_HPP -#define SHARE_GC_X_XLOCK_HPP - -#include "memory/allocation.hpp" -#include "runtime/mutex.hpp" - -class XLock { -private: - PlatformMutex _lock; - -public: - void lock(); - bool try_lock(); - void unlock(); -}; - -class XReentrantLock { -private: - XLock _lock; - Thread* volatile _owner; - uint64_t _count; - -public: - XReentrantLock(); - - void lock(); - void unlock(); - - bool is_owned() const; -}; - -class XConditionLock { -private: - PlatformMonitor _lock; - -public: - void lock(); - bool try_lock(); - void unlock(); - - bool wait(uint64_t millis = 0); - void notify(); - void notify_all(); -}; - -template -class XLocker : public StackObj { -private: - T* const _lock; - -public: - XLocker(T* lock); - ~XLocker(); -}; - -#endif // SHARE_GC_X_XLOCK_HPP diff --git a/src/hotspot/share/gc/x/xLock.inline.hpp b/src/hotspot/share/gc/x/xLock.inline.hpp deleted file mode 100644 index a72b65aa22852..0000000000000 --- a/src/hotspot/share/gc/x/xLock.inline.hpp +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XLOCK_INLINE_HPP -#define SHARE_GC_X_XLOCK_INLINE_HPP - -#include "gc/x/xLock.hpp" - -#include "runtime/atomic.hpp" -#include "runtime/javaThread.hpp" -#include "runtime/os.inline.hpp" -#include "utilities/debug.hpp" - -inline void XLock::lock() { - _lock.lock(); -} - -inline bool XLock::try_lock() { - return _lock.try_lock(); -} - -inline void XLock::unlock() { - _lock.unlock(); -} - -inline XReentrantLock::XReentrantLock() : - _lock(), - _owner(nullptr), - _count(0) {} - -inline void XReentrantLock::lock() { - Thread* const thread = Thread::current(); - Thread* const owner = Atomic::load(&_owner); - - if (owner != thread) { - _lock.lock(); - Atomic::store(&_owner, thread); - } - - _count++; -} - -inline void XReentrantLock::unlock() { - assert(is_owned(), "Invalid owner"); - assert(_count > 0, "Invalid count"); - - _count--; - - if (_count == 0) { - Atomic::store(&_owner, (Thread*)nullptr); - _lock.unlock(); - } -} - -inline bool XReentrantLock::is_owned() const { - Thread* const thread = Thread::current(); - Thread* const owner = Atomic::load(&_owner); - return owner == thread; -} - -inline void XConditionLock::lock() { - _lock.lock(); -} - -inline bool XConditionLock::try_lock() { - return _lock.try_lock(); -} - -inline void XConditionLock::unlock() { - _lock.unlock(); -} - -inline bool XConditionLock::wait(uint64_t millis) { - return _lock.wait(millis) == OS_OK; -} - -inline void XConditionLock::notify() { - _lock.notify(); -} - -inline void XConditionLock::notify_all() { - _lock.notify_all(); -} - -template -inline XLocker::XLocker(T* lock) : - _lock(lock) { - if (_lock != nullptr) { - _lock->lock(); - } -} - -template -inline XLocker::~XLocker() { - if (_lock != nullptr) { - _lock->unlock(); - } -} - -#endif // SHARE_GC_X_XLOCK_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xMark.cpp b/src/hotspot/share/gc/x/xMark.cpp deleted file mode 100644 index 016c570261552..0000000000000 --- a/src/hotspot/share/gc/x/xMark.cpp +++ /dev/null @@ -1,877 +0,0 @@ -/* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
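[Editor's note] XReentrantLock in xLock.inline.hpp above layers reentrancy on top of a plain mutex by tracking the owning thread and a recursion count; only the outermost unlock releases the underlying lock, and the owner field is read/written atomically so is_owned() can be queried without holding the lock. A stand-alone sketch of the same pattern using the C++ standard library, offered only as an illustration of the shape of the removed code:

```cpp
// Illustrative sketch only, not the HotSpot implementation.
#include <atomic>
#include <cassert>
#include <cstdint>
#include <mutex>
#include <thread>

class ReentrantLock {
private:
  std::mutex                   _lock;
  std::atomic<std::thread::id> _owner{std::thread::id()};
  uint64_t                     _count = 0;       // only touched by the owner

public:
  void lock() {
    const std::thread::id self = std::this_thread::get_id();
    if (_owner.load(std::memory_order_relaxed) != self) {
      _lock.lock();                              // first acquisition by this thread
      _owner.store(self, std::memory_order_relaxed);
    }
    _count++;                                    // nested acquisitions just count
  }

  void unlock() {
    assert(is_owned() && _count > 0);
    if (--_count == 0) {
      _owner.store(std::thread::id(), std::memory_order_relaxed);
      _lock.unlock();                            // outermost unlock releases the mutex
    }
  }

  bool is_owned() const {
    return _owner.load(std::memory_order_relaxed) == std::this_thread::get_id();
  }
};

int main() {
  ReentrantLock lock;
  lock.lock();
  lock.lock();      // re-entering from the same thread does not deadlock
  lock.unlock();
  lock.unlock();    // underlying mutex released here
}
```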
- */ - -#include "precompiled.hpp" -#include "classfile/classLoaderData.hpp" -#include "classfile/classLoaderDataGraph.hpp" -#include "classfile/javaClasses.inline.hpp" -#include "code/nmethod.hpp" -#include "gc/shared/continuationGCSupport.inline.hpp" -#include "gc/shared/gc_globals.hpp" -#include "gc/shared/stringdedup/stringDedup.hpp" -#include "gc/shared/suspendibleThreadSet.hpp" -#include "gc/x/xAbort.inline.hpp" -#include "gc/x/xBarrier.inline.hpp" -#include "gc/x/xHeap.inline.hpp" -#include "gc/x/xLock.inline.hpp" -#include "gc/x/xMark.inline.hpp" -#include "gc/x/xMarkCache.inline.hpp" -#include "gc/x/xMarkContext.inline.hpp" -#include "gc/x/xMarkStack.inline.hpp" -#include "gc/x/xMarkTerminate.inline.hpp" -#include "gc/x/xNMethod.hpp" -#include "gc/x/xOop.inline.hpp" -#include "gc/x/xPage.hpp" -#include "gc/x/xPageTable.inline.hpp" -#include "gc/x/xRootsIterator.hpp" -#include "gc/x/xStackWatermark.hpp" -#include "gc/x/xStat.hpp" -#include "gc/x/xTask.hpp" -#include "gc/x/xThread.inline.hpp" -#include "gc/x/xThreadLocalAllocBuffer.hpp" -#include "gc/x/xUtils.inline.hpp" -#include "gc/x/xWorkers.hpp" -#include "logging/log.hpp" -#include "memory/iterator.inline.hpp" -#include "oops/objArrayOop.inline.hpp" -#include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" -#include "runtime/continuation.hpp" -#include "runtime/handshake.hpp" -#include "runtime/javaThread.hpp" -#include "runtime/prefetch.inline.hpp" -#include "runtime/safepointMechanism.hpp" -#include "runtime/stackWatermark.hpp" -#include "runtime/stackWatermarkSet.inline.hpp" -#include "runtime/threads.hpp" -#include "utilities/align.hpp" -#include "utilities/globalDefinitions.hpp" -#include "utilities/powerOfTwo.hpp" -#include "utilities/ticks.hpp" - -static const XStatSubPhase XSubPhaseConcurrentMark("Concurrent Mark"); -static const XStatSubPhase XSubPhaseConcurrentMarkTryFlush("Concurrent Mark Try Flush"); -static const XStatSubPhase XSubPhaseConcurrentMarkTryTerminate("Concurrent Mark Try Terminate"); -static const XStatSubPhase XSubPhaseMarkTryComplete("Pause Mark Try Complete"); - -XMark::XMark(XWorkers* workers, XPageTable* page_table) : - _workers(workers), - _page_table(page_table), - _allocator(), - _stripes(), - _terminate(), - _work_terminateflush(true), - _work_nproactiveflush(0), - _work_nterminateflush(0), - _nproactiveflush(0), - _nterminateflush(0), - _ntrycomplete(0), - _ncontinue(0), - _nworkers(0) {} - -bool XMark::is_initialized() const { - return _allocator.is_initialized(); -} - -size_t XMark::calculate_nstripes(uint nworkers) const { - // Calculate the number of stripes from the number of workers we use, - // where the number of stripes must be a power of two and we want to - // have at least one worker per stripe. - const size_t nstripes = round_down_power_of_2(nworkers); - return MIN2(nstripes, XMarkStripesMax); -} - -void XMark::start() { - // Verification - if (ZVerifyMarking) { - verify_all_stacks_empty(); - } - - // Increment global sequence number to invalidate - // marking information for all pages. - XGlobalSeqNum++; - - // Note that we start a marking cycle. - // Unlike other GCs, the color switch implicitly changes the nmethods - // to be armed, and the thread-local disarm values are lazily updated - // when JavaThreads wake up from safepoints. 
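[Editor's note] calculate_nstripes(), a few lines earlier in xMark.cpp, picks the largest power of two that does not exceed the worker count, capped at XMarkStripesMax, so that a stripe can later be selected by masking address-derived bits. A small stand-alone sketch of that calculation and of mask-based stripe selection; the shift value and constants below are made up for illustration:

```cpp
// Illustrative sketch only, not HotSpot code.
#include <cstddef>
#include <cstdint>
#include <cstdio>

static size_t round_down_power_of_2(size_t v) {
  size_t p = 1;
  while (p * 2 <= v) {
    p *= 2;
  }
  return p;
}

static size_t calculate_nstripes(unsigned nworkers, size_t nstripes_max) {
  const size_t nstripes = round_down_power_of_2(nworkers);
  return nstripes < nstripes_max ? nstripes : nstripes_max;
}

int main() {
  const size_t nstripes = calculate_nstripes(6, 16);    // -> 4
  const size_t mask     = nstripes - 1;

  // Because nstripes is a power of two, a stripe can be chosen by masking
  // some address-derived bits (the real code shifts by XMarkStripeShift).
  const uintptr_t addr      = 0x12345678;
  const size_t    stripe_id = (addr >> 21) & mask;      // 21 is a hypothetical shift

  printf("nstripes=%zu stripe=%zu\n", nstripes, stripe_id);
}
```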
- CodeCache::on_gc_marking_cycle_start(); - - // Reset flush/continue counters - _nproactiveflush = 0; - _nterminateflush = 0; - _ntrycomplete = 0; - _ncontinue = 0; - - // Set number of workers to use - _nworkers = _workers->active_workers(); - - // Set number of mark stripes to use, based on number - // of workers we will use in the concurrent mark phase. - const size_t nstripes = calculate_nstripes(_nworkers); - _stripes.set_nstripes(nstripes); - - // Update statistics - XStatMark::set_at_mark_start(nstripes); - - // Print worker/stripe distribution - LogTarget(Debug, gc, marking) log; - if (log.is_enabled()) { - log.print("Mark Worker/Stripe Distribution"); - for (uint worker_id = 0; worker_id < _nworkers; worker_id++) { - const XMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, worker_id); - const size_t stripe_id = _stripes.stripe_id(stripe); - log.print(" Worker %u(%u) -> Stripe " SIZE_FORMAT "(" SIZE_FORMAT ")", - worker_id, _nworkers, stripe_id, nstripes); - } - } -} - -void XMark::prepare_work() { - assert(_nworkers == _workers->active_workers(), "Invalid number of workers"); - - // Set number of active workers - _terminate.reset(_nworkers); - - // Reset flush counters - _work_nproactiveflush = _work_nterminateflush = 0; - _work_terminateflush = true; -} - -void XMark::finish_work() { - // Accumulate proactive/terminate flush counters - _nproactiveflush += _work_nproactiveflush; - _nterminateflush += _work_nterminateflush; -} - -bool XMark::is_array(uintptr_t addr) const { - return XOop::from_address(addr)->is_objArray(); -} - -void XMark::push_partial_array(uintptr_t addr, size_t size, bool finalizable) { - assert(is_aligned(addr, XMarkPartialArrayMinSize), "Address misaligned"); - XMarkThreadLocalStacks* const stacks = XThreadLocalData::stacks(Thread::current()); - XMarkStripe* const stripe = _stripes.stripe_for_addr(addr); - const uintptr_t offset = XAddress::offset(addr) >> XMarkPartialArrayMinSizeShift; - const uintptr_t length = size / oopSize; - const XMarkStackEntry entry(offset, length, finalizable); - - log_develop_trace(gc, marking)("Array push partial: " PTR_FORMAT " (" SIZE_FORMAT "), stripe: " SIZE_FORMAT, - addr, size, _stripes.stripe_id(stripe)); - - stacks->push(&_allocator, &_stripes, stripe, entry, false /* publish */); -} - -void XMark::follow_small_array(uintptr_t addr, size_t size, bool finalizable) { - assert(size <= XMarkPartialArrayMinSize, "Too large, should be split"); - const size_t length = size / oopSize; - - log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (" SIZE_FORMAT ")", addr, size); - - XBarrier::mark_barrier_on_oop_array((oop*)addr, length, finalizable); -} - -void XMark::follow_large_array(uintptr_t addr, size_t size, bool finalizable) { - assert(size <= (size_t)arrayOopDesc::max_array_length(T_OBJECT) * oopSize, "Too large"); - assert(size > XMarkPartialArrayMinSize, "Too small, should not be split"); - const uintptr_t start = addr; - const uintptr_t end = start + size; - - // Calculate the aligned middle start/end/size, where the middle start - // should always be greater than the start (hence the +1 below) to make - // sure we always do some follow work, not just split the array into pieces. 
- const uintptr_t middle_start = align_up(start + 1, XMarkPartialArrayMinSize); - const size_t middle_size = align_down(end - middle_start, XMarkPartialArrayMinSize); - const uintptr_t middle_end = middle_start + middle_size; - - log_develop_trace(gc, marking)("Array follow large: " PTR_FORMAT "-" PTR_FORMAT" (" SIZE_FORMAT "), " - "middle: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT ")", - start, end, size, middle_start, middle_end, middle_size); - - // Push unaligned trailing part - if (end > middle_end) { - const uintptr_t trailing_addr = middle_end; - const size_t trailing_size = end - middle_end; - push_partial_array(trailing_addr, trailing_size, finalizable); - } - - // Push aligned middle part(s) - uintptr_t partial_addr = middle_end; - while (partial_addr > middle_start) { - const size_t parts = 2; - const size_t partial_size = align_up((partial_addr - middle_start) / parts, XMarkPartialArrayMinSize); - partial_addr -= partial_size; - push_partial_array(partial_addr, partial_size, finalizable); - } - - // Follow leading part - assert(start < middle_start, "Miscalculated middle start"); - const uintptr_t leading_addr = start; - const size_t leading_size = middle_start - start; - follow_small_array(leading_addr, leading_size, finalizable); -} - -void XMark::follow_array(uintptr_t addr, size_t size, bool finalizable) { - if (size <= XMarkPartialArrayMinSize) { - follow_small_array(addr, size, finalizable); - } else { - follow_large_array(addr, size, finalizable); - } -} - -void XMark::follow_partial_array(XMarkStackEntry entry, bool finalizable) { - const uintptr_t addr = XAddress::good(entry.partial_array_offset() << XMarkPartialArrayMinSizeShift); - const size_t size = entry.partial_array_length() * oopSize; - - follow_array(addr, size, finalizable); -} - -template -class XMarkBarrierOopClosure : public ClaimMetadataVisitingOopIterateClosure { -public: - XMarkBarrierOopClosure() : - ClaimMetadataVisitingOopIterateClosure(finalizable - ? ClassLoaderData::_claim_finalizable - : ClassLoaderData::_claim_strong, - finalizable - ? nullptr - : XHeap::heap()->reference_discoverer()) {} - - virtual void do_oop(oop* p) { - XBarrier::mark_barrier_on_oop_field(p, finalizable); - } - - virtual void do_oop(narrowOop* p) { - ShouldNotReachHere(); - } - - virtual void do_nmethod(nmethod* nm) { - assert(!finalizable, "Can't handle finalizable marking of nmethods"); - nm->run_nmethod_entry_barrier(); - } -}; - -void XMark::follow_array_object(objArrayOop obj, bool finalizable) { - if (finalizable) { - XMarkBarrierOopClosure cl; - cl.do_klass(obj->klass()); - } else { - XMarkBarrierOopClosure cl; - cl.do_klass(obj->klass()); - } - - const uintptr_t addr = (uintptr_t)obj->base(); - const size_t size = (size_t)obj->length() * oopSize; - - follow_array(addr, size, finalizable); -} - -void XMark::follow_object(oop obj, bool finalizable) { - if (ContinuationGCSupport::relativize_stack_chunk(obj)) { - // Loom doesn't support mixing of finalizable marking and strong marking of - // stack chunks. See: RelativizeDerivedOopClosure. 
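[Editor's note] follow_large_array() above is the interesting case of the array handling: rather than scanning a huge objArray in one go, it aligns a middle region to XMarkPartialArrayMinSize, pushes the unaligned tail, repeatedly halves the aligned middle into partial-array work items that other workers can steal, and only scans the small unaligned leading part directly. A simplified stand-alone sketch of that splitting; the alignment constant and the push/scan helpers are stand-ins:

```cpp
// Illustrative sketch only, not HotSpot code: split a large [start, start+size)
// range the same way follow_large_array() does.
#include <cstddef>
#include <cstdint>
#include <cstdio>

static const size_t kPartialMinSize = 4096;   // stand-in for XMarkPartialArrayMinSize

static uintptr_t align_up(uintptr_t v, size_t a)   { return (v + a - 1) & ~(uintptr_t)(a - 1); }
static uintptr_t align_down(uintptr_t v, size_t a) { return v & ~(uintptr_t)(a - 1); }

static void push_partial(uintptr_t addr, size_t size) {   // stand-in for the mark-stack push
  printf("push partial: [%#llx, %#llx) %zu bytes\n",
         (unsigned long long)addr, (unsigned long long)(addr + size), size);
}

static void follow_small(uintptr_t addr, size_t size) {   // stand-in for direct scanning
  printf("scan leading: [%#llx, %#llx) %zu bytes\n",
         (unsigned long long)addr, (unsigned long long)(addr + size), size);
}

static void follow_large(uintptr_t start, size_t size) {
  const uintptr_t end          = start + size;
  // "+ 1" guarantees middle_start > start, so some scanning is always done here.
  const uintptr_t middle_start = align_up(start + 1, kPartialMinSize);
  const size_t    middle_size  = align_down(end - middle_start, kPartialMinSize);
  const uintptr_t middle_end   = middle_start + middle_size;

  if (end > middle_end) {                       // unaligned trailing part
    push_partial(middle_end, end - middle_end);
  }

  uintptr_t partial = middle_end;               // halve the aligned middle into work items
  while (partial > middle_start) {
    const size_t part = align_up((partial - middle_start) / 2, kPartialMinSize);
    partial -= part;
    push_partial(partial, part);
  }

  follow_small(start, middle_start - start);    // scan the unaligned leading part now
}

int main() {
  follow_large(0x100010, 0x9000);               // arbitrary example range
}
```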
- XMarkBarrierOopClosure cl; - obj->oop_iterate(&cl); - return; - } - - if (finalizable) { - XMarkBarrierOopClosure cl; - obj->oop_iterate(&cl); - } else { - XMarkBarrierOopClosure cl; - obj->oop_iterate(&cl); - } -} - -static void try_deduplicate(XMarkContext* context, oop obj) { - if (!StringDedup::is_enabled()) { - // Not enabled - return; - } - - if (!java_lang_String::is_instance(obj)) { - // Not a String object - return; - } - - if (java_lang_String::test_and_set_deduplication_requested(obj)) { - // Already requested deduplication - return; - } - - // Request deduplication - context->string_dedup_requests()->add(obj); -} - -void XMark::mark_and_follow(XMarkContext* context, XMarkStackEntry entry) { - // Decode flags - const bool finalizable = entry.finalizable(); - const bool partial_array = entry.partial_array(); - - if (partial_array) { - follow_partial_array(entry, finalizable); - return; - } - - // Decode object address and additional flags - const uintptr_t addr = entry.object_address(); - const bool mark = entry.mark(); - bool inc_live = entry.inc_live(); - const bool follow = entry.follow(); - - XPage* const page = _page_table->get(addr); - assert(page->is_relocatable(), "Invalid page state"); - - // Mark - if (mark && !page->mark_object(addr, finalizable, inc_live)) { - // Already marked - return; - } - - // Increment live - if (inc_live) { - // Update live objects/bytes for page. We use the aligned object - // size since that is the actual number of bytes used on the page - // and alignment paddings can never be reclaimed. - const size_t size = XUtils::object_size(addr); - const size_t aligned_size = align_up(size, page->object_alignment()); - context->cache()->inc_live(page, aligned_size); - } - - // Follow - if (follow) { - if (is_array(addr)) { - follow_array_object(objArrayOop(XOop::from_address(addr)), finalizable); - } else { - const oop obj = XOop::from_address(addr); - follow_object(obj, finalizable); - - if (!finalizable) { - // Try deduplicate - try_deduplicate(context, obj); - } - } - } -} - -template -bool XMark::drain(XMarkContext* context, T* timeout) { - XMarkStripe* const stripe = context->stripe(); - XMarkThreadLocalStacks* const stacks = context->stacks(); - XMarkStackEntry entry; - - // Drain stripe stacks - while (stacks->pop(&_allocator, &_stripes, stripe, entry)) { - mark_and_follow(context, entry); - - // Check timeout - if (timeout->has_expired()) { - // Timeout - return false; - } - } - - // Success - return !timeout->has_expired(); -} - -bool XMark::try_steal_local(XMarkContext* context) { - XMarkStripe* const stripe = context->stripe(); - XMarkThreadLocalStacks* const stacks = context->stacks(); - - // Try to steal a local stack from another stripe - for (XMarkStripe* victim_stripe = _stripes.stripe_next(stripe); - victim_stripe != stripe; - victim_stripe = _stripes.stripe_next(victim_stripe)) { - XMarkStack* const stack = stacks->steal(&_stripes, victim_stripe); - if (stack != nullptr) { - // Success, install the stolen stack - stacks->install(&_stripes, stripe, stack); - return true; - } - } - - // Nothing to steal - return false; -} - -bool XMark::try_steal_global(XMarkContext* context) { - XMarkStripe* const stripe = context->stripe(); - XMarkThreadLocalStacks* const stacks = context->stacks(); - - // Try to steal a stack from another stripe - for (XMarkStripe* victim_stripe = _stripes.stripe_next(stripe); - victim_stripe != stripe; - victim_stripe = _stripes.stripe_next(victim_stripe)) { - XMarkStack* const stack = 
victim_stripe->steal_stack(); - if (stack != nullptr) { - // Success, install the stolen stack - stacks->install(&_stripes, stripe, stack); - return true; - } - } - - // Nothing to steal - return false; -} - -bool XMark::try_steal(XMarkContext* context) { - return try_steal_local(context) || try_steal_global(context); -} - -void XMark::idle() const { - os::naked_short_sleep(1); -} - -class XMarkFlushAndFreeStacksClosure : public HandshakeClosure { -private: - XMark* const _mark; - bool _flushed; - -public: - XMarkFlushAndFreeStacksClosure(XMark* mark) : - HandshakeClosure("XMarkFlushAndFreeStacks"), - _mark(mark), - _flushed(false) {} - - void do_thread(Thread* thread) { - if (_mark->flush_and_free(thread)) { - _flushed = true; - } - } - - bool flushed() const { - return _flushed; - } -}; - -bool XMark::flush(bool at_safepoint) { - XMarkFlushAndFreeStacksClosure cl(this); - if (at_safepoint) { - Threads::threads_do(&cl); - } else { - Handshake::execute(&cl); - } - - // Returns true if more work is available - return cl.flushed() || !_stripes.is_empty(); -} - -bool XMark::try_flush(volatile size_t* nflush) { - Atomic::inc(nflush); - - XStatTimer timer(XSubPhaseConcurrentMarkTryFlush); - return flush(false /* at_safepoint */); -} - -bool XMark::try_proactive_flush() { - // Only do proactive flushes from worker 0 - if (XThread::worker_id() != 0) { - return false; - } - - if (Atomic::load(&_work_nproactiveflush) == XMarkProactiveFlushMax || - Atomic::load(&_work_nterminateflush) != 0) { - // Limit reached or we're trying to terminate - return false; - } - - return try_flush(&_work_nproactiveflush); -} - -bool XMark::try_terminate() { - XStatTimer timer(XSubPhaseConcurrentMarkTryTerminate); - - if (_terminate.enter_stage0()) { - // Last thread entered stage 0, flush - if (Atomic::load(&_work_terminateflush) && - Atomic::load(&_work_nterminateflush) != XMarkTerminateFlushMax) { - // Exit stage 0 to allow other threads to continue marking - _terminate.exit_stage0(); - - // Flush before termination - if (!try_flush(&_work_nterminateflush)) { - // No more work available, skip further flush attempts - Atomic::store(&_work_terminateflush, false); - } - - // Don't terminate, regardless of whether we successfully - // flushed out more work or not. We've already exited - // termination stage 0, to allow other threads to continue - // marking, so this thread has to return false and also - // make another round of attempted marking. - return false; - } - } - - for (;;) { - if (_terminate.enter_stage1()) { - // Last thread entered stage 1, terminate - return true; - } - - // Idle to give the other threads - // a chance to enter termination. 
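[Editor's note] try_steal_local() and try_steal_global() above walk every other stripe in ring order, starting at the stripe after the worker's own and stopping when the walk gets back to it, installing the first stack they find. A compact stand-alone sketch of that ring walk; the Stripe type, its contents, and the int payload are stand-ins for the real mark-stack lists:

```cpp
// Illustrative sketch only, not HotSpot code.
#include <cstddef>
#include <cstdio>
#include <vector>

struct Stripe {
  std::vector<int> stacks;          // stand-in for the published mark-stack list

  bool steal_stack(int& out) {      // take one published stack, if any
    if (stacks.empty()) {
      return false;
    }
    out = stacks.back();
    stacks.pop_back();
    return true;
  }
};

// Ring-successor of a stripe, as in XMarkStripeSet::stripe_next().
static Stripe* stripe_next(std::vector<Stripe>& stripes, Stripe* s) {
  const size_t i = (size_t)(s - stripes.data());
  return &stripes[(i + 1) & (stripes.size() - 1)];   // stripe count is a power of two
}

static bool try_steal(std::vector<Stripe>& stripes, Stripe* mine, int& out) {
  for (Stripe* victim = stripe_next(stripes, mine);
       victim != mine;
       victim = stripe_next(stripes, victim)) {
    if (victim->steal_stack(out)) {
      return true;                  // the real code installs the stolen stack locally
    }
  }
  return false;                     // nothing published on any other stripe
}

int main() {
  std::vector<Stripe> stripes(4);   // power-of-two stripe count
  stripes[2].stacks.push_back(42);

  int stolen = -1;
  const bool ok = try_steal(stripes, &stripes[0], stolen);
  printf("%s %d\n", ok ? "stole" : "nothing", stolen);   // "stole 42"
}
```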
- idle(); - - if (!_terminate.try_exit_stage1()) { - // All workers in stage 1, terminate - return true; - } - - if (_terminate.try_exit_stage0()) { - // More work available, don't terminate - return false; - } - } -} - -class XMarkNoTimeout : public StackObj { -public: - bool has_expired() { - // No timeout, but check for signal to abort - return XAbort::should_abort(); - } -}; - -void XMark::work_without_timeout(XMarkContext* context) { - XStatTimer timer(XSubPhaseConcurrentMark); - XMarkNoTimeout no_timeout; - - for (;;) { - if (!drain(context, &no_timeout)) { - // Abort - break; - } - - if (try_steal(context)) { - // Stole work - continue; - } - - if (try_proactive_flush()) { - // Work available - continue; - } - - if (try_terminate()) { - // Terminate - break; - } - } -} - -class XMarkTimeout : public StackObj { -private: - const Ticks _start; - const uint64_t _timeout; - const uint64_t _check_interval; - uint64_t _check_at; - uint64_t _check_count; - bool _expired; - -public: - XMarkTimeout(uint64_t timeout_in_micros) : - _start(Ticks::now()), - _timeout(_start.value() + TimeHelper::micros_to_counter(timeout_in_micros)), - _check_interval(200), - _check_at(_check_interval), - _check_count(0), - _expired(false) {} - - ~XMarkTimeout() { - const Tickspan duration = Ticks::now() - _start; - log_debug(gc, marking)("Mark With Timeout (%s): %s, " UINT64_FORMAT " oops, %.3fms", - XThread::name(), _expired ? "Expired" : "Completed", - _check_count, TimeHelper::counter_to_millis(duration.value())); - } - - bool has_expired() { - if (++_check_count == _check_at) { - _check_at += _check_interval; - if ((uint64_t)Ticks::now().value() >= _timeout) { - // Timeout - _expired = true; - } - } - - return _expired; - } -}; - -void XMark::work_with_timeout(XMarkContext* context, uint64_t timeout_in_micros) { - XStatTimer timer(XSubPhaseMarkTryComplete); - XMarkTimeout timeout(timeout_in_micros); - - for (;;) { - if (!drain(context, &timeout)) { - // Timed out - break; - } - - if (try_steal(context)) { - // Stole work - continue; - } - - // Terminate - break; - } -} - -void XMark::work(uint64_t timeout_in_micros) { - XMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, XThread::worker_id()); - XMarkThreadLocalStacks* const stacks = XThreadLocalData::stacks(Thread::current()); - XMarkContext context(_stripes.nstripes(), stripe, stacks); - - if (timeout_in_micros == 0) { - work_without_timeout(&context); - } else { - work_with_timeout(&context, timeout_in_micros); - } - - // Flush and publish stacks - stacks->flush(&_allocator, &_stripes); - - // Free remaining stacks - stacks->free(&_allocator); -} - -class XMarkOopClosure : public OopClosure { - virtual void do_oop(oop* p) { - XBarrier::mark_barrier_on_oop_field(p, false /* finalizable */); - } - - virtual void do_oop(narrowOop* p) { - ShouldNotReachHere(); - } -}; - -class XMarkThreadClosure : public ThreadClosure { -private: - OopClosure* const _cl; - -public: - XMarkThreadClosure(OopClosure* cl) : - _cl(cl) { - XThreadLocalAllocBuffer::reset_statistics(); - } - ~XMarkThreadClosure() { - XThreadLocalAllocBuffer::publish_statistics(); - } - virtual void do_thread(Thread* thread) { - JavaThread* const jt = JavaThread::cast(thread); - StackWatermarkSet::finish_processing(jt, _cl, StackWatermarkKind::gc); - XThreadLocalAllocBuffer::update_stats(jt); - } -}; - -class XMarkNMethodClosure : public NMethodClosure { -private: - OopClosure* const _cl; - -public: - XMarkNMethodClosure(OopClosure* cl) : - _cl(cl) {} - - virtual void do_nmethod(nmethod* 
nm) { - XLocker locker(XNMethod::lock_for_nmethod(nm)); - if (XNMethod::is_armed(nm)) { - XNMethod::nmethod_oops_do_inner(nm, _cl); - - // CodeCache unloading support - nm->mark_as_maybe_on_stack(); - - XNMethod::disarm(nm); - } - } -}; - -typedef ClaimingCLDToOopClosure XMarkCLDClosure; - -class XMarkRootsTask : public XTask { -private: - XMark* const _mark; - SuspendibleThreadSetJoiner _sts_joiner; - XRootsIterator _roots; - - XMarkOopClosure _cl; - XMarkCLDClosure _cld_cl; - XMarkThreadClosure _thread_cl; - XMarkNMethodClosure _nm_cl; - -public: - XMarkRootsTask(XMark* mark) : - XTask("XMarkRootsTask"), - _mark(mark), - _sts_joiner(), - _roots(ClassLoaderData::_claim_strong), - _cl(), - _cld_cl(&_cl), - _thread_cl(&_cl), - _nm_cl(&_cl) { - ClassLoaderDataGraph_lock->lock(); - } - - ~XMarkRootsTask() { - ClassLoaderDataGraph_lock->unlock(); - } - - virtual void work() { - _roots.apply(&_cl, - &_cld_cl, - &_thread_cl, - &_nm_cl); - - // Flush and free worker stacks. Needed here since - // the set of workers executing during root scanning - // can be different from the set of workers executing - // during mark. - _mark->flush_and_free(); - } -}; - -class XMarkTask : public XTask { -private: - XMark* const _mark; - const uint64_t _timeout_in_micros; - -public: - XMarkTask(XMark* mark, uint64_t timeout_in_micros = 0) : - XTask("XMarkTask"), - _mark(mark), - _timeout_in_micros(timeout_in_micros) { - _mark->prepare_work(); - } - - ~XMarkTask() { - _mark->finish_work(); - } - - virtual void work() { - _mark->work(_timeout_in_micros); - } -}; - -void XMark::mark(bool initial) { - if (initial) { - XMarkRootsTask task(this); - _workers->run(&task); - } - - XMarkTask task(this); - _workers->run(&task); -} - -bool XMark::try_complete() { - _ntrycomplete++; - - // Use nconcurrent number of worker threads to maintain the - // worker/stripe distribution used during concurrent mark. - XMarkTask task(this, XMarkCompleteTimeout); - _workers->run(&task); - - // Successful if all stripes are empty - return _stripes.is_empty(); -} - -bool XMark::try_end() { - // Flush all mark stacks - if (!flush(true /* at_safepoint */)) { - // Mark completed - return true; - } - - // Try complete marking by doing a limited - // amount of mark work in this phase. - return try_complete(); -} - -bool XMark::end() { - // Try end marking - if (!try_end()) { - // Mark not completed - _ncontinue++; - return false; - } - - // Verification - if (ZVerifyMarking) { - verify_all_stacks_empty(); - } - - // Update statistics - XStatMark::set_at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue); - - // Note that we finished a marking cycle. - // Unlike other GCs, we do not arm the nmethods - // when marking terminates. 
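[Editor's note] The shape of the concurrent phase is captured by work_without_timeout() above: drain the worker's own stripe, then try to steal from other stripes, then try a proactive flush of thread-local stacks, and only terminate once none of those produced more work (work_with_timeout() is the same loop with a deadline check instead of the flush/terminate steps). A toy skeleton of that control flow, with all of the real machinery replaced by stand-ins:

```cpp
// Illustrative skeleton only, not HotSpot code. The four helpers are
// stand-ins for drain(), try_steal(), try_proactive_flush() and try_terminate().
#include <cstdio>

struct Worker {
  int pending   = 3;               // pretend work items on our own stripe
  int stealable = 1;               // pretend work on other stripes

  bool drain()               { while (pending > 0) { pending--; } return true; }
  bool try_steal()           { if (stealable > 0) { stealable--; pending++; return true; } return false; }
  bool try_proactive_flush() { return false; }    // nothing flushed in this toy example
  bool try_terminate()       { return true; }     // everyone idle -> terminate

  void run() {
    for (;;) {
      if (!drain()) {              // aborted (or timed out in the bounded variant)
        break;
      }
      if (try_steal()) {           // found work on another stripe, drain again
        continue;
      }
      if (try_proactive_flush()) { // flushed thread-local stacks, more work appeared
        continue;
      }
      if (try_terminate()) {       // all workers agree there is nothing left
        break;
      }
    }
  }
};

int main() {
  Worker w;
  w.run();
  printf("done, pending=%d stealable=%d\n", w.pending, w.stealable);
}
```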
- CodeCache::on_gc_marking_cycle_finish(); - - // Mark completed - return true; -} - -void XMark::free() { - // Free any unused mark stack space - _allocator.free(); - - // Update statistics - XStatMark::set_at_mark_free(_allocator.size()); -} - -void XMark::flush_and_free() { - Thread* const thread = Thread::current(); - flush_and_free(thread); -} - -bool XMark::flush_and_free(Thread* thread) { - XMarkThreadLocalStacks* const stacks = XThreadLocalData::stacks(thread); - const bool flushed = stacks->flush(&_allocator, &_stripes); - stacks->free(&_allocator); - return flushed; -} - -class XVerifyMarkStacksEmptyClosure : public ThreadClosure { -private: - const XMarkStripeSet* const _stripes; - -public: - XVerifyMarkStacksEmptyClosure(const XMarkStripeSet* stripes) : - _stripes(stripes) {} - - void do_thread(Thread* thread) { - XMarkThreadLocalStacks* const stacks = XThreadLocalData::stacks(thread); - guarantee(stacks->is_empty(_stripes), "Should be empty"); - } -}; - -void XMark::verify_all_stacks_empty() const { - // Verify thread stacks - XVerifyMarkStacksEmptyClosure cl(&_stripes); - Threads::threads_do(&cl); - - // Verify stripe stacks - guarantee(_stripes.is_empty(), "Should be empty"); -} diff --git a/src/hotspot/share/gc/x/xMark.hpp b/src/hotspot/share/gc/x/xMark.hpp deleted file mode 100644 index 5e40b79f02e3e..0000000000000 --- a/src/hotspot/share/gc/x/xMark.hpp +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XMARK_HPP -#define SHARE_GC_X_XMARK_HPP - -#include "gc/x/xMarkStack.hpp" -#include "gc/x/xMarkStackAllocator.hpp" -#include "gc/x/xMarkStackEntry.hpp" -#include "gc/x/xMarkTerminate.hpp" -#include "oops/oopsHierarchy.hpp" -#include "utilities/globalDefinitions.hpp" - -class Thread; -class XMarkContext; -class XPageTable; -class XWorkers; - -class XMark { - friend class XMarkTask; - -private: - XWorkers* const _workers; - XPageTable* const _page_table; - XMarkStackAllocator _allocator; - XMarkStripeSet _stripes; - XMarkTerminate _terminate; - volatile bool _work_terminateflush; - volatile size_t _work_nproactiveflush; - volatile size_t _work_nterminateflush; - size_t _nproactiveflush; - size_t _nterminateflush; - size_t _ntrycomplete; - size_t _ncontinue; - uint _nworkers; - - size_t calculate_nstripes(uint nworkers) const; - - bool is_array(uintptr_t addr) const; - void push_partial_array(uintptr_t addr, size_t size, bool finalizable); - void follow_small_array(uintptr_t addr, size_t size, bool finalizable); - void follow_large_array(uintptr_t addr, size_t size, bool finalizable); - void follow_array(uintptr_t addr, size_t size, bool finalizable); - void follow_partial_array(XMarkStackEntry entry, bool finalizable); - void follow_array_object(objArrayOop obj, bool finalizable); - void follow_object(oop obj, bool finalizable); - void mark_and_follow(XMarkContext* context, XMarkStackEntry entry); - - template bool drain(XMarkContext* context, T* timeout); - bool try_steal_local(XMarkContext* context); - bool try_steal_global(XMarkContext* context); - bool try_steal(XMarkContext* context); - void idle() const; - bool flush(bool at_safepoint); - bool try_proactive_flush(); - bool try_flush(volatile size_t* nflush); - bool try_terminate(); - bool try_complete(); - bool try_end(); - - void prepare_work(); - void finish_work(); - - void work_without_timeout(XMarkContext* context); - void work_with_timeout(XMarkContext* context, uint64_t timeout_in_micros); - void work(uint64_t timeout_in_micros); - - void verify_all_stacks_empty() const; - -public: - XMark(XWorkers* workers, XPageTable* page_table); - - bool is_initialized() const; - - template void mark_object(uintptr_t addr); - - void start(); - void mark(bool initial); - bool end(); - void free(); - - void flush_and_free(); - bool flush_and_free(Thread* thread); -}; - -#endif // SHARE_GC_X_XMARK_HPP diff --git a/src/hotspot/share/gc/x/xMark.inline.hpp b/src/hotspot/share/gc/x/xMark.inline.hpp deleted file mode 100644 index 1f8fc81f525c3..0000000000000 --- a/src/hotspot/share/gc/x/xMark.inline.hpp +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XMARK_INLINE_HPP -#define SHARE_GC_X_XMARK_INLINE_HPP - -#include "gc/x/xMark.hpp" - -#include "gc/x/xAddress.inline.hpp" -#include "gc/x/xMarkStack.inline.hpp" -#include "gc/x/xPage.inline.hpp" -#include "gc/x/xPageTable.inline.hpp" -#include "gc/x/xThreadLocalData.hpp" -#include "runtime/javaThread.hpp" -#include "utilities/debug.hpp" - -// Marking before pushing helps reduce mark stack memory usage. However, -// we only mark before pushing in GC threads to avoid burdening Java threads -// with writing to, and potentially first having to clear, mark bitmaps. -// -// It's also worth noting that while marking an object can be done at any -// time in the marking phase, following an object can only be done after -// root processing has called ClassLoaderDataGraph::clear_claimed_marks(), -// since it otherwise would interact badly with claiming of CLDs. - -template -inline void XMark::mark_object(uintptr_t addr) { - assert(XAddress::is_marked(addr), "Should be marked"); - - XPage* const page = _page_table->get(addr); - if (page->is_allocating()) { - // Already implicitly marked - return; - } - - const bool mark_before_push = gc_thread; - bool inc_live = false; - - if (mark_before_push) { - // Try mark object - if (!page->mark_object(addr, finalizable, inc_live)) { - // Already marked - return; - } - } else { - // Don't push if already marked - if (page->is_object_marked(addr)) { - // Already marked - return; - } - } - - // Push - XMarkThreadLocalStacks* const stacks = XThreadLocalData::stacks(Thread::current()); - XMarkStripe* const stripe = _stripes.stripe_for_addr(addr); - XMarkStackEntry entry(addr, !mark_before_push, inc_live, follow, finalizable); - stacks->push(&_allocator, &_stripes, stripe, entry, publish); -} - -#endif // SHARE_GC_X_XMARK_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xMarkCache.cpp b/src/hotspot/share/gc/x/xMarkCache.cpp deleted file mode 100644 index c7e580ed88334..0000000000000 --- a/src/hotspot/share/gc/x/xMarkCache.cpp +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/x/xMarkCache.inline.hpp" -#include "utilities/globalDefinitions.hpp" -#include "utilities/powerOfTwo.hpp" - -XMarkCacheEntry::XMarkCacheEntry() : - _page(nullptr), - _objects(0), - _bytes(0) {} - -XMarkCache::XMarkCache(size_t nstripes) : - _shift(XMarkStripeShift + exact_log2(nstripes)) {} - -XMarkCache::~XMarkCache() { - // Evict all entries - for (size_t i = 0; i < XMarkCacheSize; i++) { - _cache[i].evict(); - } -} diff --git a/src/hotspot/share/gc/x/xMarkCache.hpp b/src/hotspot/share/gc/x/xMarkCache.hpp deleted file mode 100644 index 8fbdc87352237..0000000000000 --- a/src/hotspot/share/gc/x/xMarkCache.hpp +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XMARKCACHE_HPP -#define SHARE_GC_X_XMARKCACHE_HPP - -#include "gc/x/xGlobals.hpp" -#include "memory/allocation.hpp" - -class XPage; - -class XMarkCacheEntry { -private: - XPage* _page; - uint32_t _objects; - size_t _bytes; - -public: - XMarkCacheEntry(); - - void inc_live(XPage* page, size_t bytes); - void evict(); -}; - -class XMarkCache : public StackObj { -private: - const size_t _shift; - XMarkCacheEntry _cache[XMarkCacheSize]; - -public: - XMarkCache(size_t nstripes); - ~XMarkCache(); - - void inc_live(XPage* page, size_t bytes); -}; - -#endif // SHARE_GC_X_XMARKCACHE_HPP diff --git a/src/hotspot/share/gc/x/xMarkCache.inline.hpp b/src/hotspot/share/gc/x/xMarkCache.inline.hpp deleted file mode 100644 index 27dd1b9333986..0000000000000 --- a/src/hotspot/share/gc/x/xMarkCache.inline.hpp +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XMARKCACHE_INLINE_HPP -#define SHARE_GC_X_XMARKCACHE_INLINE_HPP - -#include "gc/x/xMarkCache.hpp" - -#include "gc/x/xPage.inline.hpp" - -inline void XMarkCacheEntry::inc_live(XPage* page, size_t bytes) { - if (_page == page) { - // Cache hit - _objects++; - _bytes += bytes; - } else { - // Cache miss - evict(); - _page = page; - _objects = 1; - _bytes = bytes; - } -} - -inline void XMarkCacheEntry::evict() { - if (_page != nullptr) { - // Write cached data out to page - _page->inc_live(_objects, _bytes); - _page = nullptr; - } -} - -inline void XMarkCache::inc_live(XPage* page, size_t bytes) { - const size_t mask = XMarkCacheSize - 1; - const size_t index = (page->start() >> _shift) & mask; - _cache[index].inc_live(page, bytes); -} - -#endif // SHARE_GC_X_XMARKCACHE_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xMarkContext.hpp b/src/hotspot/share/gc/x/xMarkContext.hpp deleted file mode 100644 index 246822931b7b1..0000000000000 --- a/src/hotspot/share/gc/x/xMarkContext.hpp +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XMARKCONTEXT_HPP -#define SHARE_GC_X_XMARKCONTEXT_HPP - -#include "gc/x/xMarkCache.hpp" -#include "gc/shared/stringdedup/stringDedup.hpp" -#include "memory/allocation.hpp" - -class XMarkStripe; -class XMarkThreadLocalStacks; - -class XMarkContext : public StackObj { -private: - XMarkCache _cache; - XMarkStripe* const _stripe; - XMarkThreadLocalStacks* const _stacks; - StringDedup::Requests _string_dedup_requests; - -public: - XMarkContext(size_t nstripes, - XMarkStripe* stripe, - XMarkThreadLocalStacks* stacks); - - XMarkCache* cache(); - XMarkStripe* stripe(); - XMarkThreadLocalStacks* stacks(); - StringDedup::Requests* string_dedup_requests(); -}; - -#endif // SHARE_GC_X_XMARKCONTEXT_HPP diff --git a/src/hotspot/share/gc/x/xMarkContext.inline.hpp b/src/hotspot/share/gc/x/xMarkContext.inline.hpp deleted file mode 100644 index 74a182b67c33a..0000000000000 --- a/src/hotspot/share/gc/x/xMarkContext.inline.hpp +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
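[Editor's note] XMarkCache, in xMarkCache.inline.hpp above, is a small direct-mapped cache keyed by page address: live-object and live-byte increments accumulate in a cache entry and are only written back to the page (atomically, in the real code) when the entry is evicted by a conflicting page or when the cache is destroyed at the end of a drain. A stand-alone sketch of the same idea; the sizes, shift, and Page type are invented for illustration:

```cpp
// Illustrative sketch only, not HotSpot code.
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct Page {
  uintptr_t start;
  uint32_t  live_objects = 0;
  size_t    live_bytes   = 0;

  void inc_live(uint32_t objects, size_t bytes) {   // atomic in the real code
    live_objects += objects;
    live_bytes   += bytes;
  }
};

class MarkCache {
  static const size_t kSize  = 4;                   // must be a power of two
  static const size_t kShift = 21;                  // hypothetical page-size shift

  struct Entry {
    Page*    page    = nullptr;
    uint32_t objects = 0;
    size_t   bytes   = 0;

    void evict() {
      if (page != nullptr) {
        page->inc_live(objects, bytes);             // write the batched counts out
        page = nullptr;
      }
    }

    void inc_live(Page* p, size_t b) {
      if (page != p) {                              // cache miss: flush the old entry
        evict();
        page    = p;
        objects = 0;
        bytes   = 0;
      }
      objects++;
      bytes += b;
    }
  };

  Entry _cache[kSize];

public:
  ~MarkCache() {
    for (Entry& e : _cache) {                       // flush everything at the end
      e.evict();
    }
  }

  void inc_live(Page* page, size_t bytes) {
    const size_t index = (page->start >> kShift) & (kSize - 1);
    _cache[index].inc_live(page, bytes);
  }
};

int main() {
  Page p{(uintptr_t)0x200000};
  {
    MarkCache cache;
    cache.inc_live(&p, 64);
    cache.inc_live(&p, 32);                         // same entry, page not touched yet
  }                                                 // destructor evicts: page updated once
  printf("objects=%u bytes=%zu\n", p.live_objects, p.live_bytes);   // 2, 96
}
```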
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XMARKCONTEXT_INLINE_HPP -#define SHARE_GC_X_XMARKCONTEXT_INLINE_HPP - -#include "gc/x/xMarkContext.hpp" - -inline XMarkContext::XMarkContext(size_t nstripes, - XMarkStripe* stripe, - XMarkThreadLocalStacks* stacks) : - _cache(nstripes), - _stripe(stripe), - _stacks(stacks), - _string_dedup_requests() {} - -inline XMarkCache* XMarkContext::cache() { - return &_cache; -} - -inline XMarkStripe* XMarkContext::stripe() { - return _stripe; -} - -inline XMarkThreadLocalStacks* XMarkContext::stacks() { - return _stacks; -} - -inline StringDedup::Requests* XMarkContext::string_dedup_requests() { - return &_string_dedup_requests; -} - -#endif // SHARE_GC_X_XMARKCACHE_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xMarkStack.cpp b/src/hotspot/share/gc/x/xMarkStack.cpp deleted file mode 100644 index 6f7619c9a3572..0000000000000 --- a/src/hotspot/share/gc/x/xMarkStack.cpp +++ /dev/null @@ -1,226 +0,0 @@ -/* - * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/x/xMarkStack.inline.hpp" -#include "gc/x/xMarkStackAllocator.hpp" -#include "logging/log.hpp" -#include "utilities/debug.hpp" -#include "utilities/powerOfTwo.hpp" - -XMarkStripe::XMarkStripe() : - _published(), - _overflowed() {} - -XMarkStripeSet::XMarkStripeSet() : - _nstripes(0), - _nstripes_mask(0), - _stripes() {} - -void XMarkStripeSet::set_nstripes(size_t nstripes) { - assert(is_power_of_2(nstripes), "Must be a power of two"); - assert(is_power_of_2(XMarkStripesMax), "Must be a power of two"); - assert(nstripes >= 1, "Invalid number of stripes"); - assert(nstripes <= XMarkStripesMax, "Invalid number of stripes"); - - _nstripes = nstripes; - _nstripes_mask = nstripes - 1; - - log_debug(gc, marking)("Using " SIZE_FORMAT " mark stripes", _nstripes); -} - -bool XMarkStripeSet::is_empty() const { - for (size_t i = 0; i < _nstripes; i++) { - if (!_stripes[i].is_empty()) { - return false; - } - } - - return true; -} - -XMarkStripe* XMarkStripeSet::stripe_for_worker(uint nworkers, uint worker_id) { - const size_t spillover_limit = (nworkers / _nstripes) * _nstripes; - size_t index; - - if (worker_id < spillover_limit) { - // Not a spillover worker, use natural stripe - index = worker_id & _nstripes_mask; - } else { - // Distribute spillover workers evenly across stripes - const size_t spillover_nworkers = nworkers - spillover_limit; - const size_t spillover_worker_id = worker_id - spillover_limit; - const double spillover_chunk = (double)_nstripes / (double)spillover_nworkers; - index = spillover_worker_id * spillover_chunk; - } - - assert(index < _nstripes, "Invalid index"); - return &_stripes[index]; -} - -XMarkThreadLocalStacks::XMarkThreadLocalStacks() : - _magazine(nullptr) { - for (size_t i = 0; i < XMarkStripesMax; i++) { - _stacks[i] = nullptr; - } -} - -bool XMarkThreadLocalStacks::is_empty(const XMarkStripeSet* stripes) const { - for (size_t i = 0; i < stripes->nstripes(); i++) { - XMarkStack* const stack = _stacks[i]; - if (stack != nullptr) { - return false; - } - } - - return true; -} - -XMarkStack* XMarkThreadLocalStacks::allocate_stack(XMarkStackAllocator* allocator) { - if (_magazine == nullptr) { - // Allocate new magazine - _magazine = allocator->alloc_magazine(); - if (_magazine == nullptr) { - return nullptr; - } - } - - XMarkStack* stack = nullptr; - - if (!_magazine->pop(stack)) { - // Magazine is empty, convert magazine into a new stack - _magazine->~XMarkStackMagazine(); - stack = new ((void*)_magazine) XMarkStack(); - _magazine = nullptr; - } - - return stack; -} - -void XMarkThreadLocalStacks::free_stack(XMarkStackAllocator* allocator, XMarkStack* stack) { - for (;;) { - if (_magazine == nullptr) { - // Convert stack into a new magazine - stack->~XMarkStack(); - _magazine = new ((void*)stack) XMarkStackMagazine(); - return; - } - - if (_magazine->push(stack)) { - // Success - return; - } - - // Free and uninstall full magazine - allocator->free_magazine(_magazine); - _magazine = nullptr; - } -} - -bool XMarkThreadLocalStacks::push_slow(XMarkStackAllocator* allocator, - XMarkStripe* stripe, - XMarkStack** stackp, - XMarkStackEntry entry, - bool publish) { - XMarkStack* stack = *stackp; - - for (;;) { - if (stack == nullptr) { - // Allocate and install new stack - *stackp = stack = allocate_stack(allocator); - if (stack == nullptr) { - // Out of mark stack memory - return false; - } - } - - if (stack->push(entry)) { - // Success - return true; - } - - // Publish/Overflow and uninstall stack - 
stripe->publish_stack(stack, publish); - *stackp = stack = nullptr; - } -} - -bool XMarkThreadLocalStacks::pop_slow(XMarkStackAllocator* allocator, - XMarkStripe* stripe, - XMarkStack** stackp, - XMarkStackEntry& entry) { - XMarkStack* stack = *stackp; - - for (;;) { - if (stack == nullptr) { - // Try steal and install stack - *stackp = stack = stripe->steal_stack(); - if (stack == nullptr) { - // Nothing to steal - return false; - } - } - - if (stack->pop(entry)) { - // Success - return true; - } - - // Free and uninstall stack - free_stack(allocator, stack); - *stackp = stack = nullptr; - } -} - -bool XMarkThreadLocalStacks::flush(XMarkStackAllocator* allocator, XMarkStripeSet* stripes) { - bool flushed = false; - - // Flush all stacks - for (size_t i = 0; i < stripes->nstripes(); i++) { - XMarkStripe* const stripe = stripes->stripe_at(i); - XMarkStack** const stackp = &_stacks[i]; - XMarkStack* const stack = *stackp; - if (stack == nullptr) { - continue; - } - - // Free/Publish and uninstall stack - if (stack->is_empty()) { - free_stack(allocator, stack); - } else { - stripe->publish_stack(stack); - flushed = true; - } - *stackp = nullptr; - } - - return flushed; -} - -void XMarkThreadLocalStacks::free(XMarkStackAllocator* allocator) { - // Free and uninstall magazine - if (_magazine != nullptr) { - allocator->free_magazine(_magazine); - _magazine = nullptr; - } -} diff --git a/src/hotspot/share/gc/x/xMarkStack.hpp b/src/hotspot/share/gc/x/xMarkStack.hpp deleted file mode 100644 index e012b89749d6a..0000000000000 --- a/src/hotspot/share/gc/x/xMarkStack.hpp +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XMARKSTACK_HPP -#define SHARE_GC_X_XMARKSTACK_HPP - -#include "gc/x/xGlobals.hpp" -#include "gc/x/xMarkStackEntry.hpp" -#include "utilities/globalDefinitions.hpp" - -template -class XStack { -private: - size_t _top; - XStack* _next; - T _slots[S]; - - bool is_full() const; - -public: - XStack(); - - bool is_empty() const; - - bool push(T value); - bool pop(T& value); - - XStack* next() const; - XStack** next_addr(); -}; - -template -class XStackList { -private: - T* volatile _head; - - T* encode_versioned_pointer(const T* stack, uint32_t version) const; - void decode_versioned_pointer(const T* vstack, T** stack, uint32_t* version) const; - -public: - XStackList(); - - bool is_empty() const; - - void push(T* stack); - T* pop(); - - void clear(); -}; - -using XMarkStack = XStack; -using XMarkStackList = XStackList; -using XMarkStackMagazine = XStack; -using XMarkStackMagazineList = XStackList; - -static_assert(sizeof(XMarkStack) == XMarkStackSize, "XMarkStack size mismatch"); -static_assert(sizeof(XMarkStackMagazine) <= XMarkStackSize, "XMarkStackMagazine size too large"); - -class XMarkStripe { -private: - XCACHE_ALIGNED XMarkStackList _published; - XCACHE_ALIGNED XMarkStackList _overflowed; - -public: - XMarkStripe(); - - bool is_empty() const; - - void publish_stack(XMarkStack* stack, bool publish = true); - XMarkStack* steal_stack(); -}; - -class XMarkStripeSet { -private: - size_t _nstripes; - size_t _nstripes_mask; - XMarkStripe _stripes[XMarkStripesMax]; - -public: - XMarkStripeSet(); - - size_t nstripes() const; - void set_nstripes(size_t nstripes); - - bool is_empty() const; - - size_t stripe_id(const XMarkStripe* stripe) const; - XMarkStripe* stripe_at(size_t index); - XMarkStripe* stripe_next(XMarkStripe* stripe); - XMarkStripe* stripe_for_worker(uint nworkers, uint worker_id); - XMarkStripe* stripe_for_addr(uintptr_t addr); -}; - -class XMarkStackAllocator; - -class XMarkThreadLocalStacks { -private: - XMarkStackMagazine* _magazine; - XMarkStack* _stacks[XMarkStripesMax]; - - XMarkStack* allocate_stack(XMarkStackAllocator* allocator); - void free_stack(XMarkStackAllocator* allocator, XMarkStack* stack); - - bool push_slow(XMarkStackAllocator* allocator, - XMarkStripe* stripe, - XMarkStack** stackp, - XMarkStackEntry entry, - bool publish); - - bool pop_slow(XMarkStackAllocator* allocator, - XMarkStripe* stripe, - XMarkStack** stackp, - XMarkStackEntry& entry); - -public: - XMarkThreadLocalStacks(); - - bool is_empty(const XMarkStripeSet* stripes) const; - - void install(XMarkStripeSet* stripes, - XMarkStripe* stripe, - XMarkStack* stack); - - XMarkStack* steal(XMarkStripeSet* stripes, - XMarkStripe* stripe); - - bool push(XMarkStackAllocator* allocator, - XMarkStripeSet* stripes, - XMarkStripe* stripe, - XMarkStackEntry entry, - bool publish); - - bool pop(XMarkStackAllocator* allocator, - XMarkStripeSet* stripes, - XMarkStripe* stripe, - XMarkStackEntry& entry); - - bool flush(XMarkStackAllocator* allocator, - XMarkStripeSet* stripes); - - void free(XMarkStackAllocator* allocator); -}; - -#endif // SHARE_GC_X_XMARKSTACK_HPP diff --git a/src/hotspot/share/gc/x/xMarkStack.inline.hpp b/src/hotspot/share/gc/x/xMarkStack.inline.hpp deleted file mode 100644 index e643c1e32243a..0000000000000 --- a/src/hotspot/share/gc/x/xMarkStack.inline.hpp +++ /dev/null @@ -1,266 +0,0 @@ -/* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XMARKSTACK_INLINE_HPP -#define SHARE_GC_X_XMARKSTACK_INLINE_HPP - -#include "gc/x/xMarkStack.hpp" - -#include "utilities/debug.hpp" -#include "runtime/atomic.hpp" - -template -inline XStack::XStack() : - _top(0), - _next(nullptr) {} - -template -inline bool XStack::is_empty() const { - return _top == 0; -} - -template -inline bool XStack::is_full() const { - return _top == S; -} - -template -inline bool XStack::push(T value) { - if (is_full()) { - return false; - } - - _slots[_top++] = value; - return true; -} - -template -inline bool XStack::pop(T& value) { - if (is_empty()) { - return false; - } - - value = _slots[--_top]; - return true; -} - -template -inline XStack* XStack::next() const { - return _next; -} - -template -inline XStack** XStack::next_addr() { - return &_next; -} - -template -inline XStackList::XStackList() : - _head(encode_versioned_pointer(nullptr, 0)) {} - -template -inline T* XStackList::encode_versioned_pointer(const T* stack, uint32_t version) const { - uint64_t addr; - - if (stack == nullptr) { - addr = (uint32_t)-1; - } else { - addr = ((uint64_t)stack - XMarkStackSpaceStart) >> XMarkStackSizeShift; - } - - return (T*)((addr << 32) | (uint64_t)version); -} - -template -inline void XStackList::decode_versioned_pointer(const T* vstack, T** stack, uint32_t* version) const { - const uint64_t addr = (uint64_t)vstack >> 32; - - if (addr == (uint32_t)-1) { - *stack = nullptr; - } else { - *stack = (T*)((addr << XMarkStackSizeShift) + XMarkStackSpaceStart); - } - - *version = (uint32_t)(uint64_t)vstack; -} - -template -inline bool XStackList::is_empty() const { - const T* vstack = _head; - T* stack = nullptr; - uint32_t version = 0; - - decode_versioned_pointer(vstack, &stack, &version); - return stack == nullptr; -} - -template -inline void XStackList::push(T* stack) { - T* vstack = _head; - uint32_t version = 0; - - for (;;) { - decode_versioned_pointer(vstack, stack->next_addr(), &version); - T* const new_vstack = encode_versioned_pointer(stack, version + 1); - T* const prev_vstack = Atomic::cmpxchg(&_head, vstack, new_vstack); - if (prev_vstack == vstack) { - // Success - break; - } - - // Retry - vstack = prev_vstack; - } -} - -template -inline T* XStackList::pop() { - T* vstack = _head; - T* stack = nullptr; - uint32_t version = 0; - - for (;;) { - decode_versioned_pointer(vstack, &stack, &version); - if (stack == nullptr) { - return nullptr; - } - - T* const new_vstack = encode_versioned_pointer(stack->next(), version + 1); - T* const prev_vstack = Atomic::cmpxchg(&_head, vstack, new_vstack); - if (prev_vstack == vstack) { - // 
Success - return stack; - } - - // Retry - vstack = prev_vstack; - } -} - -template -inline void XStackList::clear() { - _head = encode_versioned_pointer(nullptr, 0); -} - -inline bool XMarkStripe::is_empty() const { - return _published.is_empty() && _overflowed.is_empty(); -} - -inline void XMarkStripe::publish_stack(XMarkStack* stack, bool publish) { - // A stack is published either on the published list or the overflowed - // list. The published list is used by mutators publishing stacks for GC - // workers to work on, while the overflowed list is used by GC workers - // to publish stacks that overflowed. The intention here is to avoid - // contention between mutators and GC workers as much as possible, while - // still allowing GC workers to help out and steal work from each other. - if (publish) { - _published.push(stack); - } else { - _overflowed.push(stack); - } -} - -inline XMarkStack* XMarkStripe::steal_stack() { - // Steal overflowed stacks first, then published stacks - XMarkStack* const stack = _overflowed.pop(); - if (stack != nullptr) { - return stack; - } - - return _published.pop(); -} - -inline size_t XMarkStripeSet::nstripes() const { - return _nstripes; -} - -inline size_t XMarkStripeSet::stripe_id(const XMarkStripe* stripe) const { - const size_t index = ((uintptr_t)stripe - (uintptr_t)_stripes) / sizeof(XMarkStripe); - assert(index < _nstripes, "Invalid index"); - return index; -} - -inline XMarkStripe* XMarkStripeSet::stripe_at(size_t index) { - assert(index < _nstripes, "Invalid index"); - return &_stripes[index]; -} - -inline XMarkStripe* XMarkStripeSet::stripe_next(XMarkStripe* stripe) { - const size_t index = (stripe_id(stripe) + 1) & _nstripes_mask; - assert(index < _nstripes, "Invalid index"); - return &_stripes[index]; -} - -inline XMarkStripe* XMarkStripeSet::stripe_for_addr(uintptr_t addr) { - const size_t index = (addr >> XMarkStripeShift) & _nstripes_mask; - assert(index < _nstripes, "Invalid index"); - return &_stripes[index]; -} - -inline void XMarkThreadLocalStacks::install(XMarkStripeSet* stripes, - XMarkStripe* stripe, - XMarkStack* stack) { - XMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)]; - assert(*stackp == nullptr, "Should be empty"); - *stackp = stack; -} - -inline XMarkStack* XMarkThreadLocalStacks::steal(XMarkStripeSet* stripes, - XMarkStripe* stripe) { - XMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)]; - XMarkStack* const stack = *stackp; - if (stack != nullptr) { - *stackp = nullptr; - } - - return stack; -} - -inline bool XMarkThreadLocalStacks::push(XMarkStackAllocator* allocator, - XMarkStripeSet* stripes, - XMarkStripe* stripe, - XMarkStackEntry entry, - bool publish) { - XMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)]; - XMarkStack* const stack = *stackp; - if (stack != nullptr && stack->push(entry)) { - return true; - } - - return push_slow(allocator, stripe, stackp, entry, publish); -} - -inline bool XMarkThreadLocalStacks::pop(XMarkStackAllocator* allocator, - XMarkStripeSet* stripes, - XMarkStripe* stripe, - XMarkStackEntry& entry) { - XMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)]; - XMarkStack* const stack = *stackp; - if (stack != nullptr && stack->pop(entry)) { - return true; - } - - return pop_slow(allocator, stripe, stackp, entry); -} - -#endif // SHARE_GC_X_XMARKSTACK_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xMarkStackAllocator.cpp b/src/hotspot/share/gc/x/xMarkStackAllocator.cpp deleted file mode 100644 index b5cc3ad641aff..0000000000000 --- 
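// Editor's illustrative sketch (standard C++, not part of the diff): the
// ABA-avoidance scheme used by XStackList above, where the list head packs a
// 32-bit node index together with a 32-bit version that is bumped on every
// successful push/pop, so a stale CAS cannot succeed even if the same node
// reappears at the head. Node, the pool layout and index UINT32_MAX as "null"
// are assumptions chosen to mirror the deleted code, not HotSpot types.
#include <atomic>
#include <cstdint>

struct Node { uint32_t next = UINT32_MAX; };     // UINT32_MAX == "null"

class VersionedStack {
  static constexpr uint32_t kNull = UINT32_MAX;
  Node* _pool;                                   // nodes live in a preallocated pool
  std::atomic<uint64_t> _head;

  static uint64_t pack(uint32_t index, uint32_t version) {
    return (uint64_t(index) << 32) | version;
  }
  static uint32_t index_of(uint64_t packed)   { return uint32_t(packed >> 32); }
  static uint32_t version_of(uint64_t packed) { return uint32_t(packed); }

public:
  explicit VersionedStack(Node* pool) : _pool(pool), _head(pack(kNull, 0)) {}

  void push(uint32_t index) {
    uint64_t old_head = _head.load();
    for (;;) {
      _pool[index].next = index_of(old_head);
      const uint64_t new_head = pack(index, version_of(old_head) + 1);
      if (_head.compare_exchange_weak(old_head, new_head)) {
        return;                                  // success; on failure old_head was reloaded
      }
    }
  }

  // Returns kNull when empty.
  uint32_t pop() {
    uint64_t old_head = _head.load();
    for (;;) {
      const uint32_t index = index_of(old_head);
      if (index == kNull) {
        return kNull;
      }
      const uint64_t new_head = pack(_pool[index].next, version_of(old_head) + 1);
      if (_head.compare_exchange_weak(old_head, new_head)) {
        return index;
      }
    }
  }
};

int main() {
  Node pool[4];
  VersionedStack s(pool);
  s.push(2);
  s.push(0);
  return (s.pop() == 0 && s.pop() == 2 && s.pop() == UINT32_MAX) ? 0 : 1;
}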
a/src/hotspot/share/gc/x/xMarkStackAllocator.cpp +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/shared/gcLogPrecious.hpp" -#include "gc/shared/gc_globals.hpp" -#include "gc/x/xLock.inline.hpp" -#include "gc/x/xMarkStack.inline.hpp" -#include "gc/x/xMarkStackAllocator.hpp" -#include "logging/log.hpp" -#include "runtime/atomic.hpp" -#include "runtime/os.hpp" -#include "utilities/debug.hpp" - -uintptr_t XMarkStackSpaceStart; - -XMarkStackSpace::XMarkStackSpace() : - _expand_lock(), - _start(0), - _top(0), - _end(0) { - assert(ZMarkStackSpaceLimit >= XMarkStackSpaceExpandSize, "ZMarkStackSpaceLimit too small"); - - // Reserve address space - const size_t size = ZMarkStackSpaceLimit; - const uintptr_t addr = (uintptr_t)os::reserve_memory(size, !ExecMem, mtGC); - if (addr == 0) { - log_error_pd(gc, marking)("Failed to reserve address space for mark stacks"); - return; - } - - // Successfully initialized - _start = _top = _end = addr; - - // Register mark stack space start - XMarkStackSpaceStart = _start; - - // Prime space - _end += expand_space(); -} - -bool XMarkStackSpace::is_initialized() const { - return _start != 0; -} - -size_t XMarkStackSpace::size() const { - return _end - _start; -} - -size_t XMarkStackSpace::used() const { - return _top - _start; -} - -size_t XMarkStackSpace::expand_space() { - const size_t expand_size = XMarkStackSpaceExpandSize; - const size_t old_size = size(); - const size_t new_size = old_size + expand_size; - - if (new_size > ZMarkStackSpaceLimit) { - // Expansion limit reached. This is a fatal error since we - // currently can't recover from running out of mark stack space. - fatal("Mark stack space exhausted. Use -XX:ZMarkStackSpaceLimit= to increase the " - "maximum number of bytes allocated for mark stacks. 
Current limit is " SIZE_FORMAT "M.", - ZMarkStackSpaceLimit / M); - } - - log_debug(gc, marking)("Expanding mark stack space: " SIZE_FORMAT "M->" SIZE_FORMAT "M", - old_size / M, new_size / M); - - // Expand - os::commit_memory_or_exit((char*)_end, expand_size, false /* executable */, "Mark stack space"); - - return expand_size; -} - -size_t XMarkStackSpace::shrink_space() { - // Shrink to what is currently used - const size_t old_size = size(); - const size_t new_size = align_up(used(), XMarkStackSpaceExpandSize); - const size_t shrink_size = old_size - new_size; - - if (shrink_size > 0) { - // Shrink - log_debug(gc, marking)("Shrinking mark stack space: " SIZE_FORMAT "M->" SIZE_FORMAT "M", - old_size / M, new_size / M); - - const uintptr_t shrink_start = _end - shrink_size; - os::uncommit_memory((char*)shrink_start, shrink_size, false /* executable */); - } - - return shrink_size; -} - -uintptr_t XMarkStackSpace::alloc_space(size_t size) { - uintptr_t top = Atomic::load(&_top); - - for (;;) { - const uintptr_t end = Atomic::load(&_end); - const uintptr_t new_top = top + size; - if (new_top > end) { - // Not enough space left - return 0; - } - - const uintptr_t prev_top = Atomic::cmpxchg(&_top, top, new_top); - if (prev_top == top) { - // Success - return top; - } - - // Retry - top = prev_top; - } -} - -uintptr_t XMarkStackSpace::expand_and_alloc_space(size_t size) { - XLocker locker(&_expand_lock); - - // Retry allocation before expanding - uintptr_t addr = alloc_space(size); - if (addr != 0) { - return addr; - } - - // Expand - const size_t expand_size = expand_space(); - - // Increment top before end to make sure another - // thread can't steal out newly expanded space. - addr = Atomic::fetch_then_add(&_top, size); - Atomic::add(&_end, expand_size); - - return addr; -} - -uintptr_t XMarkStackSpace::alloc(size_t size) { - assert(size <= XMarkStackSpaceExpandSize, "Invalid size"); - - const uintptr_t addr = alloc_space(size); - if (addr != 0) { - return addr; - } - - return expand_and_alloc_space(size); -} - -void XMarkStackSpace::free() { - _end -= shrink_space(); - _top = _start; -} - -XMarkStackAllocator::XMarkStackAllocator() : - _freelist(), - _space() {} - -bool XMarkStackAllocator::is_initialized() const { - return _space.is_initialized(); -} - -size_t XMarkStackAllocator::size() const { - return _space.size(); -} - -XMarkStackMagazine* XMarkStackAllocator::create_magazine_from_space(uintptr_t addr, size_t size) { - assert(is_aligned(size, XMarkStackSize), "Invalid size"); - - // Use first stack as magazine - XMarkStackMagazine* const magazine = new ((void*)addr) XMarkStackMagazine(); - for (size_t i = XMarkStackSize; i < size; i += XMarkStackSize) { - XMarkStack* const stack = new ((void*)(addr + i)) XMarkStack(); - const bool success = magazine->push(stack); - assert(success, "Magazine should never get full"); - } - - return magazine; -} - -XMarkStackMagazine* XMarkStackAllocator::alloc_magazine() { - // Try allocating from the free list first - XMarkStackMagazine* const magazine = _freelist.pop(); - if (magazine != nullptr) { - return magazine; - } - - // Allocate new magazine - const uintptr_t addr = _space.alloc(XMarkStackMagazineSize); - if (addr == 0) { - return nullptr; - } - - return create_magazine_from_space(addr, XMarkStackMagazineSize); -} - -void XMarkStackAllocator::free_magazine(XMarkStackMagazine* magazine) { - _freelist.push(magazine); -} - -void XMarkStackAllocator::free() { - _freelist.clear(); - _space.free(); -} diff --git 
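// Editor's illustrative sketch (standard C++, not part of the diff): the
// allocation pattern used by XMarkStackSpace above -- a lock-free bump-pointer
// fast path over [top, end), and a locked slow path that expands the space and
// claims from it *before* publishing the new end, so concurrent fast-path
// callers cannot race into the freshly committed region. The sizes and the use
// of a pretend base address instead of reserved/committed memory are assumptions.
#include <atomic>
#include <cstdint>
#include <mutex>

class BumpSpace {
  static constexpr uintptr_t kExpandSize = 64 * 1024;
  std::mutex _expand_lock;
  uintptr_t _start;
  std::atomic<uintptr_t> _top;
  std::atomic<uintptr_t> _end;

public:
  explicit BumpSpace(uintptr_t start) : _start(start), _top(start), _end(start) {}

  // Fast path: CAS-bump top, return 0 if the committed space is exhausted.
  uintptr_t try_alloc(uintptr_t size) {
    uintptr_t top = _top.load();
    for (;;) {
      const uintptr_t end = _end.load();
      const uintptr_t new_top = top + size;
      if (new_top > end) {
        return 0;                                // not enough space left
      }
      if (_top.compare_exchange_weak(top, new_top)) {
        return top;                              // success
      }
      // compare_exchange reloaded top; retry
    }
  }

  // Slow path: expand under a lock, claim from the new region, then publish it.
  uintptr_t expand_and_alloc(uintptr_t size) {
    std::lock_guard<std::mutex> guard(_expand_lock);

    // Someone else may have expanded while we waited for the lock.
    const uintptr_t addr = try_alloc(size);
    if (addr != 0) {
      return addr;
    }

    // (Here the real code commits kExpandSize bytes of reserved memory.)
    const uintptr_t claimed = _top.fetch_add(size);   // claim before...
    _end.fetch_add(kExpandSize);                      // ...making the expansion visible
    return claimed;
  }

  uintptr_t alloc(uintptr_t size) {
    const uintptr_t addr = try_alloc(size);
    return addr != 0 ? addr : expand_and_alloc(size);
  }
};

int main() {
  BumpSpace space(0x1000);                 // pretend base address, never dereferenced
  return space.alloc(256) == 0x1000 ? 0 : 1;
}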
a/src/hotspot/share/gc/x/xMarkStackAllocator.hpp b/src/hotspot/share/gc/x/xMarkStackAllocator.hpp deleted file mode 100644 index 5e81ae284cf50..0000000000000 --- a/src/hotspot/share/gc/x/xMarkStackAllocator.hpp +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XMARKSTACKALLOCATOR_HPP -#define SHARE_GC_X_XMARKSTACKALLOCATOR_HPP - -#include "gc/x/xGlobals.hpp" -#include "gc/x/xLock.hpp" -#include "utilities/globalDefinitions.hpp" - -class XMarkStackSpace { -private: - XLock _expand_lock; - uintptr_t _start; - volatile uintptr_t _top; - volatile uintptr_t _end; - - size_t used() const; - - size_t expand_space(); - size_t shrink_space(); - - uintptr_t alloc_space(size_t size); - uintptr_t expand_and_alloc_space(size_t size); - -public: - XMarkStackSpace(); - - bool is_initialized() const; - - size_t size() const; - - uintptr_t alloc(size_t size); - void free(); -}; - -class XMarkStackAllocator { -private: - XCACHE_ALIGNED XMarkStackMagazineList _freelist; - XCACHE_ALIGNED XMarkStackSpace _space; - - XMarkStackMagazine* create_magazine_from_space(uintptr_t addr, size_t size); - -public: - XMarkStackAllocator(); - - bool is_initialized() const; - - size_t size() const; - - XMarkStackMagazine* alloc_magazine(); - void free_magazine(XMarkStackMagazine* magazine); - - void free(); -}; - -#endif // SHARE_GC_X_XMARKSTACKALLOCATOR_HPP diff --git a/src/hotspot/share/gc/x/xMarkStackEntry.hpp b/src/hotspot/share/gc/x/xMarkStackEntry.hpp deleted file mode 100644 index 61df1798df2a0..0000000000000 --- a/src/hotspot/share/gc/x/xMarkStackEntry.hpp +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XMARKSTACKENTRY_HPP -#define SHARE_GC_X_XMARKSTACKENTRY_HPP - -#include "gc/x/xBitField.hpp" -#include "memory/allocation.hpp" - -// -// Mark stack entry layout -// ----------------------- -// -// Object entry -// ------------ -// -// 6 -// 3 5 4 3 2 1 0 -// +------------------------------------------------------------------+-+-+-+-+-+ -// |11111111 11111111 11111111 11111111 11111111 11111111 11111111 111|1|1|1|1|1| -// +------------------------------------------------------------------+-+-+-+-+-+ -// | | | | | | -// | 4-4 Mark Flag (1-bit) * | | | | -// | | | | | -// | 3-3 Increment Live Flag (1-bit) * | | | -// | | | | -// | 2-2 Follow Flag (1-bit) * | | -// | | | -// | 1-1 Partial Array Flag (1-bit) * | -// | | -// | 0-0 Final Flag (1-bit) * -// | -// * 63-5 Object Address (59-bits) -// -// -// Partial array entry -// ------------------- -// -// 6 3 3 -// 3 2 1 2 1 0 -// +------------------------------------+---------------------------------+-+-+ -// |11111111 11111111 11111111 11111111 |11111111 11111111 11111111 111111|1|1| -// +------------------------------------+---------------------------------+-+-+ -// | | | | -// | | 1-1 Partial Array Flag (1-bit) * | -// | | | -// | | 0-0 Final Flag (1-bit) * -// | | -// | * 31-2 Partial Array Length (30-bits) -// | -// * 63-32 Partial Array Address Offset (32-bits) -// - -class XMarkStackEntry { -private: - typedef XBitField field_finalizable; - typedef XBitField field_partial_array; - typedef XBitField field_follow; - typedef XBitField field_inc_live; - typedef XBitField field_mark; - typedef XBitField field_object_address; - typedef XBitField field_partial_array_length; - typedef XBitField field_partial_array_offset; - - uint64_t _entry; - -public: - XMarkStackEntry() { - // This constructor is intentionally left empty and does not initialize - // _entry to allow it to be optimized out when instantiating XMarkStack, - // which has a long array of XMarkStackEntry elements, but doesn't care - // what _entry is initialized to. 
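// Editor's illustrative sketch (standard C++, not part of the diff): how a
// 64-bit mark stack entry packs its flags and payload, in the spirit of the
// layout comment above. The tiny BitField template is a simplified stand-in
// for the XBitField typedefs used by XMarkStackEntry (it omits XBitField's
// extra value-shift parameter), so the exact encoding here is illustrative only.
#include <cassert>
#include <cstdint>

template <int Shift, int Bits>
struct BitField {
  static constexpr uint64_t mask = (Bits == 64) ? ~0ull : ((1ull << Bits) - 1);
  static uint64_t encode(uint64_t value)  { assert((value & ~mask) == 0); return value << Shift; }
  static uint64_t decode(uint64_t packed) { return (packed >> Shift) & mask; }
};

// Object entry layout (see the diagram above): five flag bits, then the address.
using FinalizableBit  = BitField<0, 1>;
using PartialArrayBit = BitField<1, 1>;
using FollowBit       = BitField<2, 1>;
using IncLiveBit      = BitField<3, 1>;
using MarkBit         = BitField<4, 1>;
using AddressBits     = BitField<5, 59>;

int main() {
  const uint64_t addr = 0x7f00deadbee8ull >> 3;   // assume an 8-byte aligned oop, stored >> 3
  const uint64_t entry = AddressBits::encode(addr)
                       | MarkBit::encode(1)
                       | IncLiveBit::encode(1)
                       | FollowBit::encode(1)
                       | PartialArrayBit::encode(0)
                       | FinalizableBit::encode(0);

  assert(AddressBits::decode(entry) == addr);
  assert(MarkBit::decode(entry) == 1 && PartialArrayBit::decode(entry) == 0);
  return 0;
}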
- } - - XMarkStackEntry(uintptr_t object_address, bool mark, bool inc_live, bool follow, bool finalizable) : - _entry(field_object_address::encode(object_address) | - field_mark::encode(mark) | - field_inc_live::encode(inc_live) | - field_follow::encode(follow) | - field_partial_array::encode(false) | - field_finalizable::encode(finalizable)) {} - - XMarkStackEntry(size_t partial_array_offset, size_t partial_array_length, bool finalizable) : - _entry(field_partial_array_offset::encode(partial_array_offset) | - field_partial_array_length::encode(partial_array_length) | - field_partial_array::encode(true) | - field_finalizable::encode(finalizable)) {} - - bool finalizable() const { - return field_finalizable::decode(_entry); - } - - bool partial_array() const { - return field_partial_array::decode(_entry); - } - - size_t partial_array_offset() const { - return field_partial_array_offset::decode(_entry); - } - - size_t partial_array_length() const { - return field_partial_array_length::decode(_entry); - } - - bool follow() const { - return field_follow::decode(_entry); - } - - bool inc_live() const { - return field_inc_live::decode(_entry); - } - - bool mark() const { - return field_mark::decode(_entry); - } - - uintptr_t object_address() const { - return field_object_address::decode(_entry); - } -}; - -#endif // SHARE_GC_X_XMARKSTACKENTRY_HPP diff --git a/src/hotspot/share/gc/x/xMarkTerminate.hpp b/src/hotspot/share/gc/x/xMarkTerminate.hpp deleted file mode 100644 index 28f18f6e1cbc7..0000000000000 --- a/src/hotspot/share/gc/x/xMarkTerminate.hpp +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XMARKTERMINATE_HPP -#define SHARE_GC_X_XMARKTERMINATE_HPP - -#include "gc/x/xGlobals.hpp" -#include "memory/allocation.hpp" -#include "utilities/globalDefinitions.hpp" - -class XMarkTerminate { -private: - uint _nworkers; - XCACHE_ALIGNED volatile uint _nworking_stage0; - volatile uint _nworking_stage1; - - bool enter_stage(volatile uint* nworking_stage); - void exit_stage(volatile uint* nworking_stage); - bool try_exit_stage(volatile uint* nworking_stage); - -public: - XMarkTerminate(); - - void reset(uint nworkers); - - bool enter_stage0(); - void exit_stage0(); - bool try_exit_stage0(); - - bool enter_stage1(); - bool try_exit_stage1(); -}; - -#endif // SHARE_GC_X_XMARKTERMINATE_HPP diff --git a/src/hotspot/share/gc/x/xMarkTerminate.inline.hpp b/src/hotspot/share/gc/x/xMarkTerminate.inline.hpp deleted file mode 100644 index e4b9256ba6b7e..0000000000000 --- a/src/hotspot/share/gc/x/xMarkTerminate.inline.hpp +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XMARKTERMINATE_INLINE_HPP -#define SHARE_GC_X_XMARKTERMINATE_INLINE_HPP - -#include "gc/x/xMarkTerminate.hpp" - -#include "runtime/atomic.hpp" - -inline XMarkTerminate::XMarkTerminate() : - _nworkers(0), - _nworking_stage0(0), - _nworking_stage1(0) {} - -inline bool XMarkTerminate::enter_stage(volatile uint* nworking_stage) { - return Atomic::sub(nworking_stage, 1u) == 0; -} - -inline void XMarkTerminate::exit_stage(volatile uint* nworking_stage) { - Atomic::add(nworking_stage, 1u); -} - -inline bool XMarkTerminate::try_exit_stage(volatile uint* nworking_stage) { - uint nworking = Atomic::load(nworking_stage); - - for (;;) { - if (nworking == 0) { - return false; - } - - const uint new_nworking = nworking + 1; - const uint prev_nworking = Atomic::cmpxchg(nworking_stage, nworking, new_nworking); - if (prev_nworking == nworking) { - // Success - return true; - } - - // Retry - nworking = prev_nworking; - } -} - -inline void XMarkTerminate::reset(uint nworkers) { - _nworkers = _nworking_stage0 = _nworking_stage1 = nworkers; -} - -inline bool XMarkTerminate::enter_stage0() { - return enter_stage(&_nworking_stage0); -} - -inline void XMarkTerminate::exit_stage0() { - exit_stage(&_nworking_stage0); -} - -inline bool XMarkTerminate::try_exit_stage0() { - return try_exit_stage(&_nworking_stage0); -} - -inline bool XMarkTerminate::enter_stage1() { - return enter_stage(&_nworking_stage1); -} - -inline bool XMarkTerminate::try_exit_stage1() { - return try_exit_stage(&_nworking_stage1); -} - -#endif // SHARE_GC_X_XMARKTERMINATE_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xMemory.cpp b/src/hotspot/share/gc/x/xMemory.cpp deleted file mode 100644 index e394f580ab939..0000000000000 --- a/src/hotspot/share/gc/x/xMemory.cpp +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/x/xList.inline.hpp" -#include "gc/x/xLock.inline.hpp" -#include "gc/x/xMemory.inline.hpp" - -XMemory* XMemoryManager::create(uintptr_t start, size_t size) { - XMemory* const area = new XMemory(start, size); - if (_callbacks._create != nullptr) { - _callbacks._create(area); - } - return area; -} - -void XMemoryManager::destroy(XMemory* area) { - if (_callbacks._destroy != nullptr) { - _callbacks._destroy(area); - } - delete area; -} - -void XMemoryManager::shrink_from_front(XMemory* area, size_t size) { - if (_callbacks._shrink_from_front != nullptr) { - _callbacks._shrink_from_front(area, size); - } - area->shrink_from_front(size); -} - -void XMemoryManager::shrink_from_back(XMemory* area, size_t size) { - if (_callbacks._shrink_from_back != nullptr) { - _callbacks._shrink_from_back(area, size); - } - area->shrink_from_back(size); -} - -void XMemoryManager::grow_from_front(XMemory* area, size_t size) { - if (_callbacks._grow_from_front != nullptr) { - _callbacks._grow_from_front(area, size); - } - area->grow_from_front(size); -} - -void XMemoryManager::grow_from_back(XMemory* area, size_t size) { - if (_callbacks._grow_from_back != nullptr) { - _callbacks._grow_from_back(area, size); - } - area->grow_from_back(size); -} - -XMemoryManager::Callbacks::Callbacks() : - _create(nullptr), - _destroy(nullptr), - _shrink_from_front(nullptr), - _shrink_from_back(nullptr), - _grow_from_front(nullptr), - _grow_from_back(nullptr) {} - -XMemoryManager::XMemoryManager() : - _freelist(), - _callbacks() {} - -void XMemoryManager::register_callbacks(const Callbacks& callbacks) { - _callbacks = callbacks; -} - -uintptr_t XMemoryManager::peek_low_address() const { - XLocker locker(&_lock); - - const XMemory* const area = _freelist.first(); - if (area != nullptr) { - return area->start(); - } - - // Out of memory - return UINTPTR_MAX; -} - -uintptr_t XMemoryManager::alloc_low_address(size_t size) { - XLocker locker(&_lock); - - XListIterator iter(&_freelist); - for (XMemory* area; iter.next(&area);) { - if (area->size() >= size) { - if (area->size() == size) { - // Exact match, remove area - const uintptr_t start = area->start(); - _freelist.remove(area); - destroy(area); - return start; - } else { - // Larger than requested, shrink area - const uintptr_t start = area->start(); - shrink_from_front(area, size); - return start; - } - } - } - - // Out of memory - return UINTPTR_MAX; -} - -uintptr_t XMemoryManager::alloc_low_address_at_most(size_t size, size_t* allocated) { - XLocker locker(&_lock); - - XMemory* area = _freelist.first(); - if (area != nullptr) { - if (area->size() <= size) { - // Smaller than or equal to requested, remove area - const uintptr_t start = area->start(); - *allocated = area->size(); - _freelist.remove(area); - destroy(area); - return start; - } else { - // Larger than requested, shrink area - const uintptr_t start = area->start(); - shrink_from_front(area, size); - *allocated = size; - return start; - } - } - - // Out of memory - *allocated = 0; - return UINTPTR_MAX; -} - -uintptr_t XMemoryManager::alloc_high_address(size_t size) { - XLocker locker(&_lock); - - XListReverseIterator iter(&_freelist); - for (XMemory* area; iter.next(&area);) { - if (area->size() >= size) { - if (area->size() == size) { - // Exact match, remove area - const uintptr_t start = area->start(); - _freelist.remove(area); - destroy(area); - return start; - } else { - // Larger than requested, shrink area - shrink_from_back(area, size); - return area->end(); - } 
- } - } - - // Out of memory - return UINTPTR_MAX; -} - -void XMemoryManager::free(uintptr_t start, size_t size) { - assert(start != UINTPTR_MAX, "Invalid address"); - const uintptr_t end = start + size; - - XLocker locker(&_lock); - - XListIterator iter(&_freelist); - for (XMemory* area; iter.next(&area);) { - if (start < area->start()) { - XMemory* const prev = _freelist.prev(area); - if (prev != nullptr && start == prev->end()) { - if (end == area->start()) { - // Merge with prev and current area - grow_from_back(prev, size + area->size()); - _freelist.remove(area); - delete area; - } else { - // Merge with prev area - grow_from_back(prev, size); - } - } else if (end == area->start()) { - // Merge with current area - grow_from_front(area, size); - } else { - // Insert new area before current area - assert(end < area->start(), "Areas must not overlap"); - XMemory* const new_area = create(start, size); - _freelist.insert_before(area, new_area); - } - - // Done - return; - } - } - - // Insert last - XMemory* const last = _freelist.last(); - if (last != nullptr && start == last->end()) { - // Merge with last area - grow_from_back(last, size); - } else { - // Insert new area last - XMemory* const new_area = create(start, size); - _freelist.insert_last(new_area); - } -} diff --git a/src/hotspot/share/gc/x/xMemory.hpp b/src/hotspot/share/gc/x/xMemory.hpp deleted file mode 100644 index 2c3739cb44a58..0000000000000 --- a/src/hotspot/share/gc/x/xMemory.hpp +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
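// Editor's illustrative sketch (standard C++, not part of the diff): the
// address-ordered free list with neighbour coalescing that XMemoryManager
// above implements over an intrusive XList. Using std::map keyed by start
// address keeps the sketch short; the locking, callbacks and high-address
// allocation of the real code are omitted, and the demo addresses are assumptions.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>

class RangeManager {
  std::map<uintptr_t, size_t> _free;   // start -> size, address ordered

public:
  // First fit from the low end; returns UINTPTR_MAX when out of memory.
  uintptr_t alloc_low_address(size_t size) {
    for (auto it = _free.begin(); it != _free.end(); ++it) {
      if (it->second < size) {
        continue;
      }
      const uintptr_t start = it->first;
      const size_t remaining = it->second - size;
      _free.erase(it);
      if (remaining > 0) {
        _free.emplace(start + size, remaining);   // shrink the area from the front
      }
      return start;
    }
    return UINTPTR_MAX;
  }

  void free(uintptr_t start, size_t size) {
    auto next = _free.lower_bound(start);

    // Merge with the previous area if it ends exactly where we start.
    if (next != _free.begin()) {
      auto prev = std::prev(next);
      if (prev->first + prev->second == start) {
        start = prev->first;
        size += prev->second;
        _free.erase(prev);
      }
    }

    // Merge with the following area if we end exactly where it starts.
    if (next != _free.end() && start + size == next->first) {
      size += next->second;
      _free.erase(next);
    }

    _free.emplace(start, size);
  }
};

int main() {
  RangeManager mm;
  mm.free(0x1000, 0x3000);                          // one 12K area
  const uintptr_t a = mm.alloc_low_address(0x1000); // takes [0x1000, 0x2000)
  mm.free(a, 0x1000);                               // merges back into one area
  assert(mm.alloc_low_address(0x3000) == 0x1000);   // exact fit again
  return 0;
}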
- */ - -#ifndef SHARE_GC_X_XMEMORY_HPP -#define SHARE_GC_X_XMEMORY_HPP - -#include "gc/x/xList.hpp" -#include "gc/x/xLock.hpp" -#include "memory/allocation.hpp" - -class XMemory : public CHeapObj { - friend class XList; - -private: - uintptr_t _start; - uintptr_t _end; - XListNode _node; - -public: - XMemory(uintptr_t start, size_t size); - - uintptr_t start() const; - uintptr_t end() const; - size_t size() const; - - void shrink_from_front(size_t size); - void shrink_from_back(size_t size); - void grow_from_front(size_t size); - void grow_from_back(size_t size); -}; - -class XMemoryManager { -public: - typedef void (*CreateDestroyCallback)(const XMemory* area); - typedef void (*ResizeCallback)(const XMemory* area, size_t size); - - struct Callbacks { - CreateDestroyCallback _create; - CreateDestroyCallback _destroy; - ResizeCallback _shrink_from_front; - ResizeCallback _shrink_from_back; - ResizeCallback _grow_from_front; - ResizeCallback _grow_from_back; - - Callbacks(); - }; - -private: - mutable XLock _lock; - XList _freelist; - Callbacks _callbacks; - - XMemory* create(uintptr_t start, size_t size); - void destroy(XMemory* area); - void shrink_from_front(XMemory* area, size_t size); - void shrink_from_back(XMemory* area, size_t size); - void grow_from_front(XMemory* area, size_t size); - void grow_from_back(XMemory* area, size_t size); - -public: - XMemoryManager(); - - void register_callbacks(const Callbacks& callbacks); - - uintptr_t peek_low_address() const; - uintptr_t alloc_low_address(size_t size); - uintptr_t alloc_low_address_at_most(size_t size, size_t* allocated); - uintptr_t alloc_high_address(size_t size); - - void free(uintptr_t start, size_t size); -}; - -#endif // SHARE_GC_X_XMEMORY_HPP diff --git a/src/hotspot/share/gc/x/xMemory.inline.hpp b/src/hotspot/share/gc/x/xMemory.inline.hpp deleted file mode 100644 index 332cdae9160ed..0000000000000 --- a/src/hotspot/share/gc/x/xMemory.inline.hpp +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XMEMORY_INLINE_HPP -#define SHARE_GC_X_XMEMORY_INLINE_HPP - -#include "gc/x/xMemory.hpp" - -#include "gc/x/xList.inline.hpp" -#include "utilities/debug.hpp" - -inline XMemory::XMemory(uintptr_t start, size_t size) : - _start(start), - _end(start + size) {} - -inline uintptr_t XMemory::start() const { - return _start; -} - -inline uintptr_t XMemory::end() const { - return _end; -} - -inline size_t XMemory::size() const { - return end() - start(); -} - -inline void XMemory::shrink_from_front(size_t size) { - assert(this->size() > size, "Too small"); - _start += size; -} - -inline void XMemory::shrink_from_back(size_t size) { - assert(this->size() > size, "Too small"); - _end -= size; -} - -inline void XMemory::grow_from_front(size_t size) { - assert(start() >= size, "Too big"); - _start -= size; -} - -inline void XMemory::grow_from_back(size_t size) { - _end += size; -} - -#endif // SHARE_GC_X_XMEMORY_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xMessagePort.hpp b/src/hotspot/share/gc/x/xMessagePort.hpp deleted file mode 100644 index 205652537968c..0000000000000 --- a/src/hotspot/share/gc/x/xMessagePort.hpp +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XMESSAGEPORT_HPP -#define SHARE_GC_X_XMESSAGEPORT_HPP - -#include "gc/x/xFuture.hpp" -#include "gc/x/xList.hpp" -#include "runtime/mutex.hpp" - -template class XMessageRequest; - -template -class XMessagePort { -private: - typedef XMessageRequest Request; - - mutable Monitor _monitor; - bool _has_message; - T _message; - uint64_t _seqnum; - XList _queue; - -public: - XMessagePort(); - - bool is_busy() const; - - void send_sync(const T& message); - void send_async(const T& message); - - T receive(); - void ack(); -}; - -class XRendezvousPort { -private: - XMessagePort _port; - -public: - void signal(); - void wait(); - void ack(); -}; - -#endif // SHARE_GC_X_XMESSAGEPORT_HPP diff --git a/src/hotspot/share/gc/x/xMessagePort.inline.hpp b/src/hotspot/share/gc/x/xMessagePort.inline.hpp deleted file mode 100644 index 8007a80eacdf0..0000000000000 --- a/src/hotspot/share/gc/x/xMessagePort.inline.hpp +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XMESSAGEPORT_INLINE_HPP -#define SHARE_GC_X_XMESSAGEPORT_INLINE_HPP - -#include "gc/x/xMessagePort.hpp" - -#include "gc/x/xFuture.inline.hpp" -#include "gc/x/xList.inline.hpp" -#include "runtime/mutexLocker.hpp" - -template -class XMessageRequest : public StackObj { - friend class XList; - -private: - T _message; - uint64_t _seqnum; - XFuture _result; - XListNode _node; - -public: - void initialize(T message, uint64_t seqnum) { - _message = message; - _seqnum = seqnum; - } - - T message() const { - return _message; - } - - uint64_t seqnum() const { - return _seqnum; - } - - void wait() { - const T message = _result.get(); - assert(message == _message, "Message mismatch"); - } - - void satisfy(T message) { - _result.set(message); - } -}; - -template -inline XMessagePort::XMessagePort() : - _monitor(Monitor::nosafepoint, "XMessagePort_lock"), - _has_message(false), - _seqnum(0), - _queue() {} - -template -inline bool XMessagePort::is_busy() const { - MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag); - return _has_message; -} - -template -inline void XMessagePort::send_sync(const T& message) { - Request request; - - { - // Enqueue message - MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag); - request.initialize(message, _seqnum); - _queue.insert_last(&request); - ml.notify(); - } - - // Wait for completion - request.wait(); - - { - // Guard deletion of underlying semaphore. This is a workaround for a - // bug in sem_post() in glibc < 2.21, where it's not safe to destroy - // the semaphore immediately after returning from sem_wait(). The - // reason is that sem_post() can touch the semaphore after a waiting - // thread have returned from sem_wait(). To avoid this race we are - // forcing the waiting thread to acquire/release the lock held by the - // posting thread. 
https://sourceware.org/bugzilla/show_bug.cgi?id=12674 - MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag); - } -} - -template -inline void XMessagePort::send_async(const T& message) { - MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag); - if (!_has_message) { - // Post message - _message = message; - _has_message = true; - ml.notify(); - } -} - -template -inline T XMessagePort::receive() { - MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag); - - // Wait for message - while (!_has_message && _queue.is_empty()) { - ml.wait(); - } - - // Increment request sequence number - _seqnum++; - - if (!_has_message) { - // Message available in the queue - _message = _queue.first()->message(); - _has_message = true; - } - - return _message; -} - -template -inline void XMessagePort::ack() { - MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag); - - if (!_has_message) { - // Nothing to ack - return; - } - - // Satisfy requests (and duplicates) in queue - XListIterator iter(&_queue); - for (Request* request; iter.next(&request);) { - if (request->message() == _message && request->seqnum() < _seqnum) { - // Dequeue and satisfy request. Note that the dequeue operation must - // happen first, since the request will immediately be deallocated - // once it has been satisfied. - _queue.remove(request); - request->satisfy(_message); - } - } - - if (_queue.is_empty()) { - // Queue is empty - _has_message = false; - } else { - // Post first message in queue - _message = _queue.first()->message(); - } -} - -inline void XRendezvousPort::signal() { - _port.send_sync(true /* ignored */); -} - -inline void XRendezvousPort::wait() { - _port.receive(); -} - -inline void XRendezvousPort::ack() { - _port.ack(); -} - -#endif // SHARE_GC_X_XMESSAGEPORT_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xMetronome.cpp b/src/hotspot/share/gc/x/xMetronome.cpp deleted file mode 100644 index 7f0b649deb467..0000000000000 --- a/src/hotspot/share/gc/x/xMetronome.cpp +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
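// Editor's illustrative sketch (standard C++, not part of the diff): the
// request/ack protocol of XMessagePort above, reduced to a mutex plus condition
// variables. send_sync() blocks until the servicing thread has both received
// the message and ack'ed it, and duplicate requests queued before the ack are
// satisfied by the same cycle -- the purpose of the seqnum check in the
// original. The int message type and the coalescing-free ack are simplifications.
#include <condition_variable>
#include <cstdint>
#include <list>
#include <mutex>
#include <thread>

class MessagePort {
  struct Request {
    int message = 0;
    uint64_t seqnum = 0;
    bool done = false;
    std::condition_variable done_cv;
  };

  std::mutex _lock;
  std::condition_variable _cv;
  std::list<Request*> _queue;
  uint64_t _seqnum = 0;
  bool _has_message = false;
  int _message = 0;

public:
  void send_sync(int message) {
    Request request;
    request.message = message;
    std::unique_lock<std::mutex> ml(_lock);
    request.seqnum = _seqnum;
    _queue.push_back(&request);          // enqueue and wake the servicing thread
    _cv.notify_one();
    request.done_cv.wait(ml, [&] { return request.done; });
  }

  int receive() {
    std::unique_lock<std::mutex> ml(_lock);
    _cv.wait(ml, [&] { return _has_message || !_queue.empty(); });
    _seqnum++;                           // start a new cycle
    if (!_has_message) {
      _message = _queue.front()->message;
      _has_message = true;
    }
    return _message;
  }

  void ack() {
    std::lock_guard<std::mutex> ml(_lock);
    if (!_has_message) {
      return;
    }
    // Satisfy the served request and any duplicates queued before this cycle.
    // Dequeue before signalling: the request lives on the sender's stack.
    for (auto it = _queue.begin(); it != _queue.end();) {
      Request* request = *it;
      if (request->message == _message && request->seqnum < _seqnum) {
        it = _queue.erase(it);
        request->done = true;
        request->done_cv.notify_one();
      } else {
        ++it;
      }
    }
    _has_message = false;
  }
};

int main() {
  MessagePort port;
  std::thread server([&] { port.receive(); port.ack(); });
  port.send_sync(1);                     // returns once the server has received and ack'ed
  server.join();
  return 0;
}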
- */ - -#include "precompiled.hpp" -#include "gc/x/xMetronome.hpp" -#include "runtime/mutexLocker.hpp" -#include "runtime/timer.hpp" -#include "utilities/ticks.hpp" - -XMetronome::XMetronome(uint64_t hz) : - _monitor(Monitor::nosafepoint, "XMetronome_lock"), - _interval_ms(MILLIUNITS / hz), - _start_ms(0), - _nticks(0), - _stopped(false) {} - -bool XMetronome::wait_for_tick() { - if (_nticks++ == 0) { - // First tick, set start time - const Ticks now = Ticks::now(); - _start_ms = TimeHelper::counter_to_millis(now.value()); - } - - MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag); - - while (!_stopped) { - // We might wake up spuriously from wait, so always recalculate - // the timeout after a wakeup to see if we need to wait again. - const Ticks now = Ticks::now(); - const uint64_t now_ms = TimeHelper::counter_to_millis(now.value()); - const uint64_t next_ms = _start_ms + (_interval_ms * _nticks); - const int64_t timeout_ms = next_ms - now_ms; - - if (timeout_ms > 0) { - // Wait - ml.wait(timeout_ms); - } else { - // Tick - if (timeout_ms < 0) { - const uint64_t overslept = -timeout_ms; - if (overslept > _interval_ms) { - // Missed one or more ticks. Bump _nticks accordingly to - // avoid firing a string of immediate ticks to make up - // for the ones we missed. - _nticks += overslept / _interval_ms; - } - } - - return true; - } - } - - // Stopped - return false; -} - -void XMetronome::stop() { - MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag); - _stopped = true; - ml.notify(); -} diff --git a/src/hotspot/share/gc/x/xMetronome.hpp b/src/hotspot/share/gc/x/xMetronome.hpp deleted file mode 100644 index 8a0f27061c33c..0000000000000 --- a/src/hotspot/share/gc/x/xMetronome.hpp +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XMETRONOME_HPP -#define SHARE_GC_X_XMETRONOME_HPP - -#include "memory/allocation.hpp" -#include "runtime/mutex.hpp" - -class XMetronome : public StackObj { -private: - Monitor _monitor; - const uint64_t _interval_ms; - uint64_t _start_ms; - uint64_t _nticks; - bool _stopped; - -public: - XMetronome(uint64_t hz); - - bool wait_for_tick(); - void stop(); -}; - -#endif // SHARE_GC_X_XMETRONOME_HPP diff --git a/src/hotspot/share/gc/x/xNMethod.cpp b/src/hotspot/share/gc/x/xNMethod.cpp deleted file mode 100644 index 24b02b8328004..0000000000000 --- a/src/hotspot/share/gc/x/xNMethod.cpp +++ /dev/null @@ -1,349 +0,0 @@ -/* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. 
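// Editor's illustrative sketch (standard C++, not part of the diff): the
// fixed-rate tick loop of XMetronome above, including the catch-up logic that
// skips missed ticks instead of firing them back-to-back after oversleeping.
// The 10 Hz demo rate is an assumption.
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <mutex>

class Metronome {
  using Clock = std::chrono::steady_clock;
  std::mutex _lock;
  std::condition_variable _cv;
  const std::chrono::milliseconds _interval;
  Clock::time_point _start;
  uint64_t _nticks = 0;
  bool _stopped = false;

public:
  explicit Metronome(uint64_t hz) : _interval(1000 / hz) {}

  // Returns true on each tick, false once stop() has been called.
  bool wait_for_tick() {
    if (_nticks++ == 0) {
      _start = Clock::now();             // first tick anchors the schedule
    }

    std::unique_lock<std::mutex> ml(_lock);
    while (!_stopped) {
      const auto now = Clock::now();
      const auto next = _start + _interval * _nticks;
      if (next > now) {
        _cv.wait_until(ml, next);        // may wake spuriously; loop recomputes
        continue;
      }
      const auto overslept = now - next;
      if (overslept > _interval) {
        // Missed one or more ticks: advance the schedule rather than
        // firing a burst of immediate ticks to make up for them.
        _nticks += overslept / _interval;
      }
      return true;
    }
    return false;                        // stopped
  }

  void stop() {
    std::lock_guard<std::mutex> guard(_lock);
    _stopped = true;
    _cv.notify_all();
  }
};

int main() {
  Metronome metronome(10);               // 10 Hz
  for (int i = 0; i < 3; i++) {
    metronome.wait_for_tick();           // ticks arrive ~100 ms apart
  }
  metronome.stop();
  return 0;
}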
All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "code/relocInfo.hpp" -#include "code/nmethod.hpp" -#include "gc/shared/barrierSet.hpp" -#include "gc/shared/barrierSetNMethod.hpp" -#include "gc/shared/classUnloadingContext.hpp" -#include "gc/shared/suspendibleThreadSet.hpp" -#include "gc/x/xBarrier.inline.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xLock.inline.hpp" -#include "gc/x/xNMethod.hpp" -#include "gc/x/xNMethodData.hpp" -#include "gc/x/xNMethodTable.hpp" -#include "gc/x/xTask.hpp" -#include "gc/x/xWorkers.hpp" -#include "logging/log.hpp" -#include "memory/allocation.inline.hpp" -#include "memory/iterator.hpp" -#include "memory/resourceArea.hpp" -#include "memory/universe.hpp" -#include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" -#include "runtime/continuation.hpp" -#include "utilities/debug.hpp" - -static XNMethodData* gc_data(const nmethod* nm) { - return nm->gc_data(); -} - -static void set_gc_data(nmethod* nm, XNMethodData* data) { - return nm->set_gc_data(data); -} - -void XNMethod::attach_gc_data(nmethod* nm) { - GrowableArray immediate_oops; - bool non_immediate_oops = false; - - // Find all oop relocations - RelocIterator iter(nm); - while (iter.next()) { - if (iter.type() != relocInfo::oop_type) { - // Not an oop - continue; - } - - oop_Relocation* r = iter.oop_reloc(); - - if (!r->oop_is_immediate()) { - // Non-immediate oop found - non_immediate_oops = true; - continue; - } - - if (r->oop_value() != nullptr) { - // Non-null immediate oop found. Null oops can safely be - // ignored since the method will be re-registered if they - // are later patched to be non-null. 
- immediate_oops.push(r->oop_addr()); - } - } - - // Attach GC data to nmethod - XNMethodData* data = gc_data(nm); - if (data == nullptr) { - data = new XNMethodData(); - set_gc_data(nm, data); - } - - // Attach oops in GC data - XNMethodDataOops* const new_oops = XNMethodDataOops::create(immediate_oops, non_immediate_oops); - XNMethodDataOops* const old_oops = data->swap_oops(new_oops); - XNMethodDataOops::destroy(old_oops); -} - -XReentrantLock* XNMethod::lock_for_nmethod(nmethod* nm) { - return gc_data(nm)->lock(); -} - -XReentrantLock* XNMethod::ic_lock_for_nmethod(nmethod* nm) { - return gc_data(nm)->ic_lock(); -} - -void XNMethod::log_register(const nmethod* nm) { - LogTarget(Trace, gc, nmethod) log; - if (!log.is_enabled()) { - return; - } - - const XNMethodDataOops* const oops = gc_data(nm)->oops(); - - log.print("Register NMethod: %s.%s (" PTR_FORMAT "), " - "Compiler: %s, Oops: %d, ImmediateOops: " SIZE_FORMAT ", NonImmediateOops: %s", - nm->method()->method_holder()->external_name(), - nm->method()->name()->as_C_string(), - p2i(nm), - nm->compiler_name(), - nm->oops_count() - 1, - oops->immediates_count(), - oops->has_non_immediates() ? "Yes" : "No"); - - LogTarget(Trace, gc, nmethod, oops) log_oops; - if (!log_oops.is_enabled()) { - return; - } - - // Print nmethod oops table - { - oop* const begin = nm->oops_begin(); - oop* const end = nm->oops_end(); - for (oop* p = begin; p < end; p++) { - const oop o = Atomic::load(p); // C1 PatchingStub may replace it concurrently. - const char* external_name = (o == nullptr) ? "N/A" : o->klass()->external_name(); - log_oops.print(" Oop[" SIZE_FORMAT "] " PTR_FORMAT " (%s)", - (p - begin), p2i(o), external_name); - } - } - - // Print nmethod immediate oops - { - oop** const begin = oops->immediates_begin(); - oop** const end = oops->immediates_end(); - for (oop** p = begin; p < end; p++) { - log_oops.print(" ImmediateOop[" SIZE_FORMAT "] " PTR_FORMAT " @ " PTR_FORMAT " (%s)", - (p - begin), p2i(**p), p2i(*p), (**p)->klass()->external_name()); - } - } -} - -void XNMethod::log_unregister(const nmethod* nm) { - LogTarget(Debug, gc, nmethod) log; - if (!log.is_enabled()) { - return; - } - - log.print("Unregister NMethod: %s.%s (" PTR_FORMAT ")", - nm->method()->method_holder()->external_name(), - nm->method()->name()->as_C_string(), - p2i(nm)); -} - -void XNMethod::register_nmethod(nmethod* nm) { - ResourceMark rm; - - // Create and attach gc data - attach_gc_data(nm); - - log_register(nm); - - XNMethodTable::register_nmethod(nm); - - // Disarm nmethod entry barrier - disarm(nm); -} - -void XNMethod::unregister_nmethod(nmethod* nm) { - ResourceMark rm; - - log_unregister(nm); - - XNMethodTable::unregister_nmethod(nm); - - // Destroy GC data - delete gc_data(nm); -} - -bool XNMethod::supports_entry_barrier(nmethod* nm) { - BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod(); - return bs->supports_entry_barrier(nm); -} - -bool XNMethod::is_armed(nmethod* nm) { - BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod(); - return bs->is_armed(nm); -} - -void XNMethod::disarm(nmethod* nm) { - BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod(); - bs->disarm(nm); -} - -void XNMethod::set_guard_value(nmethod* nm, int value) { - BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod(); - bs->set_guard_value(nm, value); -} - -void XNMethod::nmethod_oops_do(nmethod* nm, OopClosure* cl) { - XLocker locker(XNMethod::lock_for_nmethod(nm)); - 
XNMethod::nmethod_oops_do_inner(nm, cl); -} - -void XNMethod::nmethod_oops_do_inner(nmethod* nm, OopClosure* cl) { - // Process oops table - { - oop* const begin = nm->oops_begin(); - oop* const end = nm->oops_end(); - for (oop* p = begin; p < end; p++) { - if (!Universe::contains_non_oop_word(p)) { - cl->do_oop(p); - } - } - } - - XNMethodDataOops* const oops = gc_data(nm)->oops(); - - // Process immediate oops - { - oop** const begin = oops->immediates_begin(); - oop** const end = oops->immediates_end(); - for (oop** p = begin; p < end; p++) { - if (*p != Universe::non_oop_word()) { - cl->do_oop(*p); - } - } - } - - // Process non-immediate oops - if (oops->has_non_immediates()) { - nm->fix_oop_relocations(); - } -} - -class XNMethodOopClosure : public OopClosure { -public: - virtual void do_oop(oop* p) { - if (XResurrection::is_blocked()) { - XBarrier::keep_alive_barrier_on_phantom_root_oop_field(p); - } else { - XBarrier::load_barrier_on_root_oop_field(p); - } - } - - virtual void do_oop(narrowOop* p) { - ShouldNotReachHere(); - } -}; - -void XNMethod::nmethod_oops_barrier(nmethod* nm) { - XNMethodOopClosure cl; - nmethod_oops_do_inner(nm, &cl); -} - -void XNMethod::nmethods_do_begin() { - XNMethodTable::nmethods_do_begin(); -} - -void XNMethod::nmethods_do_end() { - XNMethodTable::nmethods_do_end(); -} - -void XNMethod::nmethods_do(NMethodClosure* cl) { - XNMethodTable::nmethods_do(cl); -} - -class XNMethodUnlinkClosure : public NMethodClosure { -private: - bool _unloading_occurred; - volatile bool _failed; - - void set_failed() { - Atomic::store(&_failed, true); - } - -public: - XNMethodUnlinkClosure(bool unloading_occurred) : - _unloading_occurred(unloading_occurred), - _failed(false) {} - - virtual void do_nmethod(nmethod* nm) { - if (failed()) { - return; - } - - if (nm->is_unloading()) { - XLocker locker(XNMethod::lock_for_nmethod(nm)); - nm->unlink(); - return; - } - - { - XLocker locker(XNMethod::lock_for_nmethod(nm)); - - if (XNMethod::is_armed(nm)) { - // Heal oops and arm phase invariantly - XNMethod::nmethod_oops_barrier(nm); - XNMethod::set_guard_value(nm, 0); - } - } - - // Clear compiled ICs and exception caches - XLocker locker(XNMethod::ic_lock_for_nmethod(nm)); - nm->unload_nmethod_caches(_unloading_occurred); - } - - bool failed() const { - return Atomic::load(&_failed); - } -}; - -class XNMethodUnlinkTask : public XTask { -private: - XNMethodUnlinkClosure _cl; - -public: - XNMethodUnlinkTask(bool unloading_occurred) : - XTask("XNMethodUnlinkTask"), - _cl(unloading_occurred) { - XNMethodTable::nmethods_do_begin(); - } - - ~XNMethodUnlinkTask() { - XNMethodTable::nmethods_do_end(); - } - - virtual void work() { - XNMethodTable::nmethods_do(&_cl); - } -}; - -void XNMethod::unlink(XWorkers* workers, bool unloading_occurred) { - XNMethodUnlinkTask task(unloading_occurred); - workers->run(&task); -} - -void XNMethod::purge() { - ClassUnloadingContext::context()->purge_and_free_nmethods(); -} diff --git a/src/hotspot/share/gc/x/xNMethod.hpp b/src/hotspot/share/gc/x/xNMethod.hpp deleted file mode 100644 index 49fdecf584df3..0000000000000 --- a/src/hotspot/share/gc/x/xNMethod.hpp +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
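The removed xNMethod.cpp above scans an nmethod's oop relocations once at registration time, records the addresses of its non-null immediate oops, and later visits both the oops table and those recorded slots. The following standalone sketch models that two-step visit; `oop`, `kNonOopWord`, `NMethodGCData` and `nmethod_oops_do` are illustrative stand-ins, not the HotSpot types used in the deleted code.

#include <cstdint>
#include <functional>
#include <vector>

// Illustrative stand-ins only; these are not the real HotSpot types.
using oop = void*;
static oop const kNonOopWord = reinterpret_cast<oop>(uintptr_t(-1)); // assumed placeholder value

struct NMethodGCData {
    std::vector<oop*> immediate_slots;  // addresses of non-null immediate oops in the code stream
    bool has_non_immediates = false;    // true if some oops are only reachable via relocations
};

// Mirrors the two-step walk in the deleted nmethod_oops_do_inner(): visit the
// oops table, then the recorded immediate slots, skipping placeholder entries.
void nmethod_oops_do(std::vector<oop>& oops_table,
                     NMethodGCData& data,
                     const std::function<void(oop*)>& cl) {
    for (oop& slot : oops_table) {
        if (slot != kNonOopWord) {
            cl(&slot);
        }
    }
    for (oop* slot : data.immediate_slots) {
        if (*slot != kNonOopWord) {
            cl(slot);
        }
    }
    // Non-immediate oops additionally require patching the relocations, which
    // the original code delegates to nm->fix_oop_relocations().
}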
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XNMETHOD_HPP -#define SHARE_GC_X_XNMETHOD_HPP - -#include "memory/allStatic.hpp" - -class nmethod; -class NMethodClosure; -class XReentrantLock; -class XWorkers; - -class XNMethod : public AllStatic { -private: - static void attach_gc_data(nmethod* nm); - - static void log_register(const nmethod* nm); - static void log_unregister(const nmethod* nm); - -public: - static void register_nmethod(nmethod* nm); - static void unregister_nmethod(nmethod* nm); - - static bool supports_entry_barrier(nmethod* nm); - - static bool is_armed(nmethod* nm); - static void disarm(nmethod* nm); - static void set_guard_value(nmethod* nm, int value); - - static void nmethod_oops_do(nmethod* nm, OopClosure* cl); - static void nmethod_oops_do_inner(nmethod* nm, OopClosure* cl); - - static void nmethod_oops_barrier(nmethod* nm); - - static void nmethods_do_begin(); - static void nmethods_do_end(); - static void nmethods_do(NMethodClosure* cl); - - static XReentrantLock* lock_for_nmethod(nmethod* nm); - static XReentrantLock* ic_lock_for_nmethod(nmethod* nm); - - static void unlink(XWorkers* workers, bool unloading_occurred); - static void purge(); -}; - -#endif // SHARE_GC_X_XNMETHOD_HPP diff --git a/src/hotspot/share/gc/x/xNMethodData.cpp b/src/hotspot/share/gc/x/xNMethodData.cpp deleted file mode 100644 index 63fbfda99e29b..0000000000000 --- a/src/hotspot/share/gc/x/xNMethodData.cpp +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/x/xAttachedArray.inline.hpp" -#include "gc/x/xLock.inline.hpp" -#include "gc/x/xNMethodData.hpp" -#include "memory/allocation.hpp" -#include "runtime/atomic.hpp" -#include "utilities/align.hpp" -#include "utilities/debug.hpp" -#include "utilities/growableArray.hpp" - -XNMethodDataOops* XNMethodDataOops::create(const GrowableArray& immediates, bool has_non_immediates) { - return ::new (AttachedArray::alloc(immediates.length())) XNMethodDataOops(immediates, has_non_immediates); -} - -void XNMethodDataOops::destroy(XNMethodDataOops* oops) { - AttachedArray::free(oops); -} - -XNMethodDataOops::XNMethodDataOops(const GrowableArray& immediates, bool has_non_immediates) : - _immediates(immediates.length()), - _has_non_immediates(has_non_immediates) { - // Save all immediate oops - for (size_t i = 0; i < immediates_count(); i++) { - immediates_begin()[i] = immediates.at(int(i)); - } -} - -size_t XNMethodDataOops::immediates_count() const { - return _immediates.length(); -} - -oop** XNMethodDataOops::immediates_begin() const { - return _immediates(this); -} - -oop** XNMethodDataOops::immediates_end() const { - return immediates_begin() + immediates_count(); -} - -bool XNMethodDataOops::has_non_immediates() const { - return _has_non_immediates; -} - -XNMethodData::XNMethodData() : - _lock(), - _ic_lock(), - _oops(nullptr) {} - -XNMethodData::~XNMethodData() { - XNMethodDataOops::destroy(_oops); -} - -XReentrantLock* XNMethodData::lock() { - return &_lock; -} - -XReentrantLock* XNMethodData::ic_lock() { - return &_ic_lock; -} - -XNMethodDataOops* XNMethodData::oops() const { - return Atomic::load_acquire(&_oops); -} - -XNMethodDataOops* XNMethodData::swap_oops(XNMethodDataOops* new_oops) { - XLocker locker(&_lock); - XNMethodDataOops* const old_oops = _oops; - _oops = new_oops; - return old_oops; -} diff --git a/src/hotspot/share/gc/x/xNMethodData.hpp b/src/hotspot/share/gc/x/xNMethodData.hpp deleted file mode 100644 index 14549f41342ee..0000000000000 --- a/src/hotspot/share/gc/x/xNMethodData.hpp +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
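The deleted XNMethodData above publishes its oops array with acquire/release semantics: readers call Atomic::load_acquire on the pointer, while the writer swaps in a new array under the per-nmethod lock and destroys the old one afterwards. A minimal sketch of that publish-and-swap pattern, using only the C++ standard library (the class and method names here are invented for illustration):

#include <atomic>
#include <mutex>

// Readers load the current snapshot with acquire semantics; writers install a
// new snapshot under a lock and get the old one back so they can free it once
// it is known that no reader can still pick it up (the original relies on the
// swap happening at nmethod (re-)registration, where that holds).
template <typename T>
class SwappableSnapshot {
    std::mutex      _lock;
    std::atomic<T*> _current{nullptr};

public:
    T* get() const {
        return _current.load(std::memory_order_acquire);
    }

    T* swap(T* replacement) {
        std::lock_guard<std::mutex> guard(_lock);
        T* const old = _current.load(std::memory_order_relaxed);
        _current.store(replacement, std::memory_order_release);
        return old;  // caller decides when it is safe to delete
    }
};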
- */ - -#ifndef SHARE_GC_X_XNMETHODDATA_HPP -#define SHARE_GC_X_XNMETHODDATA_HPP - -#include "gc/x/xAttachedArray.hpp" -#include "gc/x/xLock.hpp" -#include "memory/allocation.hpp" -#include "oops/oopsHierarchy.hpp" -#include "utilities/globalDefinitions.hpp" - -class nmethod; -template class GrowableArray; - -class XNMethodDataOops { -private: - typedef XAttachedArray AttachedArray; - - const AttachedArray _immediates; - const bool _has_non_immediates; - - XNMethodDataOops(const GrowableArray& immediates, bool has_non_immediates); - -public: - static XNMethodDataOops* create(const GrowableArray& immediates, bool has_non_immediates); - static void destroy(XNMethodDataOops* oops); - - size_t immediates_count() const; - oop** immediates_begin() const; - oop** immediates_end() const; - - bool has_non_immediates() const; -}; - -class XNMethodData : public CHeapObj { -private: - XReentrantLock _lock; - XReentrantLock _ic_lock; - XNMethodDataOops* volatile _oops; - -public: - XNMethodData(); - ~XNMethodData(); - - XReentrantLock* lock(); - XReentrantLock* ic_lock(); - - XNMethodDataOops* oops() const; - XNMethodDataOops* swap_oops(XNMethodDataOops* oops); -}; - -#endif // SHARE_GC_X_XNMETHODDATA_HPP diff --git a/src/hotspot/share/gc/x/xNMethodTable.cpp b/src/hotspot/share/gc/x/xNMethodTable.cpp deleted file mode 100644 index 52fcba755a70b..0000000000000 --- a/src/hotspot/share/gc/x/xNMethodTable.cpp +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "code/relocInfo.hpp" -#include "code/nmethod.hpp" -#include "gc/shared/barrierSet.hpp" -#include "gc/shared/barrierSetNMethod.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xHash.inline.hpp" -#include "gc/x/xLock.inline.hpp" -#include "gc/x/xNMethodData.hpp" -#include "gc/x/xNMethodTable.hpp" -#include "gc/x/xNMethodTableEntry.hpp" -#include "gc/x/xNMethodTableIteration.hpp" -#include "gc/x/xSafeDelete.inline.hpp" -#include "gc/x/xTask.hpp" -#include "gc/x/xWorkers.hpp" -#include "logging/log.hpp" -#include "memory/allocation.hpp" -#include "memory/iterator.hpp" -#include "memory/resourceArea.hpp" -#include "runtime/mutexLocker.hpp" -#include "utilities/debug.hpp" -#include "utilities/powerOfTwo.hpp" - -XNMethodTableEntry* XNMethodTable::_table = nullptr; -size_t XNMethodTable::_size = 0; -size_t XNMethodTable::_nregistered = 0; -size_t XNMethodTable::_nunregistered = 0; -XNMethodTableIteration XNMethodTable::_iteration; -XSafeDeleteNoLock XNMethodTable::_safe_delete; - -size_t XNMethodTable::first_index(const nmethod* nm, size_t size) { - assert(is_power_of_2(size), "Invalid size"); - const size_t mask = size - 1; - const size_t hash = XHash::address_to_uint32((uintptr_t)nm); - return hash & mask; -} - -size_t XNMethodTable::next_index(size_t prev_index, size_t size) { - assert(is_power_of_2(size), "Invalid size"); - const size_t mask = size - 1; - return (prev_index + 1) & mask; -} - -bool XNMethodTable::register_entry(XNMethodTableEntry* table, size_t size, nmethod* nm) { - const XNMethodTableEntry entry(nm); - size_t index = first_index(nm, size); - - for (;;) { - const XNMethodTableEntry table_entry = table[index]; - - if (!table_entry.registered() && !table_entry.unregistered()) { - // Insert new entry - table[index] = entry; - return true; - } - - if (table_entry.registered() && table_entry.method() == nm) { - // Replace existing entry - table[index] = entry; - return false; - } - - index = next_index(index, size); - } -} - -void XNMethodTable::unregister_entry(XNMethodTableEntry* table, size_t size, nmethod* nm) { - size_t index = first_index(nm, size); - - for (;;) { - const XNMethodTableEntry table_entry = table[index]; - assert(table_entry.registered() || table_entry.unregistered(), "Entry not found"); - - if (table_entry.registered() && table_entry.method() == nm) { - // Remove entry - table[index] = XNMethodTableEntry(true /* unregistered */); - return; - } - - index = next_index(index, size); - } -} - -void XNMethodTable::rebuild(size_t new_size) { - assert(CodeCache_lock->owned_by_self(), "Lock must be held"); - - assert(is_power_of_2(new_size), "Invalid size"); - - log_debug(gc, nmethod)("Rebuilding NMethod Table: " - SIZE_FORMAT "->" SIZE_FORMAT " entries, " - SIZE_FORMAT "(%.0f%%->%.0f%%) registered, " - SIZE_FORMAT "(%.0f%%->%.0f%%) unregistered", - _size, new_size, - _nregistered, percent_of(_nregistered, _size), percent_of(_nregistered, new_size), - _nunregistered, percent_of(_nunregistered, _size), 0.0); - - // Allocate new table - XNMethodTableEntry* const new_table = new XNMethodTableEntry[new_size]; - - // Transfer all registered entries - for (size_t i = 0; i < _size; i++) { - const XNMethodTableEntry entry = _table[i]; - if (entry.registered()) { - register_entry(new_table, new_size, entry.method()); - } - } - - // Free old table - _safe_delete(_table); - - // Install new table - _table = new_table; - _size = new_size; - _nunregistered = 0; -} - -void XNMethodTable::rebuild_if_needed() { - // The hash table uses 
linear probing. To avoid wasting memory while - // at the same time maintaining good hash collision behavior we want - // to keep the table occupancy between 30% and 70%. The table always - // grows/shrinks by doubling/halving its size. Pruning of unregistered - // entries is done by rebuilding the table with or without resizing it. - const size_t min_size = 1024; - const size_t shrink_threshold = _size * 0.30; - const size_t prune_threshold = _size * 0.65; - const size_t grow_threshold = _size * 0.70; - - if (_size == 0) { - // Initialize table - rebuild(min_size); - } else if (_nregistered < shrink_threshold && _size > min_size) { - // Shrink table - rebuild(_size / 2); - } else if (_nregistered + _nunregistered > grow_threshold) { - // Prune or grow table - if (_nregistered < prune_threshold) { - // Prune table - rebuild(_size); - } else { - // Grow table - rebuild(_size * 2); - } - } -} - -size_t XNMethodTable::registered_nmethods() { - return _nregistered; -} - -size_t XNMethodTable::unregistered_nmethods() { - return _nunregistered; -} - -void XNMethodTable::register_nmethod(nmethod* nm) { - assert(CodeCache_lock->owned_by_self(), "Lock must be held"); - - // Grow/Shrink/Prune table if needed - rebuild_if_needed(); - - // Insert new entry - if (register_entry(_table, _size, nm)) { - // New entry registered. When register_entry() instead returns - // false the nmethod was already in the table so we do not want - // to increase number of registered entries in that case. - _nregistered++; - } -} - -void XNMethodTable::wait_until_iteration_done() { - assert(CodeCache_lock->owned_by_self(), "Lock must be held"); - - while (_iteration.in_progress()) { - CodeCache_lock->wait_without_safepoint_check(); - } -} - -void XNMethodTable::unregister_nmethod(nmethod* nm) { - assert(CodeCache_lock->owned_by_self(), "Lock must be held"); - - // Remove entry - unregister_entry(_table, _size, nm); - _nunregistered++; - _nregistered--; -} - -void XNMethodTable::nmethods_do_begin() { - MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - - // Do not allow the table to be deleted while iterating - _safe_delete.enable_deferred_delete(); - - // Prepare iteration - _iteration.nmethods_do_begin(_table, _size); -} - -void XNMethodTable::nmethods_do_end() { - MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - - // Finish iteration - _iteration.nmethods_do_end(); - - // Allow the table to be deleted - _safe_delete.disable_deferred_delete(); - - // Notify iteration done - CodeCache_lock->notify_all(); -} - -void XNMethodTable::nmethods_do(NMethodClosure* cl) { - _iteration.nmethods_do(cl); -} diff --git a/src/hotspot/share/gc/x/xNMethodTable.hpp b/src/hotspot/share/gc/x/xNMethodTable.hpp deleted file mode 100644 index ebb7803a08376..0000000000000 --- a/src/hotspot/share/gc/x/xNMethodTable.hpp +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
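The occupancy policy spelled out in the comment above boils down to a small decision function. The sketch below restates it with the thresholds taken directly from the deleted rebuild_if_needed() (30% shrink, 65% prune, 70% grow, minimum size 1024); the function name and the "return 0 means no rebuild" convention are invented for illustration.

#include <cstddef>

// Returns the size to rebuild the table at, or 0 if no rebuild is needed.
size_t rebuild_size(size_t size, size_t registered, size_t unregistered) {
    const size_t min_size = 1024;
    if (size == 0) {
        return min_size;                              // first use: allocate the initial table
    }
    if (registered < size * 0.30 && size > min_size) {
        return size / 2;                              // shrink: occupancy dropped below 30%
    }
    if (registered + unregistered > size * 0.70) {
        return (registered < size * 0.65) ? size      // prune tombstones, keep the size
                                          : size * 2; // grow: the table is genuinely filling up
    }
    return 0;                                         // within bounds, leave the table alone
}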
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XNMETHODTABLE_HPP -#define SHARE_GC_X_XNMETHODTABLE_HPP - -#include "gc/x/xNMethodTableIteration.hpp" -#include "gc/x/xSafeDelete.hpp" -#include "memory/allStatic.hpp" - -class nmethod; -class NMethodClosure; -class XNMethodTableEntry; -class XWorkers; - -class XNMethodTable : public AllStatic { -private: - static XNMethodTableEntry* _table; - static size_t _size; - static size_t _nregistered; - static size_t _nunregistered; - static XNMethodTableIteration _iteration; - static XSafeDeleteNoLock _safe_delete; - - static XNMethodTableEntry* create(size_t size); - static void destroy(XNMethodTableEntry* table); - - static size_t first_index(const nmethod* nm, size_t size); - static size_t next_index(size_t prev_index, size_t size); - - static bool register_entry(XNMethodTableEntry* table, size_t size, nmethod* nm); - static void unregister_entry(XNMethodTableEntry* table, size_t size, nmethod* nm); - - static void rebuild(size_t new_size); - static void rebuild_if_needed(); - -public: - static size_t registered_nmethods(); - static size_t unregistered_nmethods(); - - static void register_nmethod(nmethod* nm); - static void unregister_nmethod(nmethod* nm); - - static void wait_until_iteration_done(); - - static void nmethods_do_begin(); - static void nmethods_do_end(); - static void nmethods_do(NMethodClosure* cl); - - static void unlink(XWorkers* workers, bool unloading_occurred); - static void purge(XWorkers* workers); -}; - -#endif // SHARE_GC_X_XNMETHODTABLE_HPP diff --git a/src/hotspot/share/gc/x/xNMethodTableEntry.hpp b/src/hotspot/share/gc/x/xNMethodTableEntry.hpp deleted file mode 100644 index 9f06abb0bdbf6..0000000000000 --- a/src/hotspot/share/gc/x/xNMethodTableEntry.hpp +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XNMETHODTABLEENTRY_HPP -#define SHARE_GC_X_XNMETHODTABLEENTRY_HPP - -#include "gc/x/xBitField.hpp" -#include "memory/allocation.hpp" - -class nmethod; - -// -// NMethod table entry layout -// -------------------------- -// -// 6 -// 3 2 1 0 -// +---------------------------------------------------------------------+-+-+ -// |11111111 11111111 11111111 11111111 11111111 11111111 11111111 111111|1|1| -// +---------------------------------------------------------------------+-+-+ -// | | | -// | 1-1 Unregistered Flag (1-bits) * | -// | | -// | 0-0 Registered Flag (1-bits) * -// | -// * 63-2 NMethod Address (62-bits) -// - -class XNMethodTableEntry : public CHeapObj { -private: - typedef XBitField field_registered; - typedef XBitField field_unregistered; - typedef XBitField field_method; - - uint64_t _entry; - -public: - explicit XNMethodTableEntry(bool unregistered = false) : - _entry(field_registered::encode(false) | - field_unregistered::encode(unregistered) | - field_method::encode(nullptr)) {} - - explicit XNMethodTableEntry(nmethod* method) : - _entry(field_registered::encode(true) | - field_unregistered::encode(false) | - field_method::encode(method)) {} - - bool registered() const { - return field_registered::decode(_entry); - } - - bool unregistered() const { - return field_unregistered::decode(_entry); - } - - nmethod* method() const { - return field_method::decode(_entry); - } -}; - -#endif // SHARE_GC_X_XNMETHODTABLEENTRY_HPP diff --git a/src/hotspot/share/gc/x/xNMethodTableIteration.cpp b/src/hotspot/share/gc/x/xNMethodTableIteration.cpp deleted file mode 100644 index c9248e6342021..0000000000000 --- a/src/hotspot/share/gc/x/xNMethodTableIteration.cpp +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
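The entry layout documented above packs a registered flag, an unregistered (tombstone) flag and the nmethod address into one 64-bit word; the two low bits are free because code addresses are at least 4-byte aligned. A standalone sketch of that packing (plain shifts and masks instead of the XBitField helpers, and a generic pointer instead of nmethod*):

#include <cassert>
#include <cstdint>

class PackedEntry {
    uint64_t _bits;

    explicit PackedEntry(uint64_t bits) : _bits(bits) {}

public:
    static PackedEntry empty()     { return PackedEntry(0); }      // neither flag set
    static PackedEntry tombstone() { return PackedEntry(0b10); }   // unregistered flag only

    static PackedEntry registered(const void* method) {
        const uint64_t addr = reinterpret_cast<uint64_t>(method);
        assert((addr & 0b11) == 0 && "method address must be at least 4-byte aligned");
        return PackedEntry(addr | 0b01);                            // address bits + registered flag
    }

    bool is_registered()   const { return (_bits & 0b01) != 0; }
    bool is_unregistered() const { return (_bits & 0b10) != 0; }

    void* method() const {
        return reinterpret_cast<void*>(_bits & ~uint64_t(0b11));    // strip the two flag bits
    }
};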
- */ - -#include "precompiled.hpp" -#include "gc/x/xNMethodTableEntry.hpp" -#include "gc/x/xNMethodTableIteration.hpp" -#include "memory/iterator.hpp" -#include "runtime/atomic.hpp" -#include "utilities/debug.hpp" -#include "utilities/globalDefinitions.hpp" - -XNMethodTableIteration::XNMethodTableIteration() : - _table(nullptr), - _size(0), - _claimed(0) {} - -bool XNMethodTableIteration::in_progress() const { - return _table != nullptr; -} - -void XNMethodTableIteration::nmethods_do_begin(XNMethodTableEntry* table, size_t size) { - assert(!in_progress(), "precondition"); - - _table = table; - _size = size; - _claimed = 0; -} - -void XNMethodTableIteration::nmethods_do_end() { - assert(_claimed >= _size, "Failed to claim all table entries"); - - // Finish iteration - _table = nullptr; -} - -void XNMethodTableIteration::nmethods_do(NMethodClosure* cl) { - for (;;) { - // Claim table partition. Each partition is currently sized to span - // two cache lines. This number is just a guess, but seems to work well. - const size_t partition_size = (XCacheLineSize * 2) / sizeof(XNMethodTableEntry); - const size_t partition_start = MIN2(Atomic::fetch_then_add(&_claimed, partition_size), _size); - const size_t partition_end = MIN2(partition_start + partition_size, _size); - if (partition_start == partition_end) { - // End of table - break; - } - - // Process table partition - for (size_t i = partition_start; i < partition_end; i++) { - const XNMethodTableEntry entry = _table[i]; - if (entry.registered()) { - cl->do_nmethod(entry.method()); - } - } - } -} diff --git a/src/hotspot/share/gc/x/xNMethodTableIteration.hpp b/src/hotspot/share/gc/x/xNMethodTableIteration.hpp deleted file mode 100644 index 1677b334490fc..0000000000000 --- a/src/hotspot/share/gc/x/xNMethodTableIteration.hpp +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
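The removed nmethods_do() above parallelizes the table walk by letting each worker claim the next fixed-size partition with an atomic fetch-and-add; the partition size of "two cache lines worth of entries" is just the heuristic the original comment mentions. A condensed, library-only sketch of that claiming loop (names are illustrative):

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <functional>
#include <vector>

// Any number of threads can call this concurrently with the same `claimed`
// counter; each claimed range [start, end) is processed by exactly one caller.
template <typename Entry>
void parallel_table_do(const std::vector<Entry>& table,
                       std::atomic<size_t>& claimed,
                       size_t partition_size,
                       const std::function<void(const Entry&)>& visit) {
    const size_t size = table.size();
    for (;;) {
        const size_t start = std::min(claimed.fetch_add(partition_size), size);
        const size_t end   = std::min(start + partition_size, size);
        if (start == end) {
            return;                    // every entry has been claimed by some worker
        }
        for (size_t i = start; i < end; i++) {
            visit(table[i]);
        }
    }
}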
- */ - -#ifndef SHARE_GC_X_XNMETHODTABLEITERATION_HPP -#define SHARE_GC_X_XNMETHODTABLEITERATION_HPP - -#include "gc/x/xGlobals.hpp" - -class NMethodClosure; -class XNMethodTableEntry; - -class XNMethodTableIteration { -private: - XNMethodTableEntry* _table; - size_t _size; - XCACHE_ALIGNED volatile size_t _claimed; - -public: - XNMethodTableIteration(); - - bool in_progress() const; - - void nmethods_do_begin(XNMethodTableEntry* table, size_t size); - void nmethods_do_end(); - void nmethods_do(NMethodClosure* cl); -}; - -#endif // SHARE_GC_X_XNMETHODTABLEITERATION_HPP diff --git a/src/hotspot/share/gc/x/xNUMA.cpp b/src/hotspot/share/gc/x/xNUMA.cpp deleted file mode 100644 index fb99878b200d2..0000000000000 --- a/src/hotspot/share/gc/x/xNUMA.cpp +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/shared/gcLogPrecious.hpp" -#include "gc/x/xNUMA.hpp" - -bool XNUMA::_enabled; - -void XNUMA::initialize() { - pd_initialize(); - - log_info_p(gc, init)("NUMA Support: %s", to_string()); - if (_enabled) { - log_info_p(gc, init)("NUMA Nodes: %u", count()); - } -} - -const char* XNUMA::to_string() { - return _enabled ? "Enabled" : "Disabled"; -} diff --git a/src/hotspot/share/gc/x/xNUMA.hpp b/src/hotspot/share/gc/x/xNUMA.hpp deleted file mode 100644 index 6331a62c042dc..0000000000000 --- a/src/hotspot/share/gc/x/xNUMA.hpp +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XNUMA_HPP -#define SHARE_GC_X_XNUMA_HPP - -#include "memory/allStatic.hpp" -#include "utilities/globalDefinitions.hpp" - -class XNUMA : public AllStatic { -private: - static bool _enabled; - - static void pd_initialize(); - -public: - static void initialize(); - static bool is_enabled(); - - static uint32_t count(); - static uint32_t id(); - - static uint32_t memory_id(uintptr_t addr); - - static const char* to_string(); -}; - -#endif // SHARE_GC_X_XNUMA_HPP diff --git a/src/hotspot/share/gc/x/xNUMA.inline.hpp b/src/hotspot/share/gc/x/xNUMA.inline.hpp deleted file mode 100644 index 17f5b831a31e8..0000000000000 --- a/src/hotspot/share/gc/x/xNUMA.inline.hpp +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XNUMA_INLINE_HPP -#define SHARE_GC_X_XNUMA_INLINE_HPP - -#include "gc/x/xNUMA.hpp" - -inline bool XNUMA::is_enabled() { - return _enabled; -} - -#endif // SHARE_GC_X_XNUMA_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xObjArrayAllocator.cpp b/src/hotspot/share/gc/x/xObjArrayAllocator.cpp deleted file mode 100644 index 0950b886a9b7b..0000000000000 --- a/src/hotspot/share/gc/x/xObjArrayAllocator.cpp +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/x/xThreadLocalData.hpp" -#include "gc/x/xObjArrayAllocator.hpp" -#include "gc/x/xUtils.inline.hpp" -#include "oops/arrayKlass.hpp" -#include "runtime/interfaceSupport.inline.hpp" -#include "utilities/debug.hpp" - -XObjArrayAllocator::XObjArrayAllocator(Klass* klass, size_t word_size, int length, bool do_zero, Thread* thread) : - ObjArrayAllocator(klass, word_size, length, do_zero, thread) {} - -void XObjArrayAllocator::yield_for_safepoint() const { - ThreadBlockInVM tbivm(JavaThread::cast(_thread)); -} - -oop XObjArrayAllocator::initialize(HeapWord* mem) const { - // ZGC specializes the initialization by performing segmented clearing - // to allow shorter time-to-safepoints. - - if (!_do_zero) { - // No need for ZGC specialization - return ObjArrayAllocator::initialize(mem); - } - - // A max segment size of 64K was chosen because microbenchmarking - // suggested that it offered a good trade-off between allocation - // time and time-to-safepoint - const size_t segment_max = XUtils::bytes_to_words(64 * K); - const BasicType element_type = ArrayKlass::cast(_klass)->element_type(); - - // Clear leading 32 bits, if necessary. - int base_offset = arrayOopDesc::base_offset_in_bytes(element_type); - if (!is_aligned(base_offset, HeapWordSize)) { - assert(is_aligned(base_offset, BytesPerInt), "array base must be 32 bit aligned"); - *reinterpret_cast(reinterpret_cast(mem) + base_offset) = 0; - base_offset += BytesPerInt; - } - assert(is_aligned(base_offset, HeapWordSize), "remaining array base must be 64 bit aligned"); - - const size_t header = heap_word_size(base_offset); - const size_t payload_size = _word_size - header; - - if (payload_size <= segment_max) { - // To small to use segmented clearing - return ObjArrayAllocator::initialize(mem); - } - - // Segmented clearing - - // The array is going to be exposed before it has been completely - // cleared, therefore we can't expose the header at the end of this - // function. Instead explicitly initialize it according to our needs. - arrayOopDesc::set_mark(mem, markWord::prototype()); - arrayOopDesc::release_set_klass(mem, _klass); - assert(_length >= 0, "length should be non-negative"); - arrayOopDesc::set_length(mem, _length); - - // Keep the array alive across safepoints through an invisible - // root. Invisible roots are not visited by the heap itarator - // and the marking logic will not attempt to follow its elements. - // Relocation knows how to dodge iterating over such objects. - XThreadLocalData::set_invisible_root(_thread, (oop*)&mem); - - for (size_t processed = 0; processed < payload_size; processed += segment_max) { - // Calculate segment - HeapWord* const start = (HeapWord*)(mem + header + processed); - const size_t remaining = payload_size - processed; - const size_t segment_size = MIN2(remaining, segment_max); - - // Clear segment - Copy::zero_to_words(start, segment_size); - - // Safepoint - yield_for_safepoint(); - } - - XThreadLocalData::clear_invisible_root(_thread); - - return cast_to_oop(mem); -} diff --git a/src/hotspot/share/gc/x/xObjArrayAllocator.hpp b/src/hotspot/share/gc/x/xObjArrayAllocator.hpp deleted file mode 100644 index 4a084da3279b3..0000000000000 --- a/src/hotspot/share/gc/x/xObjArrayAllocator.hpp +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
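The removed XObjArrayAllocator::initialize() above avoids one long, safepoint-blocking clear of a huge array by zeroing the payload in 64 KiB segments and offering a safepoint between segments (after first publishing a valid header and pinning the object through an invisible root). The sketch below shows only the segmented-clearing loop itself; the yield callback stands in for the ThreadBlockInVM dance and the header/root handling is deliberately left out.

#include <algorithm>
#include <cstddef>
#include <cstring>
#include <functional>

void clear_in_segments(void* payload,
                       size_t payload_bytes,
                       const std::function<void()>& yield_for_safepoint,
                       size_t segment_bytes = 64 * 1024) {
    char* const cursor = static_cast<char*>(payload);
    for (size_t done = 0; done < payload_bytes; done += segment_bytes) {
        const size_t chunk = std::min(segment_bytes, payload_bytes - done);
        std::memset(cursor + done, 0, chunk);  // clear one segment
        yield_for_safepoint();                 // give the VM a chance to reach a safepoint
    }
}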
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XOBJARRAYALLOCATOR_HPP -#define SHARE_GC_X_XOBJARRAYALLOCATOR_HPP - -#include "gc/shared/memAllocator.hpp" - -class XObjArrayAllocator : public ObjArrayAllocator { -private: - virtual oop initialize(HeapWord* mem) const override; - - void yield_for_safepoint() const; - -public: - XObjArrayAllocator(Klass* klass, size_t word_size, int length, bool do_zero, Thread* thread); -}; - -#endif // SHARE_GC_X_XOBJARRAYALLOCATOR_HPP diff --git a/src/hotspot/share/gc/x/xObjectAllocator.cpp b/src/hotspot/share/gc/x/xObjectAllocator.cpp deleted file mode 100644 index 26981ce913175..0000000000000 --- a/src/hotspot/share/gc/x/xObjectAllocator.cpp +++ /dev/null @@ -1,267 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xHeap.inline.hpp" -#include "gc/x/xHeuristics.hpp" -#include "gc/x/xObjectAllocator.hpp" -#include "gc/x/xPage.inline.hpp" -#include "gc/x/xPageTable.inline.hpp" -#include "gc/x/xStat.hpp" -#include "gc/x/xThread.inline.hpp" -#include "gc/x/xValue.inline.hpp" -#include "logging/log.hpp" -#include "runtime/atomic.hpp" -#include "runtime/safepoint.hpp" -#include "utilities/align.hpp" -#include "utilities/debug.hpp" - -static const XStatCounter XCounterUndoObjectAllocationSucceeded("Memory", "Undo Object Allocation Succeeded", XStatUnitOpsPerSecond); -static const XStatCounter XCounterUndoObjectAllocationFailed("Memory", "Undo Object Allocation Failed", XStatUnitOpsPerSecond); - -XObjectAllocator::XObjectAllocator() : - _use_per_cpu_shared_small_pages(XHeuristics::use_per_cpu_shared_small_pages()), - _used(0), - _undone(0), - _alloc_for_relocation(0), - _undo_alloc_for_relocation(0), - _shared_medium_page(nullptr), - _shared_small_page(nullptr) {} - -XPage** XObjectAllocator::shared_small_page_addr() { - return _use_per_cpu_shared_small_pages ? _shared_small_page.addr() : _shared_small_page.addr(0); -} - -XPage* const* XObjectAllocator::shared_small_page_addr() const { - return _use_per_cpu_shared_small_pages ? _shared_small_page.addr() : _shared_small_page.addr(0); -} - -void XObjectAllocator::register_alloc_for_relocation(const XPageTable* page_table, uintptr_t addr, size_t size) { - const XPage* const page = page_table->get(addr); - const size_t aligned_size = align_up(size, page->object_alignment()); - Atomic::add(_alloc_for_relocation.addr(), aligned_size); -} - -void XObjectAllocator::register_undo_alloc_for_relocation(const XPage* page, size_t size) { - const size_t aligned_size = align_up(size, page->object_alignment()); - Atomic::add(_undo_alloc_for_relocation.addr(), aligned_size); -} - -XPage* XObjectAllocator::alloc_page(uint8_t type, size_t size, XAllocationFlags flags) { - XPage* const page = XHeap::heap()->alloc_page(type, size, flags); - if (page != nullptr) { - // Increment used bytes - Atomic::add(_used.addr(), size); - } - - return page; -} - -void XObjectAllocator::undo_alloc_page(XPage* page) { - // Increment undone bytes - Atomic::add(_undone.addr(), page->size()); - - XHeap::heap()->undo_alloc_page(page); -} - -uintptr_t XObjectAllocator::alloc_object_in_shared_page(XPage** shared_page, - uint8_t page_type, - size_t page_size, - size_t size, - XAllocationFlags flags) { - uintptr_t addr = 0; - XPage* page = Atomic::load_acquire(shared_page); - - if (page != nullptr) { - addr = page->alloc_object_atomic(size); - } - - if (addr == 0) { - // Allocate new page - XPage* const new_page = alloc_page(page_type, page_size, flags); - if (new_page != nullptr) { - // Allocate object before installing the new page - addr = new_page->alloc_object(size); - - retry: - // Install new page - XPage* const prev_page = Atomic::cmpxchg(shared_page, page, new_page); - if (prev_page != page) { - if (prev_page == nullptr) { - // Previous page was retired, retry installing the new page - page = prev_page; - goto retry; - } - - // Another page already installed, try allocation there first - const uintptr_t prev_addr = prev_page->alloc_object_atomic(size); - if (prev_addr == 0) { - // Allocation failed, retry installing the new page - page = prev_page; - goto retry; - } - - // Allocation succeeded in already installed page - addr = prev_addr; - - // Undo new page allocation - undo_alloc_page(new_page); - } - } - } - - 
return addr; -} - -uintptr_t XObjectAllocator::alloc_large_object(size_t size, XAllocationFlags flags) { - uintptr_t addr = 0; - - // Allocate new large page - const size_t page_size = align_up(size, XGranuleSize); - XPage* const page = alloc_page(XPageTypeLarge, page_size, flags); - if (page != nullptr) { - // Allocate the object - addr = page->alloc_object(size); - } - - return addr; -} - -uintptr_t XObjectAllocator::alloc_medium_object(size_t size, XAllocationFlags flags) { - return alloc_object_in_shared_page(_shared_medium_page.addr(), XPageTypeMedium, XPageSizeMedium, size, flags); -} - -uintptr_t XObjectAllocator::alloc_small_object(size_t size, XAllocationFlags flags) { - return alloc_object_in_shared_page(shared_small_page_addr(), XPageTypeSmall, XPageSizeSmall, size, flags); -} - -uintptr_t XObjectAllocator::alloc_object(size_t size, XAllocationFlags flags) { - if (size <= XObjectSizeLimitSmall) { - // Small - return alloc_small_object(size, flags); - } else if (size <= XObjectSizeLimitMedium) { - // Medium - return alloc_medium_object(size, flags); - } else { - // Large - return alloc_large_object(size, flags); - } -} - -uintptr_t XObjectAllocator::alloc_object(size_t size) { - XAllocationFlags flags; - return alloc_object(size, flags); -} - -uintptr_t XObjectAllocator::alloc_object_for_relocation(const XPageTable* page_table, size_t size) { - XAllocationFlags flags; - flags.set_non_blocking(); - - const uintptr_t addr = alloc_object(size, flags); - if (addr != 0) { - register_alloc_for_relocation(page_table, addr, size); - } - - return addr; -} - -void XObjectAllocator::undo_alloc_object_for_relocation(XPage* page, uintptr_t addr, size_t size) { - const uint8_t type = page->type(); - - if (type == XPageTypeLarge) { - register_undo_alloc_for_relocation(page, size); - undo_alloc_page(page); - XStatInc(XCounterUndoObjectAllocationSucceeded); - } else { - if (page->undo_alloc_object_atomic(addr, size)) { - register_undo_alloc_for_relocation(page, size); - XStatInc(XCounterUndoObjectAllocationSucceeded); - } else { - XStatInc(XCounterUndoObjectAllocationFailed); - } - } -} - -size_t XObjectAllocator::used() const { - size_t total_used = 0; - size_t total_undone = 0; - - XPerCPUConstIterator iter_used(&_used); - for (const size_t* cpu_used; iter_used.next(&cpu_used);) { - total_used += *cpu_used; - } - - XPerCPUConstIterator iter_undone(&_undone); - for (const size_t* cpu_undone; iter_undone.next(&cpu_undone);) { - total_undone += *cpu_undone; - } - - return total_used - total_undone; -} - -size_t XObjectAllocator::remaining() const { - assert(XThread::is_java(), "Should be a Java thread"); - - const XPage* const page = Atomic::load_acquire(shared_small_page_addr()); - if (page != nullptr) { - return page->remaining(); - } - - return 0; -} - -size_t XObjectAllocator::relocated() const { - size_t total_alloc = 0; - size_t total_undo_alloc = 0; - - XPerCPUConstIterator iter_alloc(&_alloc_for_relocation); - for (const size_t* alloc; iter_alloc.next(&alloc);) { - total_alloc += Atomic::load(alloc); - } - - XPerCPUConstIterator iter_undo_alloc(&_undo_alloc_for_relocation); - for (const size_t* undo_alloc; iter_undo_alloc.next(&undo_alloc);) { - total_undo_alloc += Atomic::load(undo_alloc); - } - - assert(total_alloc >= total_undo_alloc, "Mismatch"); - - return total_alloc - total_undo_alloc; -} - -void XObjectAllocator::retire_pages() { - assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); - - // Reset used and undone bytes - _used.set_all(0); - _undone.set_all(0); 
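The alloc_object_in_shared_page() path shown above is the interesting concurrency piece of the allocator: bump-allocate from the currently published shared page, and on failure allocate a fresh page, allocate the object there, and try to publish the page with a CAS, falling back to the winner's page if the race is lost. A condensed sketch of that retry logic follows; `Page`, `new_page` and `undo_new_page` are assumed helpers, not the HotSpot XPage/XHeap API.

#include <atomic>
#include <cstddef>
#include <cstdint>

struct Page {
    // Atomic bump-pointer allocation; returns 0 when the page has no room.
    // Declaration only -- an implementation is assumed, not shown here.
    uintptr_t alloc_atomic(size_t size);
};

Page* new_page();                 // assumed: allocate a fresh page (may return nullptr)
void  undo_new_page(Page* page);  // assumed: hand an unused page back

uintptr_t alloc_in_shared_page(std::atomic<Page*>& shared, size_t size) {
    Page* current = shared.load(std::memory_order_acquire);
    if (current != nullptr) {
        if (const uintptr_t addr = current->alloc_atomic(size)) {
            return addr;          // fast path: the published page had room
        }
    }

    Page* const fresh = new_page();
    if (fresh == nullptr) {
        return 0;                 // allocation failed entirely
    }
    const uintptr_t addr = fresh->alloc_atomic(size);

    for (;;) {
        Page* expected = current;
        if (shared.compare_exchange_strong(expected, fresh)) {
            return addr;          // we published the fresh page
        }
        if (expected == nullptr) {
            current = expected;   // published page was retired; retry the install
            continue;
        }
        if (const uintptr_t other = expected->alloc_atomic(size)) {
            undo_new_page(fresh); // the race winner's page had room; give ours back
            return other;
        }
        current = expected;       // the winner's page is full as well; retry the install
    }
}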
- - // Reset relocated bytes - _alloc_for_relocation.set_all(0); - _undo_alloc_for_relocation.set_all(0); - - // Reset allocation pages - _shared_medium_page.set(nullptr); - _shared_small_page.set_all(nullptr); -} diff --git a/src/hotspot/share/gc/x/xObjectAllocator.hpp b/src/hotspot/share/gc/x/xObjectAllocator.hpp deleted file mode 100644 index 8880c41f3d598..0000000000000 --- a/src/hotspot/share/gc/x/xObjectAllocator.hpp +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XOBJECTALLOCATOR_HPP -#define SHARE_GC_X_XOBJECTALLOCATOR_HPP - -#include "gc/x/xAllocationFlags.hpp" -#include "gc/x/xValue.hpp" - -class XPage; -class XPageTable; - -class XObjectAllocator { -private: - const bool _use_per_cpu_shared_small_pages; - XPerCPU _used; - XPerCPU _undone; - XPerCPU _alloc_for_relocation; - XPerCPU _undo_alloc_for_relocation; - XContended _shared_medium_page; - XPerCPU _shared_small_page; - - XPage** shared_small_page_addr(); - XPage* const* shared_small_page_addr() const; - - void register_alloc_for_relocation(const XPageTable* page_table, uintptr_t addr, size_t size); - void register_undo_alloc_for_relocation(const XPage* page, size_t size); - - XPage* alloc_page(uint8_t type, size_t size, XAllocationFlags flags); - void undo_alloc_page(XPage* page); - - // Allocate an object in a shared page. Allocate and - // atomically install a new page if necessary. - uintptr_t alloc_object_in_shared_page(XPage** shared_page, - uint8_t page_type, - size_t page_size, - size_t size, - XAllocationFlags flags); - - uintptr_t alloc_large_object(size_t size, XAllocationFlags flags); - uintptr_t alloc_medium_object(size_t size, XAllocationFlags flags); - uintptr_t alloc_small_object(size_t size, XAllocationFlags flags); - uintptr_t alloc_object(size_t size, XAllocationFlags flags); - -public: - XObjectAllocator(); - - uintptr_t alloc_object(size_t size); - uintptr_t alloc_object_for_relocation(const XPageTable* page_table, size_t size); - void undo_alloc_object_for_relocation(XPage* page, uintptr_t addr, size_t size); - - size_t used() const; - size_t remaining() const; - size_t relocated() const; - - void retire_pages(); -}; - -#endif // SHARE_GC_X_XOBJECTALLOCATOR_HPP diff --git a/src/hotspot/share/gc/x/xOop.hpp b/src/hotspot/share/gc/x/xOop.hpp deleted file mode 100644 index 92cc7a225fe6b..0000000000000 --- a/src/hotspot/share/gc/x/xOop.hpp +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. 
All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XOOP_HPP -#define SHARE_GC_X_XOOP_HPP - -#include "memory/allStatic.hpp" -#include "oops/oopsHierarchy.hpp" - -class XOop : public AllStatic { -public: - static oop from_address(uintptr_t addr); - static uintptr_t to_address(oop o); -}; - -#endif // SHARE_GC_X_XOOP_HPP diff --git a/src/hotspot/share/gc/x/xOop.inline.hpp b/src/hotspot/share/gc/x/xOop.inline.hpp deleted file mode 100644 index 933987577d113..0000000000000 --- a/src/hotspot/share/gc/x/xOop.inline.hpp +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XOOP_INLINE_HPP -#define SHARE_GC_X_XOOP_INLINE_HPP - -#include "gc/x/xOop.hpp" - -inline oop XOop::from_address(uintptr_t addr) { - return cast_to_oop(addr); -} - -inline uintptr_t XOop::to_address(oop o) { - return cast_from_oop(o); -} - -#endif // SHARE_GC_X_XOOP_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xPage.cpp b/src/hotspot/share/gc/x/xPage.cpp deleted file mode 100644 index b48500ab96e38..0000000000000 --- a/src/hotspot/share/gc/x/xPage.cpp +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xList.inline.hpp" -#include "gc/x/xPage.inline.hpp" -#include "gc/x/xPhysicalMemory.inline.hpp" -#include "gc/x/xVirtualMemory.inline.hpp" -#include "utilities/align.hpp" -#include "utilities/debug.hpp" - -XPage::XPage(const XVirtualMemory& vmem, const XPhysicalMemory& pmem) : - XPage(type_from_size(vmem.size()), vmem, pmem) {} - -XPage::XPage(uint8_t type, const XVirtualMemory& vmem, const XPhysicalMemory& pmem) : - _type(type), - _numa_id((uint8_t)-1), - _seqnum(0), - _virtual(vmem), - _top(start()), - _livemap(object_max_count()), - _last_used(0), - _physical(pmem), - _node() { - assert_initialized(); -} - -XPage::~XPage() {} - -void XPage::assert_initialized() const { - assert(!_virtual.is_null(), "Should not be null"); - assert(!_physical.is_null(), "Should not be null"); - assert(_virtual.size() == _physical.size(), "Virtual/Physical size mismatch"); - assert((_type == XPageTypeSmall && size() == XPageSizeSmall) || - (_type == XPageTypeMedium && size() == XPageSizeMedium) || - (_type == XPageTypeLarge && is_aligned(size(), XGranuleSize)), - "Page type/size mismatch"); -} - -void XPage::reset() { - _seqnum = XGlobalSeqNum; - _top = start(); - _livemap.reset(); - _last_used = 0; -} - -void XPage::reset_for_in_place_relocation() { - _seqnum = XGlobalSeqNum; - _top = start(); -} - -XPage* XPage::retype(uint8_t type) { - assert(_type != type, "Invalid retype"); - _type = type; - _livemap.resize(object_max_count()); - return this; -} - -XPage* XPage::split(size_t size) { - return split(type_from_size(size), size); -} - -XPage* XPage::split(uint8_t type, size_t size) { - assert(_virtual.size() > size, "Invalid split"); - - // Resize this page, keep _numa_id, _seqnum, and _last_used - const XVirtualMemory vmem = _virtual.split(size); - const XPhysicalMemory pmem = _physical.split(size); - _type = type_from_size(_virtual.size()); - _top = start(); - _livemap.resize(object_max_count()); - - // Create new page, inherit _seqnum and _last_used - XPage* const page = new XPage(type, vmem, pmem); - page->_seqnum = _seqnum; - page->_last_used = _last_used; - return page; -} - -XPage* XPage::split_committed() { - // Split any committed part of this page into a separate page, - // leaving this page with only uncommitted physical memory. 
- const XPhysicalMemory pmem = _physical.split_committed(); - if (pmem.is_null()) { - // Nothing committed - return nullptr; - } - - assert(!_physical.is_null(), "Should not be null"); - - // Resize this page - const XVirtualMemory vmem = _virtual.split(pmem.size()); - _type = type_from_size(_virtual.size()); - _top = start(); - _livemap.resize(object_max_count()); - - // Create new page - return new XPage(vmem, pmem); -} - -void XPage::print_on(outputStream* out) const { - out->print_cr(" %-6s " PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT " %s%s", - type_to_string(), start(), top(), end(), - is_allocating() ? " Allocating" : "", - is_relocatable() ? " Relocatable" : ""); -} - -void XPage::print() const { - print_on(tty); -} - -void XPage::verify_live(uint32_t live_objects, size_t live_bytes) const { - guarantee(live_objects == _livemap.live_objects(), "Invalid number of live objects"); - guarantee(live_bytes == _livemap.live_bytes(), "Invalid number of live bytes"); -} diff --git a/src/hotspot/share/gc/x/xPage.hpp b/src/hotspot/share/gc/x/xPage.hpp deleted file mode 100644 index c1040e034bd1c..0000000000000 --- a/src/hotspot/share/gc/x/xPage.hpp +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XPAGE_HPP -#define SHARE_GC_X_XPAGE_HPP - -#include "gc/x/xList.hpp" -#include "gc/x/xLiveMap.hpp" -#include "gc/x/xPhysicalMemory.hpp" -#include "gc/x/xVirtualMemory.hpp" -#include "memory/allocation.hpp" - -class VMStructs; - -class XPage : public CHeapObj { - friend class ::VMStructs; - friend class XList; - -private: - uint8_t _type; - uint8_t _numa_id; - uint32_t _seqnum; - XVirtualMemory _virtual; - volatile uintptr_t _top; - XLiveMap _livemap; - uint64_t _last_used; - XPhysicalMemory _physical; - XListNode _node; - - void assert_initialized() const; - - uint8_t type_from_size(size_t size) const; - const char* type_to_string() const; - - bool is_object_marked(uintptr_t addr) const; - bool is_object_strongly_marked(uintptr_t addr) const; - -public: - XPage(const XVirtualMemory& vmem, const XPhysicalMemory& pmem); - XPage(uint8_t type, const XVirtualMemory& vmem, const XPhysicalMemory& pmem); - ~XPage(); - - uint32_t object_max_count() const; - size_t object_alignment_shift() const; - size_t object_alignment() const; - - uint8_t type() const; - uintptr_t start() const; - uintptr_t end() const; - size_t size() const; - uintptr_t top() const; - size_t remaining() const; - - const XVirtualMemory& virtual_memory() const; - const XPhysicalMemory& physical_memory() const; - XPhysicalMemory& physical_memory(); - - uint8_t numa_id(); - - bool is_allocating() const; - bool is_relocatable() const; - - uint64_t last_used() const; - void set_last_used(); - - void reset(); - void reset_for_in_place_relocation(); - - XPage* retype(uint8_t type); - XPage* split(size_t size); - XPage* split(uint8_t type, size_t size); - XPage* split_committed(); - - bool is_in(uintptr_t addr) const; - - bool is_marked() const; - template bool is_object_marked(uintptr_t addr) const; - bool is_object_live(uintptr_t addr) const; - bool is_object_strongly_live(uintptr_t addr) const; - bool mark_object(uintptr_t addr, bool finalizable, bool& inc_live); - - void inc_live(uint32_t objects, size_t bytes); - uint32_t live_objects() const; - size_t live_bytes() const; - - void object_iterate(ObjectClosure* cl); - - uintptr_t alloc_object(size_t size); - uintptr_t alloc_object_atomic(size_t size); - - bool undo_alloc_object(uintptr_t addr, size_t size); - bool undo_alloc_object_atomic(uintptr_t addr, size_t size); - - void print_on(outputStream* out) const; - void print() const; - - void verify_live(uint32_t live_objects, size_t live_bytes) const; -}; - -class XPageClosure { -public: - virtual void do_page(const XPage* page) = 0; -}; - -#endif // SHARE_GC_X_XPAGE_HPP diff --git a/src/hotspot/share/gc/x/xPage.inline.hpp b/src/hotspot/share/gc/x/xPage.inline.hpp deleted file mode 100644 index d9b0fd2039dc1..0000000000000 --- a/src/hotspot/share/gc/x/xPage.inline.hpp +++ /dev/null @@ -1,313 +0,0 @@ -/* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XPAGE_INLINE_HPP -#define SHARE_GC_X_XPAGE_INLINE_HPP - -#include "gc/x/xPage.hpp" - -#include "gc/x/xAddress.inline.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xLiveMap.inline.hpp" -#include "gc/x/xNUMA.hpp" -#include "gc/x/xPhysicalMemory.inline.hpp" -#include "gc/x/xVirtualMemory.inline.hpp" -#include "runtime/atomic.hpp" -#include "runtime/os.hpp" -#include "utilities/align.hpp" -#include "utilities/checkedCast.hpp" -#include "utilities/debug.hpp" - -inline uint8_t XPage::type_from_size(size_t size) const { - if (size == XPageSizeSmall) { - return XPageTypeSmall; - } else if (size == XPageSizeMedium) { - return XPageTypeMedium; - } else { - return XPageTypeLarge; - } -} - -inline const char* XPage::type_to_string() const { - switch (type()) { - case XPageTypeSmall: - return "Small"; - - case XPageTypeMedium: - return "Medium"; - - default: - assert(type() == XPageTypeLarge, "Invalid page type"); - return "Large"; - } -} - -inline uint32_t XPage::object_max_count() const { - switch (type()) { - case XPageTypeLarge: - // A large page can only contain a single - // object aligned to the start of the page. - return 1; - - default: - return (uint32_t)(size() >> object_alignment_shift()); - } -} - -inline size_t XPage::object_alignment_shift() const { - switch (type()) { - case XPageTypeSmall: - return XObjectAlignmentSmallShift; - - case XPageTypeMedium: - return XObjectAlignmentMediumShift; - - default: - assert(type() == XPageTypeLarge, "Invalid page type"); - return XObjectAlignmentLargeShift; - } -} - -inline size_t XPage::object_alignment() const { - switch (type()) { - case XPageTypeSmall: - return XObjectAlignmentSmall; - - case XPageTypeMedium: - return XObjectAlignmentMedium; - - default: - assert(type() == XPageTypeLarge, "Invalid page type"); - return XObjectAlignmentLarge; - } -} - -inline uint8_t XPage::type() const { - return _type; -} - -inline uintptr_t XPage::start() const { - return _virtual.start(); -} - -inline uintptr_t XPage::end() const { - return _virtual.end(); -} - -inline size_t XPage::size() const { - return _virtual.size(); -} - -inline uintptr_t XPage::top() const { - return _top; -} - -inline size_t XPage::remaining() const { - return end() - top(); -} - -inline const XVirtualMemory& XPage::virtual_memory() const { - return _virtual; -} - -inline const XPhysicalMemory& XPage::physical_memory() const { - return _physical; -} - -inline XPhysicalMemory& XPage::physical_memory() { - return _physical; -} - -inline uint8_t XPage::numa_id() { - if (_numa_id == (uint8_t)-1) { - _numa_id = checked_cast(XNUMA::memory_id(XAddress::good(start()))); - } - - return _numa_id; -} - -inline bool XPage::is_allocating() const { - return _seqnum == XGlobalSeqNum; -} - -inline bool XPage::is_relocatable() const { - return _seqnum < XGlobalSeqNum; -} - -inline uint64_t XPage::last_used() const { - return _last_used; -} - -inline void XPage::set_last_used() { - _last_used = (uint64_t)ceil(os::elapsedTime()); -} - -inline bool XPage::is_in(uintptr_t addr) const { - const uintptr_t offset = XAddress::offset(addr); - return offset >= start() && offset < top(); -} - 
-inline bool XPage::is_marked() const { - assert(is_relocatable(), "Invalid page state"); - return _livemap.is_marked(); -} - -inline bool XPage::is_object_marked(uintptr_t addr) const { - assert(is_relocatable(), "Invalid page state"); - const size_t index = ((XAddress::offset(addr) - start()) >> object_alignment_shift()) * 2; - return _livemap.get(index); -} - -inline bool XPage::is_object_strongly_marked(uintptr_t addr) const { - assert(is_relocatable(), "Invalid page state"); - const size_t index = ((XAddress::offset(addr) - start()) >> object_alignment_shift()) * 2; - return _livemap.get(index + 1); -} - -template -inline bool XPage::is_object_marked(uintptr_t addr) const { - return finalizable ? is_object_marked(addr) : is_object_strongly_marked(addr); -} - -inline bool XPage::is_object_live(uintptr_t addr) const { - return is_allocating() || is_object_marked(addr); -} - -inline bool XPage::is_object_strongly_live(uintptr_t addr) const { - return is_allocating() || is_object_strongly_marked(addr); -} - -inline bool XPage::mark_object(uintptr_t addr, bool finalizable, bool& inc_live) { - assert(XAddress::is_marked(addr), "Invalid address"); - assert(is_relocatable(), "Invalid page state"); - assert(is_in(addr), "Invalid address"); - - // Set mark bit - const size_t index = ((XAddress::offset(addr) - start()) >> object_alignment_shift()) * 2; - return _livemap.set(index, finalizable, inc_live); -} - -inline void XPage::inc_live(uint32_t objects, size_t bytes) { - _livemap.inc_live(objects, bytes); -} - -inline uint32_t XPage::live_objects() const { - assert(is_marked(), "Should be marked"); - return _livemap.live_objects(); -} - -inline size_t XPage::live_bytes() const { - assert(is_marked(), "Should be marked"); - return _livemap.live_bytes(); -} - -inline void XPage::object_iterate(ObjectClosure* cl) { - _livemap.iterate(cl, XAddress::good(start()), object_alignment_shift()); -} - -inline uintptr_t XPage::alloc_object(size_t size) { - assert(is_allocating(), "Invalid state"); - - const size_t aligned_size = align_up(size, object_alignment()); - const uintptr_t addr = top(); - const uintptr_t new_top = addr + aligned_size; - - if (new_top > end()) { - // Not enough space left - return 0; - } - - _top = new_top; - - return XAddress::good(addr); -} - -inline uintptr_t XPage::alloc_object_atomic(size_t size) { - assert(is_allocating(), "Invalid state"); - - const size_t aligned_size = align_up(size, object_alignment()); - uintptr_t addr = top(); - - for (;;) { - const uintptr_t new_top = addr + aligned_size; - if (new_top > end()) { - // Not enough space left - return 0; - } - - const uintptr_t prev_top = Atomic::cmpxchg(&_top, addr, new_top); - if (prev_top == addr) { - // Success - return XAddress::good(addr); - } - - // Retry - addr = prev_top; - } -} - -inline bool XPage::undo_alloc_object(uintptr_t addr, size_t size) { - assert(is_allocating(), "Invalid state"); - - const uintptr_t offset = XAddress::offset(addr); - const size_t aligned_size = align_up(size, object_alignment()); - const uintptr_t old_top = top(); - const uintptr_t new_top = old_top - aligned_size; - - if (new_top != offset) { - // Failed to undo allocation, not the last allocated object - return false; - } - - _top = new_top; - - // Success - return true; -} - -inline bool XPage::undo_alloc_object_atomic(uintptr_t addr, size_t size) { - assert(is_allocating(), "Invalid state"); - - const uintptr_t offset = XAddress::offset(addr); - const size_t aligned_size = align_up(size, object_alignment()); - uintptr_t old_top = 
top(); - - for (;;) { - const uintptr_t new_top = old_top - aligned_size; - if (new_top != offset) { - // Failed to undo allocation, not the last allocated object - return false; - } - - const uintptr_t prev_top = Atomic::cmpxchg(&_top, old_top, new_top); - if (prev_top == old_top) { - // Success - return true; - } - - // Retry - old_top = prev_top; - } -} - -#endif // SHARE_GC_X_XPAGE_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xPageAllocator.cpp b/src/hotspot/share/gc/x/xPageAllocator.cpp deleted file mode 100644 index ccc715682c0cb..0000000000000 --- a/src/hotspot/share/gc/x/xPageAllocator.cpp +++ /dev/null @@ -1,870 +0,0 @@ -/* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/shared/gcLogPrecious.hpp" -#include "gc/shared/suspendibleThreadSet.hpp" -#include "gc/x/xArray.inline.hpp" -#include "gc/x/xCollectedHeap.hpp" -#include "gc/x/xFuture.inline.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xLock.inline.hpp" -#include "gc/x/xPage.inline.hpp" -#include "gc/x/xPageAllocator.inline.hpp" -#include "gc/x/xPageCache.hpp" -#include "gc/x/xSafeDelete.inline.hpp" -#include "gc/x/xStat.hpp" -#include "gc/x/xTask.hpp" -#include "gc/x/xUncommitter.hpp" -#include "gc/x/xUnmapper.hpp" -#include "gc/x/xWorkers.hpp" -#include "jfr/jfrEvents.hpp" -#include "logging/log.hpp" -#include "runtime/globals.hpp" -#include "runtime/init.hpp" -#include "runtime/java.hpp" -#include "utilities/debug.hpp" -#include "utilities/globalDefinitions.hpp" - -static const XStatCounter XCounterAllocationRate("Memory", "Allocation Rate", XStatUnitBytesPerSecond); -static const XStatCounter XCounterPageCacheFlush("Memory", "Page Cache Flush", XStatUnitBytesPerSecond); -static const XStatCounter XCounterDefragment("Memory", "Defragment", XStatUnitOpsPerSecond); -static const XStatCriticalPhase XCriticalPhaseAllocationStall("Allocation Stall"); - -enum XPageAllocationStall { - XPageAllocationStallSuccess, - XPageAllocationStallFailed, - XPageAllocationStallStartGC -}; - -class XPageAllocation : public StackObj { - friend class XList; - -private: - const uint8_t _type; - const size_t _size; - const XAllocationFlags _flags; - const uint32_t _seqnum; - size_t _flushed; - size_t _committed; - XList _pages; - XListNode _node; - XFuture _stall_result; - -public: - XPageAllocation(uint8_t type, size_t size, XAllocationFlags flags) : - _type(type), - _size(size), - _flags(flags), - _seqnum(XGlobalSeqNum), - _flushed(0), - _committed(0), - _pages(), - _node(), - _stall_result() 
{} - - uint8_t type() const { - return _type; - } - - size_t size() const { - return _size; - } - - XAllocationFlags flags() const { - return _flags; - } - - uint32_t seqnum() const { - return _seqnum; - } - - size_t flushed() const { - return _flushed; - } - - void set_flushed(size_t flushed) { - _flushed = flushed; - } - - size_t committed() const { - return _committed; - } - - void set_committed(size_t committed) { - _committed = committed; - } - - XPageAllocationStall wait() { - return _stall_result.get(); - } - - XList* pages() { - return &_pages; - } - - void satisfy(XPageAllocationStall result) { - _stall_result.set(result); - } -}; - -XPageAllocator::XPageAllocator(XWorkers* workers, - size_t min_capacity, - size_t initial_capacity, - size_t max_capacity) : - _lock(), - _cache(), - _virtual(max_capacity), - _physical(max_capacity), - _min_capacity(min_capacity), - _max_capacity(max_capacity), - _current_max_capacity(max_capacity), - _capacity(0), - _claimed(0), - _used(0), - _used_high(0), - _used_low(0), - _reclaimed(0), - _stalled(), - _nstalled(0), - _satisfied(), - _unmapper(new XUnmapper(this)), - _uncommitter(new XUncommitter(this)), - _safe_delete(), - _initialized(false) { - - if (!_virtual.is_initialized() || !_physical.is_initialized()) { - return; - } - - log_info_p(gc, init)("Min Capacity: " SIZE_FORMAT "M", min_capacity / M); - log_info_p(gc, init)("Initial Capacity: " SIZE_FORMAT "M", initial_capacity / M); - log_info_p(gc, init)("Max Capacity: " SIZE_FORMAT "M", max_capacity / M); - if (XPageSizeMedium > 0) { - log_info_p(gc, init)("Medium Page Size: " SIZE_FORMAT "M", XPageSizeMedium / M); - } else { - log_info_p(gc, init)("Medium Page Size: N/A"); - } - log_info_p(gc, init)("Pre-touch: %s", AlwaysPreTouch ? "Enabled" : "Disabled"); - - // Warn if system limits could stop us from reaching max capacity - _physical.warn_commit_limits(max_capacity); - - // Check if uncommit should and can be enabled - _physical.try_enable_uncommit(min_capacity, max_capacity); - - // Pre-map initial capacity - if (!prime_cache(workers, initial_capacity)) { - log_error_p(gc)("Failed to allocate initial Java heap (" SIZE_FORMAT "M)", initial_capacity / M); - return; - } - - // Successfully initialized - _initialized = true; -} - -class XPreTouchTask : public XTask { -private: - const XPhysicalMemoryManager* const _physical; - volatile uintptr_t _start; - const uintptr_t _end; - -public: - XPreTouchTask(const XPhysicalMemoryManager* physical, uintptr_t start, uintptr_t end) : - XTask("XPreTouchTask"), - _physical(physical), - _start(start), - _end(end) {} - - virtual void work() { - for (;;) { - // Get granule offset - const size_t size = XGranuleSize; - const uintptr_t offset = Atomic::fetch_then_add(&_start, size); - if (offset >= _end) { - // Done - break; - } - - // Pre-touch granule - _physical->pretouch(offset, size); - } - } -}; - -bool XPageAllocator::prime_cache(XWorkers* workers, size_t size) { - XAllocationFlags flags; - - flags.set_non_blocking(); - flags.set_low_address(); - - XPage* const page = alloc_page(XPageTypeLarge, size, flags); - if (page == nullptr) { - return false; - } - - if (AlwaysPreTouch) { - // Pre-touch page - XPreTouchTask task(&_physical, page->start(), page->end()); - workers->run_all(&task); - } - - free_page(page, false /* reclaimed */); - - return true; -} - -bool XPageAllocator::is_initialized() const { - return _initialized; -} - -size_t XPageAllocator::min_capacity() const { - return _min_capacity; -} - -size_t XPageAllocator::max_capacity() const { 
- return _max_capacity; -} - -size_t XPageAllocator::soft_max_capacity() const { - // Note that SoftMaxHeapSize is a manageable flag - const size_t soft_max_capacity = Atomic::load(&SoftMaxHeapSize); - const size_t current_max_capacity = Atomic::load(&_current_max_capacity); - return MIN2(soft_max_capacity, current_max_capacity); -} - -size_t XPageAllocator::capacity() const { - return Atomic::load(&_capacity); -} - -size_t XPageAllocator::used() const { - return Atomic::load(&_used); -} - -size_t XPageAllocator::unused() const { - const ssize_t capacity = (ssize_t)Atomic::load(&_capacity); - const ssize_t used = (ssize_t)Atomic::load(&_used); - const ssize_t claimed = (ssize_t)Atomic::load(&_claimed); - const ssize_t unused = capacity - used - claimed; - return unused > 0 ? (size_t)unused : 0; -} - -XPageAllocatorStats XPageAllocator::stats() const { - XLocker locker(&_lock); - return XPageAllocatorStats(_min_capacity, - _max_capacity, - soft_max_capacity(), - _capacity, - _used, - _used_high, - _used_low, - _reclaimed); -} - -void XPageAllocator::reset_statistics() { - assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); - _reclaimed = 0; - _used_high = _used_low = _used; - _nstalled = 0; -} - -size_t XPageAllocator::increase_capacity(size_t size) { - const size_t increased = MIN2(size, _current_max_capacity - _capacity); - - if (increased > 0) { - // Update atomically since we have concurrent readers - Atomic::add(&_capacity, increased); - - // Record time of last commit. When allocation, we prefer increasing - // the capacity over flushing the cache. That means there could be - // expired pages in the cache at this time. However, since we are - // increasing the capacity we are obviously in need of committed - // memory and should therefore not be uncommitting memory. - _cache.set_last_commit(); - } - - return increased; -} - -void XPageAllocator::decrease_capacity(size_t size, bool set_max_capacity) { - // Update atomically since we have concurrent readers - Atomic::sub(&_capacity, size); - - if (set_max_capacity) { - // Adjust current max capacity to avoid further attempts to increase capacity - log_error_p(gc)("Forced to lower max Java heap size from " - SIZE_FORMAT "M(%.0f%%) to " SIZE_FORMAT "M(%.0f%%)", - _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity), - _capacity / M, percent_of(_capacity, _max_capacity)); - - // Update atomically since we have concurrent readers - Atomic::store(&_current_max_capacity, _capacity); - } -} - -void XPageAllocator::increase_used(size_t size, bool worker_relocation) { - if (worker_relocation) { - // Allocating a page for the purpose of worker relocation has - // a negative contribution to the number of reclaimed bytes. - _reclaimed -= size; - } - - // Update atomically since we have concurrent readers - const size_t used = Atomic::add(&_used, size); - if (used > _used_high) { - _used_high = used; - } -} - -void XPageAllocator::decrease_used(size_t size, bool reclaimed) { - // Only pages explicitly released with the reclaimed flag set - // counts as reclaimed bytes. This flag is true when we release - // a page after relocation, and is false when we release a page - // to undo an allocation. 
- if (reclaimed) { - _reclaimed += size; - } - - // Update atomically since we have concurrent readers - const size_t used = Atomic::sub(&_used, size); - if (used < _used_low) { - _used_low = used; - } -} - -bool XPageAllocator::commit_page(XPage* page) { - // Commit physical memory - return _physical.commit(page->physical_memory()); -} - -void XPageAllocator::uncommit_page(XPage* page) { - if (!ZUncommit) { - return; - } - - // Uncommit physical memory - _physical.uncommit(page->physical_memory()); -} - -void XPageAllocator::map_page(const XPage* page) const { - // Map physical memory - _physical.map(page->start(), page->physical_memory()); -} - -void XPageAllocator::unmap_page(const XPage* page) const { - // Unmap physical memory - _physical.unmap(page->start(), page->size()); -} - -void XPageAllocator::destroy_page(XPage* page) { - // Free virtual memory - _virtual.free(page->virtual_memory()); - - // Free physical memory - _physical.free(page->physical_memory()); - - // Delete page safely - _safe_delete(page); -} - -bool XPageAllocator::is_alloc_allowed(size_t size) const { - const size_t available = _current_max_capacity - _used - _claimed; - return available >= size; -} - -bool XPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, XList* pages) { - if (!is_alloc_allowed(size)) { - // Out of memory - return false; - } - - // Try allocate from the page cache - XPage* const page = _cache.alloc_page(type, size); - if (page != nullptr) { - // Success - pages->insert_last(page); - return true; - } - - // Try increase capacity - const size_t increased = increase_capacity(size); - if (increased < size) { - // Could not increase capacity enough to satisfy the allocation - // completely. Flush the page cache to satisfy the remainder. - const size_t remaining = size - increased; - _cache.flush_for_allocation(remaining, pages); - } - - // Success - return true; -} - -bool XPageAllocator::alloc_page_common(XPageAllocation* allocation) { - const uint8_t type = allocation->type(); - const size_t size = allocation->size(); - const XAllocationFlags flags = allocation->flags(); - XList* const pages = allocation->pages(); - - if (!alloc_page_common_inner(type, size, pages)) { - // Out of memory - return false; - } - - // Updated used statistics - increase_used(size, flags.worker_relocation()); - - // Success - return true; -} - -static void check_out_of_memory_during_initialization() { - if (!is_init_completed()) { - vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small"); - } -} - -bool XPageAllocator::alloc_page_stall(XPageAllocation* allocation) { - XStatTimer timer(XCriticalPhaseAllocationStall); - EventZAllocationStall event; - XPageAllocationStall result; - - // We can only block if the VM is fully initialized - check_out_of_memory_during_initialization(); - - // Increment stalled counter - Atomic::inc(&_nstalled); - - do { - // Start asynchronous GC - XCollectedHeap::heap()->collect(GCCause::_z_allocation_stall); - - // Wait for allocation to complete, fail or request a GC - result = allocation->wait(); - } while (result == XPageAllocationStallStartGC); - - { - // - // We grab the lock here for two different reasons: - // - // 1) Guard deletion of underlying semaphore. This is a workaround for - // a bug in sem_post() in glibc < 2.21, where it's not safe to destroy - // the semaphore immediately after returning from sem_wait(). The - // reason is that sem_post() can touch the semaphore after a waiting - // thread have returned from sem_wait(). 
To avoid this race we are - // forcing the waiting thread to acquire/release the lock held by the - // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674 - // - // 2) Guard the list of satisfied pages. - // - XLocker locker(&_lock); - _satisfied.remove(allocation); - } - - // Send event - event.commit(allocation->type(), allocation->size()); - - return (result == XPageAllocationStallSuccess); -} - -bool XPageAllocator::alloc_page_or_stall(XPageAllocation* allocation) { - { - XLocker locker(&_lock); - - if (alloc_page_common(allocation)) { - // Success - return true; - } - - // Failed - if (allocation->flags().non_blocking()) { - // Don't stall - return false; - } - - // Enqueue allocation request - _stalled.insert_last(allocation); - } - - // Stall - return alloc_page_stall(allocation); -} - -XPage* XPageAllocator::alloc_page_create(XPageAllocation* allocation) { - const size_t size = allocation->size(); - - // Allocate virtual memory. To make error handling a lot more straight - // forward, we allocate virtual memory before destroying flushed pages. - // Flushed pages are also unmapped and destroyed asynchronously, so we - // can't immediately reuse that part of the address space anyway. - const XVirtualMemory vmem = _virtual.alloc(size, allocation->flags().low_address()); - if (vmem.is_null()) { - log_error(gc)("Out of address space"); - return nullptr; - } - - XPhysicalMemory pmem; - size_t flushed = 0; - - // Harvest physical memory from flushed pages - XListRemoveIterator iter(allocation->pages()); - for (XPage* page; iter.next(&page);) { - flushed += page->size(); - - // Harvest flushed physical memory - XPhysicalMemory& fmem = page->physical_memory(); - pmem.add_segments(fmem); - fmem.remove_segments(); - - // Unmap and destroy page - _unmapper->unmap_and_destroy_page(page); - } - - if (flushed > 0) { - allocation->set_flushed(flushed); - - // Update statistics - XStatInc(XCounterPageCacheFlush, flushed); - log_debug(gc, heap)("Page Cache Flushed: " SIZE_FORMAT "M", flushed / M); - } - - // Allocate any remaining physical memory. Capacity and used has - // already been adjusted, we just need to fetch the memory, which - // is guaranteed to succeed. - if (flushed < size) { - const size_t remaining = size - flushed; - allocation->set_committed(remaining); - _physical.alloc(pmem, remaining); - } - - // Create new page - return new XPage(allocation->type(), vmem, pmem); -} - -bool XPageAllocator::should_defragment(const XPage* page) const { - // A small page can end up at a high address (second half of the address space) - // if we've split a larger page or we have a constrained address space. To help - // fight address space fragmentation we remap such pages to a lower address, if - // a lower address is available. - return page->type() == XPageTypeSmall && - page->start() >= _virtual.reserved() / 2 && - page->start() > _virtual.lowest_available_address(); -} - -bool XPageAllocator::is_alloc_satisfied(XPageAllocation* allocation) const { - // The allocation is immediately satisfied if the list of pages contains - // exactly one page, with the type and size that was requested. However, - // even if the allocation is immediately satisfied we might still want to - // return false here to force the page to be remapped to fight address - // space fragmentation. 
- - if (allocation->pages()->size() != 1) { - // Not a single page - return false; - } - - const XPage* const page = allocation->pages()->first(); - if (page->type() != allocation->type() || - page->size() != allocation->size()) { - // Wrong type or size - return false; - } - - if (should_defragment(page)) { - // Defragment address space - XStatInc(XCounterDefragment); - return false; - } - - // Allocation immediately satisfied - return true; -} - -XPage* XPageAllocator::alloc_page_finalize(XPageAllocation* allocation) { - // Fast path - if (is_alloc_satisfied(allocation)) { - return allocation->pages()->remove_first(); - } - - // Slow path - XPage* const page = alloc_page_create(allocation); - if (page == nullptr) { - // Out of address space - return nullptr; - } - - // Commit page - if (commit_page(page)) { - // Success - map_page(page); - return page; - } - - // Failed or partially failed. Split of any successfully committed - // part of the page into a new page and insert it into list of pages, - // so that it will be re-inserted into the page cache. - XPage* const committed_page = page->split_committed(); - destroy_page(page); - - if (committed_page != nullptr) { - map_page(committed_page); - allocation->pages()->insert_last(committed_page); - } - - return nullptr; -} - -void XPageAllocator::alloc_page_failed(XPageAllocation* allocation) { - XLocker locker(&_lock); - - size_t freed = 0; - - // Free any allocated/flushed pages - XListRemoveIterator iter(allocation->pages()); - for (XPage* page; iter.next(&page);) { - freed += page->size(); - free_page_inner(page, false /* reclaimed */); - } - - // Adjust capacity and used to reflect the failed capacity increase - const size_t remaining = allocation->size() - freed; - decrease_used(remaining, false /* reclaimed */); - decrease_capacity(remaining, true /* set_max_capacity */); - - // Try satisfy stalled allocations - satisfy_stalled(); -} - -XPage* XPageAllocator::alloc_page(uint8_t type, size_t size, XAllocationFlags flags) { - EventZPageAllocation event; - -retry: - XPageAllocation allocation(type, size, flags); - - // Allocate one or more pages from the page cache. If the allocation - // succeeds but the returned pages don't cover the complete allocation, - // then finalize phase is allowed to allocate the remaining memory - // directly from the physical memory manager. Note that this call might - // block in a safepoint if the non-blocking flag is not set. - if (!alloc_page_or_stall(&allocation)) { - // Out of memory - return nullptr; - } - - XPage* const page = alloc_page_finalize(&allocation); - if (page == nullptr) { - // Failed to commit or map. Clean up and retry, in the hope that - // we can still allocate by flushing the page cache (more aggressively). - alloc_page_failed(&allocation); - goto retry; - } - - // Reset page. This updates the page's sequence number and must - // be done after we potentially blocked in a safepoint (stalled) - // where the global sequence number was updated. - page->reset(); - - // Update allocation statistics. Exclude worker relocations to avoid - // artificial inflation of the allocation rate during relocation. - if (!flags.worker_relocation() && is_init_completed()) { - // Note that there are two allocation rate counters, which have - // different purposes and are sampled at different frequencies. 
- const size_t bytes = page->size(); - XStatInc(XCounterAllocationRate, bytes); - XStatInc(XStatAllocRate::counter(), bytes); - } - - // Send event - event.commit(type, size, allocation.flushed(), allocation.committed(), - page->physical_memory().nsegments(), flags.non_blocking()); - - return page; -} - -void XPageAllocator::satisfy_stalled() { - for (;;) { - XPageAllocation* const allocation = _stalled.first(); - if (allocation == nullptr) { - // Allocation queue is empty - return; - } - - if (!alloc_page_common(allocation)) { - // Allocation could not be satisfied, give up - return; - } - - // Allocation succeeded, dequeue and satisfy allocation request. - // Note that we must dequeue the allocation request first, since - // it will immediately be deallocated once it has been satisfied. - _stalled.remove(allocation); - _satisfied.insert_last(allocation); - allocation->satisfy(XPageAllocationStallSuccess); - } -} - -void XPageAllocator::free_page_inner(XPage* page, bool reclaimed) { - // Update used statistics - decrease_used(page->size(), reclaimed); - - // Set time when last used - page->set_last_used(); - - // Cache page - _cache.free_page(page); -} - -void XPageAllocator::free_page(XPage* page, bool reclaimed) { - XLocker locker(&_lock); - - // Free page - free_page_inner(page, reclaimed); - - // Try satisfy stalled allocations - satisfy_stalled(); -} - -void XPageAllocator::free_pages(const XArray* pages, bool reclaimed) { - XLocker locker(&_lock); - - // Free pages - XArrayIterator iter(pages); - for (XPage* page; iter.next(&page);) { - free_page_inner(page, reclaimed); - } - - // Try satisfy stalled allocations - satisfy_stalled(); -} - -size_t XPageAllocator::uncommit(uint64_t* timeout) { - // We need to join the suspendible thread set while manipulating capacity and - // used, to make sure GC safepoints will have a consistent view. However, when - // ZVerifyViews is enabled we need to join at a broader scope to also make sure - // we don't change the address good mask after pages have been flushed, and - // thereby made invisible to pages_do(), but before they have been unmapped. - SuspendibleThreadSetJoiner joiner(ZVerifyViews); - XList pages; - size_t flushed; - - { - SuspendibleThreadSetJoiner joiner(!ZVerifyViews); - XLocker locker(&_lock); - - // Never uncommit below min capacity. We flush out and uncommit chunks at - // a time (~0.8% of the max capacity, but at least one granule and at most - // 256M), in case demand for memory increases while we are uncommitting. 
- const size_t retain = MAX2(_used, _min_capacity); - const size_t release = _capacity - retain; - const size_t limit = MIN2(align_up(_current_max_capacity >> 7, XGranuleSize), 256 * M); - const size_t flush = MIN2(release, limit); - - // Flush pages to uncommit - flushed = _cache.flush_for_uncommit(flush, &pages, timeout); - if (flushed == 0) { - // Nothing flushed - return 0; - } - - // Record flushed pages as claimed - Atomic::add(&_claimed, flushed); - } - - // Unmap, uncommit, and destroy flushed pages - XListRemoveIterator iter(&pages); - for (XPage* page; iter.next(&page);) { - unmap_page(page); - uncommit_page(page); - destroy_page(page); - } - - { - SuspendibleThreadSetJoiner joiner(!ZVerifyViews); - XLocker locker(&_lock); - - // Adjust claimed and capacity to reflect the uncommit - Atomic::sub(&_claimed, flushed); - decrease_capacity(flushed, false /* set_max_capacity */); - } - - return flushed; -} - -void XPageAllocator::enable_deferred_delete() const { - _safe_delete.enable_deferred_delete(); -} - -void XPageAllocator::disable_deferred_delete() const { - _safe_delete.disable_deferred_delete(); -} - -void XPageAllocator::debug_map_page(const XPage* page) const { - assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); - _physical.debug_map(page->start(), page->physical_memory()); -} - -void XPageAllocator::debug_unmap_page(const XPage* page) const { - assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); - _physical.debug_unmap(page->start(), page->size()); -} - -void XPageAllocator::pages_do(XPageClosure* cl) const { - assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); - - XListIterator iter_satisfied(&_satisfied); - for (XPageAllocation* allocation; iter_satisfied.next(&allocation);) { - XListIterator iter_pages(allocation->pages()); - for (XPage* page; iter_pages.next(&page);) { - cl->do_page(page); - } - } - - _cache.pages_do(cl); -} - -bool XPageAllocator::has_alloc_stalled() const { - return Atomic::load(&_nstalled) != 0; -} - -void XPageAllocator::check_out_of_memory() { - XLocker locker(&_lock); - - // Fail allocation requests that were enqueued before the - // last GC cycle started, otherwise start a new GC cycle. - for (XPageAllocation* allocation = _stalled.first(); allocation != nullptr; allocation = _stalled.first()) { - if (allocation->seqnum() == XGlobalSeqNum) { - // Start a new GC cycle, keep allocation requests enqueued - allocation->satisfy(XPageAllocationStallStartGC); - return; - } - - // Out of memory, fail allocation request - _stalled.remove(allocation); - _satisfied.insert_last(allocation); - allocation->satisfy(XPageAllocationStallFailed); - } -} - -void XPageAllocator::threads_do(ThreadClosure* tc) const { - tc->do_thread(_unmapper); - tc->do_thread(_uncommitter); -} diff --git a/src/hotspot/share/gc/x/xPageAllocator.hpp b/src/hotspot/share/gc/x/xPageAllocator.hpp deleted file mode 100644 index b907e50043d42..0000000000000 --- a/src/hotspot/share/gc/x/xPageAllocator.hpp +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XPAGEALLOCATOR_HPP -#define SHARE_GC_X_XPAGEALLOCATOR_HPP - -#include "gc/x/xAllocationFlags.hpp" -#include "gc/x/xArray.hpp" -#include "gc/x/xList.hpp" -#include "gc/x/xLock.hpp" -#include "gc/x/xPageCache.hpp" -#include "gc/x/xPhysicalMemory.hpp" -#include "gc/x/xSafeDelete.hpp" -#include "gc/x/xVirtualMemory.hpp" - -class ThreadClosure; -class VMStructs; -class XPageAllocation; -class XPageAllocatorStats; -class XWorkers; -class XUncommitter; -class XUnmapper; - -class XPageAllocator { - friend class ::VMStructs; - friend class XUnmapper; - friend class XUncommitter; - -private: - mutable XLock _lock; - XPageCache _cache; - XVirtualMemoryManager _virtual; - XPhysicalMemoryManager _physical; - const size_t _min_capacity; - const size_t _max_capacity; - volatile size_t _current_max_capacity; - volatile size_t _capacity; - volatile size_t _claimed; - volatile size_t _used; - size_t _used_high; - size_t _used_low; - ssize_t _reclaimed; - XList _stalled; - volatile uint64_t _nstalled; - XList _satisfied; - XUnmapper* _unmapper; - XUncommitter* _uncommitter; - mutable XSafeDelete _safe_delete; - bool _initialized; - - bool prime_cache(XWorkers* workers, size_t size); - - size_t increase_capacity(size_t size); - void decrease_capacity(size_t size, bool set_max_capacity); - - void increase_used(size_t size, bool relocation); - void decrease_used(size_t size, bool reclaimed); - - bool commit_page(XPage* page); - void uncommit_page(XPage* page); - - void map_page(const XPage* page) const; - void unmap_page(const XPage* page) const; - - void destroy_page(XPage* page); - - bool is_alloc_allowed(size_t size) const; - - bool alloc_page_common_inner(uint8_t type, size_t size, XList* pages); - bool alloc_page_common(XPageAllocation* allocation); - bool alloc_page_stall(XPageAllocation* allocation); - bool alloc_page_or_stall(XPageAllocation* allocation); - bool should_defragment(const XPage* page) const; - bool is_alloc_satisfied(XPageAllocation* allocation) const; - XPage* alloc_page_create(XPageAllocation* allocation); - XPage* alloc_page_finalize(XPageAllocation* allocation); - void alloc_page_failed(XPageAllocation* allocation); - - void satisfy_stalled(); - - void free_page_inner(XPage* page, bool reclaimed); - - size_t uncommit(uint64_t* timeout); - -public: - XPageAllocator(XWorkers* workers, - size_t min_capacity, - size_t initial_capacity, - size_t max_capacity); - - bool is_initialized() const; - - size_t min_capacity() const; - size_t max_capacity() const; - size_t soft_max_capacity() const; - size_t capacity() const; - size_t used() const; - size_t unused() const; - - XPageAllocatorStats stats() const; - - void reset_statistics(); - - XPage* alloc_page(uint8_t type, size_t size, XAllocationFlags flags); - void free_page(XPage* page, bool reclaimed); - 
void free_pages(const XArray* pages, bool reclaimed); - - void enable_deferred_delete() const; - void disable_deferred_delete() const; - - void debug_map_page(const XPage* page) const; - void debug_unmap_page(const XPage* page) const; - - bool has_alloc_stalled() const; - void check_out_of_memory(); - - void pages_do(XPageClosure* cl) const; - - void threads_do(ThreadClosure* tc) const; -}; - -class XPageAllocatorStats { -private: - size_t _min_capacity; - size_t _max_capacity; - size_t _soft_max_capacity; - size_t _current_max_capacity; - size_t _capacity; - size_t _used; - size_t _used_high; - size_t _used_low; - size_t _reclaimed; - -public: - XPageAllocatorStats(size_t min_capacity, - size_t max_capacity, - size_t soft_max_capacity, - size_t capacity, - size_t used, - size_t used_high, - size_t used_low, - size_t reclaimed); - - size_t min_capacity() const; - size_t max_capacity() const; - size_t soft_max_capacity() const; - size_t capacity() const; - size_t used() const; - size_t used_high() const; - size_t used_low() const; - size_t reclaimed() const; -}; - -#endif // SHARE_GC_X_XPAGEALLOCATOR_HPP diff --git a/src/hotspot/share/gc/x/xPageAllocator.inline.hpp b/src/hotspot/share/gc/x/xPageAllocator.inline.hpp deleted file mode 100644 index dbaf77f56a051..0000000000000 --- a/src/hotspot/share/gc/x/xPageAllocator.inline.hpp +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XPAGEALLOCATOR_INLINE_HPP -#define SHARE_GC_X_XPAGEALLOCATOR_INLINE_HPP - -#include "gc/x/xPageAllocator.hpp" - -inline XPageAllocatorStats::XPageAllocatorStats(size_t min_capacity, - size_t max_capacity, - size_t soft_max_capacity, - size_t capacity, - size_t used, - size_t used_high, - size_t used_low, - size_t reclaimed) : - _min_capacity(min_capacity), - _max_capacity(max_capacity), - _soft_max_capacity(soft_max_capacity), - _capacity(capacity), - _used(used), - _used_high(used_high), - _used_low(used_low), - _reclaimed(reclaimed) {} - -inline size_t XPageAllocatorStats::min_capacity() const { - return _min_capacity; -} - -inline size_t XPageAllocatorStats::max_capacity() const { - return _max_capacity; -} - -inline size_t XPageAllocatorStats::soft_max_capacity() const { - return _soft_max_capacity; -} - -inline size_t XPageAllocatorStats::capacity() const { - return _capacity; -} - -inline size_t XPageAllocatorStats::used() const { - return _used; -} - -inline size_t XPageAllocatorStats::used_high() const { - return _used_high; -} - -inline size_t XPageAllocatorStats::used_low() const { - return _used_low; -} - -inline size_t XPageAllocatorStats::reclaimed() const { - return _reclaimed; -} - -#endif // SHARE_GC_X_XPAGEALLOCATOR_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xPageCache.cpp b/src/hotspot/share/gc/x/xPageCache.cpp deleted file mode 100644 index d38b0646a8a41..0000000000000 --- a/src/hotspot/share/gc/x/xPageCache.cpp +++ /dev/null @@ -1,356 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xList.inline.hpp" -#include "gc/x/xNUMA.hpp" -#include "gc/x/xPage.inline.hpp" -#include "gc/x/xPageCache.hpp" -#include "gc/x/xStat.hpp" -#include "gc/x/xValue.inline.hpp" -#include "memory/allocation.hpp" -#include "runtime/globals.hpp" -#include "runtime/os.hpp" - -static const XStatCounter XCounterPageCacheHitL1("Memory", "Page Cache Hit L1", XStatUnitOpsPerSecond); -static const XStatCounter XCounterPageCacheHitL2("Memory", "Page Cache Hit L2", XStatUnitOpsPerSecond); -static const XStatCounter XCounterPageCacheHitL3("Memory", "Page Cache Hit L3", XStatUnitOpsPerSecond); -static const XStatCounter XCounterPageCacheMiss("Memory", "Page Cache Miss", XStatUnitOpsPerSecond); - -class XPageCacheFlushClosure : public StackObj { - friend class XPageCache; - -protected: - const size_t _requested; - size_t _flushed; - -public: - XPageCacheFlushClosure(size_t requested); - virtual bool do_page(const XPage* page) = 0; -}; - -XPageCacheFlushClosure::XPageCacheFlushClosure(size_t requested) : - _requested(requested), - _flushed(0) {} - -XPageCache::XPageCache() : - _small(), - _medium(), - _large(), - _last_commit(0) {} - -XPage* XPageCache::alloc_small_page() { - const uint32_t numa_id = XNUMA::id(); - const uint32_t numa_count = XNUMA::count(); - - // Try NUMA local page cache - XPage* const l1_page = _small.get(numa_id).remove_first(); - if (l1_page != nullptr) { - XStatInc(XCounterPageCacheHitL1); - return l1_page; - } - - // Try NUMA remote page cache(s) - uint32_t remote_numa_id = numa_id + 1; - const uint32_t remote_numa_count = numa_count - 1; - for (uint32_t i = 0; i < remote_numa_count; i++) { - if (remote_numa_id == numa_count) { - remote_numa_id = 0; - } - - XPage* const l2_page = _small.get(remote_numa_id).remove_first(); - if (l2_page != nullptr) { - XStatInc(XCounterPageCacheHitL2); - return l2_page; - } - - remote_numa_id++; - } - - return nullptr; -} - -XPage* XPageCache::alloc_medium_page() { - XPage* const page = _medium.remove_first(); - if (page != nullptr) { - XStatInc(XCounterPageCacheHitL1); - return page; - } - - return nullptr; -} - -XPage* XPageCache::alloc_large_page(size_t size) { - // Find a page with the right size - XListIterator iter(&_large); - for (XPage* page; iter.next(&page);) { - if (size == page->size()) { - // Page found - _large.remove(page); - XStatInc(XCounterPageCacheHitL1); - return page; - } - } - - return nullptr; -} - -XPage* XPageCache::alloc_oversized_medium_page(size_t size) { - if (size <= XPageSizeMedium) { - return _medium.remove_first(); - } - - return nullptr; -} - -XPage* XPageCache::alloc_oversized_large_page(size_t size) { - // Find a page that is large enough - XListIterator iter(&_large); - for (XPage* page; iter.next(&page);) { - if (size <= page->size()) { - // Page found - _large.remove(page); - return page; - } - } - - return nullptr; -} - -XPage* XPageCache::alloc_oversized_page(size_t size) { - XPage* page = alloc_oversized_large_page(size); - if (page == nullptr) { - page = alloc_oversized_medium_page(size); - } - - if (page != nullptr) { - XStatInc(XCounterPageCacheHitL3); - } - - return page; -} - -XPage* XPageCache::alloc_page(uint8_t type, size_t size) { - XPage* page; - - // Try allocate exact page - if (type == XPageTypeSmall) { - page = alloc_small_page(); - } else if (type == XPageTypeMedium) { - page = alloc_medium_page(); - } else { - page = alloc_large_page(size); - } - - if (page == nullptr) { - // Try allocate potentially oversized page 
- XPage* const oversized = alloc_oversized_page(size); - if (oversized != nullptr) { - if (size < oversized->size()) { - // Split oversized page - page = oversized->split(type, size); - - // Cache remainder - free_page(oversized); - } else { - // Re-type correctly sized page - page = oversized->retype(type); - } - } - } - - if (page == nullptr) { - XStatInc(XCounterPageCacheMiss); - } - - return page; -} - -void XPageCache::free_page(XPage* page) { - const uint8_t type = page->type(); - if (type == XPageTypeSmall) { - _small.get(page->numa_id()).insert_first(page); - } else if (type == XPageTypeMedium) { - _medium.insert_first(page); - } else { - _large.insert_first(page); - } -} - -bool XPageCache::flush_list_inner(XPageCacheFlushClosure* cl, XList* from, XList* to) { - XPage* const page = from->last(); - if (page == nullptr || !cl->do_page(page)) { - // Don't flush page - return false; - } - - // Flush page - from->remove(page); - to->insert_last(page); - return true; -} - -void XPageCache::flush_list(XPageCacheFlushClosure* cl, XList* from, XList* to) { - while (flush_list_inner(cl, from, to)); -} - -void XPageCache::flush_per_numa_lists(XPageCacheFlushClosure* cl, XPerNUMA >* from, XList* to) { - const uint32_t numa_count = XNUMA::count(); - uint32_t numa_done = 0; - uint32_t numa_next = 0; - - // Flush lists round-robin - while (numa_done < numa_count) { - XList* numa_list = from->addr(numa_next); - if (++numa_next == numa_count) { - numa_next = 0; - } - - if (flush_list_inner(cl, numa_list, to)) { - // Not done - numa_done = 0; - } else { - // Done - numa_done++; - } - } -} - -void XPageCache::flush(XPageCacheFlushClosure* cl, XList* to) { - // Prefer flushing large, then medium and last small pages - flush_list(cl, &_large, to); - flush_list(cl, &_medium, to); - flush_per_numa_lists(cl, &_small, to); - - if (cl->_flushed > cl->_requested) { - // Overflushed, re-insert part of last page into the cache - const size_t overflushed = cl->_flushed - cl->_requested; - XPage* const reinsert = to->last()->split(overflushed); - free_page(reinsert); - cl->_flushed -= overflushed; - } -} - -class XPageCacheFlushForAllocationClosure : public XPageCacheFlushClosure { -public: - XPageCacheFlushForAllocationClosure(size_t requested) : - XPageCacheFlushClosure(requested) {} - - virtual bool do_page(const XPage* page) { - if (_flushed < _requested) { - // Flush page - _flushed += page->size(); - return true; - } - - // Don't flush page - return false; - } -}; - -void XPageCache::flush_for_allocation(size_t requested, XList* to) { - XPageCacheFlushForAllocationClosure cl(requested); - flush(&cl, to); -} - -class XPageCacheFlushForUncommitClosure : public XPageCacheFlushClosure { -private: - const uint64_t _now; - uint64_t* _timeout; - -public: - XPageCacheFlushForUncommitClosure(size_t requested, uint64_t now, uint64_t* timeout) : - XPageCacheFlushClosure(requested), - _now(now), - _timeout(timeout) { - // Set initial timeout - *_timeout = ZUncommitDelay; - } - - virtual bool do_page(const XPage* page) { - const uint64_t expires = page->last_used() + ZUncommitDelay; - if (expires > _now) { - // Don't flush page, record shortest non-expired timeout - *_timeout = MIN2(*_timeout, expires - _now); - return false; - } - - if (_flushed >= _requested) { - // Don't flush page, requested amount flushed - return false; - } - - // Flush page - _flushed += page->size(); - return true; - } -}; - -size_t XPageCache::flush_for_uncommit(size_t requested, XList* to, uint64_t* timeout) { - const uint64_t now = 
os::elapsedTime(); - const uint64_t expires = _last_commit + ZUncommitDelay; - if (expires > now) { - // Delay uncommit, set next timeout - *timeout = expires - now; - return 0; - } - - if (requested == 0) { - // Nothing to flush, set next timeout - *timeout = ZUncommitDelay; - return 0; - } - - XPageCacheFlushForUncommitClosure cl(requested, now, timeout); - flush(&cl, to); - - return cl._flushed; -} - -void XPageCache::set_last_commit() { - _last_commit = ceil(os::elapsedTime()); -} - -void XPageCache::pages_do(XPageClosure* cl) const { - // Small - XPerNUMAConstIterator > iter_numa(&_small); - for (const XList* list; iter_numa.next(&list);) { - XListIterator iter_small(list); - for (XPage* page; iter_small.next(&page);) { - cl->do_page(page); - } - } - - // Medium - XListIterator iter_medium(&_medium); - for (XPage* page; iter_medium.next(&page);) { - cl->do_page(page); - } - - // Large - XListIterator iter_large(&_large); - for (XPage* page; iter_large.next(&page);) { - cl->do_page(page); - } -} diff --git a/src/hotspot/share/gc/x/xPageCache.hpp b/src/hotspot/share/gc/x/xPageCache.hpp deleted file mode 100644 index 9ed80a933f43b..0000000000000 --- a/src/hotspot/share/gc/x/xPageCache.hpp +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XPAGECACHE_HPP -#define SHARE_GC_X_XPAGECACHE_HPP - -#include "gc/x/xList.hpp" -#include "gc/x/xPage.hpp" -#include "gc/x/xValue.hpp" - -class XPageCacheFlushClosure; - -class XPageCache { -private: - XPerNUMA > _small; - XList _medium; - XList _large; - uint64_t _last_commit; - - XPage* alloc_small_page(); - XPage* alloc_medium_page(); - XPage* alloc_large_page(size_t size); - - XPage* alloc_oversized_medium_page(size_t size); - XPage* alloc_oversized_large_page(size_t size); - XPage* alloc_oversized_page(size_t size); - - bool flush_list_inner(XPageCacheFlushClosure* cl, XList* from, XList* to); - void flush_list(XPageCacheFlushClosure* cl, XList* from, XList* to); - void flush_per_numa_lists(XPageCacheFlushClosure* cl, XPerNUMA >* from, XList* to); - void flush(XPageCacheFlushClosure* cl, XList* to); - -public: - XPageCache(); - - XPage* alloc_page(uint8_t type, size_t size); - void free_page(XPage* page); - - void flush_for_allocation(size_t requested, XList* to); - size_t flush_for_uncommit(size_t requested, XList* to, uint64_t* timeout); - - void set_last_commit(); - - void pages_do(XPageClosure* cl) const; -}; - -#endif // SHARE_GC_X_XPAGECACHE_HPP diff --git a/src/hotspot/share/gc/x/xPageTable.cpp b/src/hotspot/share/gc/x/xPageTable.cpp deleted file mode 100644 index c3103e808ca21..0000000000000 --- a/src/hotspot/share/gc/x/xPageTable.cpp +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xGranuleMap.inline.hpp" -#include "gc/x/xPage.inline.hpp" -#include "gc/x/xPageTable.inline.hpp" -#include "runtime/orderAccess.hpp" -#include "utilities/debug.hpp" - -XPageTable::XPageTable() : - _map(XAddressOffsetMax) {} - -void XPageTable::insert(XPage* page) { - const uintptr_t offset = page->start(); - const size_t size = page->size(); - - // Make sure a newly created page is - // visible before updating the page table. 
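// ---------------------------------------------------------------------------
// Editorial sketch -- not part of the deleted xPageTable.cpp. The
// OrderAccess::storestore() below implements the usual publish pattern:
// finish initializing the page before the store that makes it reachable
// through the table. A minimal, self-contained analogue using standard C++
// release/acquire atomics (all names here are hypothetical) might look like:

#include <atomic>
#include <cstddef>

namespace sketch_publish {

struct Page {
  std::size_t start;
  std::size_t size;
};

// A single slot stands in for a page table entry.
std::atomic<Page*> slot{nullptr};

// Writer: the release store keeps the writes that initialize *page from
// being reordered past the store that publishes the pointer.
void insert(Page* page) {
  slot.store(page, std::memory_order_release);
}

// Reader: the acquire load pairs with the release store, so once the pointer
// is observed, the page's fields are visible too.
Page* get() {
  return slot.load(std::memory_order_acquire);
}

}  // namespace sketch_publish
// ---------------------------------------------------------------------------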
- OrderAccess::storestore(); - - assert(_map.get(offset) == nullptr, "Invalid entry"); - _map.put(offset, size, page); -} - -void XPageTable::remove(XPage* page) { - const uintptr_t offset = page->start(); - const size_t size = page->size(); - - assert(_map.get(offset) == page, "Invalid entry"); - _map.put(offset, size, nullptr); -} diff --git a/src/hotspot/share/gc/x/xPageTable.hpp b/src/hotspot/share/gc/x/xPageTable.hpp deleted file mode 100644 index 958dd73555770..0000000000000 --- a/src/hotspot/share/gc/x/xPageTable.hpp +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XPAGETABLE_HPP -#define SHARE_GC_X_XPAGETABLE_HPP - -#include "gc/x/xGranuleMap.hpp" -#include "memory/allocation.hpp" - -class VMStructs; -class XPage; -class XPageTableIterator; - -class XPageTable { - friend class ::VMStructs; - friend class XPageTableIterator; - -private: - XGranuleMap _map; - -public: - XPageTable(); - - XPage* get(uintptr_t addr) const; - - void insert(XPage* page); - void remove(XPage* page); -}; - -class XPageTableIterator : public StackObj { -private: - XGranuleMapIterator _iter; - XPage* _prev; - -public: - XPageTableIterator(const XPageTable* page_table); - - bool next(XPage** page); -}; - -#endif // SHARE_GC_X_XPAGETABLE_HPP diff --git a/src/hotspot/share/gc/x/xPageTable.inline.hpp b/src/hotspot/share/gc/x/xPageTable.inline.hpp deleted file mode 100644 index 65ad223e334f0..0000000000000 --- a/src/hotspot/share/gc/x/xPageTable.inline.hpp +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XPAGETABLE_INLINE_HPP -#define SHARE_GC_X_XPAGETABLE_INLINE_HPP - -#include "gc/x/xPageTable.hpp" - -#include "gc/x/xAddress.inline.hpp" -#include "gc/x/xGranuleMap.inline.hpp" - -inline XPage* XPageTable::get(uintptr_t addr) const { - assert(!XAddress::is_null(addr), "Invalid address"); - return _map.get(XAddress::offset(addr)); -} - -inline XPageTableIterator::XPageTableIterator(const XPageTable* page_table) : - _iter(&page_table->_map), - _prev(nullptr) {} - -inline bool XPageTableIterator::next(XPage** page) { - for (XPage* entry; _iter.next(&entry);) { - if (entry != nullptr && entry != _prev) { - // Next page found - *page = _prev = entry; - return true; - } - } - - // No more pages - return false; -} - -#endif // SHARE_GC_X_XPAGETABLE_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xPhysicalMemory.cpp b/src/hotspot/share/gc/x/xPhysicalMemory.cpp deleted file mode 100644 index 0269c64f0f1aa..0000000000000 --- a/src/hotspot/share/gc/x/xPhysicalMemory.cpp +++ /dev/null @@ -1,434 +0,0 @@ -/* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/shared/gcLogPrecious.hpp" -#include "gc/x/xAddress.inline.hpp" -#include "gc/x/xArray.inline.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xLargePages.inline.hpp" -#include "gc/x/xList.inline.hpp" -#include "gc/x/xNUMA.inline.hpp" -#include "gc/x/xPhysicalMemory.inline.hpp" -#include "logging/log.hpp" -#include "nmt/memTracker.hpp" -#include "runtime/globals.hpp" -#include "runtime/globals_extension.hpp" -#include "runtime/init.hpp" -#include "runtime/os.hpp" -#include "utilities/align.hpp" -#include "utilities/debug.hpp" -#include "utilities/globalDefinitions.hpp" -#include "utilities/powerOfTwo.hpp" - -XPhysicalMemory::XPhysicalMemory() : - _segments() {} - -XPhysicalMemory::XPhysicalMemory(const XPhysicalMemorySegment& segment) : - _segments() { - add_segment(segment); -} - -XPhysicalMemory::XPhysicalMemory(const XPhysicalMemory& pmem) : - _segments() { - add_segments(pmem); -} - -const XPhysicalMemory& XPhysicalMemory::operator=(const XPhysicalMemory& pmem) { - // Free segments - _segments.clear_and_deallocate(); - - // Copy segments - add_segments(pmem); - - return *this; -} - -size_t XPhysicalMemory::size() const { - size_t size = 0; - - for (int i = 0; i < _segments.length(); i++) { - size += _segments.at(i).size(); - } - - return size; -} - -void XPhysicalMemory::insert_segment(int index, uintptr_t start, size_t size, bool committed) { - _segments.insert_before(index, XPhysicalMemorySegment(start, size, committed)); -} - -void XPhysicalMemory::replace_segment(int index, uintptr_t start, size_t size, bool committed) { - _segments.at_put(index, XPhysicalMemorySegment(start, size, committed)); -} - -void XPhysicalMemory::remove_segment(int index) { - _segments.remove_at(index); -} - -void XPhysicalMemory::add_segments(const XPhysicalMemory& pmem) { - for (int i = 0; i < pmem.nsegments(); i++) { - add_segment(pmem.segment(i)); - } -} - -void XPhysicalMemory::remove_segments() { - _segments.clear_and_deallocate(); -} - -static bool is_mergable(const XPhysicalMemorySegment& before, const XPhysicalMemorySegment& after) { - return before.end() == after.start() && before.is_committed() == after.is_committed(); -} - -void XPhysicalMemory::add_segment(const XPhysicalMemorySegment& segment) { - // Insert segments in address order, merge segments when possible - for (int i = _segments.length(); i > 0; i--) { - const int current = i - 1; - - if (_segments.at(current).end() <= segment.start()) { - if (is_mergable(_segments.at(current), segment)) { - if (current + 1 < _segments.length() && is_mergable(segment, _segments.at(current + 1))) { - // Merge with end of current segment and start of next segment - const size_t start = _segments.at(current).start(); - const size_t size = _segments.at(current).size() + segment.size() + _segments.at(current + 1).size(); - replace_segment(current, start, size, segment.is_committed()); - remove_segment(current + 1); - return; - } - - // Merge with end of current segment - const size_t start = _segments.at(current).start(); - const size_t size = _segments.at(current).size() + segment.size(); - replace_segment(current, start, size, segment.is_committed()); - return; - } else if (current + 1 < _segments.length() && is_mergable(segment, _segments.at(current + 1))) { - // Merge with start of next segment - const size_t start = segment.start(); - const size_t size = segment.size() + _segments.at(current + 1).size(); - replace_segment(current + 1, start, size, segment.is_committed()); - return; - } - - // Insert 
after current segment - insert_segment(current + 1, segment.start(), segment.size(), segment.is_committed()); - return; - } - } - - if (_segments.length() > 0 && is_mergable(segment, _segments.at(0))) { - // Merge with start of first segment - const size_t start = segment.start(); - const size_t size = segment.size() + _segments.at(0).size(); - replace_segment(0, start, size, segment.is_committed()); - return; - } - - // Insert before first segment - insert_segment(0, segment.start(), segment.size(), segment.is_committed()); -} - -bool XPhysicalMemory::commit_segment(int index, size_t size) { - assert(size <= _segments.at(index).size(), "Invalid size"); - assert(!_segments.at(index).is_committed(), "Invalid state"); - - if (size == _segments.at(index).size()) { - // Completely committed - _segments.at(index).set_committed(true); - return true; - } - - if (size > 0) { - // Partially committed, split segment - insert_segment(index + 1, _segments.at(index).start() + size, _segments.at(index).size() - size, false /* committed */); - replace_segment(index, _segments.at(index).start(), size, true /* committed */); - } - - return false; -} - -bool XPhysicalMemory::uncommit_segment(int index, size_t size) { - assert(size <= _segments.at(index).size(), "Invalid size"); - assert(_segments.at(index).is_committed(), "Invalid state"); - - if (size == _segments.at(index).size()) { - // Completely uncommitted - _segments.at(index).set_committed(false); - return true; - } - - if (size > 0) { - // Partially uncommitted, split segment - insert_segment(index + 1, _segments.at(index).start() + size, _segments.at(index).size() - size, true /* committed */); - replace_segment(index, _segments.at(index).start(), size, false /* committed */); - } - - return false; -} - -XPhysicalMemory XPhysicalMemory::split(size_t size) { - XPhysicalMemory pmem; - int nsegments = 0; - - for (int i = 0; i < _segments.length(); i++) { - const XPhysicalMemorySegment& segment = _segments.at(i); - if (pmem.size() < size) { - if (pmem.size() + segment.size() <= size) { - // Transfer segment - pmem.add_segment(segment); - } else { - // Split segment - const size_t split_size = size - pmem.size(); - pmem.add_segment(XPhysicalMemorySegment(segment.start(), split_size, segment.is_committed())); - _segments.at_put(nsegments++, XPhysicalMemorySegment(segment.start() + split_size, segment.size() - split_size, segment.is_committed())); - } - } else { - // Keep segment - _segments.at_put(nsegments++, segment); - } - } - - _segments.trunc_to(nsegments); - - return pmem; -} - -XPhysicalMemory XPhysicalMemory::split_committed() { - XPhysicalMemory pmem; - int nsegments = 0; - - for (int i = 0; i < _segments.length(); i++) { - const XPhysicalMemorySegment& segment = _segments.at(i); - if (segment.is_committed()) { - // Transfer segment - pmem.add_segment(segment); - } else { - // Keep segment - _segments.at_put(nsegments++, segment); - } - } - - _segments.trunc_to(nsegments); - - return pmem; -} - -XPhysicalMemoryManager::XPhysicalMemoryManager(size_t max_capacity) : - _backing(max_capacity) { - // Make the whole range free - _manager.free(0, max_capacity); -} - -bool XPhysicalMemoryManager::is_initialized() const { - return _backing.is_initialized(); -} - -void XPhysicalMemoryManager::warn_commit_limits(size_t max_capacity) const { - _backing.warn_commit_limits(max_capacity); -} - -void XPhysicalMemoryManager::try_enable_uncommit(size_t min_capacity, size_t max_capacity) { - assert(!is_init_completed(), "Invalid state"); - - // If uncommit is not 
explicitly disabled, max capacity is greater than - // min capacity, and uncommit is supported by the platform, then uncommit - // will be enabled. - if (!ZUncommit) { - log_info_p(gc, init)("Uncommit: Disabled"); - return; - } - - if (max_capacity == min_capacity) { - log_info_p(gc, init)("Uncommit: Implicitly Disabled (-Xms equals -Xmx)"); - FLAG_SET_ERGO(ZUncommit, false); - return; - } - - // Test if uncommit is supported by the operating system by committing - // and then uncommitting a granule. - XPhysicalMemory pmem(XPhysicalMemorySegment(0, XGranuleSize, false /* committed */)); - if (!commit(pmem) || !uncommit(pmem)) { - log_info_p(gc, init)("Uncommit: Implicitly Disabled (Not supported by operating system)"); - FLAG_SET_ERGO(ZUncommit, false); - return; - } - - log_info_p(gc, init)("Uncommit: Enabled"); - log_info_p(gc, init)("Uncommit Delay: " UINTX_FORMAT "s", ZUncommitDelay); -} - -void XPhysicalMemoryManager::nmt_commit(uintptr_t offset, size_t size) const { - // From an NMT point of view we treat the first heap view (marked0) as committed - const uintptr_t addr = XAddress::marked0(offset); - MemTracker::record_virtual_memory_commit((void*)addr, size, CALLER_PC); -} - -void XPhysicalMemoryManager::nmt_uncommit(uintptr_t offset, size_t size) const { - const uintptr_t addr = XAddress::marked0(offset); - ThreadCritical tc; - MemTracker::record_virtual_memory_uncommit((address)addr, size); -} - -void XPhysicalMemoryManager::alloc(XPhysicalMemory& pmem, size_t size) { - assert(is_aligned(size, XGranuleSize), "Invalid size"); - - // Allocate segments - while (size > 0) { - size_t allocated = 0; - const uintptr_t start = _manager.alloc_low_address_at_most(size, &allocated); - assert(start != UINTPTR_MAX, "Allocation should never fail"); - pmem.add_segment(XPhysicalMemorySegment(start, allocated, false /* committed */)); - size -= allocated; - } -} - -void XPhysicalMemoryManager::free(const XPhysicalMemory& pmem) { - // Free segments - for (int i = 0; i < pmem.nsegments(); i++) { - const XPhysicalMemorySegment& segment = pmem.segment(i); - _manager.free(segment.start(), segment.size()); - } -} - -bool XPhysicalMemoryManager::commit(XPhysicalMemory& pmem) { - // Commit segments - for (int i = 0; i < pmem.nsegments(); i++) { - const XPhysicalMemorySegment& segment = pmem.segment(i); - if (segment.is_committed()) { - // Segment already committed - continue; - } - - // Commit segment - const size_t committed = _backing.commit(segment.start(), segment.size()); - if (!pmem.commit_segment(i, committed)) { - // Failed or partially failed - return false; - } - } - - // Success - return true; -} - -bool XPhysicalMemoryManager::uncommit(XPhysicalMemory& pmem) { - // Commit segments - for (int i = 0; i < pmem.nsegments(); i++) { - const XPhysicalMemorySegment& segment = pmem.segment(i); - if (!segment.is_committed()) { - // Segment already uncommitted - continue; - } - - // Uncommit segment - const size_t uncommitted = _backing.uncommit(segment.start(), segment.size()); - if (!pmem.uncommit_segment(i, uncommitted)) { - // Failed or partially failed - return false; - } - } - - // Success - return true; -} - -void XPhysicalMemoryManager::pretouch_view(uintptr_t addr, size_t size) const { - const size_t page_size = XLargePages::is_explicit() ? 
XGranuleSize : os::vm_page_size(); - os::pretouch_memory((void*)addr, (void*)(addr + size), page_size); -} - -void XPhysicalMemoryManager::map_view(uintptr_t addr, const XPhysicalMemory& pmem) const { - size_t size = 0; - - // Map segments - for (int i = 0; i < pmem.nsegments(); i++) { - const XPhysicalMemorySegment& segment = pmem.segment(i); - _backing.map(addr + size, segment.size(), segment.start()); - size += segment.size(); - } - - // Setup NUMA interleaving for large pages - if (XNUMA::is_enabled() && XLargePages::is_explicit()) { - // To get granule-level NUMA interleaving when using large pages, - // we simply let the kernel interleave the memory for us at page - // fault time. - os::numa_make_global((char*)addr, size); - } -} - -void XPhysicalMemoryManager::unmap_view(uintptr_t addr, size_t size) const { - _backing.unmap(addr, size); -} - -void XPhysicalMemoryManager::pretouch(uintptr_t offset, size_t size) const { - if (ZVerifyViews) { - // Pre-touch good view - pretouch_view(XAddress::good(offset), size); - } else { - // Pre-touch all views - pretouch_view(XAddress::marked0(offset), size); - pretouch_view(XAddress::marked1(offset), size); - pretouch_view(XAddress::remapped(offset), size); - } -} - -void XPhysicalMemoryManager::map(uintptr_t offset, const XPhysicalMemory& pmem) const { - const size_t size = pmem.size(); - - if (ZVerifyViews) { - // Map good view - map_view(XAddress::good(offset), pmem); - } else { - // Map all views - map_view(XAddress::marked0(offset), pmem); - map_view(XAddress::marked1(offset), pmem); - map_view(XAddress::remapped(offset), pmem); - } - - nmt_commit(offset, size); -} - -void XPhysicalMemoryManager::unmap(uintptr_t offset, size_t size) const { - nmt_uncommit(offset, size); - - if (ZVerifyViews) { - // Unmap good view - unmap_view(XAddress::good(offset), size); - } else { - // Unmap all views - unmap_view(XAddress::marked0(offset), size); - unmap_view(XAddress::marked1(offset), size); - unmap_view(XAddress::remapped(offset), size); - } -} - -void XPhysicalMemoryManager::debug_map(uintptr_t offset, const XPhysicalMemory& pmem) const { - // Map good view - assert(ZVerifyViews, "Should be enabled"); - map_view(XAddress::good(offset), pmem); -} - -void XPhysicalMemoryManager::debug_unmap(uintptr_t offset, size_t size) const { - // Unmap good view - assert(ZVerifyViews, "Should be enabled"); - unmap_view(XAddress::good(offset), size); -} diff --git a/src/hotspot/share/gc/x/xPhysicalMemory.hpp b/src/hotspot/share/gc/x/xPhysicalMemory.hpp deleted file mode 100644 index 26d8ed9bb9641..0000000000000 --- a/src/hotspot/share/gc/x/xPhysicalMemory.hpp +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XPHYSICALMEMORY_HPP -#define SHARE_GC_X_XPHYSICALMEMORY_HPP - -#include "gc/x/xArray.hpp" -#include "gc/x/xMemory.hpp" -#include "memory/allocation.hpp" -#include OS_HEADER(gc/x/xPhysicalMemoryBacking) - -class XPhysicalMemorySegment : public CHeapObj { -private: - uintptr_t _start; - uintptr_t _end; - bool _committed; - -public: - XPhysicalMemorySegment(); - XPhysicalMemorySegment(uintptr_t start, size_t size, bool committed); - - uintptr_t start() const; - uintptr_t end() const; - size_t size() const; - - bool is_committed() const; - void set_committed(bool committed); -}; - -class XPhysicalMemory { -private: - XArray _segments; - - void insert_segment(int index, uintptr_t start, size_t size, bool committed); - void replace_segment(int index, uintptr_t start, size_t size, bool committed); - void remove_segment(int index); - -public: - XPhysicalMemory(); - XPhysicalMemory(const XPhysicalMemorySegment& segment); - XPhysicalMemory(const XPhysicalMemory& pmem); - const XPhysicalMemory& operator=(const XPhysicalMemory& pmem); - - bool is_null() const; - size_t size() const; - - int nsegments() const; - const XPhysicalMemorySegment& segment(int index) const; - - void add_segments(const XPhysicalMemory& pmem); - void remove_segments(); - - void add_segment(const XPhysicalMemorySegment& segment); - bool commit_segment(int index, size_t size); - bool uncommit_segment(int index, size_t size); - - XPhysicalMemory split(size_t size); - XPhysicalMemory split_committed(); -}; - -class XPhysicalMemoryManager { -private: - XPhysicalMemoryBacking _backing; - XMemoryManager _manager; - - void nmt_commit(uintptr_t offset, size_t size) const; - void nmt_uncommit(uintptr_t offset, size_t size) const; - - void pretouch_view(uintptr_t addr, size_t size) const; - void map_view(uintptr_t addr, const XPhysicalMemory& pmem) const; - void unmap_view(uintptr_t addr, size_t size) const; - -public: - XPhysicalMemoryManager(size_t max_capacity); - - bool is_initialized() const; - - void warn_commit_limits(size_t max_capacity) const; - void try_enable_uncommit(size_t min_capacity, size_t max_capacity); - - void alloc(XPhysicalMemory& pmem, size_t size); - void free(const XPhysicalMemory& pmem); - - bool commit(XPhysicalMemory& pmem); - bool uncommit(XPhysicalMemory& pmem); - - void pretouch(uintptr_t offset, size_t size) const; - - void map(uintptr_t offset, const XPhysicalMemory& pmem) const; - void unmap(uintptr_t offset, size_t size) const; - - void debug_map(uintptr_t offset, const XPhysicalMemory& pmem) const; - void debug_unmap(uintptr_t offset, size_t size) const; -}; - -#endif // SHARE_GC_X_XPHYSICALMEMORY_HPP diff --git a/src/hotspot/share/gc/x/xPhysicalMemory.inline.hpp b/src/hotspot/share/gc/x/xPhysicalMemory.inline.hpp deleted file mode 100644 index 70f38e2abdbbb..0000000000000 --- a/src/hotspot/share/gc/x/xPhysicalMemory.inline.hpp +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XPHYSICALMEMORY_INLINE_HPP -#define SHARE_GC_X_XPHYSICALMEMORY_INLINE_HPP - -#include "gc/x/xPhysicalMemory.hpp" - -#include "gc/x/xAddress.inline.hpp" -#include "utilities/debug.hpp" - -inline XPhysicalMemorySegment::XPhysicalMemorySegment() : - _start(UINTPTR_MAX), - _end(UINTPTR_MAX), - _committed(false) {} - -inline XPhysicalMemorySegment::XPhysicalMemorySegment(uintptr_t start, size_t size, bool committed) : - _start(start), - _end(start + size), - _committed(committed) {} - -inline uintptr_t XPhysicalMemorySegment::start() const { - return _start; -} - -inline uintptr_t XPhysicalMemorySegment::end() const { - return _end; -} - -inline size_t XPhysicalMemorySegment::size() const { - return _end - _start; -} - -inline bool XPhysicalMemorySegment::is_committed() const { - return _committed; -} - -inline void XPhysicalMemorySegment::set_committed(bool committed) { - _committed = committed; -} - -inline bool XPhysicalMemory::is_null() const { - return _segments.length() == 0; -} - -inline int XPhysicalMemory::nsegments() const { - return _segments.length(); -} - -inline const XPhysicalMemorySegment& XPhysicalMemory::segment(int index) const { - return _segments.at(index); -} - -#endif // SHARE_GC_X_XPHYSICALMEMORY_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xReferenceProcessor.cpp b/src/hotspot/share/gc/x/xReferenceProcessor.cpp deleted file mode 100644 index acbb96eaf41e2..0000000000000 --- a/src/hotspot/share/gc/x/xReferenceProcessor.cpp +++ /dev/null @@ -1,459 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "classfile/javaClasses.inline.hpp" -#include "gc/shared/referencePolicy.hpp" -#include "gc/shared/referenceProcessorStats.hpp" -#include "gc/x/xHeap.inline.hpp" -#include "gc/x/xReferenceProcessor.hpp" -#include "gc/x/xStat.hpp" -#include "gc/x/xTask.hpp" -#include "gc/x/xTracer.inline.hpp" -#include "gc/x/xValue.inline.hpp" -#include "memory/universe.hpp" -#include "runtime/atomic.hpp" -#include "runtime/mutexLocker.hpp" -#include "runtime/os.hpp" - -static const XStatSubPhase XSubPhaseConcurrentReferencesProcess("Concurrent References Process"); -static const XStatSubPhase XSubPhaseConcurrentReferencesEnqueue("Concurrent References Enqueue"); - -static ReferenceType reference_type(oop reference) { - return InstanceKlass::cast(reference->klass())->reference_type(); -} - -static const char* reference_type_name(ReferenceType type) { - switch (type) { - case REF_SOFT: - return "Soft"; - - case REF_WEAK: - return "Weak"; - - case REF_FINAL: - return "Final"; - - case REF_PHANTOM: - return "Phantom"; - - default: - ShouldNotReachHere(); - return "Unknown"; - } -} - -static volatile oop* reference_referent_addr(oop reference) { - return (volatile oop*)java_lang_ref_Reference::referent_addr_raw(reference); -} - -static oop reference_referent(oop reference) { - return Atomic::load(reference_referent_addr(reference)); -} - -static void reference_clear_referent(oop reference) { - java_lang_ref_Reference::clear_referent_raw(reference); -} - -static oop* reference_discovered_addr(oop reference) { - return (oop*)java_lang_ref_Reference::discovered_addr_raw(reference); -} - -static oop reference_discovered(oop reference) { - return *reference_discovered_addr(reference); -} - -static void reference_set_discovered(oop reference, oop discovered) { - java_lang_ref_Reference::set_discovered_raw(reference, discovered); -} - -static oop* reference_next_addr(oop reference) { - return (oop*)java_lang_ref_Reference::next_addr_raw(reference); -} - -static oop reference_next(oop reference) { - return *reference_next_addr(reference); -} - -static void reference_set_next(oop reference, oop next) { - java_lang_ref_Reference::set_next_raw(reference, next); -} - -static void soft_reference_update_clock() { - const jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; - java_lang_ref_SoftReference::set_clock(now); -} - -XReferenceProcessor::XReferenceProcessor(XWorkers* workers) : - _workers(workers), - _soft_reference_policy(nullptr), - _encountered_count(), - _discovered_count(), - _enqueued_count(), - _discovered_list(nullptr), - _pending_list(nullptr), - _pending_list_tail(_pending_list.addr()) {} - -void XReferenceProcessor::set_soft_reference_policy(bool clear) { - static AlwaysClearPolicy always_clear_policy; - static LRUMaxHeapPolicy lru_max_heap_policy; - - if (clear) { - log_info(gc, ref)("Clearing All SoftReferences"); - _soft_reference_policy = &always_clear_policy; - } else { - _soft_reference_policy = &lru_max_heap_policy; - } - - _soft_reference_policy->setup(); -} - -bool XReferenceProcessor::is_inactive(oop reference, oop referent, ReferenceType type) const { - if (type == REF_FINAL) { - // A FinalReference is inactive if its next field is non-null. An application can't - // call enqueue() or clear() on a FinalReference. - return reference_next(reference) != nullptr; - } else { - // A non-FinalReference is inactive if the referent is null. The referent can only - // be null if the application called Reference.enqueue() or Reference.clear(). 
- return referent == nullptr; - } -} - -bool XReferenceProcessor::is_strongly_live(oop referent) const { - return XHeap::heap()->is_object_strongly_live(XOop::to_address(referent)); -} - -bool XReferenceProcessor::is_softly_live(oop reference, ReferenceType type) const { - if (type != REF_SOFT) { - // Not a SoftReference - return false; - } - - // Ask SoftReference policy - const jlong clock = java_lang_ref_SoftReference::clock(); - assert(clock != 0, "Clock not initialized"); - assert(_soft_reference_policy != nullptr, "Policy not initialized"); - return !_soft_reference_policy->should_clear_reference(reference, clock); -} - -bool XReferenceProcessor::should_discover(oop reference, ReferenceType type) const { - volatile oop* const referent_addr = reference_referent_addr(reference); - const oop referent = XBarrier::weak_load_barrier_on_oop_field(referent_addr); - - if (is_inactive(reference, referent, type)) { - return false; - } - - if (is_strongly_live(referent)) { - return false; - } - - if (is_softly_live(reference, type)) { - return false; - } - - // PhantomReferences with finalizable marked referents should technically not have - // to be discovered. However, InstanceRefKlass::oop_oop_iterate_ref_processing() - // does not know about the finalizable mark concept, and will therefore mark - // referents in non-discovered PhantomReferences as strongly live. To prevent - // this, we always discover PhantomReferences with finalizable marked referents. - // They will automatically be dropped during the reference processing phase. - return true; -} - -bool XReferenceProcessor::should_drop(oop reference, ReferenceType type) const { - const oop referent = reference_referent(reference); - if (referent == nullptr) { - // Reference has been cleared, by a call to Reference.enqueue() - // or Reference.clear() from the application, which means we - // should drop the reference. - return true; - } - - // Check if the referent is still alive, in which case we should - // drop the reference. - if (type == REF_PHANTOM) { - return XBarrier::is_alive_barrier_on_phantom_oop(referent); - } else { - return XBarrier::is_alive_barrier_on_weak_oop(referent); - } -} - -void XReferenceProcessor::keep_alive(oop reference, ReferenceType type) const { - volatile oop* const p = reference_referent_addr(reference); - if (type == REF_PHANTOM) { - XBarrier::keep_alive_barrier_on_phantom_oop_field(p); - } else { - XBarrier::keep_alive_barrier_on_weak_oop_field(p); - } -} - -void XReferenceProcessor::make_inactive(oop reference, ReferenceType type) const { - if (type == REF_FINAL) { - // Don't clear referent. It is needed by the Finalizer thread to make the call - // to finalize(). A FinalReference is instead made inactive by self-looping the - // next field. An application can't call FinalReference.enqueue(), so there is - // no race to worry about when setting the next field. - assert(reference_next(reference) == nullptr, "Already inactive"); - reference_set_next(reference, reference); - } else { - // Clear referent - reference_clear_referent(reference); - } -} - -void XReferenceProcessor::discover(oop reference, ReferenceType type) { - log_trace(gc, ref)("Discovered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type)); - - // Update statistics - _discovered_count.get()[type]++; - - if (type == REF_FINAL) { - // Mark referent (and its reachable subgraph) finalizable. This avoids - // the problem of later having to mark those objects if the referent is - // still final reachable during processing. 
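// ---------------------------------------------------------------------------
// Editorial sketch -- not part of the deleted xReferenceProcessor.cpp. The
// discover() code below threads each reference onto a per-worker discovered
// list through the reference's own discovered field, and work() later splices
// that worker-local list onto the shared pending list with a single atomic
// exchange. A minimal analogue with a hypothetical Node type and standard
// C++ atomics in place of HotSpot's Atomic::xchg:

#include <atomic>

namespace sketch_discovered_list {

struct Node {
  Node* discovered = nullptr;  // intrusive link, like the discovered field
};

// Per-worker push: no synchronization needed, the list head is worker-local.
void push(Node*& local_head, Node* node) {
  node->discovered = local_head;
  local_head = node;
}

// Prepend the worker-local list to the shared pending list in one step.
// Returns the previous pending head, which the caller links onto the tail
// of its local list to keep the chain intact.
Node* splice(std::atomic<Node*>& pending_head, Node* local_head) {
  return pending_head.exchange(local_head);
}

}  // namespace sketch_discovered_list
// ---------------------------------------------------------------------------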
- volatile oop* const referent_addr = reference_referent_addr(reference); - XBarrier::mark_barrier_on_oop_field(referent_addr, true /* finalizable */); - } - - // Add reference to discovered list - assert(reference_discovered(reference) == nullptr, "Already discovered"); - oop* const list = _discovered_list.addr(); - reference_set_discovered(reference, *list); - *list = reference; -} - -bool XReferenceProcessor::discover_reference(oop reference, ReferenceType type) { - if (!RegisterReferences) { - // Reference processing disabled - return false; - } - - log_trace(gc, ref)("Encountered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type)); - - // Update statistics - _encountered_count.get()[type]++; - - if (!should_discover(reference, type)) { - // Not discovered - return false; - } - - discover(reference, type); - - // Discovered - return true; -} - -oop XReferenceProcessor::drop(oop reference, ReferenceType type) { - log_trace(gc, ref)("Dropped Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type)); - - // Keep referent alive - keep_alive(reference, type); - - // Unlink and return next in list - const oop next = reference_discovered(reference); - reference_set_discovered(reference, nullptr); - return next; -} - -oop* XReferenceProcessor::keep(oop reference, ReferenceType type) { - log_trace(gc, ref)("Enqueued Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type)); - - // Update statistics - _enqueued_count.get()[type]++; - - // Make reference inactive - make_inactive(reference, type); - - // Return next in list - return reference_discovered_addr(reference); -} - -void XReferenceProcessor::work() { - // Process discovered references - oop* const list = _discovered_list.addr(); - oop* p = list; - - while (*p != nullptr) { - const oop reference = *p; - const ReferenceType type = reference_type(reference); - - if (should_drop(reference, type)) { - *p = drop(reference, type); - } else { - p = keep(reference, type); - } - } - - // Prepend discovered references to internal pending list - if (*list != nullptr) { - *p = Atomic::xchg(_pending_list.addr(), *list); - if (*p == nullptr) { - // First to prepend to list, record tail - _pending_list_tail = p; - } - - // Clear discovered list - *list = nullptr; - } -} - -bool XReferenceProcessor::is_empty() const { - XPerWorkerConstIterator iter(&_discovered_list); - for (const oop* list; iter.next(&list);) { - if (*list != nullptr) { - return false; - } - } - - if (_pending_list.get() != nullptr) { - return false; - } - - return true; -} - -void XReferenceProcessor::reset_statistics() { - assert(is_empty(), "Should be empty"); - - // Reset encountered - XPerWorkerIterator iter_encountered(&_encountered_count); - for (Counters* counters; iter_encountered.next(&counters);) { - for (int i = REF_SOFT; i <= REF_PHANTOM; i++) { - (*counters)[i] = 0; - } - } - - // Reset discovered - XPerWorkerIterator iter_discovered(&_discovered_count); - for (Counters* counters; iter_discovered.next(&counters);) { - for (int i = REF_SOFT; i <= REF_PHANTOM; i++) { - (*counters)[i] = 0; - } - } - - // Reset enqueued - XPerWorkerIterator iter_enqueued(&_enqueued_count); - for (Counters* counters; iter_enqueued.next(&counters);) { - for (int i = REF_SOFT; i <= REF_PHANTOM; i++) { - (*counters)[i] = 0; - } - } -} - -void XReferenceProcessor::collect_statistics() { - Counters encountered = {}; - Counters discovered = {}; - Counters enqueued = {}; - - // Sum encountered - XPerWorkerConstIterator 
iter_encountered(&_encountered_count); - for (const Counters* counters; iter_encountered.next(&counters);) { - for (int i = REF_SOFT; i <= REF_PHANTOM; i++) { - encountered[i] += (*counters)[i]; - } - } - - // Sum discovered - XPerWorkerConstIterator iter_discovered(&_discovered_count); - for (const Counters* counters; iter_discovered.next(&counters);) { - for (int i = REF_SOFT; i <= REF_PHANTOM; i++) { - discovered[i] += (*counters)[i]; - } - } - - // Sum enqueued - XPerWorkerConstIterator iter_enqueued(&_enqueued_count); - for (const Counters* counters; iter_enqueued.next(&counters);) { - for (int i = REF_SOFT; i <= REF_PHANTOM; i++) { - enqueued[i] += (*counters)[i]; - } - } - - // Update statistics - XStatReferences::set_soft(encountered[REF_SOFT], discovered[REF_SOFT], enqueued[REF_SOFT]); - XStatReferences::set_weak(encountered[REF_WEAK], discovered[REF_WEAK], enqueued[REF_WEAK]); - XStatReferences::set_final(encountered[REF_FINAL], discovered[REF_FINAL], enqueued[REF_FINAL]); - XStatReferences::set_phantom(encountered[REF_PHANTOM], discovered[REF_PHANTOM], enqueued[REF_PHANTOM]); - - // Trace statistics - const ReferenceProcessorStats stats(discovered[REF_SOFT], - discovered[REF_WEAK], - discovered[REF_FINAL], - discovered[REF_PHANTOM]); - XTracer::tracer()->report_gc_reference_stats(stats); -} - -class XReferenceProcessorTask : public XTask { -private: - XReferenceProcessor* const _reference_processor; - -public: - XReferenceProcessorTask(XReferenceProcessor* reference_processor) : - XTask("XReferenceProcessorTask"), - _reference_processor(reference_processor) {} - - virtual void work() { - _reference_processor->work(); - } -}; - -void XReferenceProcessor::process_references() { - XStatTimer timer(XSubPhaseConcurrentReferencesProcess); - - // Process discovered lists - XReferenceProcessorTask task(this); - _workers->run(&task); - - // Update SoftReference clock - soft_reference_update_clock(); - - // Collect, log and trace statistics - collect_statistics(); -} - -void XReferenceProcessor::enqueue_references() { - XStatTimer timer(XSubPhaseConcurrentReferencesEnqueue); - - if (_pending_list.get() == nullptr) { - // Nothing to enqueue - return; - } - - { - // Heap_lock protects external pending list - MonitorLocker ml(Heap_lock); - - // Prepend internal pending list to external pending list - *_pending_list_tail = Universe::swap_reference_pending_list(_pending_list.get()); - - // Notify ReferenceHandler thread - ml.notify_all(); - } - - // Reset internal pending list - _pending_list.set(nullptr); - _pending_list_tail = _pending_list.addr(); -} diff --git a/src/hotspot/share/gc/x/xReferenceProcessor.hpp b/src/hotspot/share/gc/x/xReferenceProcessor.hpp deleted file mode 100644 index 1ff7b14e868d6..0000000000000 --- a/src/hotspot/share/gc/x/xReferenceProcessor.hpp +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XREFERENCEPROCESSOR_HPP -#define SHARE_GC_X_XREFERENCEPROCESSOR_HPP - -#include "gc/shared/referenceDiscoverer.hpp" -#include "gc/x/xValue.hpp" - -class ReferencePolicy; -class XWorkers; - -class XReferenceProcessor : public ReferenceDiscoverer { - friend class XReferenceProcessorTask; - -private: - static const size_t reference_type_count = REF_PHANTOM + 1; - typedef size_t Counters[reference_type_count]; - - XWorkers* const _workers; - ReferencePolicy* _soft_reference_policy; - XPerWorker _encountered_count; - XPerWorker _discovered_count; - XPerWorker _enqueued_count; - XPerWorker _discovered_list; - XContended _pending_list; - oop* _pending_list_tail; - - bool is_inactive(oop reference, oop referent, ReferenceType type) const; - bool is_strongly_live(oop referent) const; - bool is_softly_live(oop reference, ReferenceType type) const; - - bool should_discover(oop reference, ReferenceType type) const; - bool should_drop(oop reference, ReferenceType type) const; - void keep_alive(oop reference, ReferenceType type) const; - void make_inactive(oop reference, ReferenceType type) const; - - void discover(oop reference, ReferenceType type); - - oop drop(oop reference, ReferenceType type); - oop* keep(oop reference, ReferenceType type); - - bool is_empty() const; - - void work(); - void collect_statistics(); - -public: - XReferenceProcessor(XWorkers* workers); - - void set_soft_reference_policy(bool clear); - void reset_statistics(); - - virtual bool discover_reference(oop reference, ReferenceType type); - void process_references(); - void enqueue_references(); -}; - -#endif // SHARE_GC_X_XREFERENCEPROCESSOR_HPP diff --git a/src/hotspot/share/gc/x/xRelocate.cpp b/src/hotspot/share/gc/x/xRelocate.cpp deleted file mode 100644 index 645989eaba393..0000000000000 --- a/src/hotspot/share/gc/x/xRelocate.cpp +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/shared/gc_globals.hpp" -#include "gc/x/xAbort.inline.hpp" -#include "gc/x/xAddress.inline.hpp" -#include "gc/x/xBarrier.inline.hpp" -#include "gc/x/xForwarding.inline.hpp" -#include "gc/x/xHeap.inline.hpp" -#include "gc/x/xPage.inline.hpp" -#include "gc/x/xRelocate.hpp" -#include "gc/x/xRelocationSet.inline.hpp" -#include "gc/x/xStat.hpp" -#include "gc/x/xTask.hpp" -#include "gc/x/xThread.inline.hpp" -#include "gc/x/xWorkers.hpp" -#include "prims/jvmtiTagMap.hpp" -#include "runtime/atomic.hpp" -#include "utilities/debug.hpp" - -XRelocate::XRelocate(XWorkers* workers) : - _workers(workers) {} - -static uintptr_t forwarding_index(XForwarding* forwarding, uintptr_t from_addr) { - const uintptr_t from_offset = XAddress::offset(from_addr); - return (from_offset - forwarding->start()) >> forwarding->object_alignment_shift(); -} - -static uintptr_t forwarding_find(XForwarding* forwarding, uintptr_t from_addr, XForwardingCursor* cursor) { - const uintptr_t from_index = forwarding_index(forwarding, from_addr); - const XForwardingEntry entry = forwarding->find(from_index, cursor); - return entry.populated() ? XAddress::good(entry.to_offset()) : 0; -} - -static uintptr_t forwarding_insert(XForwarding* forwarding, uintptr_t from_addr, uintptr_t to_addr, XForwardingCursor* cursor) { - const uintptr_t from_index = forwarding_index(forwarding, from_addr); - const uintptr_t to_offset = XAddress::offset(to_addr); - const uintptr_t to_offset_final = forwarding->insert(from_index, to_offset, cursor); - return XAddress::good(to_offset_final); -} - -static uintptr_t relocate_object_inner(XForwarding* forwarding, uintptr_t from_addr, XForwardingCursor* cursor) { - assert(XHeap::heap()->is_object_live(from_addr), "Should be live"); - - // Allocate object - const size_t size = XUtils::object_size(from_addr); - const uintptr_t to_addr = XHeap::heap()->alloc_object_for_relocation(size); - if (to_addr == 0) { - // Allocation failed - return 0; - } - - // Copy object - XUtils::object_copy_disjoint(from_addr, to_addr, size); - - // Insert forwarding - const uintptr_t to_addr_final = forwarding_insert(forwarding, from_addr, to_addr, cursor); - if (to_addr_final != to_addr) { - // Already relocated, try undo allocation - XHeap::heap()->undo_alloc_object_for_relocation(to_addr, size); - } - - return to_addr_final; -} - -uintptr_t XRelocate::relocate_object(XForwarding* forwarding, uintptr_t from_addr) const { - XForwardingCursor cursor; - - // Lookup forwarding - uintptr_t to_addr = forwarding_find(forwarding, from_addr, &cursor); - if (to_addr != 0) { - // Already relocated - return to_addr; - } - - // Relocate object - if (forwarding->retain_page()) { - to_addr = relocate_object_inner(forwarding, from_addr, &cursor); - forwarding->release_page(); - - if (to_addr != 0) { - // Success - return to_addr; - } - - // Failed to relocate object. Wait for a worker thread to complete - // relocation of this page, and then forward the object. If the GC - // aborts the relocation phase before the page has been relocated, - // then wait return false and we just forward the object in-place. 
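// ---------------------------------------------------------------------------
// Editorial sketch -- not part of the deleted xRelocate.cpp. Both relocation
// paths in this file (relocate_object_inner() above and the closure's
// relocate_object() further below) follow the same claim-or-undo pattern:
// speculatively copy the object, try to install a forwarding entry, and give
// the copy back if another thread installed one first. A minimal analogue of
// that pattern, with a single hypothetical slot instead of the real
// forwarding hash table:

#include <atomic>
#include <cstdint>

namespace sketch_forwarding_race {

using Address = std::uintptr_t;  // 0 means "no forwarding installed yet"

// Try to install to_addr; return whatever ended up in the slot.
Address insert(std::atomic<Address>& slot, Address to_addr) {
  Address expected = 0;
  if (slot.compare_exchange_strong(expected, to_addr)) {
    return to_addr;   // we won the race
  }
  return expected;    // another thread already relocated the object
}

// Claim-or-undo: undo the speculative copy if we lost the race.
template <typename UndoFn>
Address relocate(std::atomic<Address>& slot, Address to_addr, UndoFn undo) {
  const Address final_addr = insert(slot, to_addr);
  if (final_addr != to_addr) {
    undo(to_addr);    // e.g. hand the freshly allocated copy back
  }
  return final_addr;
}

}  // namespace sketch_forwarding_race
// ---------------------------------------------------------------------------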
- if (!forwarding->wait_page_released()) { - // Forward object in-place - return forwarding_insert(forwarding, from_addr, from_addr, &cursor); - } - } - - // Forward object - return forward_object(forwarding, from_addr); -} - -uintptr_t XRelocate::forward_object(XForwarding* forwarding, uintptr_t from_addr) const { - XForwardingCursor cursor; - const uintptr_t to_addr = forwarding_find(forwarding, from_addr, &cursor); - assert(to_addr != 0, "Should be forwarded"); - return to_addr; -} - -static XPage* alloc_page(const XForwarding* forwarding) { - if (ZStressRelocateInPlace) { - // Simulate failure to allocate a new page. This will - // cause the page being relocated to be relocated in-place. - return nullptr; - } - - XAllocationFlags flags; - flags.set_non_blocking(); - flags.set_worker_relocation(); - return XHeap::heap()->alloc_page(forwarding->type(), forwarding->size(), flags); -} - -static void free_page(XPage* page) { - XHeap::heap()->free_page(page, true /* reclaimed */); -} - -static bool should_free_target_page(XPage* page) { - // Free target page if it is empty. We can end up with an empty target - // page if we allocated a new target page, and then lost the race to - // relocate the remaining objects, leaving the target page empty when - // relocation completed. - return page != nullptr && page->top() == page->start(); -} - -class XRelocateSmallAllocator { -private: - volatile size_t _in_place_count; - -public: - XRelocateSmallAllocator() : - _in_place_count(0) {} - - XPage* alloc_target_page(XForwarding* forwarding, XPage* target) { - XPage* const page = alloc_page(forwarding); - if (page == nullptr) { - Atomic::inc(&_in_place_count); - } - - return page; - } - - void share_target_page(XPage* page) { - // Does nothing - } - - void free_target_page(XPage* page) { - if (should_free_target_page(page)) { - free_page(page); - } - } - - void free_relocated_page(XPage* page) { - free_page(page); - } - - uintptr_t alloc_object(XPage* page, size_t size) const { - return (page != nullptr) ? page->alloc_object(size) : 0; - } - - void undo_alloc_object(XPage* page, uintptr_t addr, size_t size) const { - page->undo_alloc_object(addr, size); - } - - size_t in_place_count() const { - return _in_place_count; - } -}; - -class XRelocateMediumAllocator { -private: - XConditionLock _lock; - XPage* _shared; - bool _in_place; - volatile size_t _in_place_count; - -public: - XRelocateMediumAllocator() : - _lock(), - _shared(nullptr), - _in_place(false), - _in_place_count(0) {} - - ~XRelocateMediumAllocator() { - if (should_free_target_page(_shared)) { - free_page(_shared); - } - } - - XPage* alloc_target_page(XForwarding* forwarding, XPage* target) { - XLocker locker(&_lock); - - // Wait for any ongoing in-place relocation to complete - while (_in_place) { - _lock.wait(); - } - - // Allocate a new page only if the shared page is the same as the - // current target page. The shared page will be different from the - // current target page if another thread shared a page, or allocated - // a new page. 
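// ---------------------------------------------------------------------------
// Editorial sketch -- not part of the deleted xRelocate.cpp. The code below
// only replaces the shared target page when the caller's stale target is
// still the shared one, and makes everyone wait while an in-place relocation
// is in progress. A minimal analogue using std::mutex/std::condition_variable
// in place of XConditionLock (all names hypothetical):

#include <condition_variable>
#include <mutex>

namespace sketch_shared_target {

struct Page;  // opaque for the sketch

class SharedTarget {
  std::mutex _lock;
  std::condition_variable _cv;
  Page* _shared = nullptr;
  bool _in_place = false;

public:
  // stale is the caller's previous view of the shared page; allocate is any
  // callable returning a new Page*, or nullptr on allocation failure.
  template <typename AllocFn>
  Page* alloc_target(Page* stale, AllocFn allocate) {
    std::unique_lock<std::mutex> guard(_lock);
    _cv.wait(guard, [&] { return !_in_place; });
    if (_shared == stale) {   // nobody replaced it since the caller looked
      _shared = allocate();
      if (_shared == nullptr) {
        _in_place = true;     // fall back to relocating the page in place
      }
    }
    return _shared;
  }

  void share(Page* page) {
    std::lock_guard<std::mutex> guard(_lock);
    _shared = page;
    _in_place = false;
    _cv.notify_all();
  }
};

}  // namespace sketch_shared_target
// ---------------------------------------------------------------------------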
- if (_shared == target) { - _shared = alloc_page(forwarding); - if (_shared == nullptr) { - Atomic::inc(&_in_place_count); - _in_place = true; - } - } - - return _shared; - } - - void share_target_page(XPage* page) { - XLocker locker(&_lock); - - assert(_in_place, "Invalid state"); - assert(_shared == nullptr, "Invalid state"); - assert(page != nullptr, "Invalid page"); - - _shared = page; - _in_place = false; - - _lock.notify_all(); - } - - void free_target_page(XPage* page) { - // Does nothing - } - - void free_relocated_page(XPage* page) { - free_page(page); - } - - uintptr_t alloc_object(XPage* page, size_t size) const { - return (page != nullptr) ? page->alloc_object_atomic(size) : 0; - } - - void undo_alloc_object(XPage* page, uintptr_t addr, size_t size) const { - page->undo_alloc_object_atomic(addr, size); - } - - size_t in_place_count() const { - return _in_place_count; - } -}; - -template -class XRelocateClosure : public ObjectClosure { -private: - Allocator* const _allocator; - XForwarding* _forwarding; - XPage* _target; - - bool relocate_object(uintptr_t from_addr) const { - XForwardingCursor cursor; - - // Lookup forwarding - if (forwarding_find(_forwarding, from_addr, &cursor) != 0) { - // Already relocated - return true; - } - - // Allocate object - const size_t size = XUtils::object_size(from_addr); - const uintptr_t to_addr = _allocator->alloc_object(_target, size); - if (to_addr == 0) { - // Allocation failed - return false; - } - - // Copy object. Use conjoint copying if we are relocating - // in-place and the new object overlapps with the old object. - if (_forwarding->in_place() && to_addr + size > from_addr) { - XUtils::object_copy_conjoint(from_addr, to_addr, size); - } else { - XUtils::object_copy_disjoint(from_addr, to_addr, size); - } - - // Insert forwarding - if (forwarding_insert(_forwarding, from_addr, to_addr, &cursor) != to_addr) { - // Already relocated, undo allocation - _allocator->undo_alloc_object(_target, to_addr, size); - } - - return true; - } - - virtual void do_object(oop obj) { - const uintptr_t addr = XOop::to_address(obj); - assert(XHeap::heap()->is_object_live(addr), "Should be live"); - - while (!relocate_object(addr)) { - // Allocate a new target page, or if that fails, use the page being - // relocated as the new target, which will cause it to be relocated - // in-place. - _target = _allocator->alloc_target_page(_forwarding, _target); - if (_target != nullptr) { - continue; - } - - // Claim the page being relocated to block other threads from accessing - // it, or its forwarding table, until it has been released (relocation - // completed). - _target = _forwarding->claim_page(); - _target->reset_for_in_place_relocation(); - _forwarding->set_in_place(); - } - } - -public: - XRelocateClosure(Allocator* allocator) : - _allocator(allocator), - _forwarding(nullptr), - _target(nullptr) {} - - ~XRelocateClosure() { - _allocator->free_target_page(_target); - } - - void do_forwarding(XForwarding* forwarding) { - _forwarding = forwarding; - - // Check if we should abort - if (XAbort::should_abort()) { - _forwarding->abort_page(); - return; - } - - // Relocate objects - _forwarding->object_iterate(this); - - // Verify - if (ZVerifyForwarding) { - _forwarding->verify(); - } - - // Release relocated page - _forwarding->release_page(); - - if (_forwarding->in_place()) { - // The relocated page has been relocated in-place and should not - // be freed. Keep it as target page until it is full, and offer to - // share it with other worker threads. 
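// ---------------------------------------------------------------------------
// Editorial sketch -- not part of the deleted xRelocate.cpp. The closure's
// relocate_object() above switches between conjoint and disjoint copying:
// when a page is relocated in place the destination can overlap the source,
// which rules out memcpy-style copying. A minimal standalone analogue
// (hypothetical names, plain memmove/memcpy in place of XUtils):

#include <cstddef>
#include <cstdint>
#include <cstring>

namespace sketch_copy {

// Copy size bytes, tolerating overlap when relocating in place.
void copy_object(void* to, const void* from, std::size_t size, bool in_place) {
  const std::uintptr_t to_u = reinterpret_cast<std::uintptr_t>(to);
  const std::uintptr_t from_u = reinterpret_cast<std::uintptr_t>(from);
  const bool overlaps = in_place && to_u < from_u + size && from_u < to_u + size;
  if (overlaps) {
    std::memmove(to, from, size);  // conjoint copy: overlap is allowed
  } else {
    std::memcpy(to, from, size);   // disjoint copy: no overlap permitted
  }
}

}  // namespace sketch_copy
// ---------------------------------------------------------------------------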
- _allocator->share_target_page(_target); - } else { - // Detach and free relocated page - XPage* const page = _forwarding->detach_page(); - _allocator->free_relocated_page(page); - } - } -}; - -class XRelocateTask : public XTask { -private: - XRelocationSetParallelIterator _iter; - XRelocateSmallAllocator _small_allocator; - XRelocateMediumAllocator _medium_allocator; - - static bool is_small(XForwarding* forwarding) { - return forwarding->type() == XPageTypeSmall; - } - -public: - XRelocateTask(XRelocationSet* relocation_set) : - XTask("XRelocateTask"), - _iter(relocation_set), - _small_allocator(), - _medium_allocator() {} - - ~XRelocateTask() { - XStatRelocation::set_at_relocate_end(_small_allocator.in_place_count(), - _medium_allocator.in_place_count()); - } - - virtual void work() { - XRelocateClosure small(&_small_allocator); - XRelocateClosure medium(&_medium_allocator); - - for (XForwarding* forwarding; _iter.next(&forwarding);) { - if (is_small(forwarding)) { - small.do_forwarding(forwarding); - } else { - medium.do_forwarding(forwarding); - } - } - } -}; - -void XRelocate::relocate(XRelocationSet* relocation_set) { - XRelocateTask task(relocation_set); - _workers->run(&task); -} diff --git a/src/hotspot/share/gc/x/xRelocate.hpp b/src/hotspot/share/gc/x/xRelocate.hpp deleted file mode 100644 index 46ab39240f643..0000000000000 --- a/src/hotspot/share/gc/x/xRelocate.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XRELOCATE_HPP -#define SHARE_GC_X_XRELOCATE_HPP - -#include "gc/x/xRelocationSet.hpp" - -class XForwarding; -class XWorkers; - -class XRelocate { - friend class XRelocateTask; - -private: - XWorkers* const _workers; - - void work(XRelocationSetParallelIterator* iter); - -public: - XRelocate(XWorkers* workers); - - uintptr_t relocate_object(XForwarding* forwarding, uintptr_t from_addr) const; - uintptr_t forward_object(XForwarding* forwarding, uintptr_t from_addr) const; - - void relocate(XRelocationSet* relocation_set); -}; - -#endif // SHARE_GC_X_XRELOCATE_HPP diff --git a/src/hotspot/share/gc/x/xRelocationSet.cpp b/src/hotspot/share/gc/x/xRelocationSet.cpp deleted file mode 100644 index eeb42c4bf328c..0000000000000 --- a/src/hotspot/share/gc/x/xRelocationSet.cpp +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xArray.inline.hpp" -#include "gc/x/xForwarding.inline.hpp" -#include "gc/x/xForwardingAllocator.inline.hpp" -#include "gc/x/xRelocationSet.inline.hpp" -#include "gc/x/xRelocationSetSelector.inline.hpp" -#include "gc/x/xStat.hpp" -#include "gc/x/xTask.hpp" -#include "gc/x/xWorkers.hpp" -#include "runtime/atomic.hpp" -#include "utilities/debug.hpp" - -class XRelocationSetInstallTask : public XTask { -private: - XForwardingAllocator* const _allocator; - XForwarding** _forwardings; - const size_t _nforwardings; - XArrayParallelIterator _small_iter; - XArrayParallelIterator _medium_iter; - volatile size_t _small_next; - volatile size_t _medium_next; - - void install(XForwarding* forwarding, volatile size_t* next) { - const size_t index = Atomic::fetch_then_add(next, 1u); - assert(index < _nforwardings, "Invalid index"); - _forwardings[index] = forwarding; - } - - void install_small(XForwarding* forwarding) { - install(forwarding, &_small_next); - } - - void install_medium(XForwarding* forwarding) { - install(forwarding, &_medium_next); - } - -public: - XRelocationSetInstallTask(XForwardingAllocator* allocator, const XRelocationSetSelector* selector) : - XTask("XRelocationSetInstallTask"), - _allocator(allocator), - _forwardings(nullptr), - _nforwardings(selector->small()->length() + selector->medium()->length()), - _small_iter(selector->small()), - _medium_iter(selector->medium()), - _small_next(selector->medium()->length()), - _medium_next(0) { - - // Reset the allocator to have room for the relocation - // set, all forwardings, and all forwarding entries. 
- const size_t relocation_set_size = _nforwardings * sizeof(XForwarding*); - const size_t forwardings_size = _nforwardings * sizeof(XForwarding); - const size_t forwarding_entries_size = selector->forwarding_entries() * sizeof(XForwardingEntry); - _allocator->reset(relocation_set_size + forwardings_size + forwarding_entries_size); - - // Allocate relocation set - _forwardings = new (_allocator->alloc(relocation_set_size)) XForwarding*[_nforwardings]; - } - - ~XRelocationSetInstallTask() { - assert(_allocator->is_full(), "Should be full"); - } - - virtual void work() { - // Allocate and install forwardings for small pages - for (XPage* page; _small_iter.next(&page);) { - XForwarding* const forwarding = XForwarding::alloc(_allocator, page); - install_small(forwarding); - } - - // Allocate and install forwardings for medium pages - for (XPage* page; _medium_iter.next(&page);) { - XForwarding* const forwarding = XForwarding::alloc(_allocator, page); - install_medium(forwarding); - } - } - - XForwarding** forwardings() const { - return _forwardings; - } - - size_t nforwardings() const { - return _nforwardings; - } -}; - -XRelocationSet::XRelocationSet(XWorkers* workers) : - _workers(workers), - _allocator(), - _forwardings(nullptr), - _nforwardings(0) {} - -void XRelocationSet::install(const XRelocationSetSelector* selector) { - // Install relocation set - XRelocationSetInstallTask task(&_allocator, selector); - _workers->run(&task); - - _forwardings = task.forwardings(); - _nforwardings = task.nforwardings(); - - // Update statistics - XStatRelocation::set_at_install_relocation_set(_allocator.size()); -} - -void XRelocationSet::reset() { - // Destroy forwardings - XRelocationSetIterator iter(this); - for (XForwarding* forwarding; iter.next(&forwarding);) { - forwarding->~XForwarding(); - } - - _nforwardings = 0; -} diff --git a/src/hotspot/share/gc/x/xRelocationSet.hpp b/src/hotspot/share/gc/x/xRelocationSet.hpp deleted file mode 100644 index bbbb3770516b5..0000000000000 --- a/src/hotspot/share/gc/x/xRelocationSet.hpp +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
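The install task above lets any number of GC workers fill one shared array of forwardings by claiming slot indices with an atomic fetch-and-add, so no two workers ever write the same element. A rough sketch of that claiming pattern, with made-up InstallTask/Forwarding names and std::atomic in place of HotSpot's Atomic wrappers:

    // Sketch of slot claiming via fetch_add; not the HotSpot implementation.
    #include <atomic>
    #include <cassert>
    #include <cstddef>
    #include <thread>
    #include <vector>

    struct Forwarding {
      int page_id;
    };

    class InstallTask {
      std::vector<Forwarding*> _slots;
      std::atomic<std::size_t> _next{0};

    public:
      explicit InstallTask(std::size_t n) : _slots(n, nullptr) {}

      // Each caller claims a unique index, so concurrent installs never clash.
      void install(Forwarding* f) {
        const std::size_t index = _next.fetch_add(1);
        assert(index < _slots.size() && "Invalid index");
        _slots[index] = f;
      }

      const std::vector<Forwarding*>& slots() const { return _slots; }
    };

    int main() {
      std::vector<Forwarding> pool(100);   // pre-allocated, so pointers stay valid
      InstallTask task(pool.size());

      std::vector<std::thread> workers;
      for (int t = 0; t < 4; t++) {
        workers.emplace_back([&, t] {
          for (int i = 0; i < 25; i++) {
            task.install(&pool[t * 25 + i]);
          }
        });
      }
      for (auto& w : workers) {
        w.join();
      }
      return 0;
    }

Note how the removed task keeps two such counters and starts the small-page counter at the number of medium pages, which is what places all medium-page forwardings at the front of the shared array.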
- */ - -#ifndef SHARE_GC_X_XRELOCATIONSET_HPP -#define SHARE_GC_X_XRELOCATIONSET_HPP - -#include "gc/x/xArray.hpp" -#include "gc/x/xForwardingAllocator.hpp" - -class XForwarding; -class XRelocationSetSelector; -class XWorkers; - -class XRelocationSet { - template friend class XRelocationSetIteratorImpl; - -private: - XWorkers* _workers; - XForwardingAllocator _allocator; - XForwarding** _forwardings; - size_t _nforwardings; - -public: - XRelocationSet(XWorkers* workers); - - void install(const XRelocationSetSelector* selector); - void reset(); -}; - -template -class XRelocationSetIteratorImpl : public XArrayIteratorImpl { -public: - XRelocationSetIteratorImpl(XRelocationSet* relocation_set); -}; - -using XRelocationSetIterator = XRelocationSetIteratorImpl; -using XRelocationSetParallelIterator = XRelocationSetIteratorImpl; - -#endif // SHARE_GC_X_XRELOCATIONSET_HPP diff --git a/src/hotspot/share/gc/x/xRelocationSet.inline.hpp b/src/hotspot/share/gc/x/xRelocationSet.inline.hpp deleted file mode 100644 index 3b76fbce46a2b..0000000000000 --- a/src/hotspot/share/gc/x/xRelocationSet.inline.hpp +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XRELOCATIONSET_INLINE_HPP -#define SHARE_GC_X_XRELOCATIONSET_INLINE_HPP - -#include "gc/x/xRelocationSet.hpp" - -#include "gc/x/xArray.inline.hpp" - -template -inline XRelocationSetIteratorImpl::XRelocationSetIteratorImpl(XRelocationSet* relocation_set) : - XArrayIteratorImpl(relocation_set->_forwardings, relocation_set->_nforwardings) {} - -#endif // SHARE_GC_X_XRELOCATIONSET_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xRelocationSetSelector.cpp b/src/hotspot/share/gc/x/xRelocationSetSelector.cpp deleted file mode 100644 index 514e70b874357..0000000000000 --- a/src/hotspot/share/gc/x/xRelocationSetSelector.cpp +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/shared/gc_globals.hpp" -#include "gc/x/xArray.inline.hpp" -#include "gc/x/xForwarding.inline.hpp" -#include "gc/x/xPage.inline.hpp" -#include "gc/x/xRelocationSetSelector.inline.hpp" -#include "jfr/jfrEvents.hpp" -#include "logging/log.hpp" -#include "runtime/globals.hpp" -#include "utilities/debug.hpp" -#include "utilities/powerOfTwo.hpp" - -XRelocationSetSelectorGroupStats::XRelocationSetSelectorGroupStats() : - _npages_candidates(0), - _total(0), - _live(0), - _empty(0), - _npages_selected(0), - _relocate(0) {} - -XRelocationSetSelectorGroup::XRelocationSetSelectorGroup(const char* name, - uint8_t page_type, - size_t page_size, - size_t object_size_limit) : - _name(name), - _page_type(page_type), - _page_size(page_size), - _object_size_limit(object_size_limit), - _fragmentation_limit(page_size * (ZFragmentationLimit / 100)), - _live_pages(), - _forwarding_entries(0), - _stats() {} - -bool XRelocationSetSelectorGroup::is_disabled() { - // Medium pages are disabled when their page size is zero - return _page_type == XPageTypeMedium && _page_size == 0; -} - -bool XRelocationSetSelectorGroup::is_selectable() { - // Large pages are not selectable - return _page_type != XPageTypeLarge; -} - -void XRelocationSetSelectorGroup::semi_sort() { - // Semi-sort live pages by number of live bytes in ascending order - const size_t npartitions_shift = 11; - const size_t npartitions = (size_t)1 << npartitions_shift; - const size_t partition_size = _page_size >> npartitions_shift; - const size_t partition_size_shift = exact_log2(partition_size); - - // Partition slots/fingers - int partitions[npartitions] = { /* zero initialize */ }; - - // Calculate partition slots - XArrayIterator iter1(&_live_pages); - for (XPage* page; iter1.next(&page);) { - const size_t index = page->live_bytes() >> partition_size_shift; - partitions[index]++; - } - - // Calculate partition fingers - int finger = 0; - for (size_t i = 0; i < npartitions; i++) { - const int slots = partitions[i]; - partitions[i] = finger; - finger += slots; - } - - // Allocate destination array - const int npages = _live_pages.length(); - XArray sorted_live_pages(npages, npages, nullptr); - - // Sort pages into partitions - XArrayIterator iter2(&_live_pages); - for (XPage* page; iter2.next(&page);) { - const size_t index = page->live_bytes() >> partition_size_shift; - const int finger = partitions[index]++; - assert(sorted_live_pages.at(finger) == nullptr, "Invalid finger"); - sorted_live_pages.at_put(finger, page); - } - - _live_pages.swap(&sorted_live_pages); -} - -void XRelocationSetSelectorGroup::select_inner() { - // Calculate the number of pages to relocate by successively including pages in - // a candidate relocation set and calculate the maximum space requirement for - // their live objects. 
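The calculation described in this comment, and implemented in the body that follows, comes down to two numbers per candidate set: the worst-case page count needed for the live data, to = ceil(live_bytes / (page_size - object_size_limit)), and the relative reclaimability of the pages added since the last accepted set, 100 - 100 * diff_to / diff_from, which must exceed ZFragmentationLimit for the larger candidate set to become the new selection. A small worked sketch, where the 2M page size, 256K object size limit and 25% limit are assumptions chosen only for illustration:

    // Worked example of the selection heuristic; the constants are invented.
    #include <cmath>
    #include <cstdio>

    int main() {
      const double page_size           = 2.0 * 1024 * 1024;  // hypothetical small page size
      const double object_size_limit   = 256.0 * 1024;       // hypothetical object size limit
      const double fragmentation_limit = 25.0;                // stand-in for ZFragmentationLimit (%)

      int    selected_from = 0;
      int    selected_to   = 0;
      double live_bytes    = 0.0;

      // Pretend each candidate page is roughly 10% live.
      for (int from = 1; from <= 8; from++) {
        live_bytes += 0.10 * page_size;

        // Maximum number of target pages the candidate set can need,
        // regardless of the order in which objects are relocated.
        const int to = (int)std::ceil(live_bytes / (page_size - object_size_limit));

        // Relative amount of the newly added pages that would be reclaimed.
        const int    diff_from        = from - selected_from;
        const int    diff_to          = to - selected_to;
        const double diff_reclaimable = 100.0 - 100.0 * diff_to / diff_from;

        const bool selected = diff_reclaimable > fragmentation_limit;
        std::printf("from=%d to=%d reclaimable=%.1f%% %s\n",
                    from, to, diff_reclaimable, selected ? "Selected" : "Rejected");
        if (selected) {
          selected_from = from;
          selected_to   = to;
        }
      }
      return 0;
    }

With these numbers the first page alone is rejected (relocating one page into one page reclaims nothing up front), but as soon as two sparse pages can share a target page the set is accepted and keeps growing, matching the Selected/Rejected trace logged by select_inner().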
- const int npages = _live_pages.length(); - int selected_from = 0; - int selected_to = 0; - size_t npages_selected = 0; - size_t selected_live_bytes = 0; - size_t selected_forwarding_entries = 0; - size_t from_live_bytes = 0; - size_t from_forwarding_entries = 0; - - semi_sort(); - - for (int from = 1; from <= npages; from++) { - // Add page to the candidate relocation set - XPage* const page = _live_pages.at(from - 1); - from_live_bytes += page->live_bytes(); - from_forwarding_entries += XForwarding::nentries(page); - - // Calculate the maximum number of pages needed by the candidate relocation set. - // By subtracting the object size limit from the pages size we get the maximum - // number of pages that the relocation set is guaranteed to fit in, regardless - // of in which order the objects are relocated. - const int to = ceil((double)(from_live_bytes) / (double)(_page_size - _object_size_limit)); - - // Calculate the relative difference in reclaimable space compared to our - // currently selected final relocation set. If this number is larger than the - // acceptable fragmentation limit, then the current candidate relocation set - // becomes our new final relocation set. - const int diff_from = from - selected_from; - const int diff_to = to - selected_to; - const double diff_reclaimable = 100 - percent_of(diff_to, diff_from); - if (diff_reclaimable > ZFragmentationLimit) { - selected_from = from; - selected_to = to; - selected_live_bytes = from_live_bytes; - npages_selected += 1; - selected_forwarding_entries = from_forwarding_entries; - } - - log_trace(gc, reloc)("Candidate Relocation Set (%s Pages): %d->%d, " - "%.1f%% relative defragmentation, " SIZE_FORMAT " forwarding entries, %s", - _name, from, to, diff_reclaimable, from_forwarding_entries, - (selected_from == from) ? "Selected" : "Rejected"); - } - - // Finalize selection - _live_pages.trunc_to(selected_from); - _forwarding_entries = selected_forwarding_entries; - - // Update statistics - _stats._relocate = selected_live_bytes; - _stats._npages_selected = npages_selected; - - log_trace(gc, reloc)("Relocation Set (%s Pages): %d->%d, %d skipped, " SIZE_FORMAT " forwarding entries", - _name, selected_from, selected_to, npages - selected_from, selected_forwarding_entries); -} - -void XRelocationSetSelectorGroup::select() { - if (is_disabled()) { - return; - } - - EventZRelocationSetGroup event; - - if (is_selectable()) { - select_inner(); - } - - // Send event - event.commit(_page_type, _stats.npages_candidates(), _stats.total(), _stats.empty(), _stats.npages_selected(), _stats.relocate()); -} - -XRelocationSetSelector::XRelocationSetSelector() : - _small("Small", XPageTypeSmall, XPageSizeSmall, XObjectSizeLimitSmall), - _medium("Medium", XPageTypeMedium, XPageSizeMedium, XObjectSizeLimitMedium), - _large("Large", XPageTypeLarge, 0 /* page_size */, 0 /* object_size_limit */), - _empty_pages() {} - -void XRelocationSetSelector::select() { - // Select pages to relocate. The resulting relocation set will be - // sorted such that medium pages comes first, followed by small - // pages. Pages within each page group will be semi-sorted by live - // bytes in ascending order. Relocating pages in this order allows - // us to start reclaiming memory more quickly. 
- - EventZRelocationSet event; - - // Select pages from each group - _large.select(); - _medium.select(); - _small.select(); - - // Send event - event.commit(total(), empty(), relocate()); -} - -XRelocationSetSelectorStats XRelocationSetSelector::stats() const { - XRelocationSetSelectorStats stats; - stats._small = _small.stats(); - stats._medium = _medium.stats(); - stats._large = _large.stats(); - return stats; -} diff --git a/src/hotspot/share/gc/x/xRelocationSetSelector.hpp b/src/hotspot/share/gc/x/xRelocationSetSelector.hpp deleted file mode 100644 index 75e40eeea8c31..0000000000000 --- a/src/hotspot/share/gc/x/xRelocationSetSelector.hpp +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XRELOCATIONSETSELECTOR_HPP -#define SHARE_GC_X_XRELOCATIONSETSELECTOR_HPP - -#include "gc/x/xArray.hpp" -#include "memory/allocation.hpp" - -class XPage; - -class XRelocationSetSelectorGroupStats { - friend class XRelocationSetSelectorGroup; - -private: - // Candidate set - size_t _npages_candidates; - size_t _total; - size_t _live; - size_t _empty; - - // Selected set - size_t _npages_selected; - size_t _relocate; - -public: - XRelocationSetSelectorGroupStats(); - - size_t npages_candidates() const; - size_t total() const; - size_t live() const; - size_t empty() const; - - size_t npages_selected() const; - size_t relocate() const; -}; - -class XRelocationSetSelectorStats { - friend class XRelocationSetSelector; - -private: - XRelocationSetSelectorGroupStats _small; - XRelocationSetSelectorGroupStats _medium; - XRelocationSetSelectorGroupStats _large; - -public: - const XRelocationSetSelectorGroupStats& small() const; - const XRelocationSetSelectorGroupStats& medium() const; - const XRelocationSetSelectorGroupStats& large() const; -}; - -class XRelocationSetSelectorGroup { -private: - const char* const _name; - const uint8_t _page_type; - const size_t _page_size; - const size_t _object_size_limit; - const size_t _fragmentation_limit; - XArray _live_pages; - size_t _forwarding_entries; - XRelocationSetSelectorGroupStats _stats; - - bool is_disabled(); - bool is_selectable(); - void semi_sort(); - void select_inner(); - -public: - XRelocationSetSelectorGroup(const char* name, - uint8_t page_type, - size_t page_size, - size_t object_size_limit); - - void register_live_page(XPage* page); - void register_empty_page(XPage* page); - void select(); - - const XArray* selected() const; - size_t forwarding_entries() const; - - const 
XRelocationSetSelectorGroupStats& stats() const; -}; - -class XRelocationSetSelector : public StackObj { -private: - XRelocationSetSelectorGroup _small; - XRelocationSetSelectorGroup _medium; - XRelocationSetSelectorGroup _large; - XArray _empty_pages; - - size_t total() const; - size_t empty() const; - size_t relocate() const; - -public: - XRelocationSetSelector(); - - void register_live_page(XPage* page); - void register_empty_page(XPage* page); - - bool should_free_empty_pages(int bulk) const; - const XArray* empty_pages() const; - void clear_empty_pages(); - - void select(); - - const XArray* small() const; - const XArray* medium() const; - size_t forwarding_entries() const; - - XRelocationSetSelectorStats stats() const; -}; - -#endif // SHARE_GC_X_XRELOCATIONSETSELECTOR_HPP diff --git a/src/hotspot/share/gc/x/xRelocationSetSelector.inline.hpp b/src/hotspot/share/gc/x/xRelocationSetSelector.inline.hpp deleted file mode 100644 index 25e0ede835de0..0000000000000 --- a/src/hotspot/share/gc/x/xRelocationSetSelector.inline.hpp +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XRELOCATIONSETSELECTOR_INLINE_HPP -#define SHARE_GC_X_XRELOCATIONSETSELECTOR_INLINE_HPP - -#include "gc/x/xRelocationSetSelector.hpp" - -#include "gc/x/xArray.inline.hpp" -#include "gc/x/xPage.inline.hpp" - -inline size_t XRelocationSetSelectorGroupStats::npages_candidates() const { - return _npages_candidates; -} - -inline size_t XRelocationSetSelectorGroupStats::total() const { - return _total; -} - -inline size_t XRelocationSetSelectorGroupStats::live() const { - return _live; -} - -inline size_t XRelocationSetSelectorGroupStats::empty() const { - return _empty; -} - -inline size_t XRelocationSetSelectorGroupStats::npages_selected() const { - return _npages_selected; -} - -inline size_t XRelocationSetSelectorGroupStats::relocate() const { - return _relocate; -} - -inline const XRelocationSetSelectorGroupStats& XRelocationSetSelectorStats::small() const { - return _small; -} - -inline const XRelocationSetSelectorGroupStats& XRelocationSetSelectorStats::medium() const { - return _medium; -} - -inline const XRelocationSetSelectorGroupStats& XRelocationSetSelectorStats::large() const { - return _large; -} - -inline void XRelocationSetSelectorGroup::register_live_page(XPage* page) { - const uint8_t type = page->type(); - const size_t size = page->size(); - const size_t live = page->live_bytes(); - const size_t garbage = size - live; - - if (garbage > _fragmentation_limit) { - _live_pages.append(page); - } - - _stats._npages_candidates++; - _stats._total += size; - _stats._live += live; -} - -inline void XRelocationSetSelectorGroup::register_empty_page(XPage* page) { - const size_t size = page->size(); - - _stats._npages_candidates++; - _stats._total += size; - _stats._empty += size; -} - -inline const XArray* XRelocationSetSelectorGroup::selected() const { - return &_live_pages; -} - -inline size_t XRelocationSetSelectorGroup::forwarding_entries() const { - return _forwarding_entries; -} - -inline const XRelocationSetSelectorGroupStats& XRelocationSetSelectorGroup::stats() const { - return _stats; -} - -inline void XRelocationSetSelector::register_live_page(XPage* page) { - const uint8_t type = page->type(); - - if (type == XPageTypeSmall) { - _small.register_live_page(page); - } else if (type == XPageTypeMedium) { - _medium.register_live_page(page); - } else { - _large.register_live_page(page); - } -} - -inline void XRelocationSetSelector::register_empty_page(XPage* page) { - const uint8_t type = page->type(); - - if (type == XPageTypeSmall) { - _small.register_empty_page(page); - } else if (type == XPageTypeMedium) { - _medium.register_empty_page(page); - } else { - _large.register_empty_page(page); - } - - _empty_pages.append(page); -} - -inline bool XRelocationSetSelector::should_free_empty_pages(int bulk) const { - return _empty_pages.length() >= bulk && _empty_pages.is_nonempty(); -} - -inline const XArray* XRelocationSetSelector::empty_pages() const { - return &_empty_pages; -} - -inline void XRelocationSetSelector::clear_empty_pages() { - return _empty_pages.clear(); -} - -inline size_t XRelocationSetSelector::total() const { - return _small.stats().total() + _medium.stats().total() + _large.stats().total(); -} - -inline size_t XRelocationSetSelector::empty() const { - return _small.stats().empty() + _medium.stats().empty() + _large.stats().empty(); -} - -inline size_t XRelocationSetSelector::relocate() const { - return _small.stats().relocate() + _medium.stats().relocate() + _large.stats().relocate(); -} - -inline const XArray* 
XRelocationSetSelector::small() const { - return _small.selected(); -} - -inline const XArray* XRelocationSetSelector::medium() const { - return _medium.selected(); -} - -inline size_t XRelocationSetSelector::forwarding_entries() const { - return _small.forwarding_entries() + _medium.forwarding_entries(); -} - -#endif // SHARE_GC_X_XRELOCATIONSETSELECTOR_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xResurrection.cpp b/src/hotspot/share/gc/x/xResurrection.cpp deleted file mode 100644 index 486f1f8db82e0..0000000000000 --- a/src/hotspot/share/gc/x/xResurrection.cpp +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xResurrection.hpp" -#include "runtime/atomic.hpp" -#include "runtime/safepoint.hpp" -#include "utilities/debug.hpp" - -volatile bool XResurrection::_blocked = false; - -void XResurrection::block() { - assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); - _blocked = true; -} - -void XResurrection::unblock() { - // No need for anything stronger than a relaxed store here. - // The preceding handshake makes sure that all non-strong - // oops have already been healed at this point. - Atomic::store(&_blocked, false); -} diff --git a/src/hotspot/share/gc/x/xResurrection.hpp b/src/hotspot/share/gc/x/xResurrection.hpp deleted file mode 100644 index d6ce9820e02fe..0000000000000 --- a/src/hotspot/share/gc/x/xResurrection.hpp +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
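The whole of xResurrection.cpp above is one global flag with deliberately weak ordering: it is set while all Java threads are stopped at a safepoint and cleared with a relaxed store, because the preceding handshake already guarantees every non-strong oop has been healed. A toy version of that pattern in portable C++ (ResurrectionBlocker is a made-up name, std::atomic stands in for HotSpot's Atomic, and the safepoint/handshake guarantees are only asserted in comments):

    // Toy stand-in for the removed XResurrection flag.
    #include <atomic>

    class ResurrectionBlocker {
      static std::atomic<bool> _blocked;

    public:
      static bool is_blocked() {
        return _blocked.load(std::memory_order_relaxed);
      }

      static void block() {
        // In the removed code this runs at a safepoint, so a plain store
        // is already enough; no extra ordering is needed.
        _blocked.store(true, std::memory_order_relaxed);
      }

      static void unblock() {
        // Relaxed is sufficient: a preceding handshake ensures all non-strong
        // oops were healed before any thread can observe the flag as false.
        _blocked.store(false, std::memory_order_relaxed);
      }
    };

    std::atomic<bool> ResurrectionBlocker::_blocked{false};

    int main() {
      ResurrectionBlocker::block();
      return ResurrectionBlocker::is_blocked() ? 0 : 1;
    }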
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XRESURRECTION_HPP -#define SHARE_GC_X_XRESURRECTION_HPP - -#include "memory/allStatic.hpp" - -class XResurrection : public AllStatic { -private: - static volatile bool _blocked; - -public: - static bool is_blocked(); - static void block(); - static void unblock(); -}; - -#endif // SHARE_GC_X_XRESURRECTION_HPP diff --git a/src/hotspot/share/gc/x/xResurrection.inline.hpp b/src/hotspot/share/gc/x/xResurrection.inline.hpp deleted file mode 100644 index af1993945cc41..0000000000000 --- a/src/hotspot/share/gc/x/xResurrection.inline.hpp +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XRESURRECTION_INLINE_HPP -#define SHARE_GC_X_XRESURRECTION_INLINE_HPP - -#include "gc/x/xResurrection.hpp" - -#include "runtime/atomic.hpp" - -inline bool XResurrection::is_blocked() { - return Atomic::load(&_blocked); -} - -#endif // SHARE_GC_X_XRESURRECTION_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xRootsIterator.cpp b/src/hotspot/share/gc/x/xRootsIterator.cpp deleted file mode 100644 index 4eaeb8e77c2a2..0000000000000 --- a/src/hotspot/share/gc/x/xRootsIterator.cpp +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "classfile/classLoaderDataGraph.hpp" -#include "gc/shared/oopStorageSetParState.inline.hpp" -#include "gc/x/xNMethod.hpp" -#include "gc/x/xNMethodTable.hpp" -#include "gc/x/xRootsIterator.hpp" -#include "gc/x/xStat.hpp" -#include "memory/resourceArea.hpp" -#include "prims/jvmtiTagMap.hpp" -#include "runtime/atomic.hpp" -#include "runtime/globals.hpp" -#include "runtime/safepoint.hpp" -#include "utilities/debug.hpp" - -static const XStatSubPhase XSubPhaseConcurrentRootsOopStorageSet("Concurrent Roots OopStorageSet"); -static const XStatSubPhase XSubPhaseConcurrentRootsClassLoaderDataGraph("Concurrent Roots ClassLoaderDataGraph"); -static const XStatSubPhase XSubPhaseConcurrentRootsJavaThreads("Concurrent Roots JavaThreads"); -static const XStatSubPhase XSubPhaseConcurrentRootsCodeCache("Concurrent Roots CodeCache"); -static const XStatSubPhase XSubPhaseConcurrentWeakRootsOopStorageSet("Concurrent Weak Roots OopStorageSet"); - -template -template -void XParallelApply::apply(ClosureType* cl) { - if (!Atomic::load(&_completed)) { - _iter.apply(cl); - if (!Atomic::load(&_completed)) { - Atomic::store(&_completed, true); - } - } -} - -XStrongOopStorageSetIterator::XStrongOopStorageSetIterator() : - _iter() {} - -void XStrongOopStorageSetIterator::apply(OopClosure* cl) { - XStatTimer timer(XSubPhaseConcurrentRootsOopStorageSet); - _iter.oops_do(cl); -} - -void XStrongCLDsIterator::apply(CLDClosure* cl) { - XStatTimer timer(XSubPhaseConcurrentRootsClassLoaderDataGraph); - ClassLoaderDataGraph::always_strong_cld_do(cl); -} - -XJavaThreadsIterator::XJavaThreadsIterator() : - _threads(), - _claimed(0) {} - -uint XJavaThreadsIterator::claim() { - return Atomic::fetch_then_add(&_claimed, 1u); -} - -void XJavaThreadsIterator::apply(ThreadClosure* cl) { - XStatTimer timer(XSubPhaseConcurrentRootsJavaThreads); - - // The resource mark is needed because interpreter oop maps are - // not reused in concurrent mode. Instead, they are temporary and - // resource allocated. 
- ResourceMark _rm; - - for (uint i = claim(); i < _threads.length(); i = claim()) { - cl->do_thread(_threads.thread_at(i)); - } -} - -XNMethodsIterator::XNMethodsIterator() { - if (!ClassUnloading) { - XNMethod::nmethods_do_begin(); - } -} - -XNMethodsIterator::~XNMethodsIterator() { - if (!ClassUnloading) { - XNMethod::nmethods_do_end(); - } -} - -void XNMethodsIterator::apply(NMethodClosure* cl) { - XStatTimer timer(XSubPhaseConcurrentRootsCodeCache); - XNMethod::nmethods_do(cl); -} - -XRootsIterator::XRootsIterator(int cld_claim) { - if (cld_claim != ClassLoaderData::_claim_none) { - ClassLoaderDataGraph::verify_claimed_marks_cleared(cld_claim); - } -} - -void XRootsIterator::apply(OopClosure* cl, - CLDClosure* cld_cl, - ThreadClosure* thread_cl, - NMethodClosure* nm_cl) { - _oop_storage_set.apply(cl); - _class_loader_data_graph.apply(cld_cl); - _java_threads.apply(thread_cl); - if (!ClassUnloading) { - _nmethods.apply(nm_cl); - } -} - -XWeakOopStorageSetIterator::XWeakOopStorageSetIterator() : - _iter() {} - -void XWeakOopStorageSetIterator::apply(OopClosure* cl) { - XStatTimer timer(XSubPhaseConcurrentWeakRootsOopStorageSet); - _iter.oops_do(cl); -} - -void XWeakOopStorageSetIterator::report_num_dead() { - _iter.report_num_dead(); -} - -void XWeakRootsIterator::report_num_dead() { - _oop_storage_set.iter().report_num_dead(); -} - -void XWeakRootsIterator::apply(OopClosure* cl) { - _oop_storage_set.apply(cl); -} diff --git a/src/hotspot/share/gc/x/xRootsIterator.hpp b/src/hotspot/share/gc/x/xRootsIterator.hpp deleted file mode 100644 index 9adc4c0293868..0000000000000 --- a/src/hotspot/share/gc/x/xRootsIterator.hpp +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XROOTSITERATOR_HPP -#define SHARE_GC_X_XROOTSITERATOR_HPP - -#include "gc/shared/oopStorageSetParState.hpp" -#include "logging/log.hpp" -#include "memory/iterator.hpp" -#include "runtime/threadSMR.hpp" - -template -class XParallelApply { -private: - Iterator _iter; - volatile bool _completed; - -public: - XParallelApply() : - _iter(), - _completed(false) {} - - template - void apply(ClosureType* cl); - - Iterator& iter() { - return _iter; - } -}; - -class XStrongOopStorageSetIterator { - OopStorageSetStrongParState _iter; - -public: - XStrongOopStorageSetIterator(); - - void apply(OopClosure* cl); -}; - -class XStrongCLDsIterator { -public: - void apply(CLDClosure* cl); -}; - -class XJavaThreadsIterator { -private: - ThreadsListHandle _threads; - volatile uint _claimed; - - uint claim(); - -public: - XJavaThreadsIterator(); - - void apply(ThreadClosure* cl); -}; - -class XNMethodsIterator { -public: - XNMethodsIterator(); - ~XNMethodsIterator(); - - void apply(NMethodClosure* cl); -}; - -class XRootsIterator { -private: - XParallelApply _oop_storage_set; - XParallelApply _class_loader_data_graph; - XParallelApply _java_threads; - XParallelApply _nmethods; - -public: - XRootsIterator(int cld_claim); - - void apply(OopClosure* cl, - CLDClosure* cld_cl, - ThreadClosure* thread_cl, - NMethodClosure* nm_cl); -}; - -class XWeakOopStorageSetIterator { -private: - OopStorageSetWeakParState _iter; - -public: - XWeakOopStorageSetIterator(); - - void apply(OopClosure* cl); - - void report_num_dead(); -}; - -class XWeakRootsIterator { -private: - XParallelApply _oop_storage_set; - -public: - void apply(OopClosure* cl); - - void report_num_dead(); -}; - -#endif // SHARE_GC_X_XROOTSITERATOR_HPP diff --git a/src/hotspot/share/gc/x/xRuntimeWorkers.cpp b/src/hotspot/share/gc/x/xRuntimeWorkers.cpp deleted file mode 100644 index d7e4a1262fcbd..0000000000000 --- a/src/hotspot/share/gc/x/xRuntimeWorkers.cpp +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
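XParallelApply, declared above with its implementation earlier in this diff, is a small "run this root set at most once, but let late arrivals help while it is still in progress" wrapper around a parallel iterator. A hedged sketch of the same idea in plain C++ (ParallelApply and CountingIter are illustrative names, std::atomic replaces HotSpot's Atomic):

    // Sketch of the apply-once wrapper; Iter::apply() must itself be safe to
    // run from several threads at the same time (claim-based, as above).
    #include <atomic>

    template <typename Iter>
    class ParallelApply {
      Iter              _iter;
      std::atomic<bool> _completed{false};

    public:
      template <typename Closure>
      void apply(Closure* cl) {
        // Callers arriving after completion skip the work; callers arriving
        // while another thread is still iterating join in and help.
        if (!_completed.load()) {
          _iter.apply(cl);
          if (!_completed.load()) {
            _completed.store(true);
          }
        }
      }

      Iter& iter() { return _iter; }
    };

    // Minimal usage: the second apply() call is a no-op.
    struct CountingIter {
      std::atomic<int> visits{0};
      void apply(int* weight) { visits.fetch_add(*weight); }
    };

    int main() {
      ParallelApply<CountingIter> pa;
      int one = 1;
      pa.apply(&one);
      pa.apply(&one);
      return pa.iter().visits.load() == 1 ? 0 : 1;
    }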
- */ - -#include "precompiled.hpp" -#include "gc/shared/gcLogPrecious.hpp" -#include "gc/shared/gc_globals.hpp" -#include "gc/x/xLock.inline.hpp" -#include "gc/x/xRuntimeWorkers.hpp" -#include "gc/x/xTask.hpp" -#include "gc/x/xThread.hpp" -#include "runtime/java.hpp" - -class XRuntimeWorkersInitializeTask : public WorkerTask { -private: - const uint _nworkers; - uint _started; - XConditionLock _lock; - -public: - XRuntimeWorkersInitializeTask(uint nworkers) : - WorkerTask("XRuntimeWorkersInitializeTask"), - _nworkers(nworkers), - _started(0), - _lock() {} - - virtual void work(uint worker_id) { - // Wait for all threads to start - XLocker locker(&_lock); - if (++_started == _nworkers) { - // All threads started - _lock.notify_all(); - } else { - while (_started != _nworkers) { - _lock.wait(); - } - } - } -}; - -XRuntimeWorkers::XRuntimeWorkers() : - _workers("RuntimeWorker", - ParallelGCThreads) { - - log_info_p(gc, init)("Runtime Workers: %u", _workers.max_workers()); - - // Initialize worker threads - _workers.initialize_workers(); - _workers.set_active_workers(_workers.max_workers()); - if (_workers.active_workers() != _workers.max_workers()) { - vm_exit_during_initialization("Failed to create XRuntimeWorkers"); - } - - // Execute task to reduce latency in early safepoints, - // which otherwise would have to take on any warmup costs. - XRuntimeWorkersInitializeTask task(_workers.max_workers()); - _workers.run_task(&task); -} - -WorkerThreads* XRuntimeWorkers::workers() { - return &_workers; -} - -void XRuntimeWorkers::threads_do(ThreadClosure* tc) const { - _workers.threads_do(tc); -} diff --git a/src/hotspot/share/gc/x/xRuntimeWorkers.hpp b/src/hotspot/share/gc/x/xRuntimeWorkers.hpp deleted file mode 100644 index 114521d65067e..0000000000000 --- a/src/hotspot/share/gc/x/xRuntimeWorkers.hpp +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
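The XRuntimeWorkersInitializeTask removed above is a startup rendezvous: every runtime worker blocks until all of them have been created, so the thread-startup cost is paid during initialization instead of at the first early safepoint that needs the workers. The same rendezvous in portable C++ (StartupRendezvous is a made-up name; std::mutex and std::condition_variable replace XConditionLock):

    // Startup rendezvous sketch: all workers wait for the last one to arrive.
    #include <condition_variable>
    #include <mutex>
    #include <thread>
    #include <vector>

    class StartupRendezvous {
      const unsigned          _nworkers;
      unsigned                _started = 0;
      std::mutex              _lock;
      std::condition_variable _cv;

    public:
      explicit StartupRendezvous(unsigned nworkers) : _nworkers(nworkers) {}

      void work() {
        std::unique_lock<std::mutex> locker(_lock);
        if (++_started == _nworkers) {
          _cv.notify_all();   // last worker in releases everyone else
        } else {
          _cv.wait(locker, [&] { return _started == _nworkers; });
        }
      }
    };

    int main() {
      const unsigned n = 4;
      StartupRendezvous rendezvous(n);

      std::vector<std::thread> workers;
      for (unsigned i = 0; i < n; i++) {
        workers.emplace_back([&] { rendezvous.work(); });
      }
      for (auto& w : workers) {
        w.join();
      }
      return 0;
    }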
- */ - -#ifndef SHARE_GC_X_XRUNTIMEWORKERS_HPP -#define SHARE_GC_X_XRUNTIMEWORKERS_HPP - -#include "gc/shared/workerThread.hpp" - -class ThreadClosure; - -class XRuntimeWorkers { -private: - WorkerThreads _workers; - -public: - XRuntimeWorkers(); - - WorkerThreads* workers(); - - void threads_do(ThreadClosure* tc) const; -}; - -#endif // SHARE_GC_X_XRUNTIMEWORKERS_HPP diff --git a/src/hotspot/share/gc/x/xSafeDelete.hpp b/src/hotspot/share/gc/x/xSafeDelete.hpp deleted file mode 100644 index c41a38ce1873a..0000000000000 --- a/src/hotspot/share/gc/x/xSafeDelete.hpp +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XSAFEDELETE_HPP -#define SHARE_GC_X_XSAFEDELETE_HPP - -#include "gc/x/xArray.hpp" -#include "gc/x/xLock.hpp" - -#include - -template -class XSafeDeleteImpl { -private: - using ItemT = std::remove_extent_t; - - XLock* _lock; - uint64_t _enabled; - XArray _deferred; - - bool deferred_delete(ItemT* item); - void immediate_delete(ItemT* item); - -public: - XSafeDeleteImpl(XLock* lock); - - void enable_deferred_delete(); - void disable_deferred_delete(); - - void operator()(ItemT* item); -}; - -template -class XSafeDelete : public XSafeDeleteImpl { -private: - XLock _lock; - -public: - XSafeDelete(); -}; - -template -class XSafeDeleteNoLock : public XSafeDeleteImpl { -public: - XSafeDeleteNoLock(); -}; - -#endif // SHARE_GC_X_XSAFEDELETE_HPP diff --git a/src/hotspot/share/gc/x/xSafeDelete.inline.hpp b/src/hotspot/share/gc/x/xSafeDelete.inline.hpp deleted file mode 100644 index 7e428c710e8ee..0000000000000 --- a/src/hotspot/share/gc/x/xSafeDelete.inline.hpp +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XSAFEDELETE_INLINE_HPP -#define SHARE_GC_X_XSAFEDELETE_INLINE_HPP - -#include "gc/x/xSafeDelete.hpp" - -#include "gc/x/xArray.inline.hpp" -#include "utilities/debug.hpp" - -#include - -template -XSafeDeleteImpl::XSafeDeleteImpl(XLock* lock) : - _lock(lock), - _enabled(0), - _deferred() {} - -template -bool XSafeDeleteImpl::deferred_delete(ItemT* item) { - XLocker locker(_lock); - if (_enabled > 0) { - _deferred.append(item); - return true; - } - - return false; -} - -template -void XSafeDeleteImpl::immediate_delete(ItemT* item) { - if (std::is_array::value) { - delete [] item; - } else { - delete item; - } -} - -template -void XSafeDeleteImpl::enable_deferred_delete() { - XLocker locker(_lock); - _enabled++; -} - -template -void XSafeDeleteImpl::disable_deferred_delete() { - XArray deferred; - - { - XLocker locker(_lock); - assert(_enabled > 0, "Invalid state"); - if (--_enabled == 0) { - deferred.swap(&_deferred); - } - } - - XArrayIterator iter(&deferred); - for (ItemT* item; iter.next(&item);) { - immediate_delete(item); - } -} - -template -void XSafeDeleteImpl::operator()(ItemT* item) { - if (!deferred_delete(item)) { - immediate_delete(item); - } -} - -template -XSafeDelete::XSafeDelete() : - XSafeDeleteImpl(&_lock), - _lock() {} - -template -XSafeDeleteNoLock::XSafeDeleteNoLock() : - XSafeDeleteImpl(nullptr) {} - -#endif // SHARE_GC_X_XSAFEDELETE_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xServiceability.cpp b/src/hotspot/share/gc/x/xServiceability.cpp deleted file mode 100644 index f3b51b6bb4a35..0000000000000 --- a/src/hotspot/share/gc/x/xServiceability.cpp +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
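The xSafeDelete implementation above defers deletions while any critical phase is active: while the enable count is non-zero, operator() queues the object under a lock instead of freeing it, and the queue is drained once the count drops back to zero. A simplified stand-in (SafeDelete here is a made-up single-object version; the removed code also handles array types via std::remove_extent and delete[]):

    // Deferred-delete sketch; simplified to single-object delete.
    #include <mutex>
    #include <vector>

    template <typename T>
    class SafeDelete {
      std::mutex      _lock;
      int             _enabled = 0;
      std::vector<T*> _deferred;

    public:
      void enable_deferred_delete() {
        std::lock_guard<std::mutex> locker(_lock);
        _enabled++;
      }

      void disable_deferred_delete() {
        std::vector<T*> drained;
        {
          std::lock_guard<std::mutex> locker(_lock);
          if (--_enabled == 0) {
            drained.swap(_deferred);   // drain, but delete outside the lock
          }
        }
        for (T* item : drained) {
          delete item;
        }
      }

      void operator()(T* item) {
        {
          std::lock_guard<std::mutex> locker(_lock);
          if (_enabled > 0) {
            _deferred.push_back(item); // someone may still hold a stale reference
            return;
          }
        }
        delete item;                   // no phase active, reclaim immediately
      }
    };

    int main() {
      SafeDelete<int> safe_delete;
      safe_delete.enable_deferred_delete();
      safe_delete(new int(42));              // queued, not freed yet
      safe_delete.disable_deferred_delete(); // drains the queue and frees
      return 0;
    }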
- */ - -#include "precompiled.hpp" -#include "gc/shared/generationCounters.hpp" -#include "gc/shared/hSpaceCounters.hpp" -#include "gc/x/xCollectedHeap.hpp" -#include "gc/x/xHeap.inline.hpp" -#include "gc/x/xServiceability.hpp" -#include "memory/metaspaceCounters.hpp" -#include "runtime/perfData.hpp" - -class XGenerationCounters : public GenerationCounters { -public: - XGenerationCounters(const char* name, int ordinal, int spaces, - size_t min_capacity, size_t max_capacity, size_t curr_capacity) : - GenerationCounters(name, ordinal, spaces, - min_capacity, max_capacity, curr_capacity) {} - - void update_capacity(size_t capacity) { - _current_size->set_value(capacity); - } -}; - -// Class to expose perf counters used by jstat. -class XServiceabilityCounters : public CHeapObj { -private: - XGenerationCounters _generation_counters; - HSpaceCounters _space_counters; - CollectorCounters _collector_counters; - -public: - XServiceabilityCounters(size_t min_capacity, size_t max_capacity); - - CollectorCounters* collector_counters(); - - void update_sizes(); -}; - -XServiceabilityCounters::XServiceabilityCounters(size_t min_capacity, size_t max_capacity) : - // generation.1 - _generation_counters("old" /* name */, - 1 /* ordinal */, - 1 /* spaces */, - min_capacity /* min_capacity */, - max_capacity /* max_capacity */, - min_capacity /* curr_capacity */), - // generation.1.space.0 - _space_counters(_generation_counters.name_space(), - "space" /* name */, - 0 /* ordinal */, - max_capacity /* max_capacity */, - min_capacity /* init_capacity */), - // gc.collector.2 - _collector_counters("Z concurrent cycle pauses" /* name */, - 2 /* ordinal */) {} - -CollectorCounters* XServiceabilityCounters::collector_counters() { - return &_collector_counters; -} - -void XServiceabilityCounters::update_sizes() { - if (UsePerfData) { - const size_t capacity = XHeap::heap()->capacity(); - const size_t used = MIN2(XHeap::heap()->used(), capacity); - - _generation_counters.update_capacity(capacity); - _space_counters.update_capacity(capacity); - _space_counters.update_used(used); - - MetaspaceCounters::update_performance_counters(); - } -} - -XServiceabilityMemoryPool::XServiceabilityMemoryPool(size_t min_capacity, size_t max_capacity) : - CollectedMemoryPool("ZHeap", - min_capacity, - max_capacity, - true /* support_usage_threshold */) {} - -size_t XServiceabilityMemoryPool::used_in_bytes() { - return XHeap::heap()->used(); -} - -MemoryUsage XServiceabilityMemoryPool::get_memory_usage() { - const size_t committed = XHeap::heap()->capacity(); - const size_t used = MIN2(XHeap::heap()->used(), committed); - - return MemoryUsage(initial_size(), used, committed, max_size()); -} - -XServiceabilityMemoryManager::XServiceabilityMemoryManager(const char* name, - XServiceabilityMemoryPool* pool) : - GCMemoryManager(name) { - add_pool(pool); -} - -XServiceability::XServiceability(size_t min_capacity, size_t max_capacity) : - _min_capacity(min_capacity), - _max_capacity(max_capacity), - _memory_pool(_min_capacity, _max_capacity), - _cycle_memory_manager("ZGC Cycles", &_memory_pool), - _pause_memory_manager("ZGC Pauses", &_memory_pool), - _counters(nullptr) {} - -void XServiceability::initialize() { - _counters = new XServiceabilityCounters(_min_capacity, _max_capacity); -} - -MemoryPool* XServiceability::memory_pool() { - return &_memory_pool; -} - -GCMemoryManager* XServiceability::cycle_memory_manager() { - return &_cycle_memory_manager; -} - -GCMemoryManager* XServiceability::pause_memory_manager() { - return 
&_pause_memory_manager; -} - -XServiceabilityCounters* XServiceability::counters() { - return _counters; -} - -XServiceabilityCycleTracer::XServiceabilityCycleTracer() : - _memory_manager_stats(XHeap::heap()->serviceability_cycle_memory_manager(), - XCollectedHeap::heap()->gc_cause(), - "end of GC cycle", - true /* allMemoryPoolsAffected */, - true /* recordGCBeginTime */, - true /* recordPreGCUsage */, - true /* recordPeakUsage */, - true /* recordPostGCUsage */, - true /* recordAccumulatedGCTime */, - true /* recordGCEndTime */, - true /* countCollection */) {} - -XServiceabilityPauseTracer::XServiceabilityPauseTracer() : - _svc_gc_marker(SvcGCMarker::CONCURRENT), - _counters_stats(XHeap::heap()->serviceability_counters()->collector_counters()), - _memory_manager_stats(XHeap::heap()->serviceability_pause_memory_manager(), - XCollectedHeap::heap()->gc_cause(), - "end of GC pause", - true /* allMemoryPoolsAffected */, - true /* recordGCBeginTime */, - false /* recordPreGCUsage */, - false /* recordPeakUsage */, - false /* recordPostGCUsage */, - true /* recordAccumulatedGCTime */, - true /* recordGCEndTime */, - true /* countCollection */) {} - -XServiceabilityPauseTracer::~XServiceabilityPauseTracer() { - XHeap::heap()->serviceability_counters()->update_sizes(); - MemoryService::track_memory_usage(); -} diff --git a/src/hotspot/share/gc/x/xServiceability.hpp b/src/hotspot/share/gc/x/xServiceability.hpp deleted file mode 100644 index d8e2fc9ba7973..0000000000000 --- a/src/hotspot/share/gc/x/xServiceability.hpp +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XSERVICEABILITY_HPP -#define SHARE_GC_X_XSERVICEABILITY_HPP - -#include "gc/shared/collectorCounters.hpp" -#include "gc/shared/gcVMOperations.hpp" -#include "memory/allocation.hpp" -#include "services/memoryManager.hpp" -#include "services/memoryPool.hpp" -#include "services/memoryService.hpp" - -class XServiceabilityCounters; - -class XServiceabilityMemoryPool : public CollectedMemoryPool { -public: - XServiceabilityMemoryPool(size_t min_capacity, size_t max_capacity); - - virtual size_t used_in_bytes(); - virtual MemoryUsage get_memory_usage(); -}; - -class XServiceabilityMemoryManager : public GCMemoryManager { -public: - XServiceabilityMemoryManager(const char* name, - XServiceabilityMemoryPool* pool); -}; - -class XServiceability { -private: - const size_t _min_capacity; - const size_t _max_capacity; - XServiceabilityMemoryPool _memory_pool; - XServiceabilityMemoryManager _cycle_memory_manager; - XServiceabilityMemoryManager _pause_memory_manager; - XServiceabilityCounters* _counters; - -public: - XServiceability(size_t min_capacity, size_t max_capacity); - - void initialize(); - - MemoryPool* memory_pool(); - GCMemoryManager* cycle_memory_manager(); - GCMemoryManager* pause_memory_manager(); - XServiceabilityCounters* counters(); -}; - -class XServiceabilityCycleTracer : public StackObj { -private: - TraceMemoryManagerStats _memory_manager_stats; - -public: - XServiceabilityCycleTracer(); -}; - -class XServiceabilityPauseTracer : public StackObj { -private: - SvcGCMarker _svc_gc_marker; - TraceCollectorStats _counters_stats; - TraceMemoryManagerStats _memory_manager_stats; - -public: - XServiceabilityPauseTracer(); - ~XServiceabilityPauseTracer(); -}; - -#endif // SHARE_GC_X_XSERVICEABILITY_HPP diff --git a/src/hotspot/share/gc/x/xStackWatermark.cpp b/src/hotspot/share/gc/x/xStackWatermark.cpp deleted file mode 100644 index b75113a7529fc..0000000000000 --- a/src/hotspot/share/gc/x/xStackWatermark.cpp +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/x/xAddress.hpp" -#include "gc/x/xBarrier.inline.hpp" -#include "gc/x/xStackWatermark.hpp" -#include "gc/x/xThread.inline.hpp" -#include "gc/x/xThreadLocalAllocBuffer.hpp" -#include "gc/x/xThreadLocalData.hpp" -#include "gc/x/xVerify.hpp" -#include "memory/resourceArea.inline.hpp" -#include "runtime/frame.inline.hpp" -#include "utilities/preserveException.hpp" - -XOnStackNMethodClosure::XOnStackNMethodClosure() : - _bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()) {} - -void XOnStackNMethodClosure::do_nmethod(nmethod* nm) { - const bool result = _bs_nm->nmethod_entry_barrier(nm); - assert(result, "NMethod on-stack must be alive"); -} - -ThreadLocalAllocStats& XStackWatermark::stats() { - return _stats; -} - -uint32_t XStackWatermark::epoch_id() const { - return *XAddressBadMaskHighOrderBitsAddr; -} - -XStackWatermark::XStackWatermark(JavaThread* jt) : - StackWatermark(jt, StackWatermarkKind::gc, *XAddressBadMaskHighOrderBitsAddr), - _jt_cl(), - _nm_cl(), - _stats() {} - -OopClosure* XStackWatermark::closure_from_context(void* context) { - if (context != nullptr) { - assert(XThread::is_worker(), "Unexpected thread passing in context: " PTR_FORMAT, p2i(context)); - return reinterpret_cast(context); - } else { - return &_jt_cl; - } -} - -void XStackWatermark::start_processing_impl(void* context) { - // Verify the head (no_frames) of the thread is bad before fixing it. - XVerify::verify_thread_head_bad(_jt); - - // Process the non-frame part of the thread - _jt->oops_do_no_frames(closure_from_context(context), &_nm_cl); - XThreadLocalData::do_invisible_root(_jt, XBarrier::load_barrier_on_invisible_root_oop_field); - - // Verification of frames is done after processing of the "head" (no_frames). - // The reason is that the exception oop is fiddled with during frame processing. - XVerify::verify_thread_frames_bad(_jt); - - // Update thread local address bad mask - XThreadLocalData::set_address_bad_mask(_jt, XAddressBadMask); - - // Retire TLAB - if (XGlobalPhase == XPhaseMark) { - XThreadLocalAllocBuffer::retire(_jt, &_stats); - } else { - XThreadLocalAllocBuffer::remap(_jt); - } - - // Publishes the processing start to concurrent threads - StackWatermark::start_processing_impl(context); -} - -void XStackWatermark::process(const frame& fr, RegisterMap& register_map, void* context) { - XVerify::verify_frame_bad(fr, register_map); - fr.oops_do(closure_from_context(context), &_nm_cl, ®ister_map, DerivedPointerIterationMode::_directly); -} diff --git a/src/hotspot/share/gc/x/xStackWatermark.hpp b/src/hotspot/share/gc/x/xStackWatermark.hpp deleted file mode 100644 index 9b73860bed091..0000000000000 --- a/src/hotspot/share/gc/x/xStackWatermark.hpp +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
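The start_processing_impl removed above encodes an ordering contract: the non-frame part of the thread is fixed first, frames are verified and handled afterwards, the thread-local bad mask is only flipped once the head has been processed, and the base class publishes the processing state last, so concurrent observers never see a "processed" thread with unfixed roots. A loose standalone analogy of that publish-after-process idea, with invented names (g_epoch, ThreadStackState) and none of the real StackWatermark locking or frame walking:

#include <atomic>
#include <cstdint>
#include <cstdio>

// Global "epoch" loosely standing in for the high-order bits of the bad mask;
// a new GC phase bumps it, which invalidates every thread's processed state.
std::atomic<uint32_t> g_epoch{1};

struct ThreadStackState {
    std::atomic<uint32_t> processed_epoch{0};

    // Returns true if this call actually did the (expensive) root processing.
    bool ensure_processed() {
        const uint32_t current = g_epoch.load(std::memory_order_acquire);
        if (processed_epoch.load(std::memory_order_acquire) == current) {
            return false;  // already fixed for this epoch, nothing to do
        }
        // ... fix thread roots here (non-frame roots first, then frames) ...
        puts("processing thread roots for new epoch");
        // Publish only after processing is complete, so observers never see
        // the new epoch paired with unprocessed roots.
        processed_epoch.store(current, std::memory_order_release);
        return true;
    }
};

int main() {
    ThreadStackState t;
    t.ensure_processed();                              // does the work
    t.ensure_processed();                              // no-op, same epoch
    g_epoch.fetch_add(1, std::memory_order_acq_rel);   // GC starts a new phase
    t.ensure_processed();                              // works again
    return 0;
}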
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XSTACKWATERMARK_HPP -#define SHARE_GC_X_XSTACKWATERMARK_HPP - -#include "gc/shared/barrierSet.hpp" -#include "gc/shared/barrierSetNMethod.hpp" -#include "gc/shared/threadLocalAllocBuffer.hpp" -#include "gc/x/xBarrier.hpp" -#include "memory/allocation.hpp" -#include "memory/iterator.hpp" -#include "oops/oopsHierarchy.hpp" -#include "runtime/stackWatermark.hpp" -#include "utilities/globalDefinitions.hpp" - -class frame; -class JavaThread; - -class XOnStackNMethodClosure : public NMethodClosure { -private: - BarrierSetNMethod* _bs_nm; - - virtual void do_nmethod(nmethod* nm); - -public: - XOnStackNMethodClosure(); -}; - -class XStackWatermark : public StackWatermark { -private: - XLoadBarrierOopClosure _jt_cl; - XOnStackNMethodClosure _nm_cl; - ThreadLocalAllocStats _stats; - - OopClosure* closure_from_context(void* context); - - virtual uint32_t epoch_id() const; - virtual void start_processing_impl(void* context); - virtual void process(const frame& fr, RegisterMap& register_map, void* context); - -public: - XStackWatermark(JavaThread* jt); - - ThreadLocalAllocStats& stats(); -}; - -#endif // SHARE_GC_X_XSTACKWATERMARK_HPP diff --git a/src/hotspot/share/gc/x/xStat.cpp b/src/hotspot/share/gc/x/xStat.cpp deleted file mode 100644 index c445e9513970f..0000000000000 --- a/src/hotspot/share/gc/x/xStat.cpp +++ /dev/null @@ -1,1513 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/shared/gc_globals.hpp" -#include "gc/x/xAbort.inline.hpp" -#include "gc/x/xCollectedHeap.hpp" -#include "gc/x/xCPU.inline.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xNMethodTable.hpp" -#include "gc/x/xPageAllocator.inline.hpp" -#include "gc/x/xRelocationSetSelector.inline.hpp" -#include "gc/x/xStat.hpp" -#include "gc/x/xThread.inline.hpp" -#include "gc/x/xTracer.inline.hpp" -#include "gc/x/xUtils.hpp" -#include "memory/metaspaceUtils.hpp" -#include "memory/resourceArea.hpp" -#include "runtime/atomic.hpp" -#include "runtime/os.hpp" -#include "runtime/timer.hpp" -#include "utilities/align.hpp" -#include "utilities/debug.hpp" -#include "utilities/ticks.hpp" - -#define XSIZE_FMT SIZE_FORMAT "M(%.0f%%)" -#define XSIZE_ARGS_WITH_MAX(size, max) ((size) / M), (percent_of(size, max)) -#define XSIZE_ARGS(size) XSIZE_ARGS_WITH_MAX(size, XStatHeap::max_capacity()) - -#define XTABLE_ARGS_NA "%9s", "-" -#define XTABLE_ARGS(size) SIZE_FORMAT_W(8) "M (%.0f%%)", \ - ((size) / M), (percent_of(size, XStatHeap::max_capacity())) - -// -// Stat sampler/counter data -// -struct XStatSamplerData { - uint64_t _nsamples; - uint64_t _sum; - uint64_t _max; - - XStatSamplerData() : - _nsamples(0), - _sum(0), - _max(0) {} - - void add(const XStatSamplerData& new_sample) { - _nsamples += new_sample._nsamples; - _sum += new_sample._sum; - _max = MAX2(_max, new_sample._max); - } -}; - -struct XStatCounterData { - uint64_t _counter; - - XStatCounterData() : - _counter(0) {} -}; - -// -// Stat sampler history -// -template -class XStatSamplerHistoryInterval { -private: - size_t _next; - XStatSamplerData _samples[size]; - XStatSamplerData _accumulated; - XStatSamplerData _total; - -public: - XStatSamplerHistoryInterval() : - _next(0), - _samples(), - _accumulated(), - _total() {} - - bool add(const XStatSamplerData& new_sample) { - // Insert sample - const XStatSamplerData old_sample = _samples[_next]; - _samples[_next] = new_sample; - - // Adjust accumulated - _accumulated._nsamples += new_sample._nsamples; - _accumulated._sum += new_sample._sum; - _accumulated._max = MAX2(_accumulated._max, new_sample._max); - - // Adjust total - _total._nsamples -= old_sample._nsamples; - _total._sum -= old_sample._sum; - _total._nsamples += new_sample._nsamples; - _total._sum += new_sample._sum; - if (_total._max < new_sample._max) { - // Found new max - _total._max = new_sample._max; - } else if (_total._max == old_sample._max) { - // Removed old max, reset and find new max - _total._max = 0; - for (size_t i = 0; i < size; i++) { - if (_total._max < _samples[i]._max) { - _total._max = _samples[i]._max; - } - } - } - - // Adjust next - if (++_next == size) { - _next = 0; - - // Clear accumulated - const XStatSamplerData zero; - _accumulated = zero; - - // Became full - return true; - } - - // Not yet full - return false; - } - - const XStatSamplerData& total() const { - return _total; - } - - const XStatSamplerData& accumulated() const { - return _accumulated; - } -}; - -class XStatSamplerHistory : public CHeapObj { -private: - XStatSamplerHistoryInterval<10> _10seconds; - XStatSamplerHistoryInterval<60> _10minutes; - XStatSamplerHistoryInterval<60> _10hours; - XStatSamplerData _total; - - uint64_t avg(uint64_t sum, uint64_t nsamples) const { - return (nsamples > 0) ? 
sum / nsamples : 0; - } - -public: - XStatSamplerHistory() : - _10seconds(), - _10minutes(), - _10hours(), - _total() {} - - void add(const XStatSamplerData& new_sample) { - if (_10seconds.add(new_sample)) { - if (_10minutes.add(_10seconds.total())) { - if (_10hours.add(_10minutes.total())) { - _total.add(_10hours.total()); - } - } - } - } - - uint64_t avg_10_seconds() const { - const uint64_t sum = _10seconds.total()._sum; - const uint64_t nsamples = _10seconds.total()._nsamples; - return avg(sum, nsamples); - } - - uint64_t avg_10_minutes() const { - const uint64_t sum = _10seconds.accumulated()._sum + - _10minutes.total()._sum; - const uint64_t nsamples = _10seconds.accumulated()._nsamples + - _10minutes.total()._nsamples; - return avg(sum, nsamples); - } - - uint64_t avg_10_hours() const { - const uint64_t sum = _10seconds.accumulated()._sum + - _10minutes.accumulated()._sum + - _10hours.total()._sum; - const uint64_t nsamples = _10seconds.accumulated()._nsamples + - _10minutes.accumulated()._nsamples + - _10hours.total()._nsamples; - return avg(sum, nsamples); - } - - uint64_t avg_total() const { - const uint64_t sum = _10seconds.accumulated()._sum + - _10minutes.accumulated()._sum + - _10hours.accumulated()._sum + - _total._sum; - const uint64_t nsamples = _10seconds.accumulated()._nsamples + - _10minutes.accumulated()._nsamples + - _10hours.accumulated()._nsamples + - _total._nsamples; - return avg(sum, nsamples); - } - - uint64_t max_10_seconds() const { - return _10seconds.total()._max; - } - - uint64_t max_10_minutes() const { - return MAX2(_10seconds.accumulated()._max, - _10minutes.total()._max); - } - - uint64_t max_10_hours() const { - return MAX3(_10seconds.accumulated()._max, - _10minutes.accumulated()._max, - _10hours.total()._max); - } - - uint64_t max_total() const { - return MAX4(_10seconds.accumulated()._max, - _10minutes.accumulated()._max, - _10hours.accumulated()._max, - _total._max); - } -}; - -// -// Stat unit printers -// -void XStatUnitTime(LogTargetHandle log, const XStatSampler& sampler, const XStatSamplerHistory& history) { - log.print(" %10s: %-41s " - "%9.3f / %-9.3f " - "%9.3f / %-9.3f " - "%9.3f / %-9.3f " - "%9.3f / %-9.3f ms", - sampler.group(), - sampler.name(), - TimeHelper::counter_to_millis(history.avg_10_seconds()), - TimeHelper::counter_to_millis(history.max_10_seconds()), - TimeHelper::counter_to_millis(history.avg_10_minutes()), - TimeHelper::counter_to_millis(history.max_10_minutes()), - TimeHelper::counter_to_millis(history.avg_10_hours()), - TimeHelper::counter_to_millis(history.max_10_hours()), - TimeHelper::counter_to_millis(history.avg_total()), - TimeHelper::counter_to_millis(history.max_total())); -} - -void XStatUnitBytes(LogTargetHandle log, const XStatSampler& sampler, const XStatSamplerHistory& history) { - log.print(" %10s: %-41s " - UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " - UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " - UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " - UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " MB", - sampler.group(), - sampler.name(), - history.avg_10_seconds() / M, - history.max_10_seconds() / M, - history.avg_10_minutes() / M, - history.max_10_minutes() / M, - history.avg_10_hours() / M, - history.max_10_hours() / M, - history.avg_total() / M, - history.max_total() / M); -} - -void XStatUnitThreads(LogTargetHandle log, const XStatSampler& sampler, const XStatSamplerHistory& history) { - log.print(" %10s: %-41s " - UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " - UINT64_FORMAT_W(9) " / " 
UINT64_FORMAT_W(-9) " " - UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " - UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " threads", - sampler.group(), - sampler.name(), - history.avg_10_seconds(), - history.max_10_seconds(), - history.avg_10_minutes(), - history.max_10_minutes(), - history.avg_10_hours(), - history.max_10_hours(), - history.avg_total(), - history.max_total()); -} - -void XStatUnitBytesPerSecond(LogTargetHandle log, const XStatSampler& sampler, const XStatSamplerHistory& history) { - log.print(" %10s: %-41s " - UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " - UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " - UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " - UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " MB/s", - sampler.group(), - sampler.name(), - history.avg_10_seconds() / M, - history.max_10_seconds() / M, - history.avg_10_minutes() / M, - history.max_10_minutes() / M, - history.avg_10_hours() / M, - history.max_10_hours() / M, - history.avg_total() / M, - history.max_total() / M); -} - -void XStatUnitOpsPerSecond(LogTargetHandle log, const XStatSampler& sampler, const XStatSamplerHistory& history) { - log.print(" %10s: %-41s " - UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " - UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " - UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " " - UINT64_FORMAT_W(9) " / " UINT64_FORMAT_W(-9) " ops/s", - sampler.group(), - sampler.name(), - history.avg_10_seconds(), - history.max_10_seconds(), - history.avg_10_minutes(), - history.max_10_minutes(), - history.avg_10_hours(), - history.max_10_hours(), - history.avg_total(), - history.max_total()); -} - -// -// Stat value -// -uintptr_t XStatValue::_base = 0; -uint32_t XStatValue::_cpu_offset = 0; - -XStatValue::XStatValue(const char* group, - const char* name, - uint32_t id, - uint32_t size) : - _group(group), - _name(name), - _id(id), - _offset(_cpu_offset) { - assert(_base == 0, "Already initialized"); - _cpu_offset += size; -} - -template -T* XStatValue::get_cpu_local(uint32_t cpu) const { - assert(_base != 0, "Not initialized"); - const uintptr_t cpu_base = _base + (_cpu_offset * cpu); - const uintptr_t value_addr = cpu_base + _offset; - return (T*)value_addr; -} - -void XStatValue::initialize() { - // Finalize and align CPU offset - _cpu_offset = align_up(_cpu_offset, (uint32_t)XCacheLineSize); - - // Allocation aligned memory - const size_t size = _cpu_offset * XCPU::count(); - _base = XUtils::alloc_aligned(XCacheLineSize, size); -} - -const char* XStatValue::group() const { - return _group; -} - -const char* XStatValue::name() const { - return _name; -} - -uint32_t XStatValue::id() const { - return _id; -} - -// -// Stat iterable value -// - -template -XStatIterableValue::XStatIterableValue(const char* group, - const char* name, - uint32_t size) : - XStatValue(group, name, _count++, size), - _next(insert()) {} - -template -T* XStatIterableValue::insert() const { - T* const next = _first; - _first = (T*)this; - return next; -} - -template -void XStatIterableValue::sort() { - T* first_unsorted = _first; - _first = nullptr; - - while (first_unsorted != nullptr) { - T* const value = first_unsorted; - first_unsorted = value->_next; - value->_next = nullptr; - - T** current = &_first; - - while (*current != nullptr) { - // First sort by group, then by name - const int group_cmp = strcmp((*current)->group(), value->group()); - if ((group_cmp > 0) || (group_cmp == 0 && strcmp((*current)->name(), value->name()) > 0)) { - break; - } - - current = &(*current)->_next; - } - value->_next = 
*current; - *current = value; - } -} - -// -// Stat sampler -// -XStatSampler::XStatSampler(const char* group, const char* name, XStatUnitPrinter printer) : - XStatIterableValue(group, name, sizeof(XStatSamplerData)), - _printer(printer) {} - -XStatSamplerData* XStatSampler::get() const { - return get_cpu_local(XCPU::id()); -} - -XStatSamplerData XStatSampler::collect_and_reset() const { - XStatSamplerData all; - - const uint32_t ncpus = XCPU::count(); - for (uint32_t i = 0; i < ncpus; i++) { - XStatSamplerData* const cpu_data = get_cpu_local(i); - if (cpu_data->_nsamples > 0) { - const uint64_t nsamples = Atomic::xchg(&cpu_data->_nsamples, (uint64_t)0); - const uint64_t sum = Atomic::xchg(&cpu_data->_sum, (uint64_t)0); - const uint64_t max = Atomic::xchg(&cpu_data->_max, (uint64_t)0); - all._nsamples += nsamples; - all._sum += sum; - if (all._max < max) { - all._max = max; - } - } - } - - return all; -} - -XStatUnitPrinter XStatSampler::printer() const { - return _printer; -} - -// -// Stat counter -// -XStatCounter::XStatCounter(const char* group, const char* name, XStatUnitPrinter printer) : - XStatIterableValue(group, name, sizeof(XStatCounterData)), - _sampler(group, name, printer) {} - -XStatCounterData* XStatCounter::get() const { - return get_cpu_local(XCPU::id()); -} - -void XStatCounter::sample_and_reset() const { - uint64_t counter = 0; - - const uint32_t ncpus = XCPU::count(); - for (uint32_t i = 0; i < ncpus; i++) { - XStatCounterData* const cpu_data = get_cpu_local(i); - counter += Atomic::xchg(&cpu_data->_counter, (uint64_t)0); - } - - XStatSample(_sampler, counter); -} - -// -// Stat unsampled counter -// -XStatUnsampledCounter::XStatUnsampledCounter(const char* name) : - XStatIterableValue("Unsampled", name, sizeof(XStatCounterData)) {} - -XStatCounterData* XStatUnsampledCounter::get() const { - return get_cpu_local(XCPU::id()); -} - -XStatCounterData XStatUnsampledCounter::collect_and_reset() const { - XStatCounterData all; - - const uint32_t ncpus = XCPU::count(); - for (uint32_t i = 0; i < ncpus; i++) { - XStatCounterData* const cpu_data = get_cpu_local(i); - all._counter += Atomic::xchg(&cpu_data->_counter, (uint64_t)0); - } - - return all; -} - -// -// Stat MMU (Minimum Mutator Utilization) -// -XStatMMUPause::XStatMMUPause() : - _start(0.0), - _end(0.0) {} - -XStatMMUPause::XStatMMUPause(const Ticks& start, const Ticks& end) : - _start(TimeHelper::counter_to_millis(start.value())), - _end(TimeHelper::counter_to_millis(end.value())) {} - -double XStatMMUPause::end() const { - return _end; -} - -double XStatMMUPause::overlap(double start, double end) const { - const double start_max = MAX2(start, _start); - const double end_min = MIN2(end, _end); - - if (end_min > start_max) { - // Overlap found - return end_min - start_max; - } - - // No overlap - return 0.0; -} - -size_t XStatMMU::_next = 0; -size_t XStatMMU::_npauses = 0; -XStatMMUPause XStatMMU::_pauses[200]; -double XStatMMU::_mmu_2ms = 100.0; -double XStatMMU::_mmu_5ms = 100.0; -double XStatMMU::_mmu_10ms = 100.0; -double XStatMMU::_mmu_20ms = 100.0; -double XStatMMU::_mmu_50ms = 100.0; -double XStatMMU::_mmu_100ms = 100.0; - -const XStatMMUPause& XStatMMU::pause(size_t index) { - return _pauses[(_next - index - 1) % ARRAY_SIZE(_pauses)]; -} - -double XStatMMU::calculate_mmu(double time_slice) { - const double end = pause(0).end(); - const double start = end - time_slice; - double time_paused = 0.0; - - // Find all overlapping pauses - for (size_t i = 0; i < _npauses; i++) { - const double overlap = 
pause(i).overlap(start, end); - if (overlap == 0.0) { - // No overlap - break; - } - - time_paused += overlap; - } - - // Calculate MMU - const double time_mutator = time_slice - time_paused; - return percent_of(time_mutator, time_slice); -} - -void XStatMMU::register_pause(const Ticks& start, const Ticks& end) { - // Add pause - const size_t index = _next++ % ARRAY_SIZE(_pauses); - _pauses[index] = XStatMMUPause(start, end); - _npauses = MIN2(_npauses + 1, ARRAY_SIZE(_pauses)); - - // Recalculate MMUs - _mmu_2ms = MIN2(_mmu_2ms, calculate_mmu(2)); - _mmu_5ms = MIN2(_mmu_5ms, calculate_mmu(5)); - _mmu_10ms = MIN2(_mmu_10ms, calculate_mmu(10)); - _mmu_20ms = MIN2(_mmu_20ms, calculate_mmu(20)); - _mmu_50ms = MIN2(_mmu_50ms, calculate_mmu(50)); - _mmu_100ms = MIN2(_mmu_100ms, calculate_mmu(100)); -} - -void XStatMMU::print() { - log_info(gc, mmu)("MMU: 2ms/%.1f%%, 5ms/%.1f%%, 10ms/%.1f%%, 20ms/%.1f%%, 50ms/%.1f%%, 100ms/%.1f%%", - _mmu_2ms, _mmu_5ms, _mmu_10ms, _mmu_20ms, _mmu_50ms, _mmu_100ms); -} - -// -// Stat phases -// -ConcurrentGCTimer XStatPhase::_timer; - -XStatPhase::XStatPhase(const char* group, const char* name) : - _sampler(group, name, XStatUnitTime) {} - -void XStatPhase::log_start(LogTargetHandle log, bool thread) const { - if (!log.is_enabled()) { - return; - } - - if (thread) { - ResourceMark rm; - log.print("%s (%s)", name(), Thread::current()->name()); - } else { - log.print("%s", name()); - } -} - -void XStatPhase::log_end(LogTargetHandle log, const Tickspan& duration, bool thread) const { - if (!log.is_enabled()) { - return; - } - - if (thread) { - ResourceMark rm; - log.print("%s (%s) %.3fms", name(), Thread::current()->name(), TimeHelper::counter_to_millis(duration.value())); - } else { - log.print("%s %.3fms", name(), TimeHelper::counter_to_millis(duration.value())); - } -} - -ConcurrentGCTimer* XStatPhase::timer() { - return &_timer; -} - -const char* XStatPhase::name() const { - return _sampler.name(); -} - -XStatPhaseCycle::XStatPhaseCycle(const char* name) : - XStatPhase("Collector", name) {} - -void XStatPhaseCycle::register_start(const Ticks& start) const { - timer()->register_gc_start(start); - - XTracer::tracer()->report_gc_start(XCollectedHeap::heap()->gc_cause(), start); - - XCollectedHeap::heap()->print_heap_before_gc(); - XCollectedHeap::heap()->trace_heap_before_gc(XTracer::tracer()); - - log_info(gc, start)("Garbage Collection (%s)", - GCCause::to_string(XCollectedHeap::heap()->gc_cause())); -} - -void XStatPhaseCycle::register_end(const Ticks& start, const Ticks& end) const { - if (XAbort::should_abort()) { - log_info(gc)("Garbage Collection (%s) Aborted", - GCCause::to_string(XCollectedHeap::heap()->gc_cause())); - return; - } - - timer()->register_gc_end(end); - - XCollectedHeap::heap()->print_heap_after_gc(); - XCollectedHeap::heap()->trace_heap_after_gc(XTracer::tracer()); - - XTracer::tracer()->report_gc_end(end, timer()->time_partitions()); - - const Tickspan duration = end - start; - XStatSample(_sampler, duration.value()); - - XStatLoad::print(); - XStatMMU::print(); - XStatMark::print(); - XStatNMethods::print(); - XStatMetaspace::print(); - XStatReferences::print(); - XStatRelocation::print(); - XStatHeap::print(); - - log_info(gc)("Garbage Collection (%s) " XSIZE_FMT "->" XSIZE_FMT, - GCCause::to_string(XCollectedHeap::heap()->gc_cause()), - XSIZE_ARGS(XStatHeap::used_at_mark_start()), - XSIZE_ARGS(XStatHeap::used_at_relocate_end())); -} - -Tickspan XStatPhasePause::_max; - -XStatPhasePause::XStatPhasePause(const char* name) : - 
XStatPhase("Phase", name) {} - -const Tickspan& XStatPhasePause::max() { - return _max; -} - -void XStatPhasePause::register_start(const Ticks& start) const { - timer()->register_gc_pause_start(name(), start); - - LogTarget(Debug, gc, phases, start) log; - log_start(log); -} - -void XStatPhasePause::register_end(const Ticks& start, const Ticks& end) const { - timer()->register_gc_pause_end(end); - - const Tickspan duration = end - start; - XStatSample(_sampler, duration.value()); - - // Track max pause time - if (_max < duration) { - _max = duration; - } - - // Track minimum mutator utilization - XStatMMU::register_pause(start, end); - - LogTarget(Info, gc, phases) log; - log_end(log, duration); -} - -XStatPhaseConcurrent::XStatPhaseConcurrent(const char* name) : - XStatPhase("Phase", name) {} - -void XStatPhaseConcurrent::register_start(const Ticks& start) const { - timer()->register_gc_concurrent_start(name(), start); - - LogTarget(Debug, gc, phases, start) log; - log_start(log); -} - -void XStatPhaseConcurrent::register_end(const Ticks& start, const Ticks& end) const { - if (XAbort::should_abort()) { - return; - } - - timer()->register_gc_concurrent_end(end); - - const Tickspan duration = end - start; - XStatSample(_sampler, duration.value()); - - LogTarget(Info, gc, phases) log; - log_end(log, duration); -} - -XStatSubPhase::XStatSubPhase(const char* name) : - XStatPhase("Subphase", name) {} - -void XStatSubPhase::register_start(const Ticks& start) const { - if (XThread::is_worker()) { - LogTarget(Trace, gc, phases, start) log; - log_start(log, true /* thread */); - } else { - LogTarget(Debug, gc, phases, start) log; - log_start(log, false /* thread */); - } -} - -void XStatSubPhase::register_end(const Ticks& start, const Ticks& end) const { - if (XAbort::should_abort()) { - return; - } - - XTracer::tracer()->report_thread_phase(name(), start, end); - - const Tickspan duration = end - start; - XStatSample(_sampler, duration.value()); - - if (XThread::is_worker()) { - LogTarget(Trace, gc, phases) log; - log_end(log, duration, true /* thread */); - } else { - LogTarget(Debug, gc, phases) log; - log_end(log, duration, false /* thread */); - } -} - -XStatCriticalPhase::XStatCriticalPhase(const char* name, bool verbose) : - XStatPhase("Critical", name), - _counter("Critical", name, XStatUnitOpsPerSecond), - _verbose(verbose) {} - -void XStatCriticalPhase::register_start(const Ticks& start) const { - // This is called from sensitive contexts, for example before an allocation stall - // has been resolved. This means we must not access any oops in here since that - // could lead to infinite recursion. Without access to the thread name we can't - // really log anything useful here. 
-} - -void XStatCriticalPhase::register_end(const Ticks& start, const Ticks& end) const { - XTracer::tracer()->report_thread_phase(name(), start, end); - - const Tickspan duration = end - start; - XStatSample(_sampler, duration.value()); - XStatInc(_counter); - - if (_verbose) { - LogTarget(Info, gc) log; - log_end(log, duration, true /* thread */); - } else { - LogTarget(Debug, gc) log; - log_end(log, duration, true /* thread */); - } -} - -// -// Stat timer -// -THREAD_LOCAL uint32_t XStatTimerDisable::_active = 0; - -// -// Stat sample/inc -// -void XStatSample(const XStatSampler& sampler, uint64_t value) { - XStatSamplerData* const cpu_data = sampler.get(); - Atomic::add(&cpu_data->_nsamples, 1u); - Atomic::add(&cpu_data->_sum, value); - - uint64_t max = cpu_data->_max; - for (;;) { - if (max >= value) { - // Not max - break; - } - - const uint64_t new_max = value; - const uint64_t prev_max = Atomic::cmpxchg(&cpu_data->_max, max, new_max); - if (prev_max == max) { - // Success - break; - } - - // Retry - max = prev_max; - } - - XTracer::tracer()->report_stat_sampler(sampler, value); -} - -void XStatInc(const XStatCounter& counter, uint64_t increment) { - XStatCounterData* const cpu_data = counter.get(); - const uint64_t value = Atomic::add(&cpu_data->_counter, increment); - - XTracer::tracer()->report_stat_counter(counter, increment, value); -} - -void XStatInc(const XStatUnsampledCounter& counter, uint64_t increment) { - XStatCounterData* const cpu_data = counter.get(); - Atomic::add(&cpu_data->_counter, increment); -} - -// -// Stat allocation rate -// -const XStatUnsampledCounter XStatAllocRate::_counter("Allocation Rate"); -TruncatedSeq XStatAllocRate::_samples(XStatAllocRate::sample_hz); -TruncatedSeq XStatAllocRate::_rate(XStatAllocRate::sample_hz); - -const XStatUnsampledCounter& XStatAllocRate::counter() { - return _counter; -} - -uint64_t XStatAllocRate::sample_and_reset() { - const XStatCounterData bytes_per_sample = _counter.collect_and_reset(); - _samples.add(bytes_per_sample._counter); - - const uint64_t bytes_per_second = _samples.sum(); - _rate.add(bytes_per_second); - - return bytes_per_second; -} - -double XStatAllocRate::predict() { - return _rate.predict_next(); -} - -double XStatAllocRate::avg() { - return _rate.avg(); -} - -double XStatAllocRate::sd() { - return _rate.sd(); -} - -// -// Stat thread -// -XStat::XStat() : - _metronome(sample_hz) { - set_name("XStat"); - create_and_start(); -} - -void XStat::sample_and_collect(XStatSamplerHistory* history) const { - // Sample counters - for (const XStatCounter* counter = XStatCounter::first(); counter != nullptr; counter = counter->next()) { - counter->sample_and_reset(); - } - - // Collect samples - for (const XStatSampler* sampler = XStatSampler::first(); sampler != nullptr; sampler = sampler->next()) { - XStatSamplerHistory& sampler_history = history[sampler->id()]; - sampler_history.add(sampler->collect_and_reset()); - } -} - -bool XStat::should_print(LogTargetHandle log) const { - static uint64_t print_at = ZStatisticsInterval; - const uint64_t now = os::elapsedTime(); - - if (now < print_at) { - return false; - } - - print_at = ((now / ZStatisticsInterval) * ZStatisticsInterval) + ZStatisticsInterval; - - return log.is_enabled(); -} - -void XStat::print(LogTargetHandle log, const XStatSamplerHistory* history) const { - // Print - log.print("=== Garbage Collection Statistics ======================================================================================================================="); - log.print(" 
Last 10s Last 10m Last 10h Total"); - log.print(" Avg / Max Avg / Max Avg / Max Avg / Max"); - - for (const XStatSampler* sampler = XStatSampler::first(); sampler != nullptr; sampler = sampler->next()) { - const XStatSamplerHistory& sampler_history = history[sampler->id()]; - const XStatUnitPrinter printer = sampler->printer(); - printer(log, *sampler, sampler_history); - } - - log.print("========================================================================================================================================================="); -} - -void XStat::run_service() { - XStatSamplerHistory* const history = new XStatSamplerHistory[XStatSampler::count()]; - LogTarget(Info, gc, stats) log; - - XStatSampler::sort(); - - // Main loop - while (_metronome.wait_for_tick()) { - sample_and_collect(history); - if (should_print(log)) { - print(log, history); - } - } - - delete [] history; -} - -void XStat::stop_service() { - _metronome.stop(); -} - -// -// Stat table -// -class XStatTablePrinter { -private: - static const size_t _buffer_size = 256; - - const size_t _column0_width; - const size_t _columnN_width; - char _buffer[_buffer_size]; - -public: - class XColumn { - private: - char* const _buffer; - const size_t _position; - const size_t _width; - const size_t _width_next; - - XColumn next() const { - // Insert space between columns - _buffer[_position + _width] = ' '; - return XColumn(_buffer, _position + _width + 1, _width_next, _width_next); - } - - size_t print(size_t position, const char* fmt, va_list va) { - const int res = jio_vsnprintf(_buffer + position, _buffer_size - position, fmt, va); - if (res < 0) { - return 0; - } - - return (size_t)res; - } - - public: - XColumn(char* buffer, size_t position, size_t width, size_t width_next) : - _buffer(buffer), - _position(position), - _width(width), - _width_next(width_next) {} - - XColumn left(const char* fmt, ...) ATTRIBUTE_PRINTF(2, 3) { - va_list va; - - va_start(va, fmt); - const size_t written = print(_position, fmt, va); - va_end(va); - - if (written < _width) { - // Fill empty space - memset(_buffer + _position + written, ' ', _width - written); - } - - return next(); - } - - XColumn right(const char* fmt, ...) ATTRIBUTE_PRINTF(2, 3) { - va_list va; - - va_start(va, fmt); - const size_t written = print(_position, fmt, va); - va_end(va); - - if (written > _width) { - // Line too long - return fill('?'); - } - - if (written < _width) { - // Short line, move all to right - memmove(_buffer + _position + _width - written, _buffer + _position, written); - - // Fill empty space - memset(_buffer + _position, ' ', _width - written); - } - - return next(); - } - - XColumn center(const char* fmt, ...) 
ATTRIBUTE_PRINTF(2, 3) { - va_list va; - - va_start(va, fmt); - const size_t written = print(_position, fmt, va); - va_end(va); - - if (written > _width) { - // Line too long - return fill('?'); - } - - if (written < _width) { - // Short line, move all to center - const size_t start_space = (_width - written) / 2; - const size_t end_space = _width - written - start_space; - memmove(_buffer + _position + start_space, _buffer + _position, written); - - // Fill empty spaces - memset(_buffer + _position, ' ', start_space); - memset(_buffer + _position + start_space + written, ' ', end_space); - } - - return next(); - } - - XColumn fill(char filler = ' ') { - memset(_buffer + _position, filler, _width); - return next(); - } - - const char* end() { - _buffer[_position] = '\0'; - return _buffer; - } - }; - -public: - XStatTablePrinter(size_t column0_width, size_t columnN_width) : - _column0_width(column0_width), - _columnN_width(columnN_width) {} - - XColumn operator()() { - return XColumn(_buffer, 0, _column0_width, _columnN_width); - } -}; - -// -// Stat cycle -// -uint64_t XStatCycle::_nwarmup_cycles = 0; -Ticks XStatCycle::_start_of_last; -Ticks XStatCycle::_end_of_last; -NumberSeq XStatCycle::_serial_time(0.7 /* alpha */); -NumberSeq XStatCycle::_parallelizable_time(0.7 /* alpha */); -uint XStatCycle::_last_active_workers = 0; - -void XStatCycle::at_start() { - _start_of_last = Ticks::now(); -} - -void XStatCycle::at_end(GCCause::Cause cause, uint active_workers) { - _end_of_last = Ticks::now(); - - if (cause == GCCause::_z_warmup) { - _nwarmup_cycles++; - } - - _last_active_workers = active_workers; - - // Calculate serial and parallelizable GC cycle times - const double duration = (_end_of_last - _start_of_last).seconds(); - const double workers_duration = XStatWorkers::get_and_reset_duration(); - const double serial_time = duration - workers_duration; - const double parallelizable_time = workers_duration * active_workers; - _serial_time.add(serial_time); - _parallelizable_time.add(parallelizable_time); -} - -bool XStatCycle::is_warm() { - return _nwarmup_cycles >= 3; -} - -uint64_t XStatCycle::nwarmup_cycles() { - return _nwarmup_cycles; -} - -bool XStatCycle::is_time_trustable() { - // The times are considered trustable if we - // have completed at least one warmup cycle. 
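// A quick worked example of the serial / parallelizable split computed in
// XStatCycle::at_end() above (illustrative numbers, not from this change):
// if a cycle took 100 ms of wall time, the workers were running for 60 ms of
// it, and 4 workers were active, then
//
//   serial_time         = 100 ms - 60 ms = 40 ms
//   parallelizable_time = 60 ms * 4      = 240 ms of worker CPU time
//
// Both values feed the decaying NumberSeq averages (alpha = 0.7) kept in
// _serial_time and _parallelizable_time.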
- return _nwarmup_cycles > 0; -} - -const AbsSeq& XStatCycle::serial_time() { - return _serial_time; -} - -const AbsSeq& XStatCycle::parallelizable_time() { - return _parallelizable_time; -} - -uint XStatCycle::last_active_workers() { - return _last_active_workers; -} - -double XStatCycle::time_since_last() { - if (_end_of_last.value() == 0) { - // No end recorded yet, return time since VM start - return os::elapsedTime(); - } - - const Ticks now = Ticks::now(); - const Tickspan time_since_last = now - _end_of_last; - return time_since_last.seconds(); -} - -// -// Stat workers -// -Ticks XStatWorkers::_start_of_last; -Tickspan XStatWorkers::_accumulated_duration; - -void XStatWorkers::at_start() { - _start_of_last = Ticks::now(); -} - -void XStatWorkers::at_end() { - const Ticks now = Ticks::now(); - const Tickspan duration = now - _start_of_last; - _accumulated_duration += duration; -} - -double XStatWorkers::get_and_reset_duration() { - const double duration = _accumulated_duration.seconds(); - const Ticks now = Ticks::now(); - _accumulated_duration = now - now; - return duration; -} - -// -// Stat load -// -void XStatLoad::print() { - double loadavg[3] = {}; - os::loadavg(loadavg, ARRAY_SIZE(loadavg)); - log_info(gc, load)("Load: %.2f/%.2f/%.2f", loadavg[0], loadavg[1], loadavg[2]); -} - -// -// Stat mark -// -size_t XStatMark::_nstripes; -size_t XStatMark::_nproactiveflush; -size_t XStatMark::_nterminateflush; -size_t XStatMark::_ntrycomplete; -size_t XStatMark::_ncontinue; -size_t XStatMark::_mark_stack_usage; - -void XStatMark::set_at_mark_start(size_t nstripes) { - _nstripes = nstripes; -} - -void XStatMark::set_at_mark_end(size_t nproactiveflush, - size_t nterminateflush, - size_t ntrycomplete, - size_t ncontinue) { - _nproactiveflush = nproactiveflush; - _nterminateflush = nterminateflush; - _ntrycomplete = ntrycomplete; - _ncontinue = ncontinue; -} - -void XStatMark::set_at_mark_free(size_t mark_stack_usage) { - _mark_stack_usage = mark_stack_usage; -} - -void XStatMark::print() { - log_info(gc, marking)("Mark: " - SIZE_FORMAT " stripe(s), " - SIZE_FORMAT " proactive flush(es), " - SIZE_FORMAT " terminate flush(es), " - SIZE_FORMAT " completion(s), " - SIZE_FORMAT " continuation(s) ", - _nstripes, - _nproactiveflush, - _nterminateflush, - _ntrycomplete, - _ncontinue); - - log_info(gc, marking)("Mark Stack Usage: " SIZE_FORMAT "M", _mark_stack_usage / M); -} - -// -// Stat relocation -// -XRelocationSetSelectorStats XStatRelocation::_selector_stats; -size_t XStatRelocation::_forwarding_usage; -size_t XStatRelocation::_small_in_place_count; -size_t XStatRelocation::_medium_in_place_count; - -void XStatRelocation::set_at_select_relocation_set(const XRelocationSetSelectorStats& selector_stats) { - _selector_stats = selector_stats; -} - -void XStatRelocation::set_at_install_relocation_set(size_t forwarding_usage) { - _forwarding_usage = forwarding_usage; -} - -void XStatRelocation::set_at_relocate_end(size_t small_in_place_count, size_t medium_in_place_count) { - _small_in_place_count = small_in_place_count; - _medium_in_place_count = medium_in_place_count; -} - -void XStatRelocation::print(const char* name, - const XRelocationSetSelectorGroupStats& selector_group, - size_t in_place_count) { - log_info(gc, reloc)("%s Pages: " SIZE_FORMAT " / " SIZE_FORMAT "M, Empty: " SIZE_FORMAT "M, " - "Relocated: " SIZE_FORMAT "M, In-Place: " SIZE_FORMAT, - name, - selector_group.npages_candidates(), - selector_group.total() / M, - selector_group.empty() / M, - selector_group.relocate() / M, - 
in_place_count); -} - -void XStatRelocation::print() { - print("Small", _selector_stats.small(), _small_in_place_count); - if (XPageSizeMedium != 0) { - print("Medium", _selector_stats.medium(), _medium_in_place_count); - } - print("Large", _selector_stats.large(), 0 /* in_place_count */); - - log_info(gc, reloc)("Forwarding Usage: " SIZE_FORMAT "M", _forwarding_usage / M); -} - -// -// Stat nmethods -// -void XStatNMethods::print() { - log_info(gc, nmethod)("NMethods: " SIZE_FORMAT " registered, " SIZE_FORMAT " unregistered", - XNMethodTable::registered_nmethods(), - XNMethodTable::unregistered_nmethods()); -} - -// -// Stat metaspace -// -void XStatMetaspace::print() { - MetaspaceCombinedStats stats = MetaspaceUtils::get_combined_statistics(); - log_info(gc, metaspace)("Metaspace: " - SIZE_FORMAT "M used, " - SIZE_FORMAT "M committed, " SIZE_FORMAT "M reserved", - stats.used() / M, - stats.committed() / M, - stats.reserved() / M); -} - -// -// Stat references -// -XStatReferences::XCount XStatReferences::_soft; -XStatReferences::XCount XStatReferences::_weak; -XStatReferences::XCount XStatReferences::_final; -XStatReferences::XCount XStatReferences::_phantom; - -void XStatReferences::set(XCount* count, size_t encountered, size_t discovered, size_t enqueued) { - count->encountered = encountered; - count->discovered = discovered; - count->enqueued = enqueued; -} - -void XStatReferences::set_soft(size_t encountered, size_t discovered, size_t enqueued) { - set(&_soft, encountered, discovered, enqueued); -} - -void XStatReferences::set_weak(size_t encountered, size_t discovered, size_t enqueued) { - set(&_weak, encountered, discovered, enqueued); -} - -void XStatReferences::set_final(size_t encountered, size_t discovered, size_t enqueued) { - set(&_final, encountered, discovered, enqueued); -} - -void XStatReferences::set_phantom(size_t encountered, size_t discovered, size_t enqueued) { - set(&_phantom, encountered, discovered, enqueued); -} - -void XStatReferences::print(const char* name, const XStatReferences::XCount& ref) { - log_info(gc, ref)("%s: " - SIZE_FORMAT " encountered, " - SIZE_FORMAT " discovered, " - SIZE_FORMAT " enqueued", - name, - ref.encountered, - ref.discovered, - ref.enqueued); -} - -void XStatReferences::print() { - print("Soft", _soft); - print("Weak", _weak); - print("Final", _final); - print("Phantom", _phantom); -} - -// -// Stat heap -// -XStatHeap::XAtInitialize XStatHeap::_at_initialize; -XStatHeap::XAtMarkStart XStatHeap::_at_mark_start; -XStatHeap::XAtMarkEnd XStatHeap::_at_mark_end; -XStatHeap::XAtRelocateStart XStatHeap::_at_relocate_start; -XStatHeap::XAtRelocateEnd XStatHeap::_at_relocate_end; - -size_t XStatHeap::capacity_high() { - return MAX4(_at_mark_start.capacity, - _at_mark_end.capacity, - _at_relocate_start.capacity, - _at_relocate_end.capacity); -} - -size_t XStatHeap::capacity_low() { - return MIN4(_at_mark_start.capacity, - _at_mark_end.capacity, - _at_relocate_start.capacity, - _at_relocate_end.capacity); -} - -size_t XStatHeap::free(size_t used) { - return _at_initialize.max_capacity - used; -} - -size_t XStatHeap::allocated(size_t used, size_t reclaimed) { - // The amount of allocated memory between point A and B is used(B) - used(A). - // However, we might also have reclaimed memory between point A and B. This - // means the current amount of used memory must be incremented by the amount - // reclaimed, so that used(B) represents the amount of used memory we would - // have had if we had not reclaimed anything. 
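// Worked example of the accounting described above (illustrative numbers,
// not from this change): if used was 500 MB at mark start (point A), used is
// now 450 MB (point B), and 200 MB was reclaimed in between, then the naive
// used(B) - used(A) would report -50 MB of allocation. Adding the reclaimed
// memory back first gives
//
//   allocated = (450 MB + 200 MB) - 500 MB = 150 MB
//
// which is what the mutators actually allocated during the interval.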
- return (used + reclaimed) - _at_mark_start.used; -} - -size_t XStatHeap::garbage(size_t reclaimed) { - return _at_mark_end.garbage - reclaimed; -} - -void XStatHeap::set_at_initialize(const XPageAllocatorStats& stats) { - _at_initialize.min_capacity = stats.min_capacity(); - _at_initialize.max_capacity = stats.max_capacity(); -} - -void XStatHeap::set_at_mark_start(const XPageAllocatorStats& stats) { - _at_mark_start.soft_max_capacity = stats.soft_max_capacity(); - _at_mark_start.capacity = stats.capacity(); - _at_mark_start.free = free(stats.used()); - _at_mark_start.used = stats.used(); -} - -void XStatHeap::set_at_mark_end(const XPageAllocatorStats& stats) { - _at_mark_end.capacity = stats.capacity(); - _at_mark_end.free = free(stats.used()); - _at_mark_end.used = stats.used(); - _at_mark_end.allocated = allocated(stats.used(), 0 /* reclaimed */); -} - -void XStatHeap::set_at_select_relocation_set(const XRelocationSetSelectorStats& stats) { - const size_t live = stats.small().live() + stats.medium().live() + stats.large().live(); - _at_mark_end.live = live; - _at_mark_end.garbage = _at_mark_start.used - live; -} - -void XStatHeap::set_at_relocate_start(const XPageAllocatorStats& stats) { - _at_relocate_start.capacity = stats.capacity(); - _at_relocate_start.free = free(stats.used()); - _at_relocate_start.used = stats.used(); - _at_relocate_start.allocated = allocated(stats.used(), stats.reclaimed()); - _at_relocate_start.garbage = garbage(stats.reclaimed()); - _at_relocate_start.reclaimed = stats.reclaimed(); -} - -void XStatHeap::set_at_relocate_end(const XPageAllocatorStats& stats, size_t non_worker_relocated) { - const size_t reclaimed = stats.reclaimed() - MIN2(non_worker_relocated, stats.reclaimed()); - - _at_relocate_end.capacity = stats.capacity(); - _at_relocate_end.capacity_high = capacity_high(); - _at_relocate_end.capacity_low = capacity_low(); - _at_relocate_end.free = free(stats.used()); - _at_relocate_end.free_high = free(stats.used_low()); - _at_relocate_end.free_low = free(stats.used_high()); - _at_relocate_end.used = stats.used(); - _at_relocate_end.used_high = stats.used_high(); - _at_relocate_end.used_low = stats.used_low(); - _at_relocate_end.allocated = allocated(stats.used(), reclaimed); - _at_relocate_end.garbage = garbage(reclaimed); - _at_relocate_end.reclaimed = reclaimed; -} - -size_t XStatHeap::max_capacity() { - return _at_initialize.max_capacity; -} - -size_t XStatHeap::used_at_mark_start() { - return _at_mark_start.used; -} - -size_t XStatHeap::used_at_relocate_end() { - return _at_relocate_end.used; -} - -void XStatHeap::print() { - log_info(gc, heap)("Min Capacity: " - XSIZE_FMT, XSIZE_ARGS(_at_initialize.min_capacity)); - log_info(gc, heap)("Max Capacity: " - XSIZE_FMT, XSIZE_ARGS(_at_initialize.max_capacity)); - log_info(gc, heap)("Soft Max Capacity: " - XSIZE_FMT, XSIZE_ARGS(_at_mark_start.soft_max_capacity)); - - XStatTablePrinter table(10, 18); - log_info(gc, heap)("%s", table() - .fill() - .center("Mark Start") - .center("Mark End") - .center("Relocate Start") - .center("Relocate End") - .center("High") - .center("Low") - .end()); - log_info(gc, heap)("%s", table() - .right("Capacity:") - .left(XTABLE_ARGS(_at_mark_start.capacity)) - .left(XTABLE_ARGS(_at_mark_end.capacity)) - .left(XTABLE_ARGS(_at_relocate_start.capacity)) - .left(XTABLE_ARGS(_at_relocate_end.capacity)) - .left(XTABLE_ARGS(_at_relocate_end.capacity_high)) - .left(XTABLE_ARGS(_at_relocate_end.capacity_low)) - .end()); - log_info(gc, heap)("%s", table() - .right("Free:") - 
.left(XTABLE_ARGS(_at_mark_start.free)) - .left(XTABLE_ARGS(_at_mark_end.free)) - .left(XTABLE_ARGS(_at_relocate_start.free)) - .left(XTABLE_ARGS(_at_relocate_end.free)) - .left(XTABLE_ARGS(_at_relocate_end.free_high)) - .left(XTABLE_ARGS(_at_relocate_end.free_low)) - .end()); - log_info(gc, heap)("%s", table() - .right("Used:") - .left(XTABLE_ARGS(_at_mark_start.used)) - .left(XTABLE_ARGS(_at_mark_end.used)) - .left(XTABLE_ARGS(_at_relocate_start.used)) - .left(XTABLE_ARGS(_at_relocate_end.used)) - .left(XTABLE_ARGS(_at_relocate_end.used_high)) - .left(XTABLE_ARGS(_at_relocate_end.used_low)) - .end()); - log_info(gc, heap)("%s", table() - .right("Live:") - .left(XTABLE_ARGS_NA) - .left(XTABLE_ARGS(_at_mark_end.live)) - .left(XTABLE_ARGS(_at_mark_end.live /* Same as at mark end */)) - .left(XTABLE_ARGS(_at_mark_end.live /* Same as at mark end */)) - .left(XTABLE_ARGS_NA) - .left(XTABLE_ARGS_NA) - .end()); - log_info(gc, heap)("%s", table() - .right("Allocated:") - .left(XTABLE_ARGS_NA) - .left(XTABLE_ARGS(_at_mark_end.allocated)) - .left(XTABLE_ARGS(_at_relocate_start.allocated)) - .left(XTABLE_ARGS(_at_relocate_end.allocated)) - .left(XTABLE_ARGS_NA) - .left(XTABLE_ARGS_NA) - .end()); - log_info(gc, heap)("%s", table() - .right("Garbage:") - .left(XTABLE_ARGS_NA) - .left(XTABLE_ARGS(_at_mark_end.garbage)) - .left(XTABLE_ARGS(_at_relocate_start.garbage)) - .left(XTABLE_ARGS(_at_relocate_end.garbage)) - .left(XTABLE_ARGS_NA) - .left(XTABLE_ARGS_NA) - .end()); - log_info(gc, heap)("%s", table() - .right("Reclaimed:") - .left(XTABLE_ARGS_NA) - .left(XTABLE_ARGS_NA) - .left(XTABLE_ARGS(_at_relocate_start.reclaimed)) - .left(XTABLE_ARGS(_at_relocate_end.reclaimed)) - .left(XTABLE_ARGS_NA) - .left(XTABLE_ARGS_NA) - .end()); -} diff --git a/src/hotspot/share/gc/x/xStat.hpp b/src/hotspot/share/gc/x/xStat.hpp deleted file mode 100644 index 4983e5fcab69f..0000000000000 --- a/src/hotspot/share/gc/x/xStat.hpp +++ /dev/null @@ -1,578 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
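Stepping back over the xStat.cpp hunk above: XStatSample adds into a per-CPU slot and maintains a running maximum with a cmpxchg retry loop, and collect_and_reset() later drains every slot with xchg. A small standalone sketch of that update-and-drain pattern, using std::atomic and an invented SamplerSlot type in place of the HotSpot per-CPU machinery (not part of this patch):

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <cstdio>

struct SamplerSlot {
    std::atomic<uint64_t> nsamples{0};
    std::atomic<uint64_t> sum{0};
    std::atomic<uint64_t> max{0};
};

// Record one sample: unconditional adds, plus a compare-exchange loop that
// only stores if the new value is still larger than the published maximum.
void sample(SamplerSlot& slot, uint64_t value) {
    slot.nsamples.fetch_add(1, std::memory_order_relaxed);
    slot.sum.fetch_add(value, std::memory_order_relaxed);

    uint64_t cur = slot.max.load(std::memory_order_relaxed);
    while (cur < value &&
           !slot.max.compare_exchange_weak(cur, value, std::memory_order_relaxed)) {
        // cur was reloaded by compare_exchange_weak; retry while still larger.
    }
}

// Drain and reset a set of slots, mirroring collect_and_reset().
void collect_and_reset(SamplerSlot* slots, int nslots) {
    uint64_t nsamples = 0, sum = 0, max = 0;
    for (int i = 0; i < nslots; i++) {
        nsamples += slots[i].nsamples.exchange(0, std::memory_order_relaxed);
        sum      += slots[i].sum.exchange(0, std::memory_order_relaxed);
        max       = std::max(max, slots[i].max.exchange(0, std::memory_order_relaxed));
    }
    printf("n=%llu sum=%llu max=%llu\n",
           (unsigned long long)nsamples, (unsigned long long)sum, (unsigned long long)max);
}

int main() {
    SamplerSlot slots[2];
    sample(slots[0], 5);
    sample(slots[1], 9);
    sample(slots[0], 3);
    collect_and_reset(slots, 2);   // prints n=3 sum=17 max=9
    return 0;
}

The retry loop only stores when the new value is still larger than the current maximum, so concurrent samples can never shrink it.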
- */ - -#ifndef SHARE_GC_X_XSTAT_HPP -#define SHARE_GC_X_XSTAT_HPP - -#include "gc/shared/concurrentGCThread.hpp" -#include "gc/shared/gcCause.hpp" -#include "gc/shared/gcTimer.hpp" -#include "gc/x/xMetronome.hpp" -#include "logging/logHandle.hpp" -#include "memory/allocation.hpp" -#include "utilities/globalDefinitions.hpp" -#include "utilities/numberSeq.hpp" -#include "utilities/ticks.hpp" - -class XPage; -class XPageAllocatorStats; -class XRelocationSetSelectorGroupStats; -class XRelocationSetSelectorStats; -class XStatSampler; -class XStatSamplerHistory; -struct XStatCounterData; -struct XStatSamplerData; - -// -// Stat unit printers -// -typedef void (*XStatUnitPrinter)(LogTargetHandle log, const XStatSampler&, const XStatSamplerHistory&); - -void XStatUnitTime(LogTargetHandle log, const XStatSampler& sampler, const XStatSamplerHistory& history); -void XStatUnitBytes(LogTargetHandle log, const XStatSampler& sampler, const XStatSamplerHistory& history); -void XStatUnitThreads(LogTargetHandle log, const XStatSampler& sampler, const XStatSamplerHistory& history); -void XStatUnitBytesPerSecond(LogTargetHandle log, const XStatSampler& sampler, const XStatSamplerHistory& history); -void XStatUnitOpsPerSecond(LogTargetHandle log, const XStatSampler& sampler, const XStatSamplerHistory& history); - -// -// Stat value -// -class XStatValue { -private: - static uintptr_t _base; - static uint32_t _cpu_offset; - - const char* const _group; - const char* const _name; - const uint32_t _id; - const uint32_t _offset; - -protected: - XStatValue(const char* group, - const char* name, - uint32_t id, - uint32_t size); - - template T* get_cpu_local(uint32_t cpu) const; - -public: - static void initialize(); - - const char* group() const; - const char* name() const; - uint32_t id() const; -}; - -// -// Stat iterable value -// -template -class XStatIterableValue : public XStatValue { -private: - static uint32_t _count; - static T* _first; - - T* _next; - - T* insert() const; - -protected: - XStatIterableValue(const char* group, - const char* name, - uint32_t size); - -public: - static void sort(); - - static uint32_t count() { - return _count; - } - - static T* first() { - return _first; - } - - T* next() const { - return _next; - } -}; - -template uint32_t XStatIterableValue::_count = 0; -template T* XStatIterableValue::_first = nullptr; - -// -// Stat sampler -// -class XStatSampler : public XStatIterableValue { -private: - const XStatUnitPrinter _printer; - -public: - XStatSampler(const char* group, - const char* name, - XStatUnitPrinter printer); - - XStatSamplerData* get() const; - XStatSamplerData collect_and_reset() const; - - XStatUnitPrinter printer() const; -}; - -// -// Stat counter -// -class XStatCounter : public XStatIterableValue { -private: - const XStatSampler _sampler; - -public: - XStatCounter(const char* group, - const char* name, - XStatUnitPrinter printer); - - XStatCounterData* get() const; - void sample_and_reset() const; -}; - -// -// Stat unsampled counter -// -class XStatUnsampledCounter : public XStatIterableValue { -public: - XStatUnsampledCounter(const char* name); - - XStatCounterData* get() const; - XStatCounterData collect_and_reset() const; -}; - -// -// Stat MMU (Minimum Mutator Utilization) -// -class XStatMMUPause { -private: - double _start; - double _end; - -public: - XStatMMUPause(); - XStatMMUPause(const Ticks& start, const Ticks& end); - - double end() const; - double overlap(double start, double end) const; -}; - -class XStatMMU { -private: - static size_t _next; - 
static size_t _npauses; - static XStatMMUPause _pauses[200]; // Record the last 200 pauses - - static double _mmu_2ms; - static double _mmu_5ms; - static double _mmu_10ms; - static double _mmu_20ms; - static double _mmu_50ms; - static double _mmu_100ms; - - static const XStatMMUPause& pause(size_t index); - static double calculate_mmu(double time_slice); - -public: - static void register_pause(const Ticks& start, const Ticks& end); - - static void print(); -}; - -// -// Stat phases -// -class XStatPhase { -private: - static ConcurrentGCTimer _timer; - -protected: - const XStatSampler _sampler; - - XStatPhase(const char* group, const char* name); - - void log_start(LogTargetHandle log, bool thread = false) const; - void log_end(LogTargetHandle log, const Tickspan& duration, bool thread = false) const; - -public: - static ConcurrentGCTimer* timer(); - - const char* name() const; - - virtual void register_start(const Ticks& start) const = 0; - virtual void register_end(const Ticks& start, const Ticks& end) const = 0; -}; - -class XStatPhaseCycle : public XStatPhase { -public: - XStatPhaseCycle(const char* name); - - virtual void register_start(const Ticks& start) const; - virtual void register_end(const Ticks& start, const Ticks& end) const; -}; - -class XStatPhasePause : public XStatPhase { -private: - static Tickspan _max; // Max pause time - -public: - XStatPhasePause(const char* name); - - static const Tickspan& max(); - - virtual void register_start(const Ticks& start) const; - virtual void register_end(const Ticks& start, const Ticks& end) const; -}; - -class XStatPhaseConcurrent : public XStatPhase { -public: - XStatPhaseConcurrent(const char* name); - - virtual void register_start(const Ticks& start) const; - virtual void register_end(const Ticks& start, const Ticks& end) const; -}; - -class XStatSubPhase : public XStatPhase { -public: - XStatSubPhase(const char* name); - - virtual void register_start(const Ticks& start) const; - virtual void register_end(const Ticks& start, const Ticks& end) const; -}; - -class XStatCriticalPhase : public XStatPhase { -private: - const XStatCounter _counter; - const bool _verbose; - -public: - XStatCriticalPhase(const char* name, bool verbose = true); - - virtual void register_start(const Ticks& start) const; - virtual void register_end(const Ticks& start, const Ticks& end) const; -}; - -// -// Stat timer -// -class XStatTimerDisable : public StackObj { -private: - static THREAD_LOCAL uint32_t _active; - -public: - XStatTimerDisable() { - _active++; - } - - ~XStatTimerDisable() { - _active--; - } - - static bool is_active() { - return _active > 0; - } -}; - -class XStatTimer : public StackObj { -private: - const bool _enabled; - const XStatPhase& _phase; - const Ticks _start; - -public: - XStatTimer(const XStatPhase& phase) : - _enabled(!XStatTimerDisable::is_active()), - _phase(phase), - _start(Ticks::now()) { - if (_enabled) { - _phase.register_start(_start); - } - } - - ~XStatTimer() { - if (_enabled) { - const Ticks end = Ticks::now(); - _phase.register_end(_start, end); - } - } -}; - -// -// Stat sample/increment -// -void XStatSample(const XStatSampler& sampler, uint64_t value); -void XStatInc(const XStatCounter& counter, uint64_t increment = 1); -void XStatInc(const XStatUnsampledCounter& counter, uint64_t increment = 1); - -// -// Stat allocation rate -// -class XStatAllocRate : public AllStatic { -private: - static const XStatUnsampledCounter _counter; - static TruncatedSeq _samples; - static TruncatedSeq _rate; - -public: - static const 
uint64_t sample_hz = 10; - - static const XStatUnsampledCounter& counter(); - static uint64_t sample_and_reset(); - - static double predict(); - static double avg(); - static double sd(); -}; - -// -// Stat thread -// -class XStat : public ConcurrentGCThread { -private: - static const uint64_t sample_hz = 1; - - XMetronome _metronome; - - void sample_and_collect(XStatSamplerHistory* history) const; - bool should_print(LogTargetHandle log) const; - void print(LogTargetHandle log, const XStatSamplerHistory* history) const; - -protected: - virtual void run_service(); - virtual void stop_service(); - -public: - XStat(); -}; - -// -// Stat cycle -// -class XStatCycle : public AllStatic { -private: - static uint64_t _nwarmup_cycles; - static Ticks _start_of_last; - static Ticks _end_of_last; - static NumberSeq _serial_time; - static NumberSeq _parallelizable_time; - static uint _last_active_workers; - -public: - static void at_start(); - static void at_end(GCCause::Cause cause, uint active_workers); - - static bool is_warm(); - static uint64_t nwarmup_cycles(); - - static bool is_time_trustable(); - static const AbsSeq& serial_time(); - static const AbsSeq& parallelizable_time(); - - static uint last_active_workers(); - - static double time_since_last(); -}; - -// -// Stat workers -// -class XStatWorkers : public AllStatic { -private: - static Ticks _start_of_last; - static Tickspan _accumulated_duration; - -public: - static void at_start(); - static void at_end(); - - static double get_and_reset_duration(); -}; - -// -// Stat load -// -class XStatLoad : public AllStatic { -public: - static void print(); -}; - -// -// Stat mark -// -class XStatMark : public AllStatic { -private: - static size_t _nstripes; - static size_t _nproactiveflush; - static size_t _nterminateflush; - static size_t _ntrycomplete; - static size_t _ncontinue; - static size_t _mark_stack_usage; - -public: - static void set_at_mark_start(size_t nstripes); - static void set_at_mark_end(size_t nproactiveflush, - size_t nterminateflush, - size_t ntrycomplete, - size_t ncontinue); - static void set_at_mark_free(size_t mark_stack_usage); - - static void print(); -}; - -// -// Stat relocation -// -class XStatRelocation : public AllStatic { -private: - static XRelocationSetSelectorStats _selector_stats; - static size_t _forwarding_usage; - static size_t _small_in_place_count; - static size_t _medium_in_place_count; - - static void print(const char* name, - const XRelocationSetSelectorGroupStats& selector_group, - size_t in_place_count); - -public: - static void set_at_select_relocation_set(const XRelocationSetSelectorStats& selector_stats); - static void set_at_install_relocation_set(size_t forwarding_usage); - static void set_at_relocate_end(size_t small_in_place_count, size_t medium_in_place_count); - - static void print(); -}; - -// -// Stat nmethods -// -class XStatNMethods : public AllStatic { -public: - static void print(); -}; - -// -// Stat metaspace -// -class XStatMetaspace : public AllStatic { -public: - static void print(); -}; - -// -// Stat references -// -class XStatReferences : public AllStatic { -private: - static struct XCount { - size_t encountered; - size_t discovered; - size_t enqueued; - } _soft, _weak, _final, _phantom; - - static void set(XCount* count, size_t encountered, size_t discovered, size_t enqueued); - static void print(const char* name, const XCount& ref); - -public: - static void set_soft(size_t encountered, size_t discovered, size_t enqueued); - static void set_weak(size_t encountered, size_t 
discovered, size_t enqueued); - static void set_final(size_t encountered, size_t discovered, size_t enqueued); - static void set_phantom(size_t encountered, size_t discovered, size_t enqueued); - - static void print(); -}; - -// -// Stat heap -// -class XStatHeap : public AllStatic { -private: - static struct XAtInitialize { - size_t min_capacity; - size_t max_capacity; - } _at_initialize; - - static struct XAtMarkStart { - size_t soft_max_capacity; - size_t capacity; - size_t free; - size_t used; - } _at_mark_start; - - static struct XAtMarkEnd { - size_t capacity; - size_t free; - size_t used; - size_t live; - size_t allocated; - size_t garbage; - } _at_mark_end; - - static struct XAtRelocateStart { - size_t capacity; - size_t free; - size_t used; - size_t allocated; - size_t garbage; - size_t reclaimed; - } _at_relocate_start; - - static struct XAtRelocateEnd { - size_t capacity; - size_t capacity_high; - size_t capacity_low; - size_t free; - size_t free_high; - size_t free_low; - size_t used; - size_t used_high; - size_t used_low; - size_t allocated; - size_t garbage; - size_t reclaimed; - } _at_relocate_end; - - static size_t capacity_high(); - static size_t capacity_low(); - static size_t free(size_t used); - static size_t allocated(size_t used, size_t reclaimed); - static size_t garbage(size_t reclaimed); - -public: - static void set_at_initialize(const XPageAllocatorStats& stats); - static void set_at_mark_start(const XPageAllocatorStats& stats); - static void set_at_mark_end(const XPageAllocatorStats& stats); - static void set_at_select_relocation_set(const XRelocationSetSelectorStats& stats); - static void set_at_relocate_start(const XPageAllocatorStats& stats); - static void set_at_relocate_end(const XPageAllocatorStats& stats, size_t non_worker_relocated); - - static size_t max_capacity(); - static size_t used_at_mark_start(); - static size_t used_at_relocate_end(); - - static void print(); -}; - -#endif // SHARE_GC_X_XSTAT_HPP diff --git a/src/hotspot/share/gc/x/xTask.cpp b/src/hotspot/share/gc/x/xTask.cpp deleted file mode 100644 index 25f6d12f33dbb..0000000000000 --- a/src/hotspot/share/gc/x/xTask.cpp +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/x/xTask.hpp" -#include "gc/x/xThread.hpp" - -XTask::Task::Task(XTask* task, const char* name) : - WorkerTask(name), - _task(task) {} - -void XTask::Task::work(uint worker_id) { - XThread::set_worker_id(worker_id); - _task->work(); - XThread::clear_worker_id(); -} - -XTask::XTask(const char* name) : - _worker_task(this, name) {} - -const char* XTask::name() const { - return _worker_task.name(); -} - -WorkerTask* XTask::worker_task() { - return &_worker_task; -} diff --git a/src/hotspot/share/gc/x/xTask.hpp b/src/hotspot/share/gc/x/xTask.hpp deleted file mode 100644 index 08adaed83e596..0000000000000 --- a/src/hotspot/share/gc/x/xTask.hpp +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XTASK_HPP -#define SHARE_GC_X_XTASK_HPP - -#include "gc/shared/workerThread.hpp" -#include "memory/allocation.hpp" - -class XTask : public StackObj { -private: - class Task : public WorkerTask { - private: - XTask* const _task; - - public: - Task(XTask* task, const char* name); - - virtual void work(uint worker_id); - }; - - Task _worker_task; - -public: - XTask(const char* name); - - const char* name() const; - WorkerTask* worker_task(); - - virtual void work() = 0; -}; - -#endif // SHARE_GC_X_XTASK_HPP diff --git a/src/hotspot/share/gc/x/xThread.cpp b/src/hotspot/share/gc/x/xThread.cpp deleted file mode 100644 index fb9785690cff3..0000000000000 --- a/src/hotspot/share/gc/x/xThread.cpp +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xThread.inline.hpp" -#include "runtime/javaThread.hpp" -#include "runtime/nonJavaThread.hpp" -#include "utilities/debug.hpp" - -THREAD_LOCAL bool XThread::_initialized; -THREAD_LOCAL uintptr_t XThread::_id; -THREAD_LOCAL bool XThread::_is_vm; -THREAD_LOCAL bool XThread::_is_java; -THREAD_LOCAL bool XThread::_is_worker; -THREAD_LOCAL uint XThread::_worker_id; - -void XThread::initialize() { - assert(!_initialized, "Already initialized"); - const Thread* const thread = Thread::current(); - _initialized = true; - _id = (uintptr_t)thread; - _is_vm = thread->is_VM_thread(); - _is_java = thread->is_Java_thread(); - _is_worker = false; - _worker_id = (uint)-1; -} - -const char* XThread::name() { - const Thread* const thread = Thread::current(); - if (thread->is_Named_thread()) { - const NamedThread* const named = (const NamedThread*)thread; - return named->name(); - } else if (thread->is_Java_thread()) { - return "Java"; - } - - return "Unknown"; -} - -void XThread::set_worker() { - ensure_initialized(); - _is_worker = true; -} - -bool XThread::has_worker_id() { - return _initialized && - _is_worker && - _worker_id != (uint)-1; -} - -void XThread::set_worker_id(uint worker_id) { - ensure_initialized(); - assert(!has_worker_id(), "Worker id already initialized"); - _worker_id = worker_id; -} - -void XThread::clear_worker_id() { - assert(has_worker_id(), "Worker id not initialized"); - _worker_id = (uint)-1; -} diff --git a/src/hotspot/share/gc/x/xThread.hpp b/src/hotspot/share/gc/x/xThread.hpp deleted file mode 100644 index 24df6ce1ca24d..0000000000000 --- a/src/hotspot/share/gc/x/xThread.hpp +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
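Note on the removed xThread.cpp above: it tracks per-thread GC identity (worker id, VM/Java flags) in lazily initialized thread-local statics. The following is a minimal standalone sketch of that pattern only; GCThreadLocalId and INVALID_WORKER are illustrative names, not HotSpot API.

#include <cassert>
#include <cstdint>

// Sketch of a lazily initialized thread-local identity record, mirroring the
// shape of the removed XThread class. Names are illustrative only.
class GCThreadLocalId {
private:
  static constexpr uint32_t INVALID_WORKER = UINT32_MAX;

  static thread_local bool     _initialized;
  static thread_local uint32_t _worker_id;

  static void ensure_initialized() {
    if (!_initialized) {
      _initialized = true;
      _worker_id = INVALID_WORKER;
    }
  }

public:
  static void set_worker_id(uint32_t id) {
    ensure_initialized();
    assert(_worker_id == INVALID_WORKER && "worker id already set");
    _worker_id = id;
  }

  static void clear_worker_id() {
    assert(_initialized && _worker_id != INVALID_WORKER);
    _worker_id = INVALID_WORKER;
  }

  static bool has_worker_id() {
    ensure_initialized();
    return _worker_id != INVALID_WORKER;
  }

  static uint32_t worker_id() {
    assert(has_worker_id() && "worker id not set");
    return _worker_id;
  }
};

thread_local bool     GCThreadLocalId::_initialized = false;
thread_local uint32_t GCThreadLocalId::_worker_id   = UINT32_MAX;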
- */ - -#ifndef SHARE_GC_X_XTHREAD_HPP -#define SHARE_GC_X_XTHREAD_HPP - -#include "memory/allStatic.hpp" -#include "utilities/globalDefinitions.hpp" - -class XThread : public AllStatic { - friend class XTask; - friend class XWorkersInitializeTask; - friend class XRuntimeWorkersInitializeTask; - -private: - static THREAD_LOCAL bool _initialized; - static THREAD_LOCAL uintptr_t _id; - static THREAD_LOCAL bool _is_vm; - static THREAD_LOCAL bool _is_java; - static THREAD_LOCAL bool _is_worker; - static THREAD_LOCAL uint _worker_id; - - static void initialize(); - static void ensure_initialized(); - - static void set_worker(); - - static bool has_worker_id(); - static void set_worker_id(uint worker_id); - static void clear_worker_id(); - -public: - static const char* name(); - static uintptr_t id(); - static bool is_vm(); - static bool is_java(); - static bool is_worker(); - static uint worker_id(); -}; - -#endif // SHARE_GC_X_XTHREAD_HPP diff --git a/src/hotspot/share/gc/x/xThread.inline.hpp b/src/hotspot/share/gc/x/xThread.inline.hpp deleted file mode 100644 index eb6ff63e5f7be..0000000000000 --- a/src/hotspot/share/gc/x/xThread.inline.hpp +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XTHREAD_INLINE_HPP -#define SHARE_GC_X_XTHREAD_INLINE_HPP - -#include "gc/x/xThread.hpp" - -#include "utilities/debug.hpp" - -inline void XThread::ensure_initialized() { - if (!_initialized) { - initialize(); - } -} - -inline uintptr_t XThread::id() { - ensure_initialized(); - return _id; -} - -inline bool XThread::is_vm() { - ensure_initialized(); - return _is_vm; -} - -inline bool XThread::is_java() { - ensure_initialized(); - return _is_java; -} - -inline bool XThread::is_worker() { - ensure_initialized(); - return _is_worker; -} - -inline uint XThread::worker_id() { - assert(has_worker_id(), "Worker id not initialized"); - return _worker_id; -} - -#endif // SHARE_GC_X_XTHREAD_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xThreadLocalAllocBuffer.cpp b/src/hotspot/share/gc/x/xThreadLocalAllocBuffer.cpp deleted file mode 100644 index 7dc0a128b64f0..0000000000000 --- a/src/hotspot/share/gc/x/xThreadLocalAllocBuffer.cpp +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/shared/tlab_globals.hpp" -#include "gc/x/xAddress.inline.hpp" -#include "gc/x/xStackWatermark.hpp" -#include "gc/x/xThreadLocalAllocBuffer.hpp" -#include "gc/x/xValue.inline.hpp" -#include "runtime/globals.hpp" -#include "runtime/javaThread.hpp" -#include "runtime/stackWatermarkSet.inline.hpp" - -XPerWorker* XThreadLocalAllocBuffer::_stats = nullptr; - -void XThreadLocalAllocBuffer::initialize() { - if (UseTLAB) { - assert(_stats == nullptr, "Already initialized"); - _stats = new XPerWorker(); - reset_statistics(); - } -} - -void XThreadLocalAllocBuffer::reset_statistics() { - if (UseTLAB) { - XPerWorkerIterator iter(_stats); - for (ThreadLocalAllocStats* stats; iter.next(&stats);) { - stats->reset(); - } - } -} - -void XThreadLocalAllocBuffer::publish_statistics() { - if (UseTLAB) { - ThreadLocalAllocStats total; - - XPerWorkerIterator iter(_stats); - for (ThreadLocalAllocStats* stats; iter.next(&stats);) { - total.update(*stats); - } - - total.publish(); - } -} - -static void fixup_address(HeapWord** p) { - *p = (HeapWord*)XAddress::good_or_null((uintptr_t)*p); -} - -void XThreadLocalAllocBuffer::retire(JavaThread* thread, ThreadLocalAllocStats* stats) { - if (UseTLAB) { - stats->reset(); - thread->tlab().addresses_do(fixup_address); - thread->tlab().retire(stats); - if (ResizeTLAB) { - thread->tlab().resize(); - } - } -} - -void XThreadLocalAllocBuffer::remap(JavaThread* thread) { - if (UseTLAB) { - thread->tlab().addresses_do(fixup_address); - } -} - -void XThreadLocalAllocBuffer::update_stats(JavaThread* thread) { - if (UseTLAB) { - XStackWatermark* const watermark = StackWatermarkSet::get(thread, StackWatermarkKind::gc); - _stats->addr()->update(watermark->stats()); - } -} diff --git a/src/hotspot/share/gc/x/xThreadLocalAllocBuffer.hpp b/src/hotspot/share/gc/x/xThreadLocalAllocBuffer.hpp deleted file mode 100644 index 521f4da19096a..0000000000000 --- a/src/hotspot/share/gc/x/xThreadLocalAllocBuffer.hpp +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
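In the removed xThreadLocalAllocBuffer.cpp above, retire() and remap() rewrite the raw pointers cached inside each thread's TLAB through XAddress::good_or_null(), because single-generation ZGC encoded color bits in the pointer itself. A self-contained sketch of that fixup idea follows; the mask layout and values are assumptions for illustration, not the real XAddress constants.

#include <cstdint>
#include <cstdio>

// Sketch: colored pointers keep metadata in high bits; after a phase shift the
// "good" color changes and cached raw pointers must be re-tagged.
static uint64_t good_mask = 0x0000100000000000ull;            // assumed current good color
static const uint64_t address_mask = 0x00000FFFFFFFFFFFull;   // assumed payload bits

static uint64_t to_good(uint64_t colored) {
  // Strip the old color, apply the current one.
  return (colored & address_mask) | good_mask;
}

// Analogue of the fixup_address() callback handed to tlab().addresses_do().
static void fixup_address(uint64_t* p) {
  if (*p != 0) {
    *p = to_good(*p);
  }
}

int main() {
  uint64_t tlab_top = 0x0000100000001000ull;   // carries the old color bit
  good_mask = 0x0000400000000000ull;           // phase shift: new good color
  fixup_address(&tlab_top);
  std::printf("retagged top = 0x%016llx\n", (unsigned long long)tlab_top);
  return 0;
}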
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XTHREADLOCALALLOCBUFFER_HPP -#define SHARE_GC_X_XTHREADLOCALALLOCBUFFER_HPP - -#include "gc/shared/threadLocalAllocBuffer.hpp" -#include "gc/x/xValue.hpp" -#include "memory/allStatic.hpp" - -class JavaThread; - -class XThreadLocalAllocBuffer : public AllStatic { -private: - static XPerWorker* _stats; - -public: - static void initialize(); - - static void reset_statistics(); - static void publish_statistics(); - - static void retire(JavaThread* thread, ThreadLocalAllocStats* stats); - static void remap(JavaThread* thread); - static void update_stats(JavaThread* thread); -}; - -#endif // SHARE_GC_X_XTHREADLOCALALLOCBUFFER_HPP diff --git a/src/hotspot/share/gc/x/xThreadLocalData.hpp b/src/hotspot/share/gc/x/xThreadLocalData.hpp deleted file mode 100644 index adc72f6ca76d8..0000000000000 --- a/src/hotspot/share/gc/x/xThreadLocalData.hpp +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XTHREADLOCALDATA_HPP -#define SHARE_GC_X_XTHREADLOCALDATA_HPP - -#include "gc/x/xMarkStack.hpp" -#include "gc/x/xGlobals.hpp" -#include "runtime/javaThread.hpp" -#include "utilities/debug.hpp" -#include "utilities/sizes.hpp" - -class XThreadLocalData { -private: - uintptr_t _address_bad_mask; - XMarkThreadLocalStacks _stacks; - oop* _invisible_root; - - XThreadLocalData() : - _address_bad_mask(0), - _stacks(), - _invisible_root(nullptr) {} - - static XThreadLocalData* data(Thread* thread) { - return thread->gc_data(); - } - -public: - static void create(Thread* thread) { - new (data(thread)) XThreadLocalData(); - } - - static void destroy(Thread* thread) { - data(thread)->~XThreadLocalData(); - } - - static void set_address_bad_mask(Thread* thread, uintptr_t mask) { - data(thread)->_address_bad_mask = mask; - } - - static XMarkThreadLocalStacks* stacks(Thread* thread) { - return &data(thread)->_stacks; - } - - static void set_invisible_root(Thread* thread, oop* root) { - assert(data(thread)->_invisible_root == nullptr, "Already set"); - data(thread)->_invisible_root = root; - } - - static void clear_invisible_root(Thread* thread) { - assert(data(thread)->_invisible_root != nullptr, "Should be set"); - data(thread)->_invisible_root = nullptr; - } - - template - static void do_invisible_root(Thread* thread, T f) { - if (data(thread)->_invisible_root != nullptr) { - f(data(thread)->_invisible_root); - } - } - - static ByteSize address_bad_mask_offset() { - return Thread::gc_data_offset() + byte_offset_of(XThreadLocalData, _address_bad_mask); - } - - static ByteSize nmethod_disarmed_offset() { - return address_bad_mask_offset() + in_ByteSize(XAddressBadMaskHighOrderBitsOffset); - } -}; - -#endif // SHARE_GC_X_XTHREADLOCALDATA_HPP diff --git a/src/hotspot/share/gc/x/xTracer.cpp b/src/hotspot/share/gc/x/xTracer.cpp deleted file mode 100644 index 3a0bd2b00e3bf..0000000000000 --- a/src/hotspot/share/gc/x/xTracer.cpp +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/shared/gcId.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xStat.hpp" -#include "gc/x/xTracer.hpp" -#include "jfr/jfrEvents.hpp" -#include "runtime/safepointVerifiers.hpp" -#include "utilities/debug.hpp" -#include "utilities/macros.hpp" -#if INCLUDE_JFR -#include "jfr/metadata/jfrSerializer.hpp" -#endif - -#if INCLUDE_JFR - -class XPageTypeConstant : public JfrSerializer { -public: - virtual void serialize(JfrCheckpointWriter& writer) { - writer.write_count(3); - writer.write_key(XPageTypeSmall); - writer.write("Small"); - writer.write_key(XPageTypeMedium); - writer.write("Medium"); - writer.write_key(XPageTypeLarge); - writer.write("Large"); - } -}; - -class XStatisticsCounterTypeConstant : public JfrSerializer { -public: - virtual void serialize(JfrCheckpointWriter& writer) { - writer.write_count(XStatCounter::count()); - for (XStatCounter* counter = XStatCounter::first(); counter != nullptr; counter = counter->next()) { - writer.write_key(counter->id()); - writer.write(counter->name()); - } - } -}; - -class XStatisticsSamplerTypeConstant : public JfrSerializer { -public: - virtual void serialize(JfrCheckpointWriter& writer) { - writer.write_count(XStatSampler::count()); - for (XStatSampler* sampler = XStatSampler::first(); sampler != nullptr; sampler = sampler->next()) { - writer.write_key(sampler->id()); - writer.write(sampler->name()); - } - } -}; - -static void register_jfr_type_serializers() { - JfrSerializer::register_serializer(TYPE_ZPAGETYPETYPE, - true /* permit_cache */, - new XPageTypeConstant()); - JfrSerializer::register_serializer(TYPE_ZSTATISTICSCOUNTERTYPE, - true /* permit_cache */, - new XStatisticsCounterTypeConstant()); - JfrSerializer::register_serializer(TYPE_ZSTATISTICSSAMPLERTYPE, - true /* permit_cache */, - new XStatisticsSamplerTypeConstant()); -} - -#endif // INCLUDE_JFR - -XTracer* XTracer::_tracer = nullptr; - -XTracer::XTracer() : - GCTracer(Z) {} - -void XTracer::initialize() { - assert(_tracer == nullptr, "Already initialized"); - _tracer = new XTracer(); - JFR_ONLY(register_jfr_type_serializers();) -} - -void XTracer::send_stat_counter(const XStatCounter& counter, uint64_t increment, uint64_t value) { - NoSafepointVerifier nsv; - - EventZStatisticsCounter e; - if (e.should_commit()) { - e.set_id(counter.id()); - e.set_increment(increment); - e.set_value(value); - e.commit(); - } -} - -void XTracer::send_stat_sampler(const XStatSampler& sampler, uint64_t value) { - NoSafepointVerifier nsv; - - EventZStatisticsSampler e; - if (e.should_commit()) { - e.set_id(sampler.id()); - e.set_value(value); - e.commit(); - } -} - -void XTracer::send_thread_phase(const char* name, const Ticks& start, const Ticks& end) { - NoSafepointVerifier nsv; - - EventZThreadPhase e(UNTIMED); - if (e.should_commit()) { - e.set_gcId(GCId::current_or_undefined()); - e.set_name(name); - e.set_starttime(start); - e.set_endtime(end); - e.commit(); - } -} - -void XTracer::send_thread_debug(const char* name, const Ticks& start, const Ticks& end) { - NoSafepointVerifier nsv; - - EventZThreadDebug e(UNTIMED); - if (e.should_commit()) { - e.set_gcId(GCId::current_or_undefined()); - e.set_name(name); - e.set_starttime(start); - e.set_endtime(end); - e.commit(); - } -} diff --git a/src/hotspot/share/gc/x/xTracer.hpp b/src/hotspot/share/gc/x/xTracer.hpp deleted file mode 100644 index d9219d79c51f3..0000000000000 --- a/src/hotspot/share/gc/x/xTracer.hpp +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (c) 2016, 2020, Oracle and/or its 
affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XTRACER_HPP -#define SHARE_GC_X_XTRACER_HPP - -#include "gc/shared/gcTrace.hpp" - -class XStatCounter; -class XStatPhase; -class XStatSampler; - -class XTracer : public GCTracer, public CHeapObj { -private: - static XTracer* _tracer; - - XTracer(); - - void send_stat_counter(const XStatCounter& counter, uint64_t increment, uint64_t value); - void send_stat_sampler(const XStatSampler& sampler, uint64_t value); - void send_thread_phase(const char* name, const Ticks& start, const Ticks& end); - void send_thread_debug(const char* name, const Ticks& start, const Ticks& end); - -public: - static XTracer* tracer(); - static void initialize(); - - void report_stat_counter(const XStatCounter& counter, uint64_t increment, uint64_t value); - void report_stat_sampler(const XStatSampler& sampler, uint64_t value); - void report_thread_phase(const char* name, const Ticks& start, const Ticks& end); - void report_thread_debug(const char* name, const Ticks& start, const Ticks& end); -}; - -// For temporary latency measurements during development and debugging -class XTraceThreadDebug : public StackObj { -private: - const Ticks _start; - const char* const _name; - -public: - XTraceThreadDebug(const char* name); - ~XTraceThreadDebug(); -}; - -#endif // SHARE_GC_X_XTRACER_HPP diff --git a/src/hotspot/share/gc/x/xTracer.inline.hpp b/src/hotspot/share/gc/x/xTracer.inline.hpp deleted file mode 100644 index 22dd2e2b6fb43..0000000000000 --- a/src/hotspot/share/gc/x/xTracer.inline.hpp +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XTRACER_INLINE_HPP -#define SHARE_GC_X_XTRACER_INLINE_HPP - -#include "gc/x/xTracer.hpp" - -#include "jfr/jfrEvents.hpp" - -inline XTracer* XTracer::tracer() { - return _tracer; -} - -inline void XTracer::report_stat_counter(const XStatCounter& counter, uint64_t increment, uint64_t value) { - if (EventZStatisticsCounter::is_enabled()) { - send_stat_counter(counter, increment, value); - } -} - -inline void XTracer::report_stat_sampler(const XStatSampler& sampler, uint64_t value) { - if (EventZStatisticsSampler::is_enabled()) { - send_stat_sampler(sampler, value); - } -} - -inline void XTracer::report_thread_phase(const char* name, const Ticks& start, const Ticks& end) { - if (EventZThreadPhase::is_enabled()) { - send_thread_phase(name, start, end); - } -} - -inline void XTracer::report_thread_debug(const char* name, const Ticks& start, const Ticks& end) { - if (EventZThreadDebug::is_enabled()) { - send_thread_debug(name, start, end); - } -} - -inline XTraceThreadDebug::XTraceThreadDebug(const char* name) : - _start(Ticks::now()), - _name(name) {} - -inline XTraceThreadDebug::~XTraceThreadDebug() { - XTracer::tracer()->report_thread_debug(_name, _start, Ticks::now()); -} - -#endif // SHARE_GC_X_XTRACER_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xUncommitter.cpp b/src/hotspot/share/gc/x/xUncommitter.cpp deleted file mode 100644 index ffd57b8c2a8e1..0000000000000 --- a/src/hotspot/share/gc/x/xUncommitter.cpp +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/shared/gc_globals.hpp" -#include "gc/x/xHeap.inline.hpp" -#include "gc/x/xLock.inline.hpp" -#include "gc/x/xStat.hpp" -#include "gc/x/xUncommitter.hpp" -#include "jfr/jfrEvents.hpp" -#include "logging/log.hpp" - -static const XStatCounter XCounterUncommit("Memory", "Uncommit", XStatUnitBytesPerSecond); - -XUncommitter::XUncommitter(XPageAllocator* page_allocator) : - _page_allocator(page_allocator), - _lock(), - _stop(false) { - set_name("XUncommitter"); - create_and_start(); -} - -bool XUncommitter::wait(uint64_t timeout) const { - XLocker locker(&_lock); - while (!ZUncommit && !_stop) { - _lock.wait(); - } - - if (!_stop && timeout > 0) { - log_debug(gc, heap)("Uncommit Timeout: " UINT64_FORMAT "s", timeout); - _lock.wait(timeout * MILLIUNITS); - } - - return !_stop; -} - -bool XUncommitter::should_continue() const { - XLocker locker(&_lock); - return !_stop; -} - -void XUncommitter::run_service() { - uint64_t timeout = 0; - - while (wait(timeout)) { - EventZUncommit event; - size_t uncommitted = 0; - - while (should_continue()) { - // Uncommit chunk - const size_t flushed = _page_allocator->uncommit(&timeout); - if (flushed == 0) { - // Done - break; - } - - uncommitted += flushed; - } - - if (uncommitted > 0) { - // Update statistics - XStatInc(XCounterUncommit, uncommitted); - log_info(gc, heap)("Uncommitted: " SIZE_FORMAT "M(%.0f%%)", - uncommitted / M, percent_of(uncommitted, XHeap::heap()->max_capacity())); - - // Send event - event.commit(uncommitted); - } - } -} - -void XUncommitter::stop_service() { - XLocker locker(&_lock); - _stop = true; - _lock.notify_all(); -} diff --git a/src/hotspot/share/gc/x/xUncommitter.hpp b/src/hotspot/share/gc/x/xUncommitter.hpp deleted file mode 100644 index 9f6212fa98db9..0000000000000 --- a/src/hotspot/share/gc/x/xUncommitter.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
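The removed xUncommitter.cpp above runs a simple service loop: wait on a condition for an allocator-suggested timeout, then repeatedly ask the page allocator to uncommit until it reports nothing left, and publish the total. Below is a condensed sketch of that loop using standard C++ primitives instead of XConditionLock; uncommit_some() is a stand-in for XPageAllocator::uncommit(), and the 300-second delay is an assumed placeholder.

#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <mutex>

// Stand-in for XPageAllocator::uncommit(): returns bytes uncommitted in one
// chunk and suggests how long to sleep before trying again.
static size_t uncommit_some(uint64_t* timeout_s) {
  *timeout_s = 300;   // assumed delay before the next attempt
  return 0;           // nothing to uncommit in this sketch
}

class Uncommitter {
  std::mutex              _mutex;
  std::condition_variable _cv;
  bool                    _stop = false;

  // Sleep for the suggested timeout, but wake early if asked to stop.
  bool wait(uint64_t timeout_s) {
    std::unique_lock<std::mutex> lock(_mutex);
    if (timeout_s > 0) {
      _cv.wait_for(lock, std::chrono::seconds(timeout_s), [&] { return _stop; });
    }
    return !_stop;
  }

public:
  void run_service() {
    uint64_t timeout_s = 0;
    while (wait(timeout_s)) {
      size_t uncommitted = 0;
      for (;;) {
        const size_t chunk = uncommit_some(&timeout_s);
        if (chunk == 0) {
          break;   // done for this round
        }
        uncommitted += chunk;
      }
      if (uncommitted > 0) {
        std::printf("Uncommitted: %zuM\n", uncommitted / (1024 * 1024));
      }
    }
  }

  void stop_service() {
    std::lock_guard<std::mutex> lock(_mutex);
    _stop = true;
    _cv.notify_all();
  }
};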
- */ - -#ifndef SHARE_GC_X_XUNCOMMITTER_HPP -#define SHARE_GC_X_XUNCOMMITTER_HPP - -#include "gc/shared/concurrentGCThread.hpp" -#include "gc/x/xLock.hpp" - -class XPageAllocation; - -class XUncommitter : public ConcurrentGCThread { -private: - XPageAllocator* const _page_allocator; - mutable XConditionLock _lock; - bool _stop; - - bool wait(uint64_t timeout) const; - bool should_continue() const; - -protected: - virtual void run_service(); - virtual void stop_service(); - -public: - XUncommitter(XPageAllocator* page_allocator); -}; - -#endif // SHARE_GC_X_XUNCOMMITTER_HPP diff --git a/src/hotspot/share/gc/x/xUnload.cpp b/src/hotspot/share/gc/x/xUnload.cpp deleted file mode 100644 index c501ace7d1444..0000000000000 --- a/src/hotspot/share/gc/x/xUnload.cpp +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "classfile/classLoaderDataGraph.hpp" -#include "classfile/systemDictionary.hpp" -#include "code/codeBehaviours.hpp" -#include "code/codeCache.hpp" -#include "code/dependencyContext.hpp" -#include "gc/shared/gcBehaviours.hpp" -#include "gc/shared/suspendibleThreadSet.hpp" -#include "gc/x/xBarrier.inline.hpp" -#include "gc/x/xLock.inline.hpp" -#include "gc/x/xNMethod.hpp" -#include "gc/x/xStat.hpp" -#include "gc/x/xUnload.hpp" -#include "memory/metaspaceUtils.hpp" -#include "oops/access.inline.hpp" - -static const XStatSubPhase XSubPhaseConcurrentClassesUnlink("Concurrent Classes Unlink"); -static const XStatSubPhase XSubPhaseConcurrentClassesPurge("Concurrent Classes Purge"); - -class XPhantomIsAliveObjectClosure : public BoolObjectClosure { -public: - virtual bool do_object_b(oop o) { - return XBarrier::is_alive_barrier_on_phantom_oop(o); - } -}; - -class XIsUnloadingOopClosure : public OopClosure { -private: - XPhantomIsAliveObjectClosure _is_alive; - bool _is_unloading; - -public: - XIsUnloadingOopClosure() : - _is_alive(), - _is_unloading(false) {} - - virtual void do_oop(oop* p) { - const oop o = RawAccess<>::oop_load(p); - if (o != nullptr && !_is_alive.do_object_b(o)) { - _is_unloading = true; - } - } - - virtual void do_oop(narrowOop* p) { - ShouldNotReachHere(); - } - - bool is_unloading() const { - return _is_unloading; - } -}; - -class XIsUnloadingBehaviour : public IsUnloadingBehaviour { -public: - virtual bool has_dead_oop(nmethod* nm) const { - XReentrantLock* const lock = XNMethod::lock_for_nmethod(nm); - XLocker locker(lock); - XIsUnloadingOopClosure cl; - XNMethod::nmethod_oops_do_inner(nm, &cl); - return cl.is_unloading(); - } -}; - -class XCompiledICProtectionBehaviour : public CompiledICProtectionBehaviour { -public: - virtual bool lock(nmethod* nm) { - XReentrantLock* const lock = XNMethod::ic_lock_for_nmethod(nm); - lock->lock(); - return true; - } - - virtual void unlock(nmethod* nm) { - XReentrantLock* const lock = XNMethod::ic_lock_for_nmethod(nm); - lock->unlock(); - } - - virtual bool is_safe(nmethod* nm) { - if (SafepointSynchronize::is_at_safepoint() || nm->is_unloading()) { - return true; - } - - XReentrantLock* const lock = XNMethod::ic_lock_for_nmethod(nm); - return lock->is_owned(); - } -}; - -XUnload::XUnload(XWorkers* workers) : - _workers(workers) { - - if (!ClassUnloading) { - return; - } - - static XIsUnloadingBehaviour is_unloading_behaviour; - IsUnloadingBehaviour::set_current(&is_unloading_behaviour); - - static XCompiledICProtectionBehaviour ic_protection_behaviour; - CompiledICProtectionBehaviour::set_current(&ic_protection_behaviour); -} - -void XUnload::prepare() { - if (!ClassUnloading) { - return; - } - - CodeCache::increment_unloading_cycle(); - DependencyContext::cleaning_start(); -} - -void XUnload::unlink() { - if (!ClassUnloading) { - return; - } - - XStatTimer timer(XSubPhaseConcurrentClassesUnlink); - SuspendibleThreadSetJoiner sts; - bool unloading_occurred; - - { - MutexLocker ml(ClassLoaderDataGraph_lock); - unloading_occurred = SystemDictionary::do_unloading(XStatPhase::timer()); - } - - Klass::clean_weak_klass_links(unloading_occurred); - XNMethod::unlink(_workers, unloading_occurred); - DependencyContext::cleaning_end(); -} - -void XUnload::purge() { - if (!ClassUnloading) { - return; - } - - XStatTimer timer(XSubPhaseConcurrentClassesPurge); - - { - SuspendibleThreadSetJoiner sts; - XNMethod::purge(); - } - - ClassLoaderDataGraph::purge(/*at_safepoint*/false); - 
CodeCache::purge_exception_caches(); -} - -void XUnload::finish() { - // Resize and verify metaspace - MetaspaceGC::compute_new_size(); - DEBUG_ONLY(MetaspaceUtils::verify();) -} diff --git a/src/hotspot/share/gc/x/xUnload.hpp b/src/hotspot/share/gc/x/xUnload.hpp deleted file mode 100644 index df6ba7ed2eb71..0000000000000 --- a/src/hotspot/share/gc/x/xUnload.hpp +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XUNLOAD_HPP -#define SHARE_GC_X_XUNLOAD_HPP - -class XWorkers; - -class XUnload { -private: - XWorkers* const _workers; - -public: - XUnload(XWorkers* workers); - - void prepare(); - void unlink(); - void purge(); - void finish(); -}; - -#endif // SHARE_GC_X_XUNLOAD_HPP diff --git a/src/hotspot/share/gc/x/xUnmapper.cpp b/src/hotspot/share/gc/x/xUnmapper.cpp deleted file mode 100644 index 17371cf1394d6..0000000000000 --- a/src/hotspot/share/gc/x/xUnmapper.cpp +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/shared/gc_globals.hpp" -#include "gc/shared/gcLogPrecious.hpp" -#include "gc/x/xList.inline.hpp" -#include "gc/x/xLock.inline.hpp" -#include "gc/x/xPage.inline.hpp" -#include "gc/x/xPageAllocator.hpp" -#include "gc/x/xUnmapper.hpp" -#include "jfr/jfrEvents.hpp" -#include "runtime/globals.hpp" - -XUnmapper::XUnmapper(XPageAllocator* page_allocator) : - _page_allocator(page_allocator), - _lock(), - _queue(), - _enqueued_bytes(0), - _warned_sync_unmapping(false), - _stop(false) { - set_name("XUnmapper"); - create_and_start(); -} - -XPage* XUnmapper::dequeue() { - XLocker locker(&_lock); - - for (;;) { - if (_stop) { - return nullptr; - } - - XPage* const page = _queue.remove_first(); - if (page != nullptr) { - _enqueued_bytes -= page->size(); - return page; - } - - _lock.wait(); - } -} - -bool XUnmapper::try_enqueue(XPage* page) { - if (ZVerifyViews) { - // Asynchronous unmap and destroy is not supported with ZVerifyViews - return false; - } - - // Enqueue for asynchronous unmap and destroy - XLocker locker(&_lock); - if (is_saturated()) { - // The unmapper thread is lagging behind and is unable to unmap memory fast enough - if (!_warned_sync_unmapping) { - _warned_sync_unmapping = true; - log_warning_p(gc)("WARNING: Encountered synchronous unmapping because asynchronous unmapping could not keep up"); - } - log_debug(gc, unmap)("Synchronous unmapping " SIZE_FORMAT "M page", page->size() / M); - return false; - } - - log_trace(gc, unmap)("Asynchronous unmapping " SIZE_FORMAT "M page (" SIZE_FORMAT "M / " SIZE_FORMAT "M enqueued)", - page->size() / M, _enqueued_bytes / M, queue_capacity() / M); - - _queue.insert_last(page); - _enqueued_bytes += page->size(); - _lock.notify_all(); - - return true; -} - -size_t XUnmapper::queue_capacity() const { - return align_up(_page_allocator->max_capacity() * ZAsyncUnmappingLimit / 100.0, XGranuleSize); -} - -bool XUnmapper::is_saturated() const { - return _enqueued_bytes >= queue_capacity(); -} - -void XUnmapper::do_unmap_and_destroy_page(XPage* page) const { - EventZUnmap event; - const size_t unmapped = page->size(); - - // Unmap and destroy - _page_allocator->unmap_page(page); - _page_allocator->destroy_page(page); - - // Send event - event.commit(unmapped); -} - -void XUnmapper::unmap_and_destroy_page(XPage* page) { - if (!try_enqueue(page)) { - // Synchronously unmap and destroy - do_unmap_and_destroy_page(page); - } -} - -void XUnmapper::run_service() { - for (;;) { - XPage* const page = dequeue(); - if (page == nullptr) { - // Stop - return; - } - - do_unmap_and_destroy_page(page); - } -} - -void XUnmapper::stop_service() { - XLocker locker(&_lock); - _stop = true; - _lock.notify_all(); -} diff --git a/src/hotspot/share/gc/x/xUnmapper.hpp b/src/hotspot/share/gc/x/xUnmapper.hpp deleted file mode 100644 index 811588f14d6f8..0000000000000 --- a/src/hotspot/share/gc/x/xUnmapper.hpp +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
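xUnmapper.cpp (removed above) is a bounded producer/consumer queue: pages are normally unmapped asynchronously by a dedicated thread, but once the queued bytes exceed a capacity limit the producer unmaps synchronously and warns once. The sketch below shows that back-pressure pattern in isolation; Page, do_unmap() and the capacity handling are simplified placeholders, not the HotSpot types.

#include <condition_variable>
#include <cstddef>
#include <cstdio>
#include <deque>
#include <mutex>

struct Page { size_t size; };

// Placeholder for the actual unmap + destroy work.
static void do_unmap(const Page& page) {
  std::printf("unmapping %zu bytes\n", page.size);
}

class AsyncUnmapper {
  std::mutex              _mutex;
  std::condition_variable _cv;
  std::deque<Page>        _queue;
  size_t                  _enqueued_bytes = 0;
  size_t                  _capacity_bytes;
  bool                    _warned = false;
  bool                    _stop = false;

public:
  explicit AsyncUnmapper(size_t capacity_bytes) : _capacity_bytes(capacity_bytes) {}

  // Producer side: enqueue if below capacity, otherwise fall back to
  // synchronous unmapping so the queue cannot grow without bound.
  void unmap_page(const Page& page) {
    {
      std::lock_guard<std::mutex> lock(_mutex);
      if (_enqueued_bytes < _capacity_bytes) {
        _queue.push_back(page);
        _enqueued_bytes += page.size;
        _cv.notify_one();
        return;
      }
      if (!_warned) {
        _warned = true;
        std::printf("WARNING: async unmapping cannot keep up, unmapping synchronously\n");
      }
    }
    do_unmap(page);   // back-pressure: the producer pays the cost itself
  }

  // Consumer side: run on a dedicated thread.
  void run_service() {
    for (;;) {
      Page page;
      {
        std::unique_lock<std::mutex> lock(_mutex);
        _cv.wait(lock, [&] { return _stop || !_queue.empty(); });
        if (_stop && _queue.empty()) {
          return;
        }
        page = _queue.front();
        _queue.pop_front();
        _enqueued_bytes -= page.size;
      }
      do_unmap(page);
    }
  }

  void stop_service() {
    std::lock_guard<std::mutex> lock(_mutex);
    _stop = true;
    _cv.notify_all();
  }
};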
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XUNMAPPER_HPP -#define SHARE_GC_X_XUNMAPPER_HPP - -#include "gc/shared/concurrentGCThread.hpp" -#include "gc/x/xList.hpp" -#include "gc/x/xLock.hpp" - -class XPage; -class XPageAllocator; - -class XUnmapper : public ConcurrentGCThread { -private: - XPageAllocator* const _page_allocator; - XConditionLock _lock; - XList _queue; - size_t _enqueued_bytes; - bool _warned_sync_unmapping; - bool _stop; - - XPage* dequeue(); - bool try_enqueue(XPage* page); - size_t queue_capacity() const; - bool is_saturated() const; - void do_unmap_and_destroy_page(XPage* page) const; - -protected: - virtual void run_service(); - virtual void stop_service(); - -public: - XUnmapper(XPageAllocator* page_allocator); - - void unmap_and_destroy_page(XPage* page); -}; - -#endif // SHARE_GC_X_XUNMAPPER_HPP diff --git a/src/hotspot/share/gc/x/xUtils.hpp b/src/hotspot/share/gc/x/xUtils.hpp deleted file mode 100644 index 26f14c0e98f78..0000000000000 --- a/src/hotspot/share/gc/x/xUtils.hpp +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XUTILS_HPP -#define SHARE_GC_X_XUTILS_HPP - -#include "memory/allStatic.hpp" -#include "utilities/globalDefinitions.hpp" - -class XUtils : public AllStatic { -public: - // Allocation - static uintptr_t alloc_aligned(size_t alignment, size_t size); - - // Size conversion - static size_t bytes_to_words(size_t size_in_words); - static size_t words_to_bytes(size_t size_in_words); - - // Object - static size_t object_size(uintptr_t addr); - static void object_copy_disjoint(uintptr_t from, uintptr_t to, size_t size); - static void object_copy_conjoint(uintptr_t from, uintptr_t to, size_t size); -}; - -#endif // SHARE_GC_X_XUTILS_HPP diff --git a/src/hotspot/share/gc/x/xUtils.inline.hpp b/src/hotspot/share/gc/x/xUtils.inline.hpp deleted file mode 100644 index 09180959311d8..0000000000000 --- a/src/hotspot/share/gc/x/xUtils.inline.hpp +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XUTILS_INLINE_HPP -#define SHARE_GC_X_XUTILS_INLINE_HPP - -#include "gc/x/xUtils.hpp" - -#include "gc/x/xOop.inline.hpp" -#include "oops/oop.inline.hpp" -#include "utilities/align.hpp" -#include "utilities/copy.hpp" -#include "utilities/debug.hpp" -#include "utilities/globalDefinitions.hpp" - -inline size_t XUtils::bytes_to_words(size_t size_in_bytes) { - assert(is_aligned(size_in_bytes, BytesPerWord), "Size not word aligned"); - return size_in_bytes >> LogBytesPerWord; -} - -inline size_t XUtils::words_to_bytes(size_t size_in_words) { - return size_in_words << LogBytesPerWord; -} - -inline size_t XUtils::object_size(uintptr_t addr) { - return words_to_bytes(XOop::from_address(addr)->size()); -} - -inline void XUtils::object_copy_disjoint(uintptr_t from, uintptr_t to, size_t size) { - Copy::aligned_disjoint_words((HeapWord*)from, (HeapWord*)to, bytes_to_words(size)); -} - -inline void XUtils::object_copy_conjoint(uintptr_t from, uintptr_t to, size_t size) { - if (from != to) { - Copy::aligned_conjoint_words((HeapWord*)from, (HeapWord*)to, bytes_to_words(size)); - } -} - -#endif // SHARE_GC_X_XUTILS_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xValue.hpp b/src/hotspot/share/gc/x/xValue.hpp deleted file mode 100644 index 4b2838c8a2c28..0000000000000 --- a/src/hotspot/share/gc/x/xValue.hpp +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XVALUE_HPP -#define SHARE_GC_X_XVALUE_HPP - -#include "memory/allStatic.hpp" -#include "utilities/globalDefinitions.hpp" - -// -// Storage -// - -template -class XValueStorage : public AllStatic { -private: - static uintptr_t _top; - static uintptr_t _end; - -public: - static const size_t offset = 4 * K; - - static uintptr_t alloc(size_t size); -}; - -class XContendedStorage : public XValueStorage { -public: - static size_t alignment(); - static uint32_t count(); - static uint32_t id(); -}; - -class XPerCPUStorage : public XValueStorage { -public: - static size_t alignment(); - static uint32_t count(); - static uint32_t id(); -}; - -class XPerNUMAStorage : public XValueStorage { -public: - static size_t alignment(); - static uint32_t count(); - static uint32_t id(); -}; - -class XPerWorkerStorage : public XValueStorage { -public: - static size_t alignment(); - static uint32_t count(); - static uint32_t id(); -}; - -// -// Value -// - -template -class XValue : public CHeapObj { -private: - const uintptr_t _addr; - - uintptr_t value_addr(uint32_t value_id) const; - -public: - XValue(); - XValue(const T& value); - - const T* addr(uint32_t value_id = S::id()) const; - T* addr(uint32_t value_id = S::id()); - - const T& get(uint32_t value_id = S::id()) const; - T& get(uint32_t value_id = S::id()); - - void set(const T& value, uint32_t value_id = S::id()); - void set_all(const T& value); -}; - -template using XContended = XValue; -template using XPerCPU = XValue; -template using XPerNUMA = XValue; -template using XPerWorker = XValue; - -// -// Iterator -// - -template -class XValueIterator { -private: - XValue* const _value; - uint32_t _value_id; - -public: - XValueIterator(XValue* value); - - bool next(T** value); -}; - -template using XPerCPUIterator = XValueIterator; -template using XPerNUMAIterator = XValueIterator; -template using XPerWorkerIterator = XValueIterator; - -template -class XValueConstIterator { -private: - const XValue* const _value; - uint32_t _value_id; - -public: - XValueConstIterator(const XValue* value); - - bool next(const T** value); -}; - -template using XPerCPUConstIterator = XValueConstIterator; -template using XPerNUMAConstIterator = XValueConstIterator; -template using XPerWorkerConstIterator = XValueConstIterator; - -#endif // SHARE_GC_X_XVALUE_HPP diff --git a/src/hotspot/share/gc/x/xValue.inline.hpp b/src/hotspot/share/gc/x/xValue.inline.hpp deleted file mode 100644 index 1b12eb7d55525..0000000000000 --- a/src/hotspot/share/gc/x/xValue.inline.hpp +++ /dev/null @@ -1,210 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its 
affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XVALUE_INLINE_HPP -#define SHARE_GC_X_XVALUE_INLINE_HPP - -#include "gc/x/xValue.hpp" - -#include "gc/shared/gc_globals.hpp" -#include "gc/x/xCPU.inline.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xNUMA.hpp" -#include "gc/x/xThread.inline.hpp" -#include "gc/x/xUtils.hpp" -#include "runtime/globals.hpp" -#include "utilities/align.hpp" - -// -// Storage -// - -template uintptr_t XValueStorage::_end = 0; -template uintptr_t XValueStorage::_top = 0; - -template -uintptr_t XValueStorage::alloc(size_t size) { - assert(size <= offset, "Allocation too large"); - - // Allocate entry in existing memory block - const uintptr_t addr = align_up(_top, S::alignment()); - _top = addr + size; - - if (_top < _end) { - // Success - return addr; - } - - // Allocate new block of memory - const size_t block_alignment = offset; - const size_t block_size = offset * S::count(); - _top = XUtils::alloc_aligned(block_alignment, block_size); - _end = _top + offset; - - // Retry allocation - return alloc(size); -} - -inline size_t XContendedStorage::alignment() { - return XCacheLineSize; -} - -inline uint32_t XContendedStorage::count() { - return 1; -} - -inline uint32_t XContendedStorage::id() { - return 0; -} - -inline size_t XPerCPUStorage::alignment() { - return sizeof(uintptr_t); -} - -inline uint32_t XPerCPUStorage::count() { - return XCPU::count(); -} - -inline uint32_t XPerCPUStorage::id() { - return XCPU::id(); -} - -inline size_t XPerNUMAStorage::alignment() { - return sizeof(uintptr_t); -} - -inline uint32_t XPerNUMAStorage::count() { - return XNUMA::count(); -} - -inline uint32_t XPerNUMAStorage::id() { - return XNUMA::id(); -} - -inline size_t XPerWorkerStorage::alignment() { - return sizeof(uintptr_t); -} - -inline uint32_t XPerWorkerStorage::count() { - return UseDynamicNumberOfGCThreads ? 
ConcGCThreads : MAX2(ConcGCThreads, ParallelGCThreads); -} - -inline uint32_t XPerWorkerStorage::id() { - return XThread::worker_id(); -} - -// -// Value -// - -template -inline uintptr_t XValue::value_addr(uint32_t value_id) const { - return _addr + (value_id * S::offset); -} - -template -inline XValue::XValue() : - _addr(S::alloc(sizeof(T))) { - // Initialize all instances - XValueIterator iter(this); - for (T* addr; iter.next(&addr);) { - ::new (addr) T; - } -} - -template -inline XValue::XValue(const T& value) : - _addr(S::alloc(sizeof(T))) { - // Initialize all instances - XValueIterator iter(this); - for (T* addr; iter.next(&addr);) { - ::new (addr) T(value); - } -} - -template -inline const T* XValue::addr(uint32_t value_id) const { - return reinterpret_cast(value_addr(value_id)); -} - -template -inline T* XValue::addr(uint32_t value_id) { - return reinterpret_cast(value_addr(value_id)); -} - -template -inline const T& XValue::get(uint32_t value_id) const { - return *addr(value_id); -} - -template -inline T& XValue::get(uint32_t value_id) { - return *addr(value_id); -} - -template -inline void XValue::set(const T& value, uint32_t value_id) { - get(value_id) = value; -} - -template -inline void XValue::set_all(const T& value) { - XValueIterator iter(this); - for (T* addr; iter.next(&addr);) { - *addr = value; - } -} - -// -// Iterator -// - -template -inline XValueIterator::XValueIterator(XValue* value) : - _value(value), - _value_id(0) {} - -template -inline bool XValueIterator::next(T** value) { - if (_value_id < S::count()) { - *value = _value->addr(_value_id++); - return true; - } - return false; -} - -template -inline XValueConstIterator::XValueConstIterator(const XValue* value) : - _value(value), - _value_id(0) {} - -template -inline bool XValueConstIterator::next(const T** value) { - if (_value_id < S::count()) { - *value = _value->addr(_value_id++); - return true; - } - return false; -} - -#endif // SHARE_GC_X_XVALUE_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xVerify.cpp b/src/hotspot/share/gc/x/xVerify.cpp deleted file mode 100644 index ac6e8ee65d0f3..0000000000000 --- a/src/hotspot/share/gc/x/xVerify.cpp +++ /dev/null @@ -1,405 +0,0 @@ -/* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
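The xValue files removed above implement the old ZGC's per-CPU / per-NUMA / per-worker value containers: a backing block is carved into fixed-stride slots, one instance of T is placement-constructed per id, and get()/set() simply index base + id * stride. The following is only a minimal stand-alone sketch of that layout in plain C++ (PerIdValue, kStride and the vector backing store are invented for illustration, and it assumes alignof(T) is no stricter than the default allocation alignment), not the HotSpot code itself:

#include <cstddef>
#include <cstdint>
#include <new>
#include <vector>

// Minimal stand-in for the XValue/XValueStorage layout: one T per id, placed
// at base + id * stride, with all instances constructed up front.
template <typename T>
class PerIdValue {
private:
  static constexpr size_t kStride = 64;  // assumed slot stride
  static_assert(sizeof(T) <= kStride, "value must fit in one slot");

  std::vector<unsigned char> _storage;   // plays the role of XValueStorage::alloc()
  const uint32_t _count;

  uintptr_t value_addr(uint32_t id) const {
    return reinterpret_cast<uintptr_t>(_storage.data()) + id * kStride;
  }

public:
  PerIdValue(uint32_t count, const T& initial)
    : _storage(count * kStride),
      _count(count) {
    // Initialize all instances, mirroring XValue's constructor loop.
    for (uint32_t id = 0; id < _count; id++) {
      ::new (reinterpret_cast<void*>(value_addr(id))) T(initial);
    }
  }

  ~PerIdValue() {
    for (uint32_t id = 0; id < _count; id++) {
      reinterpret_cast<T*>(value_addr(id))->~T();
    }
  }

  T& get(uint32_t id) {
    return *reinterpret_cast<T*>(value_addr(id));
  }

  void set_all(const T& value) {
    for (uint32_t id = 0; id < _count; id++) {
      get(id) = value;
    }
  }
};

A per-CPU counter in this shape would be PerIdValue<uint64_t> counters(ncpus, 0), with each thread updating counters.get(cpu_id); the real storage flavors differ in alignment and count (cache-line alignment only for the contended variant, CPU/NUMA/worker counts for the others).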
- */ - -#include "precompiled.hpp" -#include "classfile/classLoaderData.hpp" -#include "gc/shared/gc_globals.hpp" -#include "gc/x/xAddress.inline.hpp" -#include "gc/x/xHeap.inline.hpp" -#include "gc/x/xNMethod.hpp" -#include "gc/x/xOop.hpp" -#include "gc/x/xPageAllocator.hpp" -#include "gc/x/xResurrection.hpp" -#include "gc/x/xRootsIterator.hpp" -#include "gc/x/xStackWatermark.hpp" -#include "gc/x/xStat.hpp" -#include "gc/x/xVerify.hpp" -#include "memory/iterator.inline.hpp" -#include "memory/resourceArea.hpp" -#include "oops/oop.hpp" -#include "runtime/frame.inline.hpp" -#include "runtime/globals.hpp" -#include "runtime/handles.hpp" -#include "runtime/javaThread.hpp" -#include "runtime/safepoint.hpp" -#include "runtime/stackFrameStream.inline.hpp" -#include "runtime/stackWatermark.inline.hpp" -#include "runtime/stackWatermarkSet.inline.hpp" -#include "utilities/debug.hpp" -#include "utilities/globalDefinitions.hpp" -#include "utilities/preserveException.hpp" - -#define BAD_OOP_ARG(o, p) "Bad oop " PTR_FORMAT " found at " PTR_FORMAT, p2i(o), p2i(p) - -static void z_verify_oop(oop* p) { - const oop o = RawAccess<>::oop_load(p); - if (o != nullptr) { - const uintptr_t addr = XOop::to_address(o); - guarantee(XAddress::is_good(addr), BAD_OOP_ARG(o, p)); - guarantee(oopDesc::is_oop(XOop::from_address(addr)), BAD_OOP_ARG(o, p)); - } -} - -static void z_verify_possibly_weak_oop(oop* p) { - const oop o = RawAccess<>::oop_load(p); - if (o != nullptr) { - const uintptr_t addr = XOop::to_address(o); - guarantee(XAddress::is_good(addr) || XAddress::is_finalizable_good(addr), BAD_OOP_ARG(o, p)); - guarantee(oopDesc::is_oop(XOop::from_address(XAddress::good(addr))), BAD_OOP_ARG(o, p)); - } -} - -class XVerifyRootClosure : public OopClosure { -private: - const bool _verify_fixed; - -public: - XVerifyRootClosure(bool verify_fixed) : - _verify_fixed(verify_fixed) {} - - virtual void do_oop(oop* p) { - if (_verify_fixed) { - z_verify_oop(p); - } else { - // Don't know the state of the oop. - oop obj = *p; - obj = NativeAccess::oop_load(&obj); - z_verify_oop(&obj); - } - } - - virtual void do_oop(narrowOop*) { - ShouldNotReachHere(); - } - - bool verify_fixed() const { - return _verify_fixed; - } -}; - -class XVerifyStack : public OopClosure { -private: - XVerifyRootClosure* const _cl; - JavaThread* const _jt; - uint64_t _last_good; - bool _verifying_bad_frames; - -public: - XVerifyStack(XVerifyRootClosure* cl, JavaThread* jt) : - _cl(cl), - _jt(jt), - _last_good(0), - _verifying_bad_frames(false) { - XStackWatermark* const stack_watermark = StackWatermarkSet::get(jt, StackWatermarkKind::gc); - - if (_cl->verify_fixed()) { - assert(stack_watermark->processing_started(), "Should already have been fixed"); - assert(stack_watermark->processing_completed(), "Should already have been fixed"); - } else { - // We don't really know the state of the stack, verify watermark. 
- if (!stack_watermark->processing_started()) { - _verifying_bad_frames = true; - } else { - // Not time yet to verify bad frames - _last_good = stack_watermark->last_processed(); - } - } - } - - void do_oop(oop* p) { - if (_verifying_bad_frames) { - const oop obj = *p; - guarantee(!XAddress::is_good(XOop::to_address(obj)), BAD_OOP_ARG(obj, p)); - } - _cl->do_oop(p); - } - - void do_oop(narrowOop* p) { - ShouldNotReachHere(); - } - - void prepare_next_frame(frame& frame) { - if (_cl->verify_fixed()) { - // All frames need to be good - return; - } - - // The verification has two modes, depending on whether we have reached the - // last processed frame or not. Before it is reached, we expect everything to - // be good. After reaching it, we expect everything to be bad. - const uintptr_t sp = reinterpret_cast(frame.sp()); - - if (!_verifying_bad_frames && sp == _last_good) { - // Found the last good frame, now verify the bad ones - _verifying_bad_frames = true; - } - } - - void verify_frames() { - NMethodToOopClosure nm_cl(_cl, false /* fix_relocations */); - for (StackFrameStream frames(_jt, true /* update */, false /* process_frames */); - !frames.is_done(); - frames.next()) { - frame& frame = *frames.current(); - frame.oops_do(this, &nm_cl, frames.register_map(), DerivedPointerIterationMode::_ignore); - prepare_next_frame(frame); - } - } -}; - -class XVerifyOopClosure : public ClaimMetadataVisitingOopIterateClosure { -private: - const bool _verify_weaks; - -public: - XVerifyOopClosure(bool verify_weaks) : - ClaimMetadataVisitingOopIterateClosure(ClassLoaderData::_claim_other), - _verify_weaks(verify_weaks) {} - - virtual void do_oop(oop* p) { - if (_verify_weaks) { - z_verify_possibly_weak_oop(p); - } else { - // We should never encounter finalizable oops through strong - // paths. This assumes we have only visited strong roots. - z_verify_oop(p); - } - } - - virtual void do_oop(narrowOop* p) { - ShouldNotReachHere(); - } - - virtual ReferenceIterationMode reference_iteration_mode() { - return _verify_weaks ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT; - } - - // Don't follow this metadata when verifying oops - virtual void do_method(Method* m) {} - virtual void do_nmethod(nmethod* nm) {} -}; - -typedef ClaimingCLDToOopClosure XVerifyCLDClosure; - -class XVerifyThreadClosure : public ThreadClosure { -private: - XVerifyRootClosure* const _cl; - -public: - XVerifyThreadClosure(XVerifyRootClosure* cl) : - _cl(cl) {} - - virtual void do_thread(Thread* thread) { - thread->oops_do_no_frames(_cl, nullptr); - - JavaThread* const jt = JavaThread::cast(thread); - if (!jt->has_last_Java_frame()) { - return; - } - - XVerifyStack verify_stack(_cl, jt); - verify_stack.verify_frames(); - } -}; - -class XVerifyNMethodClosure : public NMethodClosure { -private: - OopClosure* const _cl; - BarrierSetNMethod* const _bs_nm; - const bool _verify_fixed; - - bool trust_nmethod_state() const { - // The root iterator will visit non-processed - // nmethods class unloading is turned off. 
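XVerifyStack above leans on the stack watermark: frames newer than the last processed sp must already contain good oops, and everything at or below that frame must still look unprocessed, with the mode flipping exactly once during the walk. The loop below is only a toy model of that two-phase check (ToyFrame and its processed flag are invented stand-ins for real frames and oop colors), meant to show where the flip happens:

#include <cstdint>
#include <vector>

struct ToyFrame {
  uintptr_t sp;
  bool processed;   // stands in for "all oops in this frame are good"
};

// Returns true if the stack, walked from newest to oldest frame, matches the
// watermark invariant: processed above the watermark, unprocessed below it.
bool verify_against_watermark(const std::vector<ToyFrame>& frames_top_down,
                              uintptr_t last_processed_sp) {
  bool verifying_unprocessed = false;
  for (const ToyFrame& f : frames_top_down) {
    if (verifying_unprocessed) {
      if (f.processed) return false;   // older frames must not look processed
    } else {
      if (!f.processed) return false;  // newer frames must already be processed
    }
    // Mirrors prepare_next_frame(): once the last good frame has been checked,
    // switch to expecting unprocessed frames.
    if (!verifying_unprocessed && f.sp == last_processed_sp) {
      verifying_unprocessed = true;
    }
  }
  return true;
}

In the removed code the "everything unprocessed" mode is also entered immediately when watermark processing has not started at all, which corresponds to calling this toy with a watermark that matches no frame.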
- return ClassUnloading || _verify_fixed; - } - -public: - XVerifyNMethodClosure(OopClosure* cl, bool verify_fixed) : - _cl(cl), - _bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()), - _verify_fixed(verify_fixed) {} - - virtual void do_nmethod(nmethod* nm) { - assert(!trust_nmethod_state() || !_bs_nm->is_armed(nm), "Should not encounter any armed nmethods"); - - XNMethod::nmethod_oops_do(nm, _cl); - } -}; - -void XVerify::roots_strong(bool verify_fixed) { - assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); - assert(!XResurrection::is_blocked(), "Invalid phase"); - - XVerifyRootClosure cl(verify_fixed); - XVerifyCLDClosure cld_cl(&cl); - XVerifyThreadClosure thread_cl(&cl); - XVerifyNMethodClosure nm_cl(&cl, verify_fixed); - - XRootsIterator iter(ClassLoaderData::_claim_none); - iter.apply(&cl, - &cld_cl, - &thread_cl, - &nm_cl); -} - -void XVerify::roots_weak() { - assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); - assert(!XResurrection::is_blocked(), "Invalid phase"); - - XVerifyRootClosure cl(true /* verify_fixed */); - XWeakRootsIterator iter; - iter.apply(&cl); -} - -void XVerify::objects(bool verify_weaks) { - assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); - assert(XGlobalPhase == XPhaseMarkCompleted, "Invalid phase"); - assert(!XResurrection::is_blocked(), "Invalid phase"); - - XVerifyOopClosure cl(verify_weaks); - ObjectToOopClosure object_cl(&cl); - XHeap::heap()->object_iterate(&object_cl, verify_weaks); -} - -void XVerify::before_zoperation() { - // Verify strong roots - XStatTimerDisable disable; - if (ZVerifyRoots) { - roots_strong(false /* verify_fixed */); - } -} - -void XVerify::after_mark() { - // Verify all strong roots and strong references - XStatTimerDisable disable; - if (ZVerifyRoots) { - roots_strong(true /* verify_fixed */); - } - if (ZVerifyObjects) { - objects(false /* verify_weaks */); - } -} - -void XVerify::after_weak_processing() { - // Verify all roots and all references - XStatTimerDisable disable; - if (ZVerifyRoots) { - roots_strong(true /* verify_fixed */); - roots_weak(); - } - if (ZVerifyObjects) { - objects(true /* verify_weaks */); - } -} - -template -class XPageDebugMapOrUnmapClosure : public XPageClosure { -private: - const XPageAllocator* const _allocator; - -public: - XPageDebugMapOrUnmapClosure(const XPageAllocator* allocator) : - _allocator(allocator) {} - - void do_page(const XPage* page) { - if (Map) { - _allocator->debug_map_page(page); - } else { - _allocator->debug_unmap_page(page); - } - } -}; - -XVerifyViewsFlip::XVerifyViewsFlip(const XPageAllocator* allocator) : - _allocator(allocator) { - if (ZVerifyViews) { - // Unmap all pages - XPageDebugMapOrUnmapClosure cl(_allocator); - XHeap::heap()->pages_do(&cl); - } -} - -XVerifyViewsFlip::~XVerifyViewsFlip() { - if (ZVerifyViews) { - // Map all pages - XPageDebugMapOrUnmapClosure cl(_allocator); - XHeap::heap()->pages_do(&cl); - } -} - -#ifdef ASSERT - -class XVerifyBadOopClosure : public OopClosure { -public: - virtual void do_oop(oop* p) { - const oop o = *p; - assert(!XAddress::is_good(XOop::to_address(o)), "Should not be good: " PTR_FORMAT, p2i(o)); - } - - virtual void do_oop(narrowOop* p) { - ShouldNotReachHere(); - } -}; - -// This class encapsulates various marks we need to deal with calling the -// frame iteration code from arbitrary points in the runtime. 
It is mostly -// due to problems that we might want to eventually clean up inside of the -// frame iteration code, such as creating random handles even though there -// is no safepoint to protect against, and fiddling around with exceptions. -class StackWatermarkProcessingMark { - ResetNoHandleMark _rnhm; - HandleMark _hm; - PreserveExceptionMark _pem; - ResourceMark _rm; - -public: - StackWatermarkProcessingMark(Thread* thread) : - _rnhm(), - _hm(thread), - _pem(thread), - _rm(thread) {} -}; - -void XVerify::verify_frame_bad(const frame& fr, RegisterMap& register_map) { - XVerifyBadOopClosure verify_cl; - fr.oops_do(&verify_cl, nullptr, ®ister_map, DerivedPointerIterationMode::_ignore); -} - -void XVerify::verify_thread_head_bad(JavaThread* jt) { - XVerifyBadOopClosure verify_cl; - jt->oops_do_no_frames(&verify_cl, nullptr); -} - -void XVerify::verify_thread_frames_bad(JavaThread* jt) { - if (jt->has_last_Java_frame()) { - XVerifyBadOopClosure verify_cl; - StackWatermarkProcessingMark swpm(Thread::current()); - // Traverse the execution stack - for (StackFrameStream fst(jt, true /* update */, false /* process_frames */); !fst.is_done(); fst.next()) { - fst.current()->oops_do(&verify_cl, nullptr /* code_cl */, fst.register_map(), DerivedPointerIterationMode::_ignore); - } - } -} - -#endif // ASSERT diff --git a/src/hotspot/share/gc/x/xVerify.hpp b/src/hotspot/share/gc/x/xVerify.hpp deleted file mode 100644 index bbe10f376fa6a..0000000000000 --- a/src/hotspot/share/gc/x/xVerify.hpp +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_X_XVERIFY_HPP -#define SHARE_GC_X_XVERIFY_HPP - -#include "memory/allStatic.hpp" - -class frame; -class XPageAllocator; - -class XVerify : public AllStatic { -private: - static void roots_strong(bool verify_fixed); - static void roots_weak(); - - static void objects(bool verify_weaks); - -public: - static void before_zoperation(); - static void after_mark(); - static void after_weak_processing(); - - static void verify_thread_head_bad(JavaThread* thread) NOT_DEBUG_RETURN; - static void verify_thread_frames_bad(JavaThread* thread) NOT_DEBUG_RETURN; - static void verify_frame_bad(const frame& fr, RegisterMap& register_map) NOT_DEBUG_RETURN; -}; - -class XVerifyViewsFlip { -private: - const XPageAllocator* const _allocator; - -public: - XVerifyViewsFlip(const XPageAllocator* allocator); - ~XVerifyViewsFlip(); -}; - -#endif // SHARE_GC_X_XVERIFY_HPP diff --git a/src/hotspot/share/gc/x/xVirtualMemory.cpp b/src/hotspot/share/gc/x/xVirtualMemory.cpp deleted file mode 100644 index 63cb789d8de12..0000000000000 --- a/src/hotspot/share/gc/x/xVirtualMemory.cpp +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/shared/gcLogPrecious.hpp" -#include "gc/x/xAddress.inline.hpp" -#include "gc/x/xAddressSpaceLimit.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xVirtualMemory.inline.hpp" -#include "nmt/memTracker.hpp" -#include "utilities/align.hpp" -#include "utilities/debug.hpp" - -XVirtualMemoryManager::XVirtualMemoryManager(size_t max_capacity) : - _manager(), - _reserved(0), - _initialized(false) { - - // Check max supported heap size - if (max_capacity > XAddressOffsetMax) { - log_error_p(gc)("Java heap too large (max supported heap size is " SIZE_FORMAT "G)", - XAddressOffsetMax / G); - return; - } - - // Initialize platform specific parts before reserving address space - pd_initialize_before_reserve(); - - // Reserve address space - if (!reserve(max_capacity)) { - log_error_pd(gc)("Failed to reserve enough address space for Java heap"); - return; - } - - // Initialize platform specific parts after reserving address space - pd_initialize_after_reserve(); - - // Successfully initialized - _initialized = true; -} - -size_t XVirtualMemoryManager::reserve_discontiguous(uintptr_t start, size_t size, size_t min_range) { - if (size < min_range) { - // Too small - return 0; - } - - assert(is_aligned(size, XGranuleSize), "Misaligned"); - - if (reserve_contiguous(start, size)) { - return size; - } - - const size_t half = size / 2; - if (half < min_range) { - // Too small - return 0; - } - - // Divide and conquer - const size_t first_part = align_down(half, XGranuleSize); - const size_t second_part = size - first_part; - return reserve_discontiguous(start, first_part, min_range) + - reserve_discontiguous(start + first_part, second_part, min_range); -} - -size_t XVirtualMemoryManager::reserve_discontiguous(size_t size) { - // Don't try to reserve address ranges smaller than 1% of the requested size. - // This avoids an explosion of reservation attempts in case large parts of the - // address space is already occupied. 
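reserve_discontiguous() above is a plain divide-and-conquer over the address range: try the whole piece, otherwise split it at a granule boundary and recurse, discarding fragments smaller than min_range (which the caller derives from 1% of the requested size). A self-contained sketch of the same strategy, with try_reserve standing in for the platform pd_reserve() and all names assumed rather than taken from HotSpot:

#include <cstddef>
#include <cstdint>

using TryReserveFn = bool (*)(uintptr_t start, size_t size);

// Reserve as much of [start, start + size) as possible, in granule-aligned
// pieces no smaller than min_range. Returns the number of bytes reserved.
size_t reserve_discontiguous(uintptr_t start, size_t size, size_t min_range,
                             size_t granule, TryReserveFn try_reserve) {
  if (size < min_range) {
    return 0;                         // too small to bother with
  }
  if (try_reserve(start, size)) {
    return size;                      // got the whole piece
  }
  const size_t half = size / 2;
  if (half < min_range) {
    return 0;
  }
  // Divide and conquer, keeping the split granule-aligned like the original.
  const size_t first = half - (half % granule);
  if (first == 0) {
    return 0;                         // guard; the original relies on min_range >= granule
  }
  const size_t second = size - first;
  return reserve_discontiguous(start, first, min_range, granule, try_reserve) +
         reserve_discontiguous(start + first, second, min_range, granule, try_reserve);
}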
- const size_t min_range = align_up(size / 100, XGranuleSize); - size_t start = 0; - size_t reserved = 0; - - // Reserve size somewhere between [0, XAddressOffsetMax) - while (reserved < size && start < XAddressOffsetMax) { - const size_t remaining = MIN2(size - reserved, XAddressOffsetMax - start); - reserved += reserve_discontiguous(start, remaining, min_range); - start += remaining; - } - - return reserved; -} - -bool XVirtualMemoryManager::reserve_contiguous(uintptr_t start, size_t size) { - assert(is_aligned(size, XGranuleSize), "Must be granule aligned"); - - // Reserve address views - const uintptr_t marked0 = XAddress::marked0(start); - const uintptr_t marked1 = XAddress::marked1(start); - const uintptr_t remapped = XAddress::remapped(start); - - // Reserve address space - if (!pd_reserve(marked0, size)) { - return false; - } - - if (!pd_reserve(marked1, size)) { - pd_unreserve(marked0, size); - return false; - } - - if (!pd_reserve(remapped, size)) { - pd_unreserve(marked0, size); - pd_unreserve(marked1, size); - return false; - } - - // Register address views with native memory tracker - nmt_reserve(marked0, size); - nmt_reserve(marked1, size); - nmt_reserve(remapped, size); - - // Make the address range free - _manager.free(start, size); - - return true; -} - -bool XVirtualMemoryManager::reserve_contiguous(size_t size) { - // Allow at most 8192 attempts spread evenly across [0, XAddressOffsetMax) - const size_t unused = XAddressOffsetMax - size; - const size_t increment = MAX2(align_up(unused / 8192, XGranuleSize), XGranuleSize); - - for (size_t start = 0; start + size <= XAddressOffsetMax; start += increment) { - if (reserve_contiguous(start, size)) { - // Success - return true; - } - } - - // Failed - return false; -} - -bool XVirtualMemoryManager::reserve(size_t max_capacity) { - const size_t limit = MIN2(XAddressOffsetMax, XAddressSpaceLimit::heap_view()); - const size_t size = MIN2(max_capacity * XVirtualToPhysicalRatio, limit); - - size_t reserved = size; - bool contiguous = true; - - // Prefer a contiguous address space - if (!reserve_contiguous(size)) { - // Fall back to a discontiguous address space - reserved = reserve_discontiguous(size); - contiguous = false; - } - - log_info_p(gc, init)("Address Space Type: %s/%s/%s", - (contiguous ? "Contiguous" : "Discontiguous"), - (limit == XAddressOffsetMax ? "Unrestricted" : "Restricted"), - (reserved == size ? "Complete" : "Degraded")); - log_info_p(gc, init)("Address Space Size: " SIZE_FORMAT "M x " SIZE_FORMAT " = " SIZE_FORMAT "M", - reserved / M, XHeapViews, (reserved * XHeapViews) / M); - - // Record reserved - _reserved = reserved; - - return reserved >= max_capacity; -} - -void XVirtualMemoryManager::nmt_reserve(uintptr_t start, size_t size) { - MemTracker::record_virtual_memory_reserve((void*)start, size, CALLER_PC); - MemTracker::record_virtual_memory_tag((void*)start, mtJavaHeap); -} - -bool XVirtualMemoryManager::is_initialized() const { - return _initialized; -} - -XVirtualMemory XVirtualMemoryManager::alloc(size_t size, bool force_low_address) { - uintptr_t start; - - // Small pages are allocated at low addresses, while medium/large pages - // are allocated at high addresses (unless forced to be at a low address). 
- if (force_low_address || size <= XPageSizeSmall) { - start = _manager.alloc_low_address(size); - } else { - start = _manager.alloc_high_address(size); - } - - return XVirtualMemory(start, size); -} - -void XVirtualMemoryManager::free(const XVirtualMemory& vmem) { - _manager.free(vmem.start(), vmem.size()); -} diff --git a/src/hotspot/share/gc/x/xVirtualMemory.hpp b/src/hotspot/share/gc/x/xVirtualMemory.hpp deleted file mode 100644 index c9e5c67ea5750..0000000000000 --- a/src/hotspot/share/gc/x/xVirtualMemory.hpp +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XVIRTUALMEMORY_HPP -#define SHARE_GC_X_XVIRTUALMEMORY_HPP - -#include "gc/x/xMemory.hpp" - -class VMStructs; - -class XVirtualMemory { - friend class ::VMStructs; - -private: - uintptr_t _start; - uintptr_t _end; - -public: - XVirtualMemory(); - XVirtualMemory(uintptr_t start, size_t size); - - bool is_null() const; - uintptr_t start() const; - uintptr_t end() const; - size_t size() const; - - XVirtualMemory split(size_t size); -}; - -class XVirtualMemoryManager { -private: - XMemoryManager _manager; - uintptr_t _reserved; - bool _initialized; - - // Platform specific implementation - void pd_initialize_before_reserve(); - void pd_initialize_after_reserve(); - bool pd_reserve(uintptr_t addr, size_t size); - void pd_unreserve(uintptr_t addr, size_t size); - - bool reserve_contiguous(uintptr_t start, size_t size); - bool reserve_contiguous(size_t size); - size_t reserve_discontiguous(uintptr_t start, size_t size, size_t min_range); - size_t reserve_discontiguous(size_t size); - bool reserve(size_t max_capacity); - - void nmt_reserve(uintptr_t start, size_t size); - -public: - XVirtualMemoryManager(size_t max_capacity); - - bool is_initialized() const; - - size_t reserved() const; - uintptr_t lowest_available_address() const; - - XVirtualMemory alloc(size_t size, bool force_low_address); - void free(const XVirtualMemory& vmem); -}; - -#endif // SHARE_GC_X_XVIRTUALMEMORY_HPP diff --git a/src/hotspot/share/gc/x/xVirtualMemory.inline.hpp b/src/hotspot/share/gc/x/xVirtualMemory.inline.hpp deleted file mode 100644 index 8c834b42c7f47..0000000000000 --- a/src/hotspot/share/gc/x/xVirtualMemory.inline.hpp +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
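The alloc() policy above keeps small pages at low addresses and medium/large pages at high addresses so the two size classes do not fragment each other's end of the address space. A toy single-range version of that policy (ToyAddressRange and its 0-as-failure convention are invented; the real XMemoryManager tracks a list of free ranges and uses UINTPTR_MAX as the null address):

#include <cstddef>
#include <cstdint>

class ToyAddressRange {
private:
  uintptr_t _low;
  uintptr_t _high;

public:
  ToyAddressRange(uintptr_t low, uintptr_t high) : _low(low), _high(high) {}

  // Carve small (or explicitly forced) allocations from the low end and
  // everything else from the high end. Returns 0 on failure.
  uintptr_t alloc(size_t size, bool force_low_address, size_t small_page_size) {
    if (size > _high - _low) {
      return 0;
    }
    if (force_low_address || size <= small_page_size) {
      const uintptr_t addr = _low;
      _low += size;
      return addr;
    }
    _high -= size;
    return _high;
  }
};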
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XVIRTUALMEMORY_INLINE_HPP -#define SHARE_GC_X_XVIRTUALMEMORY_INLINE_HPP - -#include "gc/x/xVirtualMemory.hpp" - -#include "gc/x/xMemory.inline.hpp" - -inline XVirtualMemory::XVirtualMemory() : - _start(UINTPTR_MAX), - _end(UINTPTR_MAX) {} - -inline XVirtualMemory::XVirtualMemory(uintptr_t start, size_t size) : - _start(start), - _end(start + size) {} - -inline bool XVirtualMemory::is_null() const { - return _start == UINTPTR_MAX; -} - -inline uintptr_t XVirtualMemory::start() const { - return _start; -} - -inline uintptr_t XVirtualMemory::end() const { - return _end; -} - -inline size_t XVirtualMemory::size() const { - return _end - _start; -} - -inline XVirtualMemory XVirtualMemory::split(size_t size) { - _start += size; - return XVirtualMemory(_start - size, size); -} - -inline size_t XVirtualMemoryManager::reserved() const { - return _reserved; -} - -inline uintptr_t XVirtualMemoryManager::lowest_available_address() const { - return _manager.peek_low_address(); -} - -#endif // SHARE_GC_X_XVIRTUALMEMORY_INLINE_HPP diff --git a/src/hotspot/share/gc/x/xWeakRootsProcessor.cpp b/src/hotspot/share/gc/x/xWeakRootsProcessor.cpp deleted file mode 100644 index 0271fcd8c3d66..0000000000000 --- a/src/hotspot/share/gc/x/xWeakRootsProcessor.cpp +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/x/xBarrier.inline.hpp" -#include "gc/x/xRootsIterator.hpp" -#include "gc/x/xTask.hpp" -#include "gc/x/xWeakRootsProcessor.hpp" -#include "gc/x/xWorkers.hpp" - -class XPhantomCleanOopClosure : public OopClosure { -public: - virtual void do_oop(oop* p) { - // Read the oop once, to make sure the liveness check - // and the later clearing uses the same value. - const oop obj = Atomic::load(p); - if (XBarrier::is_alive_barrier_on_phantom_oop(obj)) { - XBarrier::keep_alive_barrier_on_phantom_oop_field(p); - } else { - // The destination could have been modified/reused, in which case - // we don't want to clear it. However, no one could write the same - // oop here again (the object would be strongly live and we would - // not consider clearing such oops), so therefore we don't have an - // ABA problem here. - Atomic::cmpxchg(p, obj, oop(nullptr)); - } - } - - virtual void do_oop(narrowOop* p) { - ShouldNotReachHere(); - } -}; - -XWeakRootsProcessor::XWeakRootsProcessor(XWorkers* workers) : - _workers(workers) {} - -class XProcessWeakRootsTask : public XTask { -private: - XWeakRootsIterator _weak_roots; - -public: - XProcessWeakRootsTask() : - XTask("XProcessWeakRootsTask"), - _weak_roots() {} - - ~XProcessWeakRootsTask() { - _weak_roots.report_num_dead(); - } - - virtual void work() { - XPhantomCleanOopClosure cl; - _weak_roots.apply(&cl); - } -}; - -void XWeakRootsProcessor::process_weak_roots() { - XProcessWeakRootsTask task; - _workers->run(&task); -} diff --git a/src/hotspot/share/gc/x/xWeakRootsProcessor.hpp b/src/hotspot/share/gc/x/xWeakRootsProcessor.hpp deleted file mode 100644 index c63b2702374b2..0000000000000 --- a/src/hotspot/share/gc/x/xWeakRootsProcessor.hpp +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_XWEAKROOTSPROCESSOR_HPP -#define SHARE_GC_X_XWEAKROOTSPROCESSOR_HPP - -class XWorkers; - -class XWeakRootsProcessor { -private: - XWorkers* const _workers; - -public: - XWeakRootsProcessor(XWorkers* workers); - - void process_weak_roots(); -}; - -#endif // SHARE_GC_X_XWEAKROOTSPROCESSOR_HPP diff --git a/src/hotspot/share/gc/x/xWorkers.cpp b/src/hotspot/share/gc/x/xWorkers.cpp deleted file mode 100644 index 642c63f0531e5..0000000000000 --- a/src/hotspot/share/gc/x/xWorkers.cpp +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
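XPhantomCleanOopClosure above clears a dead phantom reference with a compare-and-swap against the exact value that was judged dead, so a slot that was concurrently reused for a live object is left untouched. The same idea in portable C++, with std::atomic standing in for HotSpot's Atomic and oop types and is_alive standing in for the phantom liveness barrier (all names here are assumptions for illustration):

#include <atomic>

using Slot = std::atomic<void*>;

template <typename IsAlive>
void clean_phantom_slot(Slot* slot, IsAlive is_alive) {
  // Read the slot once so the liveness check and the clearing use the same value.
  void* obj = slot->load(std::memory_order_relaxed);
  if (obj == nullptr || is_alive(obj)) {
    return;  // keep it (the real closure also heals live pointers)
  }
  // Clear only if the slot still holds the dead value; a dead object's address
  // cannot be stored here again, so there is no ABA problem.
  void* expected = obj;
  slot->compare_exchange_strong(expected, nullptr, std::memory_order_relaxed);
}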
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/shared/gc_globals.hpp" -#include "gc/shared/gcLogPrecious.hpp" -#include "gc/x/xLock.inline.hpp" -#include "gc/x/xStat.hpp" -#include "gc/x/xTask.hpp" -#include "gc/x/xThread.hpp" -#include "gc/x/xWorkers.hpp" -#include "runtime/java.hpp" - -class XWorkersInitializeTask : public WorkerTask { -private: - const uint _nworkers; - uint _started; - XConditionLock _lock; - -public: - XWorkersInitializeTask(uint nworkers) : - WorkerTask("XWorkersInitializeTask"), - _nworkers(nworkers), - _started(0), - _lock() {} - - virtual void work(uint worker_id) { - // Register as worker - XThread::set_worker(); - - // Wait for all threads to start - XLocker locker(&_lock); - if (++_started == _nworkers) { - // All threads started - _lock.notify_all(); - } else { - while (_started != _nworkers) { - _lock.wait(); - } - } - } -}; - -XWorkers::XWorkers() : - _workers("XWorker", - UseDynamicNumberOfGCThreads ? 
ConcGCThreads : MAX2(ConcGCThreads, ParallelGCThreads)) { - - if (UseDynamicNumberOfGCThreads) { - log_info_p(gc, init)("GC Workers: %u (dynamic)", _workers.max_workers()); - } else { - log_info_p(gc, init)("GC Workers: %u/%u (static)", ConcGCThreads, _workers.max_workers()); - } - - // Initialize worker threads - _workers.initialize_workers(); - _workers.set_active_workers(_workers.max_workers()); - if (_workers.active_workers() != _workers.max_workers()) { - vm_exit_during_initialization("Failed to create XWorkers"); - } - - // Execute task to register threads as workers - XWorkersInitializeTask task(_workers.max_workers()); - _workers.run_task(&task); -} - -uint XWorkers::active_workers() const { - return _workers.active_workers(); -} - -void XWorkers::set_active_workers(uint nworkers) { - log_info(gc, task)("Using %u workers", nworkers); - _workers.set_active_workers(nworkers); -} - -void XWorkers::run(XTask* task) { - log_debug(gc, task)("Executing Task: %s, Active Workers: %u", task->name(), active_workers()); - XStatWorkers::at_start(); - _workers.run_task(task->worker_task()); - XStatWorkers::at_end(); -} - -void XWorkers::run_all(XTask* task) { - // Save number of active workers - const uint prev_active_workers = _workers.active_workers(); - - // Execute task using all workers - _workers.set_active_workers(_workers.max_workers()); - log_debug(gc, task)("Executing Task: %s, Active Workers: %u", task->name(), active_workers()); - _workers.run_task(task->worker_task()); - - // Restore number of active workers - _workers.set_active_workers(prev_active_workers); -} - -void XWorkers::threads_do(ThreadClosure* tc) const { - _workers.threads_do(tc); -} diff --git a/src/hotspot/share/gc/x/xWorkers.hpp b/src/hotspot/share/gc/x/xWorkers.hpp deleted file mode 100644 index 33c49bb7fef5c..0000000000000 --- a/src/hotspot/share/gc/x/xWorkers.hpp +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
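The XWorkersInitializeTask above is a one-shot rendezvous: each GC worker registers itself and then blocks until the last worker has arrived, so the pool is fully initialized before any real task runs. A stand-alone equivalent using std::condition_variable (StartupBarrier is an invented name; the original uses XConditionLock and XThread::set_worker() for the per-thread registration step):

#include <condition_variable>
#include <mutex>

class StartupBarrier {
private:
  const unsigned _expected;
  unsigned _started = 0;
  std::mutex _mutex;
  std::condition_variable _cv;

public:
  explicit StartupBarrier(unsigned expected) : _expected(expected) {}

  void arrive_and_wait() {
    std::unique_lock<std::mutex> lock(_mutex);
    if (++_started == _expected) {
      _cv.notify_all();  // last worker in: release everyone
    } else {
      _cv.wait(lock, [&] { return _started == _expected; });
    }
  }
};

Each worker thread would do its per-thread setup and then call arrive_and_wait(); only after all workers have passed the barrier does the constructor's run_task() call return.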
- */ - -#ifndef SHARE_GC_X_XWORKERS_HPP -#define SHARE_GC_X_XWORKERS_HPP - -#include "gc/shared/workerThread.hpp" - -class ThreadClosure; -class XTask; - -class XWorkers { -private: - WorkerThreads _workers; - -public: - XWorkers(); - - uint active_workers() const; - void set_active_workers(uint nworkers); - - void run(XTask* task); - void run_all(XTask* task); - - void threads_do(ThreadClosure* tc) const; -}; - -#endif // SHARE_GC_X_XWORKERS_HPP diff --git a/src/hotspot/share/gc/x/x_globals.hpp b/src/hotspot/share/gc/x/x_globals.hpp deleted file mode 100644 index ab47d7ba9c8a3..0000000000000 --- a/src/hotspot/share/gc/x/x_globals.hpp +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_X_X_GLOBALS_HPP -#define SHARE_GC_X_X_GLOBALS_HPP - -#define GC_X_FLAGS(develop, \ - develop_pd, \ - product, \ - product_pd, \ - range, \ - constraint) \ - \ - product(bool, ZVerifyViews, false, DIAGNOSTIC, \ - "Verify heap view accesses") \ - \ -// end of GC_X_FLAGS - -#endif // SHARE_GC_X_X_GLOBALS_HPP diff --git a/src/hotspot/share/gc/z/shared/vmStructs_z_shared.hpp b/src/hotspot/share/gc/z/shared/vmStructs_z_shared.hpp deleted file mode 100644 index f0c03abbf7ac7..0000000000000 --- a/src/hotspot/share/gc/z/shared/vmStructs_z_shared.hpp +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_Z_SHARED_VMSTRUCTS_Z_SHARED_HPP -#define SHARE_GC_Z_SHARED_VMSTRUCTS_Z_SHARED_HPP - -#include "gc/x/vmStructs_x.hpp" -#include "gc/z/vmStructs_z.hpp" - -#define VM_STRUCTS_Z_SHARED(nonstatic_field, volatile_nonstatic_field, static_field) \ - VM_STRUCTS_X( \ - nonstatic_field, \ - volatile_nonstatic_field, \ - static_field) \ - \ - VM_STRUCTS_Z( \ - nonstatic_field, \ - volatile_nonstatic_field, \ - static_field) - -#define VM_INT_CONSTANTS_Z_SHARED(declare_constant, declare_constant_with_value) \ - VM_INT_CONSTANTS_X( \ - declare_constant, \ - declare_constant_with_value) \ - \ - VM_INT_CONSTANTS_Z( \ - declare_constant, \ - declare_constant_with_value) - -#define VM_LONG_CONSTANTS_Z_SHARED(declare_constant) \ - VM_LONG_CONSTANTS_X( \ - declare_constant) \ - \ - VM_LONG_CONSTANTS_Z( \ - declare_constant) - -#define VM_TYPES_Z_SHARED(declare_type, declare_toplevel_type, declare_integer_type) \ - VM_TYPES_X( \ - declare_type, \ - declare_toplevel_type, \ - declare_integer_type) \ - \ - VM_TYPES_Z( \ - declare_type, \ - declare_toplevel_type, \ - declare_integer_type) - -#endif // SHARE_GC_Z_SHARED_VMSTRUCTS_Z_SHARED_HPP diff --git a/src/hotspot/share/gc/z/shared/zSharedArguments.cpp b/src/hotspot/share/gc/z/shared/zSharedArguments.cpp deleted file mode 100644 index 4d7e9827f18a0..0000000000000 --- a/src/hotspot/share/gc/z/shared/zSharedArguments.cpp +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/shared/gcArguments.hpp" -#include "gc/x/xArguments.hpp" -#include "gc/z/shared/zSharedArguments.hpp" -#include "gc/z/zArguments.hpp" -#include "runtime/globals.hpp" -#include "runtime/globals_extension.hpp" -#include "runtime/java.hpp" - -void ZSharedArguments::initialize_alignments() { - if (ZGenerational) { - ZArguments::initialize_alignments(); - } else { - XArguments::initialize_alignments(); - } -} - -void ZSharedArguments::initialize_heap_flags_and_sizes() { - GCArguments::initialize_heap_flags_and_sizes(); - - if (ZGenerational) { - ZArguments::initialize_heap_flags_and_sizes(); - } else { - XArguments::initialize_heap_flags_and_sizes(); - } -} - -void ZSharedArguments::initialize() { - GCArguments::initialize(); - - if (ZGenerational) { - ZArguments::initialize(); - } else { - XArguments::initialize(); - } -} - -size_t ZSharedArguments::heap_virtual_to_physical_ratio() { - if (ZGenerational) { - return ZArguments::heap_virtual_to_physical_ratio(); - } else { - return XArguments::heap_virtual_to_physical_ratio(); - } -} - -size_t ZSharedArguments::conservative_max_heap_alignment() { - return 0; -} - -CollectedHeap* ZSharedArguments::create_heap() { - if (ZGenerational) { - return ZArguments::create_heap(); - } else { - return XArguments::create_heap(); - } -} - -bool ZSharedArguments::is_supported() const { - if (ZGenerational) { - return ZArguments::is_os_supported(); - } else { - return XArguments::is_os_supported(); - } -} diff --git a/src/hotspot/share/gc/z/shared/zSharedArguments.hpp b/src/hotspot/share/gc/z/shared/zSharedArguments.hpp deleted file mode 100644 index c53f28ee0f97c..0000000000000 --- a/src/hotspot/share/gc/z/shared/zSharedArguments.hpp +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_Z_SHARED_ZSHAREDARGUMENTS_HPP -#define SHARE_GC_Z_SHARED_ZSHAREDARGUMENTS_HPP - -#include "gc/shared/gcArguments.hpp" - -class CollectedHeap; - -class ZSharedArguments : public GCArguments { -private: - virtual void initialize_alignments(); - virtual void initialize_heap_flags_and_sizes(); - - virtual void initialize(); - virtual size_t conservative_max_heap_alignment(); - virtual size_t heap_virtual_to_physical_ratio(); - virtual CollectedHeap* create_heap(); - - virtual bool is_supported() const; - - bool is_os_supported() const; -}; - -#endif // SHARE_GC_Z_SHARED_ZSHAREDARGUMENTS_HPP diff --git a/src/hotspot/share/gc/z/shared/z_shared_globals.hpp b/src/hotspot/share/gc/z/shared/z_shared_globals.hpp deleted file mode 100644 index 4421d3fb0c3cc..0000000000000 --- a/src/hotspot/share/gc/z/shared/z_shared_globals.hpp +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#ifndef SHARE_GC_Z_SHARED_Z_SHARED_GLOBALS_HPP -#define SHARE_GC_Z_SHARED_Z_SHARED_GLOBALS_HPP - -#include "gc/x/x_globals.hpp" -#include "gc/z/z_globals.hpp" - -#define GC_Z_SHARED_FLAGS(develop, \ - develop_pd, \ - product, \ - product_pd, \ - range, \ - constraint) \ - \ - product(double, ZAllocationSpikeTolerance, 2.0, \ - "Allocation spike tolerance factor") \ - \ - /* Updated in arguments parsing to ZGenerational ? 5.0 : 25.0 */ \ - product(double, ZFragmentationLimit, 0 /* ignored */, \ - "Maximum allowed heap fragmentation") \ - range(0, 100) \ - \ - product(size_t, ZMarkStackSpaceLimit, 8*G, \ - "Maximum number of bytes allocated for mark stacks") \ - range(32*M, 1024*G) \ - \ - product(double, ZCollectionInterval, 0, \ - "Force GC at a fixed time interval (in seconds). " \ - "Backwards compatible alias for ZCollectionIntervalMajor") \ - \ - product(bool, ZProactive, true, \ - "Enable proactive GC cycles") \ - \ - product(bool, ZUncommit, true, \ - "Uncommit unused memory") \ - \ - product(uintx, ZUncommitDelay, 5 * 60, \ - "Uncommit memory if it has been unused for the specified " \ - "amount of time (in seconds)") \ - \ - product(double, ZAsyncUnmappingLimit, 100.0, DIAGNOSTIC, \ - "Specify the max amount (percentage of max heap size) of async " \ - "unmapping that can be in-flight before unmapping requests are " \ - "temporarily forced to be synchronous instead. 
" \ - "The default means after an amount of pages proportional to the " \ - "max capacity is enqueued, we resort to synchronous unmapping.") \ - \ - product(uint, ZStatisticsInterval, 10, DIAGNOSTIC, \ - "Time between statistics print outs (in seconds)") \ - range(1, (uint)-1) \ - \ - product(bool, ZStressRelocateInPlace, false, DIAGNOSTIC, \ - "Always relocate pages in-place") \ - \ - product(bool, ZVerifyRoots, trueInDebug, DIAGNOSTIC, \ - "Verify roots") \ - \ - product(bool, ZVerifyObjects, false, DIAGNOSTIC, \ - "Verify objects") \ - \ - product(bool, ZVerifyMarking, trueInDebug, DIAGNOSTIC, \ - "Verify marking stacks") \ - \ - product(bool, ZVerifyForwarding, false, DIAGNOSTIC, \ - "Verify forwarding tables") \ - \ - GC_X_FLAGS( \ - develop, \ - develop_pd, \ - product, \ - product_pd, \ - range, \ - constraint) \ - \ - GC_Z_FLAGS( \ - develop, \ - develop_pd, \ - product, \ - product_pd, \ - range, \ - constraint) - -// end of GC_Z_SHARED_FLAGS - -#endif // SHARE_GC_Z_SHARED_Z_SHARED_GLOBALS_HPP diff --git a/src/hotspot/share/gc/z/zArguments.cpp b/src/hotspot/share/gc/z/zArguments.cpp index f3ff568c64d14..331ca9f7c9423 100644 --- a/src/hotspot/share/gc/z/zArguments.cpp +++ b/src/hotspot/share/gc/z/zArguments.cpp @@ -38,6 +38,8 @@ void ZArguments::initialize_alignments() { } void ZArguments::initialize_heap_flags_and_sizes() { + GCArguments::initialize_heap_flags_and_sizes(); + if (!FLAG_IS_CMDLINE(MaxHeapSize) && !FLAG_IS_CMDLINE(MaxRAMPercentage) && !FLAG_IS_CMDLINE(SoftMaxHeapSize)) { @@ -117,6 +119,8 @@ void ZArguments::select_max_gc_threads() { } void ZArguments::initialize() { + GCArguments::initialize(); + // Check mark stack size const size_t mark_stack_space_limit = ZAddressSpaceLimit::mark_stack(); if (ZMarkStackSpaceLimit > mark_stack_space_limit) { @@ -220,6 +224,10 @@ void ZArguments::initialize() { #endif } +size_t ZArguments::conservative_max_heap_alignment() { + return 0; +} + size_t ZArguments::heap_virtual_to_physical_ratio() { return ZVirtualToPhysicalRatio; } @@ -228,6 +236,6 @@ CollectedHeap* ZArguments::create_heap() { return new ZCollectedHeap(); } -bool ZArguments::is_supported() { +bool ZArguments::is_supported() const { return is_os_supported(); } diff --git a/src/hotspot/share/gc/z/zArguments.hpp b/src/hotspot/share/gc/z/zArguments.hpp index 7d1c00d30d1cc..b51eb116dbfe6 100644 --- a/src/hotspot/share/gc/z/zArguments.hpp +++ b/src/hotspot/share/gc/z/zArguments.hpp @@ -28,20 +28,21 @@ class CollectedHeap; -class ZArguments : AllStatic { +class ZArguments : public GCArguments { private: static void select_max_gc_threads(); -public: - static void initialize_alignments(); - static void initialize_heap_flags_and_sizes(); - static void initialize(); - static size_t heap_virtual_to_physical_ratio(); - static CollectedHeap* create_heap(); - - static bool is_supported(); - static bool is_os_supported(); + +public: + virtual void initialize_alignments(); + virtual void initialize_heap_flags_and_sizes(); + virtual void initialize(); + virtual size_t conservative_max_heap_alignment(); + virtual size_t heap_virtual_to_physical_ratio(); + virtual CollectedHeap* create_heap(); + + virtual bool is_supported() const; }; #endif // SHARE_GC_Z_ZARGUMENTS_HPP diff --git a/src/hotspot/share/gc/z/z_globals.hpp b/src/hotspot/share/gc/z/z_globals.hpp index c3e4bde73e44c..4e3076329691b 100644 --- a/src/hotspot/share/gc/z/z_globals.hpp +++ b/src/hotspot/share/gc/z/z_globals.hpp @@ -34,6 +34,31 @@ range, \ constraint) \ \ + product(double, ZAllocationSpikeTolerance, 2.0, \ + 
"Allocation spike tolerance factor") \ + \ + product(double, ZFragmentationLimit, 5.0, \ + "Maximum allowed heap fragmentation") \ + range(0, 100) \ + \ + product(size_t, ZMarkStackSpaceLimit, 8*G, \ + "Maximum number of bytes allocated for mark stacks") \ + range(32*M, 1024*G) \ + \ + product(double, ZCollectionInterval, 0, \ + "Force GC at a fixed time interval (in seconds). " \ + "Backwards compatible alias for ZCollectionIntervalMajor") \ + \ + product(bool, ZProactive, true, \ + "Enable proactive GC cycles") \ + \ + product(bool, ZUncommit, true, \ + "Uncommit unused memory") \ + \ + product(uintx, ZUncommitDelay, 5 * 60, \ + "Uncommit memory if it has been unused for the specified " \ + "amount of time (in seconds)") \ + \ product(double, ZYoungCompactionLimit, 25.0, \ "Maximum allowed garbage in young pages") \ range(0, 100) \ @@ -47,6 +72,32 @@ product(bool, ZCollectionIntervalOnly, false, \ "Only use timers for GC heuristics") \ \ + product(double, ZAsyncUnmappingLimit, 100.0, DIAGNOSTIC, \ + "Specify the max amount (percentage of max heap size) of async " \ + "unmapping that can be in-flight before unmapping requests are " \ + "temporarily forced to be synchronous instead. " \ + "The default means after an amount of pages proportional to the " \ + "max capacity is enqueued, we resort to synchronous unmapping.") \ + \ + product(uint, ZStatisticsInterval, 10, DIAGNOSTIC, \ + "Time between statistics print outs (in seconds)") \ + range(1, (uint)-1) \ + \ + product(bool, ZStressRelocateInPlace, false, DIAGNOSTIC, \ + "Always relocate pages in-place") \ + \ + product(bool, ZVerifyRoots, trueInDebug, DIAGNOSTIC, \ + "Verify roots") \ + \ + product(bool, ZVerifyObjects, false, DIAGNOSTIC, \ + "Verify objects") \ + \ + product(bool, ZVerifyMarking, trueInDebug, DIAGNOSTIC, \ + "Verify marking stacks") \ + \ + product(bool, ZVerifyForwarding, false, DIAGNOSTIC, \ + "Verify forwarding tables") \ + \ product(bool, ZBufferStoreBarriers, true, DIAGNOSTIC, \ "Buffer store barriers") \ \ @@ -64,13 +115,13 @@ product(bool, ZVerifyRemembered, trueInDebug, DIAGNOSTIC, \ "Verify remembered sets") \ \ - develop(bool, ZVerifyOops, false, \ - "Verify accessed oops") \ - \ product(int, ZTenuringThreshold, -1, DIAGNOSTIC, \ "Young generation tenuring threshold, -1 for dynamic computation")\ range(-1, static_cast(ZPageAgeMax)) \ \ + develop(bool, ZVerifyOops, false, \ + "Verify accessed oops") \ + \ develop(size_t, ZForceDiscontiguousHeapReservations, 0, \ "The gc will attempt to split the heap reservation into this " \ "many reservations, subject to available virtual address space " \ diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp index 1612038008a32..ab536a81e6e1b 100644 --- a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp +++ b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp @@ -36,8 +36,6 @@ #include "gc/shared/gc_globals.hpp" #include "gc/shared/tlab_globals.hpp" #if INCLUDE_ZGC -#include "gc/x/xBarrierSetRuntime.hpp" -#include "gc/x/xThreadLocalData.hpp" #include "gc/z/zBarrierSetRuntime.hpp" #include "gc/z/zThreadLocalData.hpp" #endif @@ -173,23 +171,9 @@ void CompilerToVM::Data::initialize(JVMCI_TRAPS) { #if INCLUDE_ZGC if (UseZGC) { - if (ZGenerational) { - ZPointerVectorLoadBadMask_address = (address) &ZPointerVectorLoadBadMask; - ZPointerVectorStoreBadMask_address = (address) &ZPointerVectorStoreBadMask; - ZPointerVectorStoreGoodMask_address = (address) &ZPointerVectorStoreGoodMask; - } else { - 
thread_address_bad_mask_offset = in_bytes(XThreadLocalData::address_bad_mask_offset()); - // Initialize the old names for compatibility. The proper XBarrierSetRuntime names are - // exported as addresses in vmStructs_jvmci.cpp as are the new ZBarrierSetRuntime names. - ZBarrierSetRuntime_load_barrier_on_oop_field_preloaded = XBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(); - ZBarrierSetRuntime_load_barrier_on_weak_oop_field_preloaded = XBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr(); - ZBarrierSetRuntime_load_barrier_on_phantom_oop_field_preloaded = XBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr(); - ZBarrierSetRuntime_weak_load_barrier_on_oop_field_preloaded = XBarrierSetRuntime::weak_load_barrier_on_oop_field_preloaded_addr(); - ZBarrierSetRuntime_weak_load_barrier_on_weak_oop_field_preloaded = XBarrierSetRuntime::weak_load_barrier_on_weak_oop_field_preloaded_addr(); - ZBarrierSetRuntime_weak_load_barrier_on_phantom_oop_field_preloaded = XBarrierSetRuntime::weak_load_barrier_on_phantom_oop_field_preloaded_addr(); - ZBarrierSetRuntime_load_barrier_on_oop_array = XBarrierSetRuntime::load_barrier_on_oop_array_addr(); - ZBarrierSetRuntime_clone = XBarrierSetRuntime::clone_addr(); - } + ZPointerVectorLoadBadMask_address = (address) &ZPointerVectorLoadBadMask; + ZPointerVectorStoreBadMask_address = (address) &ZPointerVectorStoreBadMask; + ZPointerVectorStoreGoodMask_address = (address) &ZPointerVectorStoreGoodMask; } #endif diff --git a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp index 02e6eaf40f375..530b02db46aae 100644 --- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp +++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp @@ -50,7 +50,6 @@ #include "gc/g1/g1ThreadLocalData.hpp" #endif #if INCLUDE_ZGC -#include "gc/x/xBarrierSetRuntime.hpp" #include "gc/z/zBarrierSetAssembler.hpp" #include "gc/z/zBarrierSetRuntime.hpp" #include "gc/z/zThreadLocalData.hpp" @@ -833,15 +832,6 @@ declare_function(os::javaTimeMillis) \ declare_function(os::javaTimeNanos) \ \ - ZGC_ONLY(DECLARE_FUNCTION_FROM_ADDR(declare_function_with_value, XBarrierSetRuntime::load_barrier_on_oop_field_preloaded)) \ - ZGC_ONLY(DECLARE_FUNCTION_FROM_ADDR(declare_function_with_value, XBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded)) \ - ZGC_ONLY(DECLARE_FUNCTION_FROM_ADDR(declare_function_with_value, XBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded)) \ - ZGC_ONLY(DECLARE_FUNCTION_FROM_ADDR(declare_function_with_value, XBarrierSetRuntime::weak_load_barrier_on_oop_field_preloaded)) \ - ZGC_ONLY(DECLARE_FUNCTION_FROM_ADDR(declare_function_with_value, XBarrierSetRuntime::weak_load_barrier_on_weak_oop_field_preloaded)) \ - ZGC_ONLY(DECLARE_FUNCTION_FROM_ADDR(declare_function_with_value, XBarrierSetRuntime::weak_load_barrier_on_phantom_oop_field_preloaded)) \ - ZGC_ONLY(DECLARE_FUNCTION_FROM_ADDR(declare_function_with_value, XBarrierSetRuntime::load_barrier_on_oop_array)) \ - ZGC_ONLY(DECLARE_FUNCTION_FROM_ADDR(declare_function_with_value, XBarrierSetRuntime::clone)) \ - \ ZGC_ONLY(DECLARE_FUNCTION_FROM_ADDR(declare_function_with_value, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded)) \ ZGC_ONLY(DECLARE_FUNCTION_FROM_ADDR(declare_function_with_value, ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded)) \ ZGC_ONLY(DECLARE_FUNCTION_FROM_ADDR(declare_function_with_value, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded)) \ diff --git a/src/hotspot/share/oops/oop.cpp 
b/src/hotspot/share/oops/oop.cpp index 38dee491a103e..acb47d4c7cf25 100644 --- a/src/hotspot/share/oops/oop.cpp +++ b/src/hotspot/share/oops/oop.cpp @@ -178,7 +178,7 @@ void* oopDesc::load_oop_raw(oop obj, int offset) { oop oopDesc::obj_field_acquire(int offset) const { return HeapAccess<MO_ACQUIRE>::oop_load_at(as_oop(), offset); } -void oopDesc::obj_field_put_raw(int offset, oop value) { assert(!(UseZGC && ZGenerational), "Generational ZGC must use store barriers"); +void oopDesc::obj_field_put_raw(int offset, oop value) { assert(!UseZGC, "ZGC must use store barriers"); RawAccess<>::oop_store_at(as_oop(), offset, value); } void oopDesc::release_obj_field_put(int offset, oop value) { HeapAccess<MO_RELEASE>::oop_store_at(as_oop(), offset, value); } void oopDesc::obj_field_put_volatile(int offset, oop value) { HeapAccess<MO_SEQ_CST>::oop_store_at(as_oop(), offset, value); } diff --git a/src/hotspot/share/oops/stackChunkOop.inline.hpp b/src/hotspot/share/oops/stackChunkOop.inline.hpp index a54b8159e7ef5..7b955d551d7ae 100644 --- a/src/hotspot/share/oops/stackChunkOop.inline.hpp +++ b/src/hotspot/share/oops/stackChunkOop.inline.hpp @@ -88,20 +88,7 @@ inline void stackChunkOopDesc::set_max_thawing_size(int value) { jdk_internal_vm_StackChunk::set_maxThawingSize(this, (jint)value); } -inline oop stackChunkOopDesc::cont() const { - if (UseZGC && !ZGenerational) { - assert(!UseCompressedOops, "Non-generational ZGC does not support compressed oops"); - // The state of the cont oop is used by XCollectedHeap::requires_barriers, - // to determine the age of the stackChunkOopDesc. For that to work, it is - // only the GC that is allowed to perform a load barrier on the oop. - // This function is used by non-GC code and therfore create a stack-local - // copy on the oop and perform the load barrier on that copy instead. - oop obj = jdk_internal_vm_StackChunk::cont_raw<oop>(as_oop()); - obj = (oop)NativeAccess<>::oop_load(&obj); - return obj; - } - return jdk_internal_vm_StackChunk::cont(as_oop()); -} +inline oop stackChunkOopDesc::cont() const { return jdk_internal_vm_StackChunk::cont(as_oop()); } inline void stackChunkOopDesc::set_cont(oop value) { jdk_internal_vm_StackChunk::set_cont(this, value); } template <typename T> inline void stackChunkOopDesc::set_cont_raw(oop value) { jdk_internal_vm_StackChunk::set_cont_raw
<T>
(this, value); } diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp index 7636250a78096..54c5279512ecf 100644 --- a/src/hotspot/share/prims/whitebox.cpp +++ b/src/hotspot/share/prims/whitebox.cpp @@ -424,11 +424,7 @@ WB_ENTRY(jboolean, WB_isObjectInOldGen(JNIEnv* env, jobject o, jobject obj)) #endif #if INCLUDE_ZGC if (UseZGC) { - if (ZGenerational) { - return ZHeap::heap()->is_old(to_zaddress(p)); - } else { - return Universe::heap()->is_in(p); - } + return ZHeap::heap()->is_old(to_zaddress(p)); } #endif #if INCLUDE_SHENANDOAHGC diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp index fe9641063b33e..ce854a258a78a 100644 --- a/src/hotspot/share/runtime/arguments.cpp +++ b/src/hotspot/share/runtime/arguments.cpp @@ -505,7 +505,6 @@ static SpecialFlag const special_jvm_flags[] = { // --- Non-alias flags - sorted by obsolete_in then expired_in: { "AllowRedefinitionToAddDeleteMethods", JDK_Version::jdk(13), JDK_Version::undefined(), JDK_Version::undefined() }, { "FlightRecorder", JDK_Version::jdk(13), JDK_Version::undefined(), JDK_Version::undefined() }, - { "ZGenerational", JDK_Version::jdk(23), JDK_Version::undefined(), JDK_Version::undefined() }, { "DumpSharedSpaces", JDK_Version::jdk(18), JDK_Version::jdk(19), JDK_Version::undefined() }, { "DynamicDumpSharedSpaces", JDK_Version::jdk(18), JDK_Version::jdk(19), JDK_Version::undefined() }, { "RequireSharedSpaces", JDK_Version::jdk(18), JDK_Version::jdk(19), JDK_Version::undefined() }, @@ -521,7 +520,7 @@ static SpecialFlag const special_jvm_flags[] = { // -------------- Obsolete Flags - sorted by expired_in -------------- { "MetaspaceReclaimPolicy", JDK_Version::undefined(), JDK_Version::jdk(21), JDK_Version::undefined() }, - + { "ZGenerational", JDK_Version::jdk(23), JDK_Version::jdk(24), JDK_Version::undefined() }, { "UseNotificationThread", JDK_Version::jdk(23), JDK_Version::jdk(24), JDK_Version::jdk(25) }, { "PreserveAllAnnotations", JDK_Version::jdk(23), JDK_Version::jdk(24), JDK_Version::jdk(25) }, { "UseEmptySlotsInSupers", JDK_Version::jdk(23), JDK_Version::jdk(24), JDK_Version::jdk(25) }, diff --git a/src/hotspot/share/runtime/continuationFreezeThaw.cpp b/src/hotspot/share/runtime/continuationFreezeThaw.cpp index e36b252362b78..ce2e2bdb9ff80 100644 --- a/src/hotspot/share/runtime/continuationFreezeThaw.cpp +++ b/src/hotspot/share/runtime/continuationFreezeThaw.cpp @@ -1442,9 +1442,7 @@ stackChunkOop Freeze::allocate_chunk(size_t stack_size, int argsize_md) #if INCLUDE_ZGC if (UseZGC) { - if (ZGenerational) { - ZStackChunkGCData::initialize(chunk); - } + ZStackChunkGCData::initialize(chunk); assert(!chunk->requires_barriers(), "ZGC always allocates in the young generation"); _barriers = false; } else diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HSDB.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HSDB.java index 870621421c85b..d8c4d1a781e8f 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HSDB.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HSDB.java @@ -39,7 +39,6 @@ import sun.jvm.hotspot.gc.shared.*; import sun.jvm.hotspot.gc.shenandoah.*; import sun.jvm.hotspot.gc.g1.*; -import sun.jvm.hotspot.gc.x.*; import sun.jvm.hotspot.gc.z.*; import sun.jvm.hotspot.interpreter.*; import sun.jvm.hotspot.oops.*; @@ -1124,10 +1123,6 @@ public void addAnnotation(Address addr, OopHandle handle) { ShenandoahHeap heap = (ShenandoahHeap) collHeap; anno = "ShenandoahHeap "; bad = false; - } else if (collHeap 
instanceof XCollectedHeap) { - XCollectedHeap heap = (XCollectedHeap) collHeap; - anno = "ZHeap "; - bad = false; } else if (collHeap instanceof ZCollectedHeap) { ZCollectedHeap heap = (ZCollectedHeap) collHeap; anno = "ZHeap "; diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XAddress.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XAddress.java deleted file mode 100644 index fbd151108906e..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XAddress.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.gc.x; - -import sun.jvm.hotspot.debugger.Address; -import sun.jvm.hotspot.runtime.VM; - -class XAddress { - static long as_long(Address value) { - if (value == null) { - return 0; - } - return value.asLongValue(); - }; - - static boolean is_null(Address value) { - return value == null; - } - - static boolean is_weak_bad(Address value) { - return (as_long(value) & XGlobals.XAddressWeakBadMask()) != 0L; - } - - static boolean is_weak_good(Address value) { - return !is_weak_bad(value) && !is_null(value); - } - - static boolean is_weak_good_or_null(Address value) { - return !is_weak_bad(value); - } - - static long offset(Address address) { - return as_long(address) & XGlobals.XAddressOffsetMask(); - } - - static Address good(Address value) { - return VM.getVM().getDebugger().newAddress(offset(value) | XGlobals.XAddressGoodMask()); - } - - static Address good_or_null(Address value) { - return is_null(value) ? value : good(value); - } - - private static boolean isPowerOf2(long value) { - return (value != 0L) && ((value & (value - 1)) == 0L); - } - - static boolean isIn(Address addr) { - long value = as_long(addr); - if (!isPowerOf2(value & ~XGlobals.XAddressOffsetMask())) { - return false; - } - return (value & (XGlobals.XAddressMetadataMask() & ~XGlobals.XAddressMetadataFinalizable())) != 0L; - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XAttachedArrayForForwarding.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XAttachedArrayForForwarding.java deleted file mode 100644 index 0c8bb38a7767e..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XAttachedArrayForForwarding.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2021, NTT DATA. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.gc.x; - -import sun.jvm.hotspot.debugger.Address; -import sun.jvm.hotspot.runtime.VM; -import sun.jvm.hotspot.runtime.VMObject; -import sun.jvm.hotspot.runtime.VMObjectFactory; -import sun.jvm.hotspot.types.CIntegerField; -import sun.jvm.hotspot.types.Type; -import sun.jvm.hotspot.types.TypeDataBase; - -public class XAttachedArrayForForwarding extends VMObject { - private static CIntegerField lengthField; - - static { - VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase())); - } - - private static synchronized void initialize(TypeDataBase db) { - Type type = db.lookupType("XAttachedArrayForForwarding"); - - lengthField = type.getCIntegerField("_length"); - } - - public XAttachedArrayForForwarding(Address addr) { - super(addr); - } - - public long length() { - return lengthField.getValue(addr); - } - - // ObjectT: XForwarding - // ArrayT: XForwardingEntry - // - // template - // inline size_t XAttachedArray::object_size() - private long objectSize() { - return XUtils.alignUp(XForwarding.getSize(), XForwardingEntry.getSize()); - } - - // ArrayT* operator()(const ObjectT* obj) const - public XForwardingEntry get(XForwarding obj) { - Address o = obj.getAddress().addOffsetTo(objectSize()); - return VMObjectFactory.newObject(XForwardingEntry.class, o); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XBarrier.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XBarrier.java deleted file mode 100644 index 54f9323a4e67c..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XBarrier.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.gc.x; - -import sun.jvm.hotspot.debugger.Address; -import sun.jvm.hotspot.runtime.VM; - -class XBarrier { - private static boolean is_weak_good_or_null_fast_path(Address addr) { - return XAddress.is_weak_good_or_null(addr); - } - - private static Address weak_load_barrier_on_oop_slow_path(Address addr) { - return XAddress.is_weak_good(addr) ? XAddress.good(addr) : relocate_or_remap(addr); - } - - private static boolean during_relocate() { - return XGlobals.XGlobalPhase() == XGlobals.XPhaseRelocate; - } - - private static Address relocate(Address addr) { - return zheap().relocate_object(addr); - } - - private static XHeap zheap() { - XCollectedHeap zCollectedHeap = (XCollectedHeap)VM.getVM().getUniverse().heap(); - return zCollectedHeap.heap(); - } - - private static Address remap(Address addr) { - return zheap().remapObject(addr); - } - - private static Address relocate_or_remap(Address addr) { - return during_relocate() ? relocate(addr) : remap(addr); - } - - static Address weak_barrier(Address o) { - // Fast path - if (is_weak_good_or_null_fast_path(o)) { - // Return the good address instead of the weak good address - // to ensure that the currently active heap view is used. - return XAddress.good_or_null(o); - } - - // Slow path - return weak_load_barrier_on_oop_slow_path(o); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XCollectedHeap.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XCollectedHeap.java deleted file mode 100644 index 5455a841fabfa..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XCollectedHeap.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.gc.x; - -import java.io.PrintStream; -import java.util.Iterator; - -import sun.jvm.hotspot.debugger.Address; -import sun.jvm.hotspot.debugger.OopHandle; -import sun.jvm.hotspot.gc.shared.CollectedHeap; -import sun.jvm.hotspot.gc.shared.CollectedHeapName; -import sun.jvm.hotspot.gc.shared.LiveRegionsClosure; -import sun.jvm.hotspot.runtime.VM; -import sun.jvm.hotspot.runtime.VMObjectFactory; -import sun.jvm.hotspot.types.Type; -import sun.jvm.hotspot.types.TypeDataBase; -import sun.jvm.hotspot.utilities.BitMapInterface; - -// Mirror class for XCollectedHeap. 
- -public class XCollectedHeap extends CollectedHeap { - private static long zHeapFieldOffset; - - static { - VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase())); - } - - private static synchronized void initialize(TypeDataBase db) { - Type type = db.lookupType("XCollectedHeap"); - - zHeapFieldOffset = type.getAddressField("_heap").getOffset(); - } - - public XHeap heap() { - Address heapAddr = addr.addOffsetTo(zHeapFieldOffset); - return VMObjectFactory.newObject(XHeap.class, heapAddr); - } - - @Override - public CollectedHeapName kind() { - return CollectedHeapName.Z; - } - - @Override - public void printOn(PrintStream tty) { - heap().printOn(tty); - } - - public XCollectedHeap(Address addr) { - super(addr); - } - - @Override - public long capacity() { - return heap().capacity(); - } - - @Override - public long used() { - return heap().used(); - } - - @Override - public boolean isInReserved(Address a) { - return heap().isIn(a); - } - - private OopHandle oop_load_barrier(Address oopAddress) { - oopAddress = XBarrier.weak_barrier(oopAddress); - if (oopAddress == null) { - return null; - } - - return oopAddress.addOffsetToAsOopHandle(0); - } - - @Override - public OopHandle oop_load_at(OopHandle handle, long offset) { - assert(!VM.getVM().isCompressedOopsEnabled()); - - Address oopAddress = handle.getAddressAt(offset); - - return oop_load_barrier(oopAddress); - } - - // addr can be either in heap or in native - @Override - public OopHandle oop_load_in_native(Address addr) { - Address oopAddress = addr.getAddressAt(0); - return oop_load_barrier(oopAddress); - } - - public String oopAddressDescription(OopHandle handle) { - Address origOop = XOop.to_address(handle); - Address loadBarrieredOop = XBarrier.weak_barrier(origOop); - if (!origOop.equals(loadBarrieredOop)) { - return origOop + " (" + loadBarrieredOop.toString() + ")"; - } else { - return handle.toString(); - } - } - - @Override - public void liveRegionsIterate(LiveRegionsClosure closure) { - Iterator iter = heap().pageTable().activePagesIterator(); - while (iter.hasNext()) { - XPage page = iter.next(); - closure.doLiveRegions(page); - } - } - - @Override - public BitMapInterface createBitMap(long size) { - // Ignores the size - return new XExternalBitMap(this); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XExternalBitMap.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XExternalBitMap.java deleted file mode 100644 index 5a2f033e6a1d5..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XExternalBitMap.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.gc.x; - -import java.util.HashMap; - -import sun.jvm.hotspot.runtime.VM; -import sun.jvm.hotspot.utilities.BitMap; -import sun.jvm.hotspot.utilities.BitMapInterface; - -/** Discontiguous bitmap for ZGC. */ -public class XExternalBitMap implements BitMapInterface { - private XPageTable pageTable; - private final long oopSize; - - private HashMap pageToBitMap = new HashMap(); - - public XExternalBitMap(XCollectedHeap collectedHeap) { - pageTable = collectedHeap.heap().pageTable(); - oopSize = VM.getVM().getOopSize(); - } - - private XPage getPage(long zOffset) { - if (zOffset > XGlobals.XAddressOffsetMask()) { - throw new RuntimeException("Not a Z offset: " + zOffset); - } - - XPage page = pageTable.get(XUtils.longToAddress(zOffset)); - if (page == null) { - throw new RuntimeException("Address not in pageTable: " + zOffset); - } - return page; - } - - private BitMap getOrAddBitMap(XPage page) { - BitMap bitMap = pageToBitMap.get(page); - if (bitMap == null) { - long size = page.size(); - - long maxNumObjects = size >>> page.object_alignment_shift(); - if (maxNumObjects > Integer.MAX_VALUE) { - throw new RuntimeException("int overflow"); - } - int intMaxNumObjects = (int)maxNumObjects; - - bitMap = new BitMap(intMaxNumObjects); - pageToBitMap.put(page, bitMap); - } - - return bitMap; - } - - private int pageLocalBitMapIndex(XPage page, long zOffset) { - long pageLocalZOffset = zOffset - page.start(); - return (int)(pageLocalZOffset >>> page.object_alignment_shift()); - } - - private long convertToZOffset(long offset) { - long addr = oopSize * offset; - return addr & XGlobals.XAddressOffsetMask(); - } - - @Override - public boolean at(long offset) { - long zOffset = convertToZOffset(offset); - XPage page = getPage(zOffset); - BitMap bitMap = getOrAddBitMap(page); - int index = pageLocalBitMapIndex(page, zOffset); - - return bitMap.at(index); - } - - @Override - public void atPut(long offset, boolean value) { - long zOffset = convertToZOffset(offset); - XPage page = getPage(zOffset); - BitMap bitMap = getOrAddBitMap(page); - int index = pageLocalBitMapIndex(page, zOffset); - - bitMap.atPut(index, value); - } - - @Override - public void clear() { - for (BitMap bitMap : pageToBitMap.values()) { - bitMap.clear(); - } - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XForwarding.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XForwarding.java deleted file mode 100644 index 727014a847956..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XForwarding.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2021, NTT DATA. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.gc.x; - -import java.util.Iterator; - -import sun.jvm.hotspot.debugger.Address; -import sun.jvm.hotspot.runtime.VM; -import sun.jvm.hotspot.runtime.VMObject; -import sun.jvm.hotspot.runtime.VMObjectFactory; -import sun.jvm.hotspot.types.CIntegerField; -import sun.jvm.hotspot.types.Type; -import sun.jvm.hotspot.types.TypeDataBase; - -public class XForwarding extends VMObject { - private static Type type; - private static long virtualFieldOffset; - private static long entriesFieldOffset; - private static CIntegerField objectAlignmentShiftField; - private static CIntegerField refCountField; - - static { - VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase())); - } - - private static synchronized void initialize(TypeDataBase db) { - type = db.lookupType("XForwarding"); - - virtualFieldOffset = type.getField("_virtual").getOffset(); - entriesFieldOffset = type.getField("_entries").getOffset(); - objectAlignmentShiftField = type.getCIntegerField("_object_alignment_shift"); - refCountField = type.getCIntegerField("_ref_count"); - } - - public XForwarding(Address addr) { - super(addr); - } - - public static long getSize() { - return type.getSize(); - } - - private XVirtualMemory virtual() { - return VMObjectFactory.newObject(XVirtualMemory.class, addr.addOffsetTo(virtualFieldOffset)); - } - - private XAttachedArrayForForwarding entries() { - return VMObjectFactory.newObject(XAttachedArrayForForwarding.class, addr.addOffsetTo(entriesFieldOffset)); - } - - public long start() { - return virtual().start(); - } - - public int objectAlignmentShift() { - return (int)objectAlignmentShiftField.getValue(addr); - } - - public boolean retainPage() { - return refCountField.getValue(addr) > 0; - } - - private XForwardingEntry at(long cursor) { - long offset = XForwardingEntry.getSize() * cursor; - Address entryAddress = entries().get(this).getAddress().addOffsetTo(offset); - return VMObjectFactory.newObject(XForwardingEntry.class, entryAddress); - } - - private class XForwardEntryIterator implements Iterator { - - private long cursor; - - private XForwardingEntry nextEntry; - - public XForwardEntryIterator(long fromIndex) { - long mask = entries().length() - 1; - long hash = XHash.uint32_to_uint32(fromIndex); - cursor = hash & mask; - nextEntry = at(cursor); - } - - @Override - public boolean hasNext() { - return nextEntry.populated(); - } - - @Override - public XForwardingEntry next() { - XForwardingEntry entry = nextEntry; - - long mask = entries().length() - 1; - cursor = (cursor + 1) & mask; - nextEntry = at(cursor); - - return entry; - } - - public XForwardingEntry peak() { - return nextEntry; - } - } - - public XForwardingEntry find(long fromIndex) { - XForwardEntryIterator itr = new XForwardEntryIterator(fromIndex); - while (itr.hasNext()) { - XForwardingEntry entry = itr.next(); - if (entry.fromIndex() == fromIndex) { - return entry; - } - } - return itr.peak(); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XForwardingEntry.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XForwardingEntry.java deleted file 
mode 100644 index aa4b55775ec79..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XForwardingEntry.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2021, NTT DATA. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.gc.x; - -import sun.jvm.hotspot.debugger.Address; -import sun.jvm.hotspot.runtime.VM; -import sun.jvm.hotspot.runtime.VMObject; -import sun.jvm.hotspot.runtime.VMObjectFactory; -import sun.jvm.hotspot.types.CIntegerField; -import sun.jvm.hotspot.types.Type; -import sun.jvm.hotspot.types.TypeDataBase; - -public class XForwardingEntry extends VMObject { - private static Type type; - private static CIntegerField entryField; - - static { - VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase())); - } - - private static synchronized void initialize(TypeDataBase db) { - type = db.lookupType("XForwardingEntry"); - - entryField = type.getCIntegerField("_entry"); - } - - public static long getSize() { - return type.getSize(); - } - - public XForwardingEntry(Address addr) { - super(addr); - } - - public long entry() { - return entryField.getValue(addr); - } - - // typedef XBitField field_populated - private boolean fieldPopulatedDecode(long value) { - long FieldMask = (1L << 1) - 1; - int FieldShift = 1; - int ValueShift = 0; - return (((value >>> FieldShift) & FieldMask) << ValueShift) != 0L; - } - - // typedef XBitField field_to_offset; - private long fieldToOffsetDecode(long value) { - long FieldMask = (1L << 45) - 1; - int FieldShift = 1; - int ValueShift = 0; - return ((value >>> FieldShift) & FieldMask) << ValueShift; - } - - // typedef XBitField field_from_index; - private long fieldFromIndexDecode(long value) { - long FieldMask = (1L << 18) - 1; - int FieldShift = 46; - int ValueShift = 0; - return ((value >>> FieldShift) & FieldMask) << ValueShift; - } - - public boolean populated() { - return fieldPopulatedDecode(entry()); - } - - public long toOffset() { - return fieldToOffsetDecode(entry()); - } - - public long fromIndex() { - return fieldFromIndexDecode(entry()); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XForwardingTable.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XForwardingTable.java deleted file mode 100644 index 259f48a37b6cb..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XForwardingTable.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. 
All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.gc.x; - -import sun.jvm.hotspot.debugger.Address; -import sun.jvm.hotspot.runtime.VM; -import sun.jvm.hotspot.runtime.VMObject; -import sun.jvm.hotspot.runtime.VMObjectFactory; -import sun.jvm.hotspot.types.AddressField; -import sun.jvm.hotspot.types.CIntegerField; -import sun.jvm.hotspot.types.Type; -import sun.jvm.hotspot.types.TypeDataBase; - -public class XForwardingTable extends VMObject { - private static long mapFieldOffset; - - static { - VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase())); - } - - private static synchronized void initialize(TypeDataBase db) { - Type type = db.lookupType("XForwardingTable"); - - mapFieldOffset = type.getAddressField("_map").getOffset(); - } - - public XForwardingTable(Address addr) { - super(addr); - } - - private XGranuleMapForForwarding map() { - return VMObjectFactory.newObject(XGranuleMapForForwarding.class, addr.addOffsetTo(mapFieldOffset)); - } - - public XForwarding get(Address o) { - return VMObjectFactory.newObject(XForwarding.class, map().get(XAddress.offset(o))); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XForwardingTableCursor.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XForwardingTableCursor.java deleted file mode 100644 index 40103cd1e4331..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XForwardingTableCursor.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -package sun.jvm.hotspot.gc.x; - -class XForwardingTableCursor { - long _value; -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XForwardingTableEntry.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XForwardingTableEntry.java deleted file mode 100644 index 7c629f7c5cf9f..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XForwardingTableEntry.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.gc.x; - -import sun.jvm.hotspot.debugger.Address; - -class XForwardingTableEntry { - private Address entry; - - XForwardingTableEntry(Address addr) { - entry = addr; - } - - private static long empty() { - return ~0L; - } - - boolean is_empty() { - return entry.asLongValue() == empty(); - } - - Address to_offset() { - return entry.andWithMask((1L << 42) - 1); - } - - long from_index() { - return entry.asLongValue() >>> 42; - } - - public String toString() { - return entry + " - from_index: " + from_index() + " to_offset: " + to_offset(); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XGlobals.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XGlobals.java deleted file mode 100644 index 1cfc6dd76638b..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XGlobals.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -package sun.jvm.hotspot.gc.x; - -import sun.jvm.hotspot.runtime.VM; -import sun.jvm.hotspot.types.Field; -import sun.jvm.hotspot.types.Type; -import sun.jvm.hotspot.types.TypeDataBase; - -public class XGlobals { - private static Field instanceField; - - // Global phase state - public static int XPhaseRelocate; - - public static byte XPageTypeSmall; - public static byte XPageTypeMedium; - public static byte XPageTypeLarge; - - // Granule size shift - public static long XGranuleSizeShift; - - // Page size shifts - public static long XPageSizeSmallShift; - public static long XPageSizeMediumShift; - - // Object alignment shifts - public static int XObjectAlignmentMediumShift; - public static int XObjectAlignmentLargeShift; - - // Pointer part of address - public static long XAddressOffsetShift; - - // Pointer part of address - public static long XAddressOffsetBits; - public static long XAddressOffsetMax; - - static { - VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase())); - } - - private static synchronized void initialize(TypeDataBase db) { - Type type = db.lookupType("XGlobalsForVMStructs"); - - instanceField = type.getField("_instance_p"); - - XPhaseRelocate = db.lookupIntConstant("XPhaseRelocate").intValue(); - - XPageTypeSmall = db.lookupIntConstant("XPageTypeSmall").byteValue(); - XPageTypeMedium = db.lookupIntConstant("XPageTypeMedium").byteValue(); - XPageTypeLarge = db.lookupIntConstant("XPageTypeLarge").byteValue(); - - XGranuleSizeShift = db.lookupLongConstant("XGranuleSizeShift").longValue(); - - XPageSizeSmallShift = db.lookupLongConstant("XPageSizeSmallShift").longValue(); - XPageSizeMediumShift = db.lookupLongConstant("XPageSizeMediumShift").longValue(); - - XObjectAlignmentMediumShift = db.lookupIntConstant("XObjectAlignmentMediumShift").intValue(); - XObjectAlignmentLargeShift = db.lookupIntConstant("XObjectAlignmentLargeShift").intValue(); - - XAddressOffsetShift = db.lookupLongConstant("XAddressOffsetShift").longValue(); - - XAddressOffsetBits = db.lookupLongConstant("XAddressOffsetBits").longValue(); - XAddressOffsetMax = db.lookupLongConstant("XAddressOffsetMax").longValue(); - } - - private static XGlobalsForVMStructs instance() { - return new XGlobalsForVMStructs(instanceField.getAddress()); - } - - public static int XGlobalPhase() { - return instance().XGlobalPhase(); - } - - public static int XGlobalSeqNum() { - return instance().XGlobalSeqNum(); - } - - public static long XAddressOffsetMask() { - return instance().XAddressOffsetMask(); - } - - public static long XAddressMetadataMask() { - return instance().XAddressMetadataMask(); - } - - public static long XAddressMetadataFinalizable() { - return instance().XAddressMetadataFinalizable(); - } - - public static long XAddressGoodMask() { - return instance().XAddressGoodMask(); - } - - public static long XAddressBadMask() { - return instance().XAddressBadMask(); - } - - public static long XAddressWeakBadMask() { - return instance().XAddressWeakBadMask(); - } - - public static int XObjectAlignmentSmallShift() { - return instance().XObjectAlignmentSmallShift(); - } - - public static int XObjectAlignmentSmall() { - return instance().XObjectAlignmentSmall(); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XGlobalsForVMStructs.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XGlobalsForVMStructs.java deleted file mode 100644 index d4930dcd5dceb..0000000000000 --- 
a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XGlobalsForVMStructs.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.gc.x; - -import sun.jvm.hotspot.debugger.Address; -import sun.jvm.hotspot.runtime.VM; -import sun.jvm.hotspot.runtime.VMObject; -import sun.jvm.hotspot.types.AddressField; -import sun.jvm.hotspot.types.Type; -import sun.jvm.hotspot.types.TypeDataBase; - -class XGlobalsForVMStructs extends VMObject { - private static AddressField XGlobalPhaseField; - private static AddressField XGlobalSeqNumField; - private static AddressField XAddressOffsetMaskField; - private static AddressField XAddressMetadataMaskField; - private static AddressField XAddressMetadataFinalizableField; - private static AddressField XAddressGoodMaskField; - private static AddressField XAddressBadMaskField; - private static AddressField XAddressWeakBadMaskField; - private static AddressField XObjectAlignmentSmallShiftField; - private static AddressField XObjectAlignmentSmallField; - - static { - VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase())); - } - - private static synchronized void initialize(TypeDataBase db) { - Type type = db.lookupType("XGlobalsForVMStructs"); - - XGlobalPhaseField = type.getAddressField("_XGlobalPhase"); - XGlobalSeqNumField = type.getAddressField("_XGlobalSeqNum"); - XAddressOffsetMaskField = type.getAddressField("_XAddressOffsetMask"); - XAddressMetadataMaskField = type.getAddressField("_XAddressMetadataMask"); - XAddressMetadataFinalizableField = type.getAddressField("_XAddressMetadataFinalizable"); - XAddressGoodMaskField = type.getAddressField("_XAddressGoodMask"); - XAddressBadMaskField = type.getAddressField("_XAddressBadMask"); - XAddressWeakBadMaskField = type.getAddressField("_XAddressWeakBadMask"); - XObjectAlignmentSmallShiftField = type.getAddressField("_XObjectAlignmentSmallShift"); - XObjectAlignmentSmallField = type.getAddressField("_XObjectAlignmentSmall"); - } - - XGlobalsForVMStructs(Address addr) { - super(addr); - } - - int XGlobalPhase() { - return XGlobalPhaseField.getValue(addr).getJIntAt(0); - } - - int XGlobalSeqNum() { - return XGlobalSeqNumField.getValue(addr).getJIntAt(0); - } - - long XAddressOffsetMask() { - return XAddressOffsetMaskField.getValue(addr).getJLongAt(0); - } - - long XAddressMetadataMask() { - return XAddressMetadataMaskField.getValue(addr).getJLongAt(0); - } - - long XAddressMetadataFinalizable() { - return 
XAddressMetadataFinalizableField.getValue(addr).getJLongAt(0); - } - - long XAddressGoodMask() { - return XAddressGoodMaskField.getValue(addr).getJLongAt(0); - } - - long XAddressBadMask() { - return XAddressBadMaskField.getValue(addr).getJLongAt(0); - } - - long XAddressWeakBadMask() { - return XAddressWeakBadMaskField.getValue(addr).getJLongAt(0); - } - - int XObjectAlignmentSmallShift() { - return XObjectAlignmentSmallShiftField.getValue(addr).getJIntAt(0); - } - - int XObjectAlignmentSmall() { - return XObjectAlignmentSmallField.getValue(addr).getJIntAt(0); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XGranuleMapForForwarding.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XGranuleMapForForwarding.java deleted file mode 100644 index 347f1405729a9..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XGranuleMapForForwarding.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2021, NTT DATA. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -package sun.jvm.hotspot.gc.x; - -import sun.jvm.hotspot.debugger.Address; -import sun.jvm.hotspot.runtime.VM; -import sun.jvm.hotspot.runtime.VMObject; -import sun.jvm.hotspot.types.AddressField; -import sun.jvm.hotspot.types.Type; -import sun.jvm.hotspot.types.TypeDataBase; - -public class XGranuleMapForForwarding extends VMObject { - private static AddressField mapField; - - static { - VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase())); - } - - private static synchronized void initialize(TypeDataBase db) { - Type type = db.lookupType("XGranuleMapForForwarding"); - - mapField = type.getAddressField("_map"); - } - - public XGranuleMapForForwarding(Address addr) { - super(addr); - } - - private Address map() { - return mapField.getValue(addr); - } - - public long size() { - return XGlobals.XAddressOffsetMax >> XGlobals.XGranuleSizeShift; - } - - private long index_for_offset(long offset) { - long index = offset >>> XGlobals.XGranuleSizeShift; - - return index; - } - - Address at(long index) { - return map().getAddressAt(index * VM.getVM().getAddressSize()); - } - - Address get(long offset) { - long index = index_for_offset(offset); - return at(index); - } - - public class Iterator { - private long next = 0; - - boolean hasNext() { - return next < size(); - } - - Address next() { - if (next >= size()) { - throw new RuntimeException("OOIBE"); - } - - return at(next++); - } - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XGranuleMapForPageTable.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XGranuleMapForPageTable.java deleted file mode 100644 index 468a3e2457da3..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XGranuleMapForPageTable.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -package sun.jvm.hotspot.gc.x; - -import sun.jvm.hotspot.debugger.Address; -import sun.jvm.hotspot.runtime.VM; -import sun.jvm.hotspot.runtime.VMObject; -import sun.jvm.hotspot.types.AddressField; -import sun.jvm.hotspot.types.Type; -import sun.jvm.hotspot.types.TypeDataBase; - -public class XGranuleMapForPageTable extends VMObject { - private static AddressField mapField; - - static { - VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase())); - } - - private static synchronized void initialize(TypeDataBase db) { - Type type = db.lookupType("XGranuleMapForPageTable"); - - mapField = type.getAddressField("_map"); - } - - public XGranuleMapForPageTable(Address addr) { - super(addr); - } - - private Address map() { - return mapField.getValue(addr); - } - - public long size() { - return XGlobals.XAddressOffsetMax >> XGlobals.XGranuleSizeShift; - } - - private long index_for_addr(Address addr) { - long index = XAddress.offset(addr) >> XGlobals.XGranuleSizeShift; - - return index; - } - - Address at(long index) { - return map().getAddressAt(index * VM.getVM().getBytesPerLong()); - } - - Address get(Address addr) { - long index = index_for_addr(addr); - return at(index); - } - - public class Iterator { - private long next = 0; - - boolean hasNext() { - return next < size(); - } - - Address next() { - if (next >= size()) { - throw new RuntimeException("OOIBE"); - } - - return at(next++); - } - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XHash.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XHash.java deleted file mode 100644 index 79b1f5c2e70a0..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XHash.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -package sun.jvm.hotspot.gc.x; - -class XHash { - private static long uint32(long value) { - return value & 0xFFFFFFFFL; - } - - static long uint32_to_uint32(long key) { - key = uint32(~key + (key << 15)); - key = uint32(key ^ (key >>> 12)); - key = uint32(key + (key << 2)); - key = uint32(key ^ (key >>> 4)); - key = uint32(key * 2057); - key = uint32(key ^ (key >>> 16)); - return key; - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XHeap.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XHeap.java deleted file mode 100644 index c309ce2194438..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XHeap.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -package sun.jvm.hotspot.gc.x; - -import java.io.PrintStream; - -import sun.jvm.hotspot.debugger.Address; -import sun.jvm.hotspot.runtime.VM; -import sun.jvm.hotspot.runtime.VMObject; -import sun.jvm.hotspot.runtime.VMObjectFactory; -import sun.jvm.hotspot.types.Type; -import sun.jvm.hotspot.types.TypeDataBase; - -// Mirror class for XHeap - -public class XHeap extends VMObject { - - private static long pageAllocatorFieldOffset; - private static long pageTableFieldOffset; - private static long forwardingTableFieldOffset; - private static long relocateFieldOffset; - - static { - VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase())); - } - - private static synchronized void initialize(TypeDataBase db) { - Type type = db.lookupType("XHeap"); - - pageAllocatorFieldOffset = type.getAddressField("_page_allocator").getOffset(); - pageTableFieldOffset = type.getAddressField("_page_table").getOffset(); - forwardingTableFieldOffset = type.getAddressField("_forwarding_table").getOffset(); - relocateFieldOffset = type.getAddressField("_relocate").getOffset(); - } - - public XHeap(Address addr) { - super(addr); - } - - private XPageAllocator pageAllocator() { - Address pageAllocatorAddr = addr.addOffsetTo(pageAllocatorFieldOffset); - return VMObjectFactory.newObject(XPageAllocator.class, pageAllocatorAddr); - } - - XPageTable pageTable() { - return VMObjectFactory.newObject(XPageTable.class, addr.addOffsetTo(pageTableFieldOffset)); - } - - XForwardingTable forwardingTable() { - return VMObjectFactory.newObject(XForwardingTable.class, addr.addOffsetTo(forwardingTableFieldOffset)); - } - - XRelocate relocate() { - return VMObjectFactory.newObject(XRelocate.class, addr.addOffsetTo(relocateFieldOffset)); - } - - public long maxCapacity() { - return pageAllocator().maxCapacity(); - } - - public long capacity() { - return pageAllocator().capacity(); - } - - public long used() { - return pageAllocator().used(); - } - - boolean is_relocating(Address o) { - return pageTable().is_relocating(o); - } - - Address relocate_object(Address addr) { - XForwarding forwarding = forwardingTable().get(addr); - if (forwarding == null) { - return XAddress.good(addr); - } - return relocate().relocateObject(forwarding, XAddress.good(addr)); - } - - public boolean isIn(Address addr) { - if (XAddress.isIn(addr)) { - XPage page = pageTable().get(addr); - if (page != null) { - return page.isIn(addr); - } - } - return false; - } - - public Address remapObject(Address o) { - XForwarding forwarding = forwardingTable().get(addr); - if (forwarding == null) { - return XAddress.good(o); - } - return relocate().forwardObject(forwarding, XAddress.good(o)); - } - - public void printOn(PrintStream tty) { - tty.print(" ZHeap "); - tty.print("used " + (used() / 1024 / 1024) + "M, "); - tty.print("capacity " + (capacity() / 1024 / 1024) + "M, "); - tty.println("max capacity " + (maxCapacity() / 1024 / 1024) + "M"); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XOop.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XOop.java deleted file mode 100644 index bbe296f658bc2..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XOop.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.gc.x; - -import sun.jvm.hotspot.debugger.Address; -import sun.jvm.hotspot.debugger.OopHandle; - -class XOop { - static Address to_address(OopHandle oop) { - return oop; - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XPage.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XPage.java deleted file mode 100644 index a6315f10130fc..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XPage.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -package sun.jvm.hotspot.gc.x; - -import java.util.ArrayList; -import java.util.List; - -import sun.jvm.hotspot.debugger.Address; -import sun.jvm.hotspot.debugger.OopHandle; -import sun.jvm.hotspot.gc.shared.LiveRegionsProvider; -import sun.jvm.hotspot.memory.MemRegion; -import sun.jvm.hotspot.oops.Oop; -import sun.jvm.hotspot.oops.UnknownOopException; -import sun.jvm.hotspot.runtime.VM; -import sun.jvm.hotspot.runtime.VMObject; -import sun.jvm.hotspot.runtime.VMObjectFactory; -import sun.jvm.hotspot.types.AddressField; -import sun.jvm.hotspot.types.CIntegerField; -import sun.jvm.hotspot.types.Type; -import sun.jvm.hotspot.types.TypeDataBase; - -public class XPage extends VMObject implements LiveRegionsProvider { - private static CIntegerField typeField; - private static CIntegerField seqnumField; - private static long virtualFieldOffset; - private static AddressField topField; - - static { - VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase())); - } - - private static synchronized void initialize(TypeDataBase db) { - Type type = db.lookupType("XPage"); - - typeField = type.getCIntegerField("_type"); - seqnumField = type.getCIntegerField("_seqnum"); - virtualFieldOffset = type.getField("_virtual").getOffset(); - topField = type.getAddressField("_top"); - } - - public XPage(Address addr) { - super(addr); - } - - private byte type() { - return typeField.getJByte(addr); - } - - private int seqnum() { - return seqnumField.getJInt(addr); - } - - private XVirtualMemory virtual() { - return VMObjectFactory.newObject(XVirtualMemory.class, addr.addOffsetTo(virtualFieldOffset)); - } - - private Address top() { - return topField.getValue(addr); - } - - private boolean is_relocatable() { - return seqnum() < XGlobals.XGlobalSeqNum(); - } - - long start() { - return virtual().start(); - } - - long size() { - return virtual().end() - virtual().start(); - } - - long object_alignment_shift() { - if (type() == XGlobals.XPageTypeSmall) { - return XGlobals.XObjectAlignmentSmallShift(); - } else if (type() == XGlobals.XPageTypeMedium) { - return XGlobals.XObjectAlignmentMediumShift; - } else { - assert(type() == XGlobals.XPageTypeLarge); - return XGlobals.XObjectAlignmentLargeShift; - } - } - - long objectAlignmentSize() { - return 1 << object_alignment_shift(); - } - - public boolean isIn(Address addr) { - long offset = XAddress.offset(addr); - // FIXME: it does not consider the sign. - return (offset >= start()) && (offset < top().asLongValue()); - } - - private long getObjectSize(Address good) { - OopHandle handle = good.addOffsetToAsOopHandle(0); - Oop obj = null; - - try { - obj = VM.getVM().getObjectHeap().newOop(handle); - } catch (UnknownOopException exp) { - throw new RuntimeException(" UnknownOopException " + exp); - } - - return VM.getVM().alignUp(obj.getObjectSize(), objectAlignmentSize()); - } - - public List getLiveRegions() { - Address start = XAddress.good(XUtils.longToAddress(start())); - - // Can't convert top() to a "good" address because it might - // be at the top of the "offset" range, and therefore also - // looks like one of the color bits. Instead use the "good" - // address and add the size. 
- long size = top().asLongValue() - start(); - Address end = start.addOffsetTo(size); - - return List.of(new MemRegion(start, end)); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XPageAllocator.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XPageAllocator.java deleted file mode 100644 index 1af19ea875f2a..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XPageAllocator.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.gc.x; - -import sun.jvm.hotspot.debugger.Address; -import sun.jvm.hotspot.runtime.VM; -import sun.jvm.hotspot.runtime.VMObject; -import sun.jvm.hotspot.types.CIntegerField; -import sun.jvm.hotspot.types.Type; -import sun.jvm.hotspot.types.TypeDataBase; - -// Mirror class for XPageAllocator - -public class XPageAllocator extends VMObject { - - private static CIntegerField maxCapacityField; - private static CIntegerField capacityField; - private static CIntegerField usedField; - - static { - VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase())); - } - - private static synchronized void initialize(TypeDataBase db) { - Type type = db.lookupType("XPageAllocator"); - - maxCapacityField = type.getCIntegerField("_max_capacity"); - capacityField = type.getCIntegerField("_capacity"); - usedField = type.getCIntegerField("_used"); - } - - public long maxCapacity() { - return maxCapacityField.getValue(addr); - } - - public long capacity() { - return capacityField.getValue(addr); - } - - public long used() { - return usedField.getValue(addr); - } - - public XPageAllocator(Address addr) { - super(addr); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XPageTable.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XPageTable.java deleted file mode 100644 index c2aba43b91294..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XPageTable.java +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.gc.x; - -import java.util.Iterator; - -import sun.jvm.hotspot.debugger.Address; -import sun.jvm.hotspot.runtime.VM; -import sun.jvm.hotspot.runtime.VMObject; -import sun.jvm.hotspot.runtime.VMObjectFactory; -import sun.jvm.hotspot.types.Type; -import sun.jvm.hotspot.types.TypeDataBase; - -public class XPageTable extends VMObject { - private static long mapFieldOffset; - - static { - VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase())); - } - - private static synchronized void initialize(TypeDataBase db) { - Type type = db.lookupType("XPageTable"); - - mapFieldOffset = type.getAddressField("_map").getOffset(); - } - - public XPageTable(Address addr) { - super(addr); - } - - private XGranuleMapForPageTable map() { - return VMObjectFactory.newObject(XGranuleMapForPageTable.class, addr.addOffsetTo(mapFieldOffset)); - } - - private XPageTableEntry getEntry(Address o) { - return new XPageTableEntry(map().get(o)); - } - - XPage get(Address o) { - return VMObjectFactory.newObject(XPage.class, map().get(VM.getVM().getDebugger().newAddress(XAddress.offset(o)))); - } - - boolean is_relocating(Address o) { - return getEntry(o).relocating(); - } - - private class XPagesIterator implements Iterator { - private XGranuleMapForPageTable.Iterator mapIter; - private XPage next; - - XPagesIterator() { - mapIter = map().new Iterator(); - positionToNext(); - } - - private XPage positionToNext() { - XPage current = next; - - // Find next - XPage found = null; - while (mapIter.hasNext()) { - XPageTableEntry entry = new XPageTableEntry(mapIter.next()); - if (!entry.isEmpty()) { - XPage page = entry.page(); - // Medium pages have repeated entries for all covered slots, - // therefore we need to compare against the current page. 
- if (page != null && !page.equals(current)) { - found = page; - break; - } - } - } - - next = found; - - return current; - } - - @Override - public boolean hasNext() { - return next != null; - } - - @Override - public XPage next() { - return positionToNext(); - } - - @Override - public void remove() { - /* not supported */ - } - } - - abstract class XPageFilter { - public abstract boolean accept(XPage page); - } - - class XPagesFilteredIterator implements Iterator { - private XPage next; - private XPagesIterator iter = new XPagesIterator(); - private XPageFilter filter; - - XPagesFilteredIterator(XPageFilter filter) { - this.filter = filter; - positionToNext(); - } - - public XPage positionToNext() { - XPage current = next; - - // Find next - XPage found = null; - while (iter.hasNext()) { - XPage page = iter.next(); - if (filter.accept(page)) { - found = page; - break; - } - } - - next = found; - - return current; - } - - @Override - public boolean hasNext() { - return next != null; - } - - @Override - public XPage next() { - return positionToNext(); - } - - @Override - public void remove() { - /* not supported */ - } - } - - public Iterator iterator() { - return new XPagesIterator(); - } - - public Iterator activePagesIterator() { - return new XPagesFilteredIterator(new XPageFilter() { - public boolean accept(XPage page) { - return page != null; - } - }); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XPageTableEntry.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XPageTableEntry.java deleted file mode 100644 index 42a29878eccd9..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XPageTableEntry.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -package sun.jvm.hotspot.gc.x; - -import sun.jvm.hotspot.debugger.Address; -import sun.jvm.hotspot.runtime.VMObjectFactory; - -class XPageTableEntry { - Address entry; - - XPageTableEntry(Address address) { - entry = address; - } - - XPage page() { - return VMObjectFactory.newObject(XPage.class, zPageBits()); - } - - private Address zPageBits() { - return entry.andWithMask(~1L); - } - - boolean relocating() { - return (entry.asLongValue() & 1) == 1; - } - - boolean isEmpty() { - return entry == null || zPageBits() == null; - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XRelocate.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XRelocate.java deleted file mode 100644 index a4b0dc1c992ec..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XRelocate.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2021, NTT DATA. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.gc.x; - -import sun.jvm.hotspot.debugger.Address; -import sun.jvm.hotspot.runtime.VM; -import sun.jvm.hotspot.runtime.VMObject; -import sun.jvm.hotspot.types.AddressField; -import sun.jvm.hotspot.types.Type; -import sun.jvm.hotspot.types.TypeDataBase; - -public class XRelocate extends VMObject { - - static { - VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase())); - } - - private static synchronized void initialize(TypeDataBase db) { - Type type = db.lookupType("XRelocate"); - } - - public XRelocate(Address addr) { - super(addr); - } - - private long forwardingIndex(XForwarding forwarding, Address from) { - long fromOffset = XAddress.offset(from); - return (fromOffset - forwarding.start()) >>> forwarding.objectAlignmentShift(); - } - - private Address forwardingFind(XForwarding forwarding, Address from) { - long fromIndex = forwardingIndex(forwarding, from); - XForwardingEntry entry = forwarding.find(fromIndex); - return entry.populated() ? XAddress.good(VM.getVM().getDebugger().newAddress(entry.toOffset())) : null; - } - - public Address forwardObject(XForwarding forwarding, Address from) { - return forwardingFind(forwarding, from); - } - - public Address relocateObject(XForwarding forwarding, Address o) { - Address toAddr = forwardingFind(forwarding, o); - if (toAddr != null) { - // Already relocated. - return toAddr; - } else { - // Return original address because it is not yet relocated. 
- return o; - } - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XUtils.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XUtils.java deleted file mode 100644 index a5c9ffde371dd..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XUtils.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.gc.x; - -import sun.jvm.hotspot.debugger.Address; -import sun.jvm.hotspot.runtime.VM; - -class XUtils { - static Address longToAddress(long value) { - return VM.getVM().getDebugger().newAddress(value); - } - - static long alignUp(long size, long alignment) { - long mask = alignment - 1; - long adjusted = size + mask; - return adjusted & ~mask; - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XVirtualMemory.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XVirtualMemory.java deleted file mode 100644 index de225ccdcbd22..0000000000000 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/x/XVirtualMemory.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -package sun.jvm.hotspot.gc.x; - -import sun.jvm.hotspot.debugger.Address; -import sun.jvm.hotspot.runtime.VM; -import sun.jvm.hotspot.runtime.VMObject; -import sun.jvm.hotspot.types.CIntegerField; -import sun.jvm.hotspot.types.Type; -import sun.jvm.hotspot.types.TypeDataBase; - -public class XVirtualMemory extends VMObject { - private static CIntegerField startField; - private static CIntegerField endField; - - static { - VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase())); - } - - private static synchronized void initialize(TypeDataBase db) { - Type type = db.lookupType("XVirtualMemory"); - - startField = type.getCIntegerField("_start"); - endField = type.getCIntegerField("_end"); - } - - public XVirtualMemory(Address addr) { - super(addr); - } - - long start() { - return startField.getJLong(addr); - } - - long end() { - return endField.getJLong(addr); - } -} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java index c4ab8e32c0bb0..01fd0f7430afe 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java @@ -36,7 +36,6 @@ import sun.jvm.hotspot.gc.serial.SerialHeap; import sun.jvm.hotspot.gc.shared.CollectedHeap; import sun.jvm.hotspot.gc.shenandoah.ShenandoahHeap; -import sun.jvm.hotspot.gc.x.XCollectedHeap; import sun.jvm.hotspot.gc.z.ZCollectedHeap; import sun.jvm.hotspot.oops.Oop; import sun.jvm.hotspot.runtime.BasicType; @@ -87,7 +86,6 @@ private static synchronized void initialize(TypeDataBase db) { addHeapTypeIfInDB(db, ParallelScavengeHeap.class); addHeapTypeIfInDB(db, G1CollectedHeap.class); addHeapTypeIfInDB(db, EpsilonHeap.class); - addHeapTypeIfInDB(db, XCollectedHeap.class); addHeapTypeIfInDB(db, ZCollectedHeap.class); addHeapTypeIfInDB(db, ShenandoahHeap.class); diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java index 2c1ad426b6a05..be6c8522fc58f 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java @@ -37,7 +37,6 @@ import sun.jvm.hotspot.gc.g1.*; import sun.jvm.hotspot.gc.shenandoah.*; import sun.jvm.hotspot.gc.parallel.*; -import sun.jvm.hotspot.gc.x.*; import sun.jvm.hotspot.memory.*; import sun.jvm.hotspot.runtime.*; import sun.jvm.hotspot.types.*; diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java index 86a1216bbd3d4..58a9c1b451954 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java @@ -32,7 +32,6 @@ import sun.jvm.hotspot.gc.serial.*; import sun.jvm.hotspot.gc.shenandoah.*; import sun.jvm.hotspot.gc.shared.*; -import sun.jvm.hotspot.gc.x.*; import sun.jvm.hotspot.gc.z.*; import sun.jvm.hotspot.debugger.JVMDebugger; import sun.jvm.hotspot.memory.*; @@ -145,9 +144,6 @@ public void run() { } else if (heap instanceof EpsilonHeap) { EpsilonHeap eh = (EpsilonHeap) heap; printSpace(eh.space()); - } else if (heap instanceof XCollectedHeap) { - XCollectedHeap zheap = (XCollectedHeap) heap; - zheap.printOn(System.out); } else if (heap instanceof ZCollectedHeap) { ZCollectedHeap 
zheap = (ZCollectedHeap) heap; zheap.printOn(System.out); diff --git a/test/hotspot/gtest/gc/x/test_xAddress.cpp b/test/hotspot/gtest/gc/x/test_xAddress.cpp deleted file mode 100644 index 3f769dc7eead9..0000000000000 --- a/test/hotspot/gtest/gc/x/test_xAddress.cpp +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xAddress.inline.hpp" -#include "gc/x/xGlobals.hpp" -#include "unittest.hpp" - -class XAddressTest : public ::testing::Test { -protected: - static void is_good_bit(uintptr_t bit_mask) { - // Setup - XAddress::initialize(); - XAddress::set_good_mask(bit_mask); - - // Test that a pointer with only the given bit is considered good. - EXPECT_EQ(XAddress::is_good(XAddressMetadataMarked0), (bit_mask == XAddressMetadataMarked0)); - EXPECT_EQ(XAddress::is_good(XAddressMetadataMarked1), (bit_mask == XAddressMetadataMarked1)); - EXPECT_EQ(XAddress::is_good(XAddressMetadataRemapped), (bit_mask == XAddressMetadataRemapped)); - - // Test that a pointer with the given bit and some extra bits is considered good. - EXPECT_EQ(XAddress::is_good(XAddressMetadataMarked0 | 0x8),(bit_mask == XAddressMetadataMarked0)); - EXPECT_EQ(XAddress::is_good(XAddressMetadataMarked1 | 0x8), (bit_mask == XAddressMetadataMarked1)); - EXPECT_EQ(XAddress::is_good(XAddressMetadataRemapped | 0x8), (bit_mask == XAddressMetadataRemapped)); - - // Test that null is not considered good. - EXPECT_FALSE(XAddress::is_good(0)); - } - - static void is_good_or_null_bit(uintptr_t bit_mask) { - // Setup - XAddress::initialize(); - XAddress::set_good_mask(bit_mask); - - // Test that a pointer with only the given bit is considered good. - EXPECT_EQ(XAddress::is_good_or_null(XAddressMetadataMarked0), (bit_mask == XAddressMetadataMarked0)); - EXPECT_EQ(XAddress::is_good_or_null(XAddressMetadataMarked1), (bit_mask == XAddressMetadataMarked1)); - EXPECT_EQ(XAddress::is_good_or_null(XAddressMetadataRemapped), (bit_mask == XAddressMetadataRemapped)); - - // Test that a pointer with the given bit and some extra bits is considered good. - EXPECT_EQ(XAddress::is_good_or_null(XAddressMetadataMarked0 | 0x8), (bit_mask == XAddressMetadataMarked0)); - EXPECT_EQ(XAddress::is_good_or_null(XAddressMetadataMarked1 | 0x8), (bit_mask == XAddressMetadataMarked1)); - EXPECT_EQ(XAddress::is_good_or_null(XAddressMetadataRemapped | 0x8), (bit_mask == XAddressMetadataRemapped)); - - // Test that null is considered good_or_null. 
- EXPECT_TRUE(XAddress::is_good_or_null(0)); - } - - static void finalizable() { - // Setup - XAddress::initialize(); - XAddress::flip_to_marked(); - - // Test that a normal good pointer is good and weak good, but not finalizable - const uintptr_t addr1 = XAddress::good(1); - EXPECT_FALSE(XAddress::is_finalizable(addr1)); - EXPECT_TRUE(XAddress::is_marked(addr1)); - EXPECT_FALSE(XAddress::is_remapped(addr1)); - EXPECT_TRUE(XAddress::is_weak_good(addr1)); - EXPECT_TRUE(XAddress::is_weak_good_or_null(addr1)); - EXPECT_TRUE(XAddress::is_good(addr1)); - EXPECT_TRUE(XAddress::is_good_or_null(addr1)); - - // Test that a finalizable good pointer is finalizable and weak good, but not good - const uintptr_t addr2 = XAddress::finalizable_good(1); - EXPECT_TRUE(XAddress::is_finalizable(addr2)); - EXPECT_TRUE(XAddress::is_marked(addr2)); - EXPECT_FALSE(XAddress::is_remapped(addr2)); - EXPECT_TRUE(XAddress::is_weak_good(addr2)); - EXPECT_TRUE(XAddress::is_weak_good_or_null(addr2)); - EXPECT_FALSE(XAddress::is_good(addr2)); - EXPECT_FALSE(XAddress::is_good_or_null(addr2)); - - // Flip to remapped and test that it's no longer weak good - XAddress::flip_to_remapped(); - EXPECT_TRUE(XAddress::is_finalizable(addr2)); - EXPECT_TRUE(XAddress::is_marked(addr2)); - EXPECT_FALSE(XAddress::is_remapped(addr2)); - EXPECT_FALSE(XAddress::is_weak_good(addr2)); - EXPECT_FALSE(XAddress::is_weak_good_or_null(addr2)); - EXPECT_FALSE(XAddress::is_good(addr2)); - EXPECT_FALSE(XAddress::is_good_or_null(addr2)); - } -}; - -TEST_F(XAddressTest, is_good) { - is_good_bit(XAddressMetadataMarked0); - is_good_bit(XAddressMetadataMarked1); - is_good_bit(XAddressMetadataRemapped); -} - -TEST_F(XAddressTest, is_good_or_null) { - is_good_or_null_bit(XAddressMetadataMarked0); - is_good_or_null_bit(XAddressMetadataMarked1); - is_good_or_null_bit(XAddressMetadataRemapped); -} - -TEST_F(XAddressTest, is_weak_good_or_null) { -#define check_is_weak_good_or_null(value) \ - EXPECT_EQ(XAddress::is_weak_good_or_null(value), \ - (XAddress::is_good_or_null(value) || XAddress::is_remapped(value))) \ - << "is_good_or_null: " << XAddress::is_good_or_null(value) \ - << " is_remaped: " << XAddress::is_remapped(value) \ - << " is_good_or_null_or_remapped: " << XAddress::is_weak_good_or_null(value) - - check_is_weak_good_or_null((uintptr_t)nullptr); - check_is_weak_good_or_null(XAddressMetadataMarked0); - check_is_weak_good_or_null(XAddressMetadataMarked1); - check_is_weak_good_or_null(XAddressMetadataRemapped); - check_is_weak_good_or_null((uintptr_t)0x123); -} - -TEST_F(XAddressTest, finalizable) { - finalizable(); -} diff --git a/test/hotspot/gtest/gc/x/test_xArray.cpp b/test/hotspot/gtest/gc/x/test_xArray.cpp deleted file mode 100644 index 36c0b73ad6f51..0000000000000 --- a/test/hotspot/gtest/gc/x/test_xArray.cpp +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xArray.inline.hpp" -#include "unittest.hpp" - -TEST(XArray, sanity) { - XArray a; - - // Add elements - for (int i = 0; i < 10; i++) { - a.append(i); - } - - XArray b; - - b.swap(&a); - - // Check size - ASSERT_EQ(a.length(), 0); - ASSERT_EQ(a.capacity(), 0); - ASSERT_EQ(a.is_empty(), true); - - ASSERT_EQ(b.length(), 10); - ASSERT_GE(b.capacity(), 10); - ASSERT_EQ(b.is_empty(), false); - - // Clear elements - a.clear(); - - // Check that b is unaffected - ASSERT_EQ(b.length(), 10); - ASSERT_GE(b.capacity(), 10); - ASSERT_EQ(b.is_empty(), false); - - a.append(1); - - // Check that b is unaffected - ASSERT_EQ(b.length(), 10); - ASSERT_GE(b.capacity(), 10); - ASSERT_EQ(b.is_empty(), false); -} - -TEST(XArray, iterator) { - XArray a; - - // Add elements - for (int i = 0; i < 10; i++) { - a.append(i); - } - - // Iterate - int count = 0; - XArrayIterator iter(&a); - for (int value; iter.next(&value);) { - ASSERT_EQ(a.at(count), count); - count++; - } - - // Check count - ASSERT_EQ(count, 10); -} diff --git a/test/hotspot/gtest/gc/x/test_xBitField.cpp b/test/hotspot/gtest/gc/x/test_xBitField.cpp deleted file mode 100644 index 248322b2a0715..0000000000000 --- a/test/hotspot/gtest/gc/x/test_xBitField.cpp +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/x/xBitField.hpp" -#include "unittest.hpp" - -TEST(XBitFieldTest, test) { - typedef XBitField field_bool; - typedef XBitField field_uint8; - typedef XBitField field_uint16; - typedef XBitField field_uint32; - typedef XBitField field_uint64; - typedef XBitField field_pointer; - - uint64_t entry; - - { - const bool value = false; - entry = field_bool::encode(value); - EXPECT_EQ(field_bool::decode(entry), value) << "Should be equal"; - } - - { - const bool value = true; - entry = field_bool::encode(value); - EXPECT_EQ(field_bool::decode(entry), value) << "Should be equal"; - } - - { - const uint8_t value = ~(uint8_t)0; - entry = field_uint8::encode(value); - EXPECT_EQ(field_uint8::decode(entry), value) << "Should be equal"; - } - - { - const uint16_t value = ~(uint16_t)0; - entry = field_uint16::encode(value); - EXPECT_EQ(field_uint16::decode(entry), value) << "Should be equal"; - } - - { - const uint32_t value = ~(uint32_t)0; - entry = field_uint32::encode(value); - EXPECT_EQ(field_uint32::decode(entry), value) << "Should be equal"; - } - - { - const uint64_t value = ~(uint64_t)0 >> 1; - entry = field_uint64::encode(value); - EXPECT_EQ(field_uint64::decode(entry), value) << "Should be equal"; - } - - { - void* const value = (void*)(~(uintptr_t)0 << 3); - entry = field_pointer::encode(value); - EXPECT_EQ(field_pointer::decode(entry), value) << "Should be equal"; - } -} diff --git a/test/hotspot/gtest/gc/x/test_xBitMap.cpp b/test/hotspot/gtest/gc/x/test_xBitMap.cpp deleted file mode 100644 index 2d3cb09c7ed8b..0000000000000 --- a/test/hotspot/gtest/gc/x/test_xBitMap.cpp +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xBitMap.inline.hpp" -#include "unittest.hpp" - -class XBitMapTest : public ::testing::Test { -protected: - static void test_set_pair_unset(size_t size, bool finalizable) { - XBitMap bitmap(size); - - for (BitMap::idx_t i = 0; i < size - 1; i++) { - if ((i + 1) % BitsPerWord == 0) { - // Can't set pairs of bits in different words. - continue; - } - - // XBitMaps are not cleared when constructed. 
- bitmap.clear(); - - bool inc_live = false; - - bool ret = bitmap.par_set_bit_pair(i, finalizable, inc_live); - EXPECT_TRUE(ret) << "Failed to set bit"; - EXPECT_TRUE(inc_live) << "Should have set inc_live"; - - // First bit should always be set - EXPECT_TRUE(bitmap.at(i)) << "Should be set"; - - // Second bit should only be set when marking strong - EXPECT_NE(bitmap.at(i + 1), finalizable); - } - } - - static void test_set_pair_set(size_t size, bool finalizable) { - XBitMap bitmap(size); - - for (BitMap::idx_t i = 0; i < size - 1; i++) { - if ((i + 1) % BitsPerWord == 0) { - // Can't set pairs of bits in different words. - continue; - } - - // Fill the bitmap with ones. - bitmap.set_range(0, size); - - bool inc_live = false; - - bool ret = bitmap.par_set_bit_pair(i, finalizable, inc_live); - EXPECT_FALSE(ret) << "Should not succeed setting bit"; - EXPECT_FALSE(inc_live) << "Should not have set inc_live"; - - // Both bits were pre-set. - EXPECT_TRUE(bitmap.at(i)) << "Should be set"; - EXPECT_TRUE(bitmap.at(i + 1)) << "Should be set"; - } - } - - static void test_set_pair_set(bool finalizable) { - test_set_pair_set(2, finalizable); - test_set_pair_set(62, finalizable); - test_set_pair_set(64, finalizable); - test_set_pair_set(66, finalizable); - test_set_pair_set(126, finalizable); - test_set_pair_set(128, finalizable); - } - - static void test_set_pair_unset(bool finalizable) { - test_set_pair_unset(2, finalizable); - test_set_pair_unset(62, finalizable); - test_set_pair_unset(64, finalizable); - test_set_pair_unset(66, finalizable); - test_set_pair_unset(126, finalizable); - test_set_pair_unset(128, finalizable); - } - -}; - -TEST_F(XBitMapTest, test_set_pair_set) { - test_set_pair_set(false); - test_set_pair_set(true); -} - -TEST_F(XBitMapTest, test_set_pair_unset) { - test_set_pair_unset(false); - test_set_pair_unset(true); -} diff --git a/test/hotspot/gtest/gc/x/test_xForwarding.cpp b/test/hotspot/gtest/gc/x/test_xForwarding.cpp deleted file mode 100644 index de850304ebb0d..0000000000000 --- a/test/hotspot/gtest/gc/x/test_xForwarding.cpp +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/x/xAddress.inline.hpp" -#include "gc/x/xForwarding.inline.hpp" -#include "gc/x/xForwardingAllocator.inline.hpp" -#include "gc/x/xGlobals.hpp" -#include "gc/x/xPage.inline.hpp" -#include "unittest.hpp" - -using namespace testing; - -#define CAPTURE_DELIM "\n" -#define CAPTURE1(expression) #expression << " evaluates to " << expression -#define CAPTURE2(e0, e1) CAPTURE1(e0) << CAPTURE_DELIM << CAPTURE1(e1) - -#define CAPTURE(expression) CAPTURE1(expression) - -class XForwardingTest : public Test { -public: - // Helper functions - - class SequenceToFromIndex : AllStatic { - public: - static uintptr_t even(size_t sequence_number) { - return sequence_number * 2; - } - static uintptr_t odd(size_t sequence_number) { - return even(sequence_number) + 1; - } - static uintptr_t one_to_one(size_t sequence_number) { - return sequence_number; - } - }; - - // Test functions - - static void setup(XForwarding* forwarding) { - EXPECT_PRED1(is_power_of_2, forwarding->_entries.length()) << CAPTURE(forwarding->_entries.length()); - } - - static void find_empty(XForwarding* forwarding) { - size_t size = forwarding->_entries.length(); - size_t entries_to_check = size * 2; - - for (size_t i = 0; i < entries_to_check; i++) { - uintptr_t from_index = SequenceToFromIndex::one_to_one(i); - - XForwardingCursor cursor; - XForwardingEntry entry = forwarding->find(from_index, &cursor); - EXPECT_FALSE(entry.populated()) << CAPTURE2(from_index, size); - } - } - - static void find_full(XForwarding* forwarding) { - size_t size = forwarding->_entries.length(); - size_t entries_to_populate = size; - - // Populate - for (size_t i = 0; i < entries_to_populate; i++) { - uintptr_t from_index = SequenceToFromIndex::one_to_one(i); - - XForwardingCursor cursor; - XForwardingEntry entry = forwarding->find(from_index, &cursor); - ASSERT_FALSE(entry.populated()) << CAPTURE2(from_index, size); - - forwarding->insert(from_index, from_index, &cursor); - } - - // Verify - for (size_t i = 0; i < entries_to_populate; i++) { - uintptr_t from_index = SequenceToFromIndex::one_to_one(i); - - XForwardingCursor cursor; - XForwardingEntry entry = forwarding->find(from_index, &cursor); - ASSERT_TRUE(entry.populated()) << CAPTURE2(from_index, size); - - ASSERT_EQ(entry.from_index(), from_index) << CAPTURE(size); - ASSERT_EQ(entry.to_offset(), from_index) << CAPTURE(size); - } - } - - static void find_every_other(XForwarding* forwarding) { - size_t size = forwarding->_entries.length(); - size_t entries_to_populate = size / 2; - - // Populate even from indices - for (size_t i = 0; i < entries_to_populate; i++) { - uintptr_t from_index = SequenceToFromIndex::even(i); - - XForwardingCursor cursor; - XForwardingEntry entry = forwarding->find(from_index, &cursor); - ASSERT_FALSE(entry.populated()) << CAPTURE2(from_index, size); - - forwarding->insert(from_index, from_index, &cursor); - } - - // Verify populated even indices - for (size_t i = 0; i < entries_to_populate; i++) { - uintptr_t from_index = SequenceToFromIndex::even(i); - - XForwardingCursor cursor; - XForwardingEntry entry = forwarding->find(from_index, &cursor); - ASSERT_TRUE(entry.populated()) << CAPTURE2(from_index, size); - - ASSERT_EQ(entry.from_index(), from_index) << CAPTURE(size); - ASSERT_EQ(entry.to_offset(), from_index) << CAPTURE(size); - } - - // Verify empty odd indices - // - // This check could be done on a larger range of sequence numbers, - // but currently entries_to_populate is used. 
- for (size_t i = 0; i < entries_to_populate; i++) { - uintptr_t from_index = SequenceToFromIndex::odd(i); - - XForwardingCursor cursor; - XForwardingEntry entry = forwarding->find(from_index, &cursor); - - ASSERT_FALSE(entry.populated()) << CAPTURE2(from_index, size); - } - } - - static void test(void (*function)(XForwarding*), uint32_t size) { - // Create page - const XVirtualMemory vmem(0, XPageSizeSmall); - const XPhysicalMemory pmem(XPhysicalMemorySegment(0, XPageSizeSmall, true)); - XPage page(XPageTypeSmall, vmem, pmem); - - page.reset(); - - const size_t object_size = 16; - const uintptr_t object = page.alloc_object(object_size); - - XGlobalSeqNum++; - - bool dummy = false; - page.mark_object(XAddress::marked(object), dummy, dummy); - - const uint32_t live_objects = size; - const size_t live_bytes = live_objects * object_size; - page.inc_live(live_objects, live_bytes); - - // Setup allocator - XForwardingAllocator allocator; - const uint32_t nentries = XForwarding::nentries(&page); - allocator.reset((sizeof(XForwarding)) + (nentries * sizeof(XForwardingEntry))); - - // Setup forwarding - XForwarding* const forwarding = XForwarding::alloc(&allocator, &page); - - // Actual test function - (*function)(forwarding); - } - - // Run the given function with a few different input values. - static void test(void (*function)(XForwarding*)) { - test(function, 1); - test(function, 2); - test(function, 3); - test(function, 4); - test(function, 7); - test(function, 8); - test(function, 1023); - test(function, 1024); - test(function, 1025); - } -}; - -TEST_F(XForwardingTest, setup) { - test(&XForwardingTest::setup); -} - -TEST_F(XForwardingTest, find_empty) { - test(&XForwardingTest::find_empty); -} - -TEST_F(XForwardingTest, find_full) { - test(&XForwardingTest::find_full); -} - -TEST_F(XForwardingTest, find_every_other) { - test(&XForwardingTest::find_every_other); -} diff --git a/test/hotspot/gtest/gc/x/test_xList.cpp b/test/hotspot/gtest/gc/x/test_xList.cpp deleted file mode 100644 index f4766ce99e249..0000000000000 --- a/test/hotspot/gtest/gc/x/test_xList.cpp +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/x/xList.inline.hpp" -#include "unittest.hpp" - -#ifndef PRODUCT - -class XTestEntry { - friend class XList; - -private: - const int _id; - XListNode _node; - -public: - XTestEntry(int id) : - _id(id), - _node() {} - - int id() const { - return _id; - } -}; - -class XListTest : public ::testing::Test { -protected: - static void assert_sorted(XList* list) { - // Iterate forward - { - int count = list->first()->id(); - XListIterator iter(list); - for (XTestEntry* entry; iter.next(&entry);) { - ASSERT_EQ(entry->id(), count); - count++; - } - } - - // Iterate backward - { - int count = list->last()->id(); - XListReverseIterator iter(list); - for (XTestEntry* entry; iter.next(&entry);) { - EXPECT_EQ(entry->id(), count); - count--; - } - } - } -}; - -TEST_F(XListTest, test_insert) { - XList list; - XTestEntry e0(0); - XTestEntry e1(1); - XTestEntry e2(2); - XTestEntry e3(3); - XTestEntry e4(4); - XTestEntry e5(5); - - list.insert_first(&e2); - list.insert_before(&e2, &e1); - list.insert_after(&e2, &e3); - list.insert_last(&e4); - list.insert_first(&e0); - list.insert_last(&e5); - - EXPECT_EQ(list.size(), 6u); - assert_sorted(&list); - - for (int i = 0; i < 6; i++) { - XTestEntry* e = list.remove_first(); - EXPECT_EQ(e->id(), i); - } - - EXPECT_EQ(list.size(), 0u); -} - -TEST_F(XListTest, test_remove) { - // Remove first - { - XList list; - XTestEntry e0(0); - XTestEntry e1(1); - XTestEntry e2(2); - XTestEntry e3(3); - XTestEntry e4(4); - XTestEntry e5(5); - - list.insert_last(&e0); - list.insert_last(&e1); - list.insert_last(&e2); - list.insert_last(&e3); - list.insert_last(&e4); - list.insert_last(&e5); - - EXPECT_EQ(list.size(), 6u); - - for (int i = 0; i < 6; i++) { - XTestEntry* e = list.remove_first(); - EXPECT_EQ(e->id(), i); - } - - EXPECT_EQ(list.size(), 0u); - } - - // Remove last - { - XList list; - XTestEntry e0(0); - XTestEntry e1(1); - XTestEntry e2(2); - XTestEntry e3(3); - XTestEntry e4(4); - XTestEntry e5(5); - - list.insert_last(&e0); - list.insert_last(&e1); - list.insert_last(&e2); - list.insert_last(&e3); - list.insert_last(&e4); - list.insert_last(&e5); - - EXPECT_EQ(list.size(), 6u); - - for (int i = 5; i >= 0; i--) { - XTestEntry* e = list.remove_last(); - EXPECT_EQ(e->id(), i); - } - - EXPECT_EQ(list.size(), 0u); - } -} - -#endif // PRODUCT diff --git a/test/hotspot/gtest/gc/x/test_xLiveMap.cpp b/test/hotspot/gtest/gc/x/test_xLiveMap.cpp deleted file mode 100644 index d57790e9dabf9..0000000000000 --- a/test/hotspot/gtest/gc/x/test_xLiveMap.cpp +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xLiveMap.inline.hpp" -#include "unittest.hpp" - -class XLiveMapTest : public ::testing::Test { -protected: - static void strongly_live_for_large_xpage() { - // Large XPages only have room for one object. - XLiveMap livemap(1); - - bool inc_live; - uintptr_t object = 0u; - - // Mark the object strong. - livemap.set(object, false /* finalizable */, inc_live); - - // Check that both bits are in the same segment. - ASSERT_EQ(livemap.index_to_segment(0), livemap.index_to_segment(1)); - - // Check that the object was marked. - ASSERT_TRUE(livemap.get(0)); - - // Check that the object was strongly marked. - ASSERT_TRUE(livemap.get(1)); - - ASSERT_TRUE(inc_live); - } -}; - -TEST_F(XLiveMapTest, strongly_live_for_large_xpage) { - strongly_live_for_large_xpage(); -} diff --git a/test/hotspot/gtest/gc/x/test_xPhysicalMemory.cpp b/test/hotspot/gtest/gc/x/test_xPhysicalMemory.cpp deleted file mode 100644 index f22032632e9d4..0000000000000 --- a/test/hotspot/gtest/gc/x/test_xPhysicalMemory.cpp +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include "precompiled.hpp" -#include "gc/x/xPhysicalMemory.inline.hpp" -#include "unittest.hpp" - -TEST(XPhysicalMemoryTest, copy) { - const XPhysicalMemorySegment seg0(0, 100, true); - const XPhysicalMemorySegment seg1(200, 100, true); - - XPhysicalMemory pmem0; - pmem0.add_segment(seg0); - EXPECT_EQ(pmem0.nsegments(), 1); - EXPECT_EQ(pmem0.segment(0).size(), 100u); - - XPhysicalMemory pmem1; - pmem1.add_segment(seg0); - pmem1.add_segment(seg1); - EXPECT_EQ(pmem1.nsegments(), 2); - EXPECT_EQ(pmem1.segment(0).size(), 100u); - EXPECT_EQ(pmem1.segment(1).size(), 100u); - - XPhysicalMemory pmem2(pmem0); - EXPECT_EQ(pmem2.nsegments(), 1); - EXPECT_EQ(pmem2.segment(0).size(), 100u); - - pmem2 = pmem1; - EXPECT_EQ(pmem2.nsegments(), 2); - EXPECT_EQ(pmem2.segment(0).size(), 100u); - EXPECT_EQ(pmem2.segment(1).size(), 100u); -} - -TEST(XPhysicalMemoryTest, add) { - const XPhysicalMemorySegment seg0(0, 1, true); - const XPhysicalMemorySegment seg1(1, 1, true); - const XPhysicalMemorySegment seg2(2, 1, true); - const XPhysicalMemorySegment seg3(3, 1, true); - const XPhysicalMemorySegment seg4(4, 1, true); - const XPhysicalMemorySegment seg5(5, 1, true); - const XPhysicalMemorySegment seg6(6, 1, true); - - XPhysicalMemory pmem0; - EXPECT_EQ(pmem0.nsegments(), 0); - EXPECT_EQ(pmem0.is_null(), true); - - XPhysicalMemory pmem1; - pmem1.add_segment(seg0); - pmem1.add_segment(seg1); - pmem1.add_segment(seg2); - pmem1.add_segment(seg3); - pmem1.add_segment(seg4); - pmem1.add_segment(seg5); - pmem1.add_segment(seg6); - EXPECT_EQ(pmem1.nsegments(), 1); - EXPECT_EQ(pmem1.segment(0).size(), 7u); - EXPECT_EQ(pmem1.is_null(), false); - - XPhysicalMemory pmem2; - pmem2.add_segment(seg0); - pmem2.add_segment(seg1); - pmem2.add_segment(seg2); - pmem2.add_segment(seg4); - pmem2.add_segment(seg5); - pmem2.add_segment(seg6); - EXPECT_EQ(pmem2.nsegments(), 2); - EXPECT_EQ(pmem2.segment(0).size(), 3u); - EXPECT_EQ(pmem2.segment(1).size(), 3u); - EXPECT_EQ(pmem2.is_null(), false); - - XPhysicalMemory pmem3; - pmem3.add_segment(seg0); - pmem3.add_segment(seg2); - pmem3.add_segment(seg3); - pmem3.add_segment(seg4); - pmem3.add_segment(seg6); - EXPECT_EQ(pmem3.nsegments(), 3); - EXPECT_EQ(pmem3.segment(0).size(), 1u); - EXPECT_EQ(pmem3.segment(1).size(), 3u); - EXPECT_EQ(pmem3.segment(2).size(), 1u); - EXPECT_EQ(pmem3.is_null(), false); - - XPhysicalMemory pmem4; - pmem4.add_segment(seg0); - pmem4.add_segment(seg2); - pmem4.add_segment(seg4); - pmem4.add_segment(seg6); - EXPECT_EQ(pmem4.nsegments(), 4); - EXPECT_EQ(pmem4.segment(0).size(), 1u); - EXPECT_EQ(pmem4.segment(1).size(), 1u); - EXPECT_EQ(pmem4.segment(2).size(), 1u); - EXPECT_EQ(pmem4.segment(3).size(), 1u); - EXPECT_EQ(pmem4.is_null(), false); -} - -TEST(XPhysicalMemoryTest, remove) { - XPhysicalMemory pmem; - - pmem.add_segment(XPhysicalMemorySegment(10, 10, true)); - pmem.add_segment(XPhysicalMemorySegment(30, 10, true)); - pmem.add_segment(XPhysicalMemorySegment(50, 10, true)); - EXPECT_EQ(pmem.nsegments(), 3); - EXPECT_EQ(pmem.size(), 30u); - EXPECT_FALSE(pmem.is_null()); - - pmem.remove_segments(); - EXPECT_EQ(pmem.nsegments(), 0); - EXPECT_EQ(pmem.size(), 0u); - EXPECT_TRUE(pmem.is_null()); -} - -TEST(XPhysicalMemoryTest, split) { - XPhysicalMemory pmem; - - pmem.add_segment(XPhysicalMemorySegment(0, 10, true)); - pmem.add_segment(XPhysicalMemorySegment(10, 10, true)); - pmem.add_segment(XPhysicalMemorySegment(30, 10, true)); - EXPECT_EQ(pmem.nsegments(), 2); - EXPECT_EQ(pmem.size(), 30u); - - XPhysicalMemory pmem0 = pmem.split(1); - 
EXPECT_EQ(pmem0.nsegments(), 1); - EXPECT_EQ(pmem0.size(), 1u); - EXPECT_EQ(pmem.nsegments(), 2); - EXPECT_EQ(pmem.size(), 29u); - - XPhysicalMemory pmem1 = pmem.split(25); - EXPECT_EQ(pmem1.nsegments(), 2); - EXPECT_EQ(pmem1.size(), 25u); - EXPECT_EQ(pmem.nsegments(), 1); - EXPECT_EQ(pmem.size(), 4u); - - XPhysicalMemory pmem2 = pmem.split(4); - EXPECT_EQ(pmem2.nsegments(), 1); - EXPECT_EQ(pmem2.size(), 4u); - EXPECT_EQ(pmem.nsegments(), 0); - EXPECT_EQ(pmem.size(), 0u); -} - -TEST(XPhysicalMemoryTest, split_committed) { - XPhysicalMemory pmem0; - pmem0.add_segment(XPhysicalMemorySegment(0, 10, true)); - pmem0.add_segment(XPhysicalMemorySegment(10, 10, false)); - pmem0.add_segment(XPhysicalMemorySegment(20, 10, true)); - pmem0.add_segment(XPhysicalMemorySegment(30, 10, false)); - EXPECT_EQ(pmem0.nsegments(), 4); - EXPECT_EQ(pmem0.size(), 40u); - - XPhysicalMemory pmem1 = pmem0.split_committed(); - EXPECT_EQ(pmem0.nsegments(), 2); - EXPECT_EQ(pmem0.size(), 20u); - EXPECT_EQ(pmem1.nsegments(), 2); - EXPECT_EQ(pmem1.size(), 20u); -} diff --git a/test/hotspot/gtest/gc/x/test_xVirtualMemory.cpp b/test/hotspot/gtest/gc/x/test_xVirtualMemory.cpp deleted file mode 100644 index 6698ccfa045b8..0000000000000 --- a/test/hotspot/gtest/gc/x/test_xVirtualMemory.cpp +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/x/xVirtualMemory.inline.hpp" -#include "unittest.hpp" - -TEST(XVirtualMemory, split) { - XVirtualMemory vmem(0, 10); - - XVirtualMemory vmem0 = vmem.split(0); - EXPECT_EQ(vmem0.size(), 0u); - EXPECT_EQ(vmem.size(), 10u); - - XVirtualMemory vmem1 = vmem.split(5); - EXPECT_EQ(vmem1.size(), 5u); - EXPECT_EQ(vmem.size(), 5u); - - XVirtualMemory vmem2 = vmem.split(5); - EXPECT_EQ(vmem2.size(), 5u); - EXPECT_EQ(vmem.size(), 0u); - - XVirtualMemory vmem3 = vmem.split(0); - EXPECT_EQ(vmem3.size(), 0u); -} diff --git a/test/hotspot/jtreg/ProblemList-generational-zgc.txt b/test/hotspot/jtreg/ProblemList-generational-zgc.txt deleted file mode 100644 index 801328ec4aec5..0000000000000 --- a/test/hotspot/jtreg/ProblemList-generational-zgc.txt +++ /dev/null @@ -1,118 +0,0 @@ -# -# Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. 
-# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# - -############################################################################# -# -# List of quarantined tests for testing with Generational ZGC. -# -############################################################################# - -# Quiet all SA tests - -resourcehogs/serviceability/sa/TestHeapDumpForLargeArray.java 8307393 generic-all -serviceability/sa/CDSJMapClstats.java 8307393 generic-all -serviceability/sa/ClhsdbAttach.java 8307393 generic-all -serviceability/sa/ClhsdbAttachDifferentJVMs.java 8307393 generic-all -serviceability/sa/ClhsdbCDSCore.java 8307393 generic-all -serviceability/sa/ClhsdbCDSJstackPrintAll.java 8307393 generic-all -serviceability/sa/ClhsdbClasses.java 8307393 generic-all -serviceability/sa/ClhsdbDumpclass.java 8307393 generic-all -serviceability/sa/ClhsdbDumpheap.java 8307393 generic-all -serviceability/sa/ClhsdbField.java 8307393 generic-all -serviceability/sa/ClhsdbFindPC.java#apa 8307393 generic-all -serviceability/sa/ClhsdbFindPC.java#no-xcomp-core 8307393 generic-all -serviceability/sa/ClhsdbFindPC.java#no-xcomp-process 8307393 generic-all -serviceability/sa/ClhsdbFindPC.java#xcomp-core 8307393 generic-all -serviceability/sa/ClhsdbFindPC.java#xcomp-process 8307393 generic-all -serviceability/sa/ClhsdbFlags.java 8307393 generic-all -serviceability/sa/ClhsdbHistory.java 8307393 generic-all -serviceability/sa/ClhsdbInspect.java 8307393 generic-all -serviceability/sa/ClhsdbJdis.java 8307393 generic-all -serviceability/sa/ClhsdbJhisto.java 8307393 generic-all -serviceability/sa/ClhsdbJstack.java#id0 8307393 generic-all -serviceability/sa/ClhsdbJstack.java#id1 8307393 generic-all -serviceability/sa/ClhsdbJstackWithConcurrentLock.java 8307393 generic-all -serviceability/sa/ClhsdbJstackXcompStress.java 8307393 generic-all -serviceability/sa/ClhsdbLauncher.java 8307393 generic-all -serviceability/sa/ClhsdbLongConstant.java 8307393 generic-all -serviceability/sa/ClhsdbPmap.java 8307393 generic-all -serviceability/sa/ClhsdbPmap.java#core 8307393 generic-all -serviceability/sa/ClhsdbPmap.java#process 8307393 generic-all -serviceability/sa/ClhsdbPrintAll.java 8307393 generic-all -serviceability/sa/ClhsdbPrintAs.java 8307393 generic-all -serviceability/sa/ClhsdbPrintStatics.java 8307393 generic-all -serviceability/sa/ClhsdbPstack.java#core 8307393 generic-all -serviceability/sa/ClhsdbPstack.java#process 8307393 generic-all -serviceability/sa/ClhsdbScanOops.java 8307393 generic-all -serviceability/sa/ClhsdbSource.java 8307393 generic-all -serviceability/sa/ClhsdbSymbol.java 8307393 generic-all -serviceability/sa/ClhsdbThread.java 8307393 generic-all -serviceability/sa/ClhsdbThreadContext.java 8307393 generic-all -serviceability/sa/ClhsdbVmStructsDump.java 8307393 generic-all -serviceability/sa/ClhsdbWhere.java 8307393 generic-all 
-serviceability/sa/DeadlockDetectionTest.java 8307393 generic-all -serviceability/sa/JhsdbThreadInfoTest.java 8307393 generic-all -serviceability/sa/LingeredAppSysProps.java 8307393 generic-all -serviceability/sa/LingeredAppWithDefaultMethods.java 8307393 generic-all -serviceability/sa/LingeredAppWithEnum.java 8307393 generic-all -serviceability/sa/LingeredAppWithInterface.java 8307393 generic-all -serviceability/sa/LingeredAppWithInvokeDynamic.java 8307393 generic-all -serviceability/sa/LingeredAppWithLock.java 8307393 generic-all -serviceability/sa/LingeredAppWithNativeMethod.java 8307393 generic-all -serviceability/sa/LingeredAppWithRecComputation.java 8307393 generic-all -serviceability/sa/TestClassDump.java 8307393 generic-all -serviceability/sa/TestClhsdbJstackLock.java 8307393 generic-all -serviceability/sa/TestCpoolForInvokeDynamic.java 8307393 generic-all -serviceability/sa/TestDefaultMethods.java 8307393 generic-all -serviceability/sa/TestG1HeapRegion.java 8307393 generic-all -serviceability/sa/TestHeapDumpForInvokeDynamic.java 8307393 generic-all -serviceability/sa/TestInstanceKlassSize.java 8307393 generic-all -serviceability/sa/TestInstanceKlassSizeForInterface.java 8307393 generic-all -serviceability/sa/TestIntConstant.java 8307393 generic-all -serviceability/sa/TestJhsdbJstackLineNumbers.java 8307393 generic-all -serviceability/sa/TestJhsdbJstackLock.java 8307393 generic-all -serviceability/sa/TestJhsdbJstackMixed.java 8307393 generic-all -serviceability/sa/TestJhsdbJstackUpcall.java 8307393 generic-all -serviceability/sa/TestJmapCore.java 8307393 generic-all -serviceability/sa/TestJmapCoreMetaspace.java 8307393 generic-all -serviceability/sa/TestObjectAlignment.java 8307393 generic-all -serviceability/sa/TestObjectMonitorIterate.java 8307393 generic-all -serviceability/sa/TestPrintMdo.java 8307393 generic-all -serviceability/sa/TestRevPtrsForInvokeDynamic.java 8307393 generic-all -serviceability/sa/TestSysProps.java 8307393 generic-all -serviceability/sa/TestType.java 8307393 generic-all -serviceability/sa/TestUniverse.java 8307393 generic-all -serviceability/sa/UniqueVtableTest.java 8307393 generic-all -serviceability/sa/jmap-hprof/JMapHProfLargeHeapProc.java 8307393 generic-all -serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java 8307393 generic-all -serviceability/sa/sadebugd/ClhsdbAttachToDebugServer.java 8307393 generic-all -serviceability/sa/sadebugd/ClhsdbTestConnectArgument.java 8307393 generic-all -serviceability/sa/ClhsdbTestAllocationMerge.java 8307393 generic-all -serviceability/sa/sadebugd/DebugdConnectTest.java 8307393 generic-all -serviceability/sa/sadebugd/DebugdUtils.java 8307393 generic-all -serviceability/sa/sadebugd/DisableRegistryTest.java 8307393 generic-all -serviceability/sa/sadebugd/PmapOnDebugdTest.java 8307393 generic-all -serviceability/sa/sadebugd/RunCommandOnServerTest.java 8307393 generic-all -serviceability/sa/sadebugd/SADebugDTest.java 8307393 generic-all - -vmTestbase/gc/gctests/MemoryEaterMT/MemoryEaterMT.java 8289582 windows-x64 diff --git a/test/hotspot/jtreg/ProblemList-zgc.txt b/test/hotspot/jtreg/ProblemList-zgc.txt index 1afe56c99f8af..7b2978ba4916a 100644 --- a/test/hotspot/jtreg/ProblemList-zgc.txt +++ b/test/hotspot/jtreg/ProblemList-zgc.txt @@ -27,22 +27,92 @@ # ############################################################################# -resourcehogs/serviceability/sa/TestHeapDumpForLargeArray.java 8276539 generic-all -serviceability/sa/CDSJMapClstats.java 8276539 generic-all -serviceability/sa/ClhsdbJhisto.java 8276539 
generic-all -serviceability/sa/ClhsdbJstackWithConcurrentLock.java 8276539 generic-all -serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java 8276539 generic-all +# Quiet all SA tests -serviceability/sa/ClhsdbFindPC.java#xcomp-core 8284045 generic-all -serviceability/sa/TestJmapCore.java 8268283,8270202 generic-all -serviceability/sa/TestJmapCoreMetaspace.java 8268636 generic-all - -serviceability/sa/TestJhsdbJstackMixed.java 8248912 generic-all -serviceability/sa/ClhsdbPstack.java#process 8248912 generic-all -serviceability/sa/ClhsdbPstack.java#core 8248912 generic-all - -serviceability/sa/TestSysProps.java 8302055 generic-all - -serviceability/sa/TestHeapDumpForInvokeDynamic.java 8315646 generic-all +resourcehogs/serviceability/sa/TestHeapDumpForLargeArray.java 8307393 generic-all +serviceability/sa/CDSJMapClstats.java 8307393 generic-all +serviceability/sa/ClhsdbAttach.java 8307393 generic-all +serviceability/sa/ClhsdbAttachDifferentJVMs.java 8307393 generic-all +serviceability/sa/ClhsdbCDSCore.java 8307393 generic-all +serviceability/sa/ClhsdbCDSJstackPrintAll.java 8307393 generic-all +serviceability/sa/ClhsdbClasses.java 8307393 generic-all +serviceability/sa/ClhsdbDumpclass.java 8307393 generic-all +serviceability/sa/ClhsdbDumpheap.java 8307393 generic-all +serviceability/sa/ClhsdbField.java 8307393 generic-all +serviceability/sa/ClhsdbFindPC.java#apa 8307393 generic-all +serviceability/sa/ClhsdbFindPC.java#no-xcomp-core 8307393 generic-all +serviceability/sa/ClhsdbFindPC.java#no-xcomp-process 8307393 generic-all +serviceability/sa/ClhsdbFindPC.java#xcomp-core 8307393 generic-all +serviceability/sa/ClhsdbFindPC.java#xcomp-process 8307393 generic-all +serviceability/sa/ClhsdbFlags.java 8307393 generic-all +serviceability/sa/ClhsdbHistory.java 8307393 generic-all +serviceability/sa/ClhsdbInspect.java 8307393 generic-all +serviceability/sa/ClhsdbJdis.java 8307393 generic-all +serviceability/sa/ClhsdbJhisto.java 8307393 generic-all +serviceability/sa/ClhsdbJstack.java#id0 8307393 generic-all +serviceability/sa/ClhsdbJstack.java#id1 8307393 generic-all +serviceability/sa/ClhsdbJstackWithConcurrentLock.java 8307393 generic-all +serviceability/sa/ClhsdbJstackXcompStress.java 8307393 generic-all +serviceability/sa/ClhsdbLauncher.java 8307393 generic-all +serviceability/sa/ClhsdbLongConstant.java 8307393 generic-all +serviceability/sa/ClhsdbPmap.java 8307393 generic-all +serviceability/sa/ClhsdbPmap.java#core 8307393 generic-all +serviceability/sa/ClhsdbPmap.java#process 8307393 generic-all +serviceability/sa/ClhsdbPrintAll.java 8307393 generic-all +serviceability/sa/ClhsdbPrintAs.java 8307393 generic-all +serviceability/sa/ClhsdbPrintStatics.java 8307393 generic-all +serviceability/sa/ClhsdbPstack.java#core 8307393 generic-all +serviceability/sa/ClhsdbPstack.java#process 8307393 generic-all +serviceability/sa/ClhsdbScanOops.java 8307393 generic-all +serviceability/sa/ClhsdbSource.java 8307393 generic-all +serviceability/sa/ClhsdbSymbol.java 8307393 generic-all +serviceability/sa/ClhsdbThread.java 8307393 generic-all +serviceability/sa/ClhsdbThreadContext.java 8307393 generic-all +serviceability/sa/ClhsdbVmStructsDump.java 8307393 generic-all +serviceability/sa/ClhsdbWhere.java 8307393 generic-all +serviceability/sa/DeadlockDetectionTest.java 8307393 generic-all +serviceability/sa/JhsdbThreadInfoTest.java 8307393 generic-all +serviceability/sa/LingeredAppSysProps.java 8307393 generic-all +serviceability/sa/LingeredAppWithDefaultMethods.java 8307393 generic-all 
+serviceability/sa/LingeredAppWithEnum.java 8307393 generic-all +serviceability/sa/LingeredAppWithInterface.java 8307393 generic-all +serviceability/sa/LingeredAppWithInvokeDynamic.java 8307393 generic-all +serviceability/sa/LingeredAppWithLock.java 8307393 generic-all +serviceability/sa/LingeredAppWithNativeMethod.java 8307393 generic-all +serviceability/sa/LingeredAppWithRecComputation.java 8307393 generic-all +serviceability/sa/TestClassDump.java 8307393 generic-all +serviceability/sa/TestClhsdbJstackLock.java 8307393 generic-all +serviceability/sa/TestCpoolForInvokeDynamic.java 8307393 generic-all +serviceability/sa/TestDefaultMethods.java 8307393 generic-all +serviceability/sa/TestG1HeapRegion.java 8307393 generic-all +serviceability/sa/TestHeapDumpForInvokeDynamic.java 8307393 generic-all +serviceability/sa/TestInstanceKlassSize.java 8307393 generic-all +serviceability/sa/TestInstanceKlassSizeForInterface.java 8307393 generic-all +serviceability/sa/TestIntConstant.java 8307393 generic-all +serviceability/sa/TestJhsdbJstackLineNumbers.java 8307393 generic-all +serviceability/sa/TestJhsdbJstackLock.java 8307393 generic-all +serviceability/sa/TestJhsdbJstackMixed.java 8307393 generic-all +serviceability/sa/TestJhsdbJstackUpcall.java 8307393 generic-all +serviceability/sa/TestJmapCore.java 8307393 generic-all +serviceability/sa/TestJmapCoreMetaspace.java 8307393 generic-all +serviceability/sa/TestObjectAlignment.java 8307393 generic-all +serviceability/sa/TestObjectMonitorIterate.java 8307393 generic-all +serviceability/sa/TestPrintMdo.java 8307393 generic-all +serviceability/sa/TestRevPtrsForInvokeDynamic.java 8307393 generic-all +serviceability/sa/TestSysProps.java 8307393 generic-all +serviceability/sa/TestType.java 8307393 generic-all +serviceability/sa/TestUniverse.java 8307393 generic-all +serviceability/sa/UniqueVtableTest.java 8307393 generic-all +serviceability/sa/jmap-hprof/JMapHProfLargeHeapProc.java 8307393 generic-all +serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java 8307393 generic-all +serviceability/sa/sadebugd/ClhsdbAttachToDebugServer.java 8307393 generic-all +serviceability/sa/sadebugd/ClhsdbTestConnectArgument.java 8307393 generic-all +serviceability/sa/ClhsdbTestAllocationMerge.java 8307393 generic-all +serviceability/sa/sadebugd/DebugdConnectTest.java 8307393 generic-all +serviceability/sa/sadebugd/DebugdUtils.java 8307393 generic-all +serviceability/sa/sadebugd/DisableRegistryTest.java 8307393 generic-all +serviceability/sa/sadebugd/PmapOnDebugdTest.java 8307393 generic-all +serviceability/sa/sadebugd/RunCommandOnServerTest.java 8307393 generic-all +serviceability/sa/sadebugd/SADebugDTest.java 8307393 generic-all vmTestbase/gc/gctests/MemoryEaterMT/MemoryEaterMT.java 8289582 windows-x64 diff --git a/test/hotspot/jtreg/ProblemList.txt b/test/hotspot/jtreg/ProblemList.txt index 43ec66bade5f4..ce9e97e1715fd 100644 --- a/test/hotspot/jtreg/ProblemList.txt +++ b/test/hotspot/jtreg/ProblemList.txt @@ -59,8 +59,7 @@ compiler/codecache/CheckLargePages.java 8332654 linux-x64 compiler/vectorapi/reshape/TestVectorReinterpret.java 8320897 aix-ppc64,linux-ppc64le compiler/vectorapi/VectorLogicalOpIdentityTest.java 8302459 linux-x64,windows-x64 -compiler/vectorapi/VectorRebracket128Test.java#ZSinglegen 8330538 generic-all -compiler/vectorapi/VectorRebracket128Test.java#ZGenerational 8330538 generic-all +compiler/vectorapi/VectorRebracket128Test.java#Z 8330538 generic-all compiler/jvmci/TestUncaughtErrorInCompileMethod.java 8309073 generic-all 
compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/DataPatchTest.java 8331704 linux-riscv64 @@ -94,8 +93,7 @@ gc/TestAlwaysPreTouchBehavior.java#ParallelCollector 8334513 generic-all gc/TestAlwaysPreTouchBehavior.java#SerialCollector 8334513 generic-all gc/TestAlwaysPreTouchBehavior.java#Shenandoah 8334513 generic-all gc/TestAlwaysPreTouchBehavior.java#G1 8334513 generic-all -gc/TestAlwaysPreTouchBehavior.java#ZGenerational 8334513 generic-all -gc/TestAlwaysPreTouchBehavior.java#ZSinglegen 8334513 generic-all +gc/TestAlwaysPreTouchBehavior.java#Z 8334513 generic-all gc/TestAlwaysPreTouchBehavior.java#Epsilon 8334513 generic-all gc/stress/gclocker/TestExcessGCLockerCollections.java 8229120 generic-all diff --git a/test/hotspot/jtreg/TEST.ROOT b/test/hotspot/jtreg/TEST.ROOT index 962fc36838c37..21c5aebaa716c 100644 --- a/test/hotspot/jtreg/TEST.ROOT +++ b/test/hotspot/jtreg/TEST.ROOT @@ -61,8 +61,6 @@ requires.properties= \ vm.gc.Shenandoah \ vm.gc.Epsilon \ vm.gc.Z \ - vm.gc.ZGenerational \ - vm.gc.ZSinglegen \ vm.jvmci \ vm.jvmci.enabled \ vm.emulatedClient \ diff --git a/test/hotspot/jtreg/compiler/gcbarriers/TestArrayCopyWithLargeObjectAlignment.java b/test/hotspot/jtreg/compiler/gcbarriers/TestArrayCopyWithLargeObjectAlignment.java index dd2d485fb76df..494c571450dc8 100644 --- a/test/hotspot/jtreg/compiler/gcbarriers/TestArrayCopyWithLargeObjectAlignment.java +++ b/test/hotspot/jtreg/compiler/gcbarriers/TestArrayCopyWithLargeObjectAlignment.java @@ -30,11 +30,11 @@ * @summary Test that, when using a larger object alignment, ZGC arraycopy * barriers are only applied to actual OOPs, and not to object * alignment padding words. - * @requires vm.gc.ZGenerational + * @requires vm.gc.Z * @run main/othervm -Xbatch -XX:-TieredCompilation * -XX:CompileOnly=compiler.gcbarriers.TestArrayCopyWithLargeObjectAlignment::* * -XX:ObjectAlignmentInBytes=16 - * -XX:+UseZGC -XX:+ZGenerational + * -XX:+UseZGC * compiler.gcbarriers.TestArrayCopyWithLargeObjectAlignment */ diff --git a/test/hotspot/jtreg/compiler/gcbarriers/TestZGCBarrierElision.java b/test/hotspot/jtreg/compiler/gcbarriers/TestZGCBarrierElision.java index af047dd54572a..6f39ba7a8a1e9 100644 --- a/test/hotspot/jtreg/compiler/gcbarriers/TestZGCBarrierElision.java +++ b/test/hotspot/jtreg/compiler/gcbarriers/TestZGCBarrierElision.java @@ -34,7 +34,7 @@ * necessary barriers. The tests use volatile memory accesses and * blackholes to prevent C2 from simply optimizing them away. * @library /test/lib / - * @requires vm.gc.ZGenerational + * @requires vm.gc.Z * @run driver compiler.gcbarriers.TestZGCBarrierElision test-correctness */ @@ -43,7 +43,7 @@ * @summary Test that the ZGC barrier elision optimization elides unnecessary * barriers following simple allocation and domination rules. 
* @library /test/lib / - * @requires vm.gc.ZGenerational & (vm.simpleArch == "x64" | vm.simpleArch == "aarch64") + * @requires vm.gc.Z & (vm.simpleArch == "x64" | vm.simpleArch == "aarch64") * @run driver compiler.gcbarriers.TestZGCBarrierElision test-effectiveness */ @@ -99,7 +99,7 @@ public static void main(String[] args) { } String commonName = Common.class.getName(); TestFramework test = new TestFramework(testClass); - test.addFlags("-XX:+UseZGC", "-XX:+ZGenerational", "-XX:+UnlockExperimentalVMOptions", + test.addFlags("-XX:+UseZGC", "-XX:+UnlockExperimentalVMOptions", "-XX:CompileCommand=blackhole," + commonName + "::blackhole", "-XX:CompileCommand=dontinline," + commonName + "::nonInlinedMethod", "-XX:LoopMaxUnroll=0"); diff --git a/test/hotspot/jtreg/compiler/gcbarriers/TestZGCUnrolling.java b/test/hotspot/jtreg/compiler/gcbarriers/TestZGCUnrolling.java index 618b03e4cfb79..0c30531285e89 100644 --- a/test/hotspot/jtreg/compiler/gcbarriers/TestZGCUnrolling.java +++ b/test/hotspot/jtreg/compiler/gcbarriers/TestZGCUnrolling.java @@ -34,7 +34,7 @@ * The tests use volatile memory accesses to prevent C2 from simply * optimizing them away. * @library /test/lib / - * @requires vm.gc.ZGenerational + * @requires vm.gc.Z * @run driver compiler.gcbarriers.TestZGCUnrolling */ @@ -55,8 +55,7 @@ static class Outer { } public static void main(String[] args) { - TestFramework.runWithFlags("-XX:+UseZGC", "-XX:+ZGenerational", - "-XX:LoopUnrollLimit=24"); + TestFramework.runWithFlags("-XX:+UseZGC", "-XX:LoopUnrollLimit=24"); } @Test diff --git a/test/hotspot/jtreg/compiler/gcbarriers/UnsafeIntrinsicsTest.java b/test/hotspot/jtreg/compiler/gcbarriers/UnsafeIntrinsicsTest.java index 6a511fd60d96a..65901f5e65656 100644 --- a/test/hotspot/jtreg/compiler/gcbarriers/UnsafeIntrinsicsTest.java +++ b/test/hotspot/jtreg/compiler/gcbarriers/UnsafeIntrinsicsTest.java @@ -22,14 +22,14 @@ */ /* - * @test id=ZSinglegenDebug + * @test id=ZDebug * @key randomness * @bug 8059022 8271855 * @modules java.base/jdk.internal.misc:+open * @summary Validate barriers after Unsafe getReference, CAS and swap (GetAndSet) - * @requires vm.gc.ZSinglegen & vm.debug + * @requires vm.gc.Z & vm.debug * @library /test/lib - * @run main/othervm -XX:+UseZGC -XX:-ZGenerational + * @run main/othervm -XX:+UseZGC * -XX:+UnlockDiagnosticVMOptions * -XX:+ZVerifyOops -XX:ZCollectionInterval=1 * -XX:-CreateCoredumpOnCrash @@ -38,46 +38,14 @@ */ /* - * @test id=ZSinglegen + * @test id=Z * @key randomness * @bug 8059022 8271855 * @modules java.base/jdk.internal.misc:+open * @summary Validate barriers after Unsafe getReference, CAS and swap (GetAndSet) - * @requires vm.gc.ZSinglegen & !vm.debug + * @requires vm.gc.Z & !vm.debug * @library /test/lib - * @run main/othervm -XX:+UseZGC -XX:-ZGenerational - * -XX:+UnlockDiagnosticVMOptions - * -XX:ZCollectionInterval=1 - * -XX:-CreateCoredumpOnCrash - * -XX:CompileCommand=dontinline,*::mergeImpl* - * compiler.gcbarriers.UnsafeIntrinsicsTest - */ - -/* - * @test id=ZGenerationalDebug - * @key randomness - * @bug 8059022 8271855 - * @modules java.base/jdk.internal.misc:+open - * @summary Validate barriers after Unsafe getReference, CAS and swap (GetAndSet) - * @requires vm.gc.ZGenerational & vm.debug - * @library /test/lib - * @run main/othervm -XX:+UseZGC -XX:+ZGenerational - * -XX:+UnlockDiagnosticVMOptions - * -XX:+ZVerifyOops -XX:ZCollectionInterval=1 - * -XX:-CreateCoredumpOnCrash - * -XX:CompileCommand=dontinline,*::mergeImpl* - * compiler.gcbarriers.UnsafeIntrinsicsTest - */ - -/* - * @test 
id=ZGenerational - * @key randomness - * @bug 8059022 8271855 - * @modules java.base/jdk.internal.misc:+open - * @summary Validate barriers after Unsafe getReference, CAS and swap (GetAndSet) - * @requires vm.gc.ZGenerational & !vm.debug - * @library /test/lib - * @run main/othervm -XX:+UseZGC -XX:+ZGenerational + * @run main/othervm -XX:+UseZGC * -XX:+UnlockDiagnosticVMOptions * -XX:ZCollectionInterval=1 * -XX:-CreateCoredumpOnCrash diff --git a/test/hotspot/jtreg/compiler/loopopts/TestRangeCheckPredicatesControl.java b/test/hotspot/jtreg/compiler/loopopts/TestRangeCheckPredicatesControl.java index a46de67de052e..1f64ed28d8aa1 100644 --- a/test/hotspot/jtreg/compiler/loopopts/TestRangeCheckPredicatesControl.java +++ b/test/hotspot/jtreg/compiler/loopopts/TestRangeCheckPredicatesControl.java @@ -22,25 +22,14 @@ */ /* - * @test id=ZSinglegen + * @test id=Z * @key stress randomness - * @requires vm.gc.ZSinglegen + * @requires vm.gc.Z * @bug 8237859 * @summary A LoadP node has a wrong control input (too early) which results in an out-of-bounds read of an object array with ZGC. * - * @run main/othervm -XX:+UseZGC -XX:-ZGenerational compiler.loopopts.TestRangeCheckPredicatesControl - * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -XX:+UnlockDiagnosticVMOptions -XX:+IgnoreUnrecognizedVMOptions -XX:+StressGCM compiler.loopopts.TestRangeCheckPredicatesControl - */ - -/* - * @test id=ZGenerational - * @key stress randomness - * @requires vm.gc.ZGenerational - * @bug 8237859 - * @summary A LoadP node has a wrong control input (too early) which results in an out-of-bounds read of an object array with ZGC. - * - * @run main/othervm -XX:+UseZGC -XX:+ZGenerational compiler.loopopts.TestRangeCheckPredicatesControl - * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -XX:+UnlockDiagnosticVMOptions -XX:+IgnoreUnrecognizedVMOptions -XX:+StressGCM compiler.loopopts.TestRangeCheckPredicatesControl + * @run main/othervm -XX:+UseZGC compiler.loopopts.TestRangeCheckPredicatesControl + * @run main/othervm -XX:+UseZGC -XX:+UnlockDiagnosticVMOptions -XX:+IgnoreUnrecognizedVMOptions -XX:+StressGCM compiler.loopopts.TestRangeCheckPredicatesControl */ package compiler.loopopts; diff --git a/test/hotspot/jtreg/compiler/loopstripmining/TestNoWarningLoopStripMiningIterSet.java b/test/hotspot/jtreg/compiler/loopstripmining/TestNoWarningLoopStripMiningIterSet.java index c356e4495c245..c7fedf8982e68 100644 --- a/test/hotspot/jtreg/compiler/loopstripmining/TestNoWarningLoopStripMiningIterSet.java +++ b/test/hotspot/jtreg/compiler/loopstripmining/TestNoWarningLoopStripMiningIterSet.java @@ -44,25 +44,14 @@ */ /* - * @test id=ZSinglegen + * @test id=Z * @bug 8241486 * @summary G1/Z give warning when using LoopStripMiningIter and turn off LoopStripMiningIter (0) * @requires vm.flagless * @requires vm.flavor == "server" & !vm.graal.enabled - * @requires vm.gc.ZSinglegen + * @requires vm.gc.Z * @library /test/lib - * @run driver TestNoWarningLoopStripMiningIterSet Z -XX:-ZGenerational - */ - -/* - * @test id=ZGenerational - * @bug 8241486 - * @summary G1/Z give warning when using LoopStripMiningIter and turn off LoopStripMiningIter (0) - * @requires vm.flagless - * @requires vm.flavor == "server" & !vm.graal.enabled - * @requires vm.gc.ZGenerational - * @library /test/lib - * @run driver TestNoWarningLoopStripMiningIterSet Z -XX:+ZGenerational + * @run driver TestNoWarningLoopStripMiningIterSet Z */ /* @@ -106,18 +95,9 @@ public static void testWith(Consumer check, String msg, boolean public static void main(String[] args) 
throws Exception { String gc = "-XX:+Use" + args[0] + "GC"; - if (args.length > 1) { - String extraVMArg = args[1]; - testWith(output -> output.shouldNotContain(CLSOffLSMGreaterZero), "should have CLS and LSM enabled", true, 100, "-XX:LoopStripMiningIter=100", gc, extraVMArg); - testWith(output -> output.shouldContain(CLSOffLSMGreaterZero), "should have CLS and LSM disabled", false, 0, "-XX:-UseCountedLoopSafepoints", "-XX:LoopStripMiningIter=100", gc, extraVMArg); - testWith(output -> output.shouldContain(CLSOnLSMEqualZero), "should have CLS and LSM enabled", true, 1, "-XX:LoopStripMiningIter=0", gc, extraVMArg); - testWith(output -> output.shouldNotContain(CLSOnLSMEqualZero), "should have CLS and LSM disabled", false, 0, "-XX:-UseCountedLoopSafepoints", "-XX:LoopStripMiningIter=0", gc, extraVMArg); - } else { - testWith(output -> output.shouldNotContain(CLSOffLSMGreaterZero), "should have CLS and LSM enabled", true, 100, "-XX:LoopStripMiningIter=100", gc); - testWith(output -> output.shouldContain(CLSOffLSMGreaterZero), "should have CLS and LSM disabled", false, 0, "-XX:-UseCountedLoopSafepoints", "-XX:LoopStripMiningIter=100", gc); - testWith(output -> output.shouldContain(CLSOnLSMEqualZero), "should have CLS and LSM enabled", true, 1, "-XX:LoopStripMiningIter=0", gc); - testWith(output -> output.shouldNotContain(CLSOnLSMEqualZero), "should have CLS and LSM disabled", false, 0, "-XX:-UseCountedLoopSafepoints", "-XX:LoopStripMiningIter=0", gc); - - } + testWith(output -> output.shouldNotContain(CLSOffLSMGreaterZero), "should have CLS and LSM enabled", true, 100, "-XX:LoopStripMiningIter=100", gc); + testWith(output -> output.shouldContain(CLSOffLSMGreaterZero), "should have CLS and LSM disabled", false, 0, "-XX:-UseCountedLoopSafepoints", "-XX:LoopStripMiningIter=100", gc); + testWith(output -> output.shouldContain(CLSOnLSMEqualZero), "should have CLS and LSM enabled", true, 1, "-XX:LoopStripMiningIter=0", gc); + testWith(output -> output.shouldNotContain(CLSOnLSMEqualZero), "should have CLS and LSM disabled", false, 0, "-XX:-UseCountedLoopSafepoints", "-XX:LoopStripMiningIter=0", gc); } } diff --git a/test/hotspot/jtreg/compiler/uncommontrap/TestDeoptOOM.java b/test/hotspot/jtreg/compiler/uncommontrap/TestDeoptOOM.java index 6f1f4138435f7..a0a2aacde3f4a 100644 --- a/test/hotspot/jtreg/compiler/uncommontrap/TestDeoptOOM.java +++ b/test/hotspot/jtreg/compiler/uncommontrap/TestDeoptOOM.java @@ -34,28 +34,15 @@ */ /* - * @test id=ZSinglegen + * @test id=Z * @bug 8273456 * @summary Test that ttyLock is ranked above StackWatermark_lock - * @requires !vm.graal.enabled & vm.gc.ZSinglegen + * @requires !vm.graal.enabled & vm.gc.Z * @run main/othervm -XX:-BackgroundCompilation -Xmx128M -XX:+IgnoreUnrecognizedVMOptions -XX:+VerifyStack * -XX:CompileCommand=exclude,compiler.uncommontrap.TestDeoptOOM::main * -XX:CompileCommand=exclude,compiler.uncommontrap.TestDeoptOOM::m9_1 * -XX:+UnlockDiagnosticVMOptions - * -XX:+UseZGC -XX:-ZGenerational -XX:+LogCompilation -XX:+PrintDeoptimizationDetails -XX:+TraceDeoptimization -XX:+Verbose - * compiler.uncommontrap.TestDeoptOOM - */ - -/* - * @test id=ZGenerational - * @bug 8273456 - * @summary Test that ttyLock is ranked above StackWatermark_lock - * @requires !vm.graal.enabled & vm.gc.ZGenerational - * @run main/othervm -XX:-BackgroundCompilation -Xmx128M -XX:+IgnoreUnrecognizedVMOptions -XX:+VerifyStack - * -XX:CompileCommand=exclude,compiler.uncommontrap.TestDeoptOOM::main - * -XX:CompileCommand=exclude,compiler.uncommontrap.TestDeoptOOM::m9_1 - * 
-XX:+UnlockDiagnosticVMOptions - * -XX:+UseZGC -XX:+ZGenerational -XX:+LogCompilation -XX:+PrintDeoptimizationDetails -XX:+TraceDeoptimization -XX:+Verbose + * -XX:+UseZGC -XX:+LogCompilation -XX:+PrintDeoptimizationDetails -XX:+TraceDeoptimization -XX:+Verbose * compiler.uncommontrap.TestDeoptOOM */ diff --git a/test/hotspot/jtreg/compiler/vectorapi/VectorRebracket128Test.java b/test/hotspot/jtreg/compiler/vectorapi/VectorRebracket128Test.java index 239f525640509..4f7f03590dda3 100644 --- a/test/hotspot/jtreg/compiler/vectorapi/VectorRebracket128Test.java +++ b/test/hotspot/jtreg/compiler/vectorapi/VectorRebracket128Test.java @@ -35,23 +35,13 @@ import jdk.internal.vm.annotation.ForceInline; /* - * @test id=ZSinglegen + * @test id=Z * @bug 8260473 - * @requires vm.gc.ZSinglegen + * @requires vm.gc.Z * @modules jdk.incubator.vector * @modules java.base/jdk.internal.vm.annotation * @run testng/othervm -XX:CompileCommand=compileonly,jdk/incubator/vector/ByteVector.fromMemorySegment - * -XX:-TieredCompilation -XX:CICompilerCount=1 -XX:+UseZGC -XX:-ZGenerational -Xbatch -Xmx256m VectorRebracket128Test - */ - -/* - * @test id=ZGenerational - * @bug 8260473 - * @requires vm.gc.ZGenerational - * @modules jdk.incubator.vector - * @modules java.base/jdk.internal.vm.annotation - * @run testng/othervm -XX:CompileCommand=compileonly,jdk/incubator/vector/ByteVector.fromMemorySegment - * -XX:-TieredCompilation -XX:CICompilerCount=1 -XX:+UseZGC -XX:+ZGenerational -Xbatch -Xmx256m VectorRebracket128Test + * -XX:-TieredCompilation -XX:CICompilerCount=1 -XX:+UseZGC -Xbatch -Xmx256m VectorRebracket128Test */ @Test diff --git a/test/hotspot/jtreg/gc/TestAlwaysPreTouchBehavior.java b/test/hotspot/jtreg/gc/TestAlwaysPreTouchBehavior.java index c282c2876eaee..9f6c915d1c00d 100644 --- a/test/hotspot/jtreg/gc/TestAlwaysPreTouchBehavior.java +++ b/test/hotspot/jtreg/gc/TestAlwaysPreTouchBehavior.java @@ -73,27 +73,15 @@ */ /** - * @test id=ZGenerational + * @test id=Z * @summary tests AlwaysPreTouch - * @requires vm.gc.ZGenerational + * @requires vm.gc.Z * @requires os.maxMemory > 2G * @requires os.family != "aix" * @library /test/lib * @build jdk.test.whitebox.WhiteBox * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox - * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UseZGC -XX:+ZGenerational -Xmx512m -Xms512m -XX:+AlwaysPreTouch gc.TestAlwaysPreTouchBehavior - */ - -/** - * @test id=ZSinglegen - * @summary tests AlwaysPreTouch - * @requires vm.gc.ZSinglegen - * @requires os.maxMemory > 2G - * @requires os.family != "aix" - * @library /test/lib - * @build jdk.test.whitebox.WhiteBox - * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox - * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UseZGC -XX:-ZGenerational -Xmx512m -Xms512m -XX:+AlwaysPreTouch gc.TestAlwaysPreTouchBehavior + * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UseZGC -Xmx512m -Xms512m -XX:+AlwaysPreTouch gc.TestAlwaysPreTouchBehavior */ /** diff --git a/test/hotspot/jtreg/gc/TestReferenceClearDuringReferenceProcessing.java b/test/hotspot/jtreg/gc/TestReferenceClearDuringReferenceProcessing.java index f66387b4cd70d..3be7ba241e77d 100644 --- a/test/hotspot/jtreg/gc/TestReferenceClearDuringReferenceProcessing.java +++ b/test/hotspot/jtreg/gc/TestReferenceClearDuringReferenceProcessing.java @@ -36,27 +36,15 @@ * gc.TestReferenceClearDuringReferenceProcessing */ -/* @test id=ZSinglegen +/* @test id=Z * @bug 8256517 - * @requires vm.gc.ZSinglegen + * @requires vm.gc.Z * @library /test/lib * @build jdk.test.whitebox.WhiteBox * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox * @run main/othervm * -Xbootclasspath/a:. - * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UseZGC -XX:-ZGenerational - * gc.TestReferenceClearDuringReferenceProcessing - */ - -/* @test id=ZGenerational - * @bug 8256517 - * @requires vm.gc.ZGenerational - * @library /test/lib - * @build jdk.test.whitebox.WhiteBox - * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox - * @run main/othervm - * -Xbootclasspath/a:. - * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UseZGC -XX:+ZGenerational + * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UseZGC * gc.TestReferenceClearDuringReferenceProcessing */ diff --git a/test/hotspot/jtreg/gc/TestSystemGC.java b/test/hotspot/jtreg/gc/TestSystemGC.java index c81b98a562f8c..6d37dc3d44b0a 100644 --- a/test/hotspot/jtreg/gc/TestSystemGC.java +++ b/test/hotspot/jtreg/gc/TestSystemGC.java @@ -58,21 +58,12 @@ */ /* - * @test id=ZSinglegen - * @requires vm.gc.ZSinglegen + * @test id=Z + * @requires vm.gc.Z * @comment ZGC will not start when LargePages cannot be allocated, therefore * we do not run such configuration. * @summary Runs System.gc() with different flags. - * @run main/othervm -XX:+UseZGC -XX:-ZGenerational gc.TestSystemGC - */ - -/* - * @test id=ZGenerational - * @requires vm.gc.ZGenerational - * @comment ZGC will not start when LargePages cannot be allocated, therefore - * we do not run such configuration. - * @summary Runs System.gc() with different flags. - * @run main/othervm -XX:+UseZGC -XX:+ZGenerational gc.TestSystemGC + * @run main/othervm -XX:+UseZGC gc.TestSystemGC */ public class TestSystemGC { diff --git a/test/hotspot/jtreg/gc/TestVerifySubSet.java b/test/hotspot/jtreg/gc/TestVerifySubSet.java index 08cddc74a00be..3dc28549b56cf 100644 --- a/test/hotspot/jtreg/gc/TestVerifySubSet.java +++ b/test/hotspot/jtreg/gc/TestVerifySubSet.java @@ -26,9 +26,9 @@ /* @test TestVerifySubSet.java * @bug 8072725 * @summary Test VerifySubSet option - * @comment Generational ZGC can't use the generic Universe::verify - * because there's no guarantee that we will ever have - * a stable snapshot where all roots can be verified. + * @comment ZGC can't use the generic Universe::verify because + * there's no guarantee that we will ever have a stable + * snapshot where all roots can be verified. 
* @requires vm.gc != "Z" * @library /test/lib * @modules java.base/jdk.internal.misc diff --git a/test/hotspot/jtreg/gc/cslocker/TestCSLocker.java b/test/hotspot/jtreg/gc/cslocker/TestCSLocker.java index 2a6e3a1bfd045..bd5b6e28aa810 100644 --- a/test/hotspot/jtreg/gc/cslocker/TestCSLocker.java +++ b/test/hotspot/jtreg/gc/cslocker/TestCSLocker.java @@ -33,11 +33,11 @@ * @summary completely in JNI CS, while other is trying to allocate memory * @summary provoking GC. OOM means FAIL, deadlock means PASS. * - * @comment This test assumes that no allocation happens during the sleep loop, \ - * which is something that we can't guarantee. With Generational ZGC we \ - * see test timeouts because the main thread allocates and waits for the \ - * GC, which waits for the CSLocker, which waits for the main thread. \ - * @requires !vm.opt.final.ZGenerational + * @comment This test assumes that no allocation happens during the sleep loop, + * which is something that we can't guarantee. With ZGC we see test + * timeouts because the main thread allocates and waits for the GC, + * which waits for the CSLocker, which waits for the main thread. + * @requires vm.gc != "Z" * * @run main/native/othervm -Xmx256m gc.cslocker.TestCSLocker */ diff --git a/test/hotspot/jtreg/gc/stress/gcbasher/TestGCBasherWithZ.java b/test/hotspot/jtreg/gc/stress/gcbasher/TestGCBasherWithZ.java index dd54556697dd4..fb58d5784d44c 100644 --- a/test/hotspot/jtreg/gc/stress/gcbasher/TestGCBasherWithZ.java +++ b/test/hotspot/jtreg/gc/stress/gcbasher/TestGCBasherWithZ.java @@ -27,47 +27,27 @@ import java.io.IOException; /* - * @test TestGCBasherWithZGenerational + * @test TestGCBasherWithZ * @key stress * @library / - * @requires vm.gc.ZGenerational + * @requires vm.gc.Z * @requires vm.flavor == "server" & !vm.emulatedClient * @summary Stress ZGC - * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx384m -server -XX:+UseZGC -XX:+ZGenerational gc.stress.gcbasher.TestGCBasherWithZ 120000 - */ -/* - * @test TestGCBasherWithZSinglegen - * @key stress - * @library / - * @requires vm.gc.ZSinglegen - * @requires vm.flavor == "server" & !vm.emulatedClient - * @summary Stress ZGC - * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx384m -server -XX:+UseZGC -XX:-ZGenerational gc.stress.gcbasher.TestGCBasherWithZ 120000 + * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx384m -server -XX:+UseZGC gc.stress.gcbasher.TestGCBasherWithZ 120000 */ /* - * @test TestGCBasherDeoptWithZGenerational + * @test TestGCBasherDeoptWithZ * @key stress * @library / - * @requires vm.gc.ZGenerational + * @requires vm.gc.Z * @requires vm.flavor == "server" & !vm.emulatedClient & vm.opt.ClassUnloading != false * @summary Stress ZGC with nmethod barrier forced deoptimization enabled. - * @run main/othervm/timeout=200 -Xlog:gc*=info,nmethod+barrier=trace -Xmx384m -server -XX:+UseZGC -XX:+ZGenerational + * @run main/othervm/timeout=200 -Xlog:gc*=info,nmethod+barrier=trace -Xmx384m -server -XX:+UseZGC * -XX:+UnlockDiagnosticVMOptions -XX:+DeoptimizeNMethodBarriersALot -XX:-Inline * gc.stress.gcbasher.TestGCBasherWithZ 120000 */ -/* - * @test TestGCBasherDeoptWithZSinglegen - * @key stress - * @library / - * @requires vm.gc.ZSinglegen - * @requires vm.flavor == "server" & !vm.emulatedClient & vm.opt.ClassUnloading != false - * @summary Stress ZGC with nmethod barrier forced deoptimization enabled. 
- * @run main/othervm/timeout=200 -Xlog:gc*=info,nmethod+barrier=trace -Xmx384m -server -XX:+UseZGC -XX:-ZGenerational - * -XX:+UnlockDiagnosticVMOptions -XX:+DeoptimizeNMethodBarriersALot -XX:-Inline - * gc.stress.gcbasher.TestGCBasherWithZ 120000 - */ public class TestGCBasherWithZ { public static void main(String[] args) throws IOException { TestGCBasher.main(args); diff --git a/test/hotspot/jtreg/gc/stress/gcold/TestGCOldWithZ.java b/test/hotspot/jtreg/gc/stress/gcold/TestGCOldWithZ.java index 0f77a6c286ab3..0741cf1fba310 100644 --- a/test/hotspot/jtreg/gc/stress/gcold/TestGCOldWithZ.java +++ b/test/hotspot/jtreg/gc/stress/gcold/TestGCOldWithZ.java @@ -25,24 +25,15 @@ package gc.stress.gcold; /* - * @test TestGCOldWithZGenerational + * @test TestGCOldWithZ * @key randomness * @library / /test/lib - * @requires vm.gc.ZGenerational + * @requires vm.gc.Z * @summary Stress the Z - * @run main/othervm -Xmx384M -XX:+UseZGC -XX:+ZGenerational gc.stress.gcold.TestGCOldWithZ 50 1 20 10 10000 - * @run main/othervm -Xmx256m -XX:+UseZGC -XX:+ZGenerational gc.stress.gcold.TestGCOldWithZ 50 5 20 1 5000 + * @run main/othervm -Xmx384M -XX:+UseZGC gc.stress.gcold.TestGCOldWithZ 50 1 20 10 10000 + * @run main/othervm -Xmx256m -XX:+UseZGC gc.stress.gcold.TestGCOldWithZ 50 5 20 1 5000 */ -/* - * @test TestGCOldWithZSinglegen - * @key randomness - * @library / /test/lib - * @requires vm.gc.ZSinglegen - * @summary Stress the Z - * @run main/othervm -Xmx384M -XX:+UseZGC -XX:-ZGenerational gc.stress.gcold.TestGCOldWithZ 50 1 20 10 10000 - * @run main/othervm -Xmx256m -XX:+UseZGC -XX:-ZGenerational gc.stress.gcold.TestGCOldWithZ 50 5 20 1 5000 - */ public class TestGCOldWithZ { public static void main(String[] args) { TestGCOld.main(args); diff --git a/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationAgeThreshold.java b/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationAgeThreshold.java index e70d63cf39718..090a49aa80b2f 100644 --- a/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationAgeThreshold.java +++ b/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationAgeThreshold.java @@ -76,29 +76,16 @@ */ /* - * @test id=ZSinglegen + * @test id=Z * @summary Test string deduplication age threshold * @bug 8029075 - * @requires vm.gc.ZSinglegen + * @requires vm.gc.Z * @library /test/lib * @library / * @modules java.base/jdk.internal.misc:open * @modules java.base/java.lang:open * java.management - * @run driver gc.stringdedup.TestStringDeduplicationAgeThreshold Z -XX:-ZGenerational - */ - -/* - * @test id=ZGenerational - * @summary Test string deduplication age threshold - * @bug 8029075 - * @requires vm.gc.ZGenerational - * @library /test/lib - * @library / - * @modules java.base/jdk.internal.misc:open - * @modules java.base/java.lang:open - * java.management - * @run driver gc.stringdedup.TestStringDeduplicationAgeThreshold Z -XX:+ZGenerational + * @run driver gc.stringdedup.TestStringDeduplicationAgeThreshold Z */ public class TestStringDeduplicationAgeThreshold { diff --git a/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationFullGC.java b/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationFullGC.java index 03793f03b1bc1..7105be7d47830 100644 --- a/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationFullGC.java +++ b/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationFullGC.java @@ -76,29 +76,16 @@ */ /* - * @test id=ZSinglegen + * @test id=Z * @summary Test string deduplication during full GC * @bug 8029075 - * @requires vm.gc.ZSinglegen + * @requires vm.gc.Z * 
@library /test/lib * @library / * @modules java.base/jdk.internal.misc:open * @modules java.base/java.lang:open * java.management - * @run driver gc.stringdedup.TestStringDeduplicationFullGC Z -XX:-ZGenerational - */ - -/* - * @test id=ZGenerational - * @summary Test string deduplication during full GC - * @bug 8029075 - * @requires vm.gc.ZGenerational - * @library /test/lib - * @library / - * @modules java.base/jdk.internal.misc:open - * @modules java.base/java.lang:open - * java.management - * @run driver gc.stringdedup.TestStringDeduplicationFullGC Z -XX:+ZGenerational + * @run driver gc.stringdedup.TestStringDeduplicationFullGC Z */ public class TestStringDeduplicationFullGC { diff --git a/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationInterned.java b/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationInterned.java index 0981be49aecb4..124bf9d5cf94b 100644 --- a/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationInterned.java +++ b/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationInterned.java @@ -76,29 +76,16 @@ */ /* - * @test id=ZSinglegen + * @test id=Z * @summary Test string deduplication of interned strings * @bug 8029075 - * @requires vm.gc.ZSinglegen + * @requires vm.gc.Z * @library /test/lib * @library / * @modules java.base/jdk.internal.misc:open * @modules java.base/java.lang:open * java.management - * @run driver gc.stringdedup.TestStringDeduplicationInterned Z -XX:-ZGenerational - */ - -/* - * @test id=ZGenerational - * @summary Test string deduplication of interned strings - * @bug 8029075 - * @requires vm.gc.ZGenerational - * @library /test/lib - * @library / - * @modules java.base/jdk.internal.misc:open - * @modules java.base/java.lang:open - * java.management - * @run driver gc.stringdedup.TestStringDeduplicationInterned Z -XX:+ZGenerational + * @run driver gc.stringdedup.TestStringDeduplicationInterned Z */ public class TestStringDeduplicationInterned { diff --git a/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationPrintOptions.java b/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationPrintOptions.java index 265cb1b9dd378..0659bc5aea378 100644 --- a/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationPrintOptions.java +++ b/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationPrintOptions.java @@ -76,29 +76,16 @@ */ /* - * @test id=ZSinglegen + * @test id=Z * @summary Test string deduplication print options * @bug 8029075 - * @requires vm.gc.ZSinglegen + * @requires vm.gc.Z * @library /test/lib * @library / * @modules java.base/jdk.internal.misc:open * @modules java.base/java.lang:open * java.management - * @run driver gc.stringdedup.TestStringDeduplicationPrintOptions Z -XX:-ZGenerational - */ - -/* - * @test id=ZGenerational - * @summary Test string deduplication print options - * @bug 8029075 - * @requires vm.gc.ZGenerational - * @library /test/lib - * @library / - * @modules java.base/jdk.internal.misc:open - * @modules java.base/java.lang:open - * java.management - * @run driver gc.stringdedup.TestStringDeduplicationPrintOptions Z -XX:+ZGenerational + * @run driver gc.stringdedup.TestStringDeduplicationPrintOptions Z */ public class TestStringDeduplicationPrintOptions { diff --git a/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationTableResize.java b/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationTableResize.java index 2c16e9c4c4a65..d82244ef07a18 100644 --- a/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationTableResize.java +++ 
b/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationTableResize.java @@ -76,29 +76,16 @@ */ /* - * @test id=ZSinglegen + * @test id=Z * @summary Test string deduplication table resize * @bug 8029075 - * @requires vm.gc.ZSinglegen + * @requires vm.gc.Z * @library /test/lib * @library / * @modules java.base/jdk.internal.misc:open * @modules java.base/java.lang:open * java.management - * @run driver gc.stringdedup.TestStringDeduplicationTableResize Z -XX:-ZGenerational - */ - -/* - * @test id=ZGenerational - * @summary Test string deduplication table resize - * @bug 8029075 - * @requires vm.gc.ZGenerational - * @library /test/lib - * @library / - * @modules java.base/jdk.internal.misc:open - * @modules java.base/java.lang:open - * java.management - * @run driver gc.stringdedup.TestStringDeduplicationTableResize Z -XX:+ZGenerational + * @run driver gc.stringdedup.TestStringDeduplicationTableResize Z */ public class TestStringDeduplicationTableResize { diff --git a/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationTools.java b/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationTools.java index 2a6652eb06ef4..3dbedd61d124f 100644 --- a/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationTools.java +++ b/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationTools.java @@ -57,7 +57,6 @@ class TestStringDeduplicationTools { private static byte[] dummy; private static String selectedGC = null; - private static String selectedGCMode = null; static { try { @@ -74,9 +73,6 @@ class TestStringDeduplicationTools { public static void selectGC(String[] args) { selectedGC = args[0]; - if (args.length > 1) { - selectedGCMode = args[1]; - } } private static Object getValue(String string) { @@ -137,16 +133,10 @@ public void handleNotification(Notification n, Object o) { gcCount++; } } else if (info.getGcName().startsWith("ZGC")) { - // Generational ZGC only triggers string deduplications from major collections + // ZGC only triggers string deduplications from major collections if (info.getGcName().startsWith("ZGC Major") && "end of GC cycle".equals(info.getGcAction())) { gcCount++; } - - // Single-gen ZGC - if (!info.getGcName().startsWith("ZGC Major") && !info.getGcName().startsWith("ZGC Minor") && - "end of GC cycle".equals(info.getGcAction())) { - gcCount++; - } } else if (info.getGcName().startsWith("G1")) { if ("end of minor GC".equals(info.getGcAction())) { gcCount++; @@ -325,9 +315,6 @@ private static OutputAnalyzer runTest(String... 
extraArgs) throws Exception { ArrayList args = new ArrayList(); args.add("-XX:+Use" + selectedGC + "GC"); - if (selectedGCMode != null) { - args.add(selectedGCMode); - } args.addAll(Arrays.asList(defaultArgs)); args.addAll(Arrays.asList(extraArgs)); diff --git a/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationYoungGC.java b/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationYoungGC.java index d8787cc70bac2..053dc0a28621a 100644 --- a/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationYoungGC.java +++ b/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationYoungGC.java @@ -76,29 +76,16 @@ */ /* - * @test id=ZSinglegen + * @test id=Z * @summary Test string deduplication during young GC * @bug 8029075 - * @requires vm.gc.ZSinglegen + * @requires vm.gc.Z * @library /test/lib * @library / * @modules java.base/jdk.internal.misc:open * @modules java.base/java.lang:open * java.management - * @run driver gc.stringdedup.TestStringDeduplicationYoungGC Z -XX:-ZGenerational - */ - -/* - * @test id=ZGenerational - * @summary Test string deduplication during young GC - * @bug 8029075 - * @requires vm.gc.ZGenerational - * @library /test/lib - * @library / - * @modules java.base/jdk.internal.misc:open - * @modules java.base/java.lang:open - * java.management - * @run driver gc.stringdedup.TestStringDeduplicationYoungGC Z -XX:+ZGenerational + * @run driver gc.stringdedup.TestStringDeduplicationYoungGC Z */ public class TestStringDeduplicationYoungGC { diff --git a/test/hotspot/jtreg/gc/x/TestAllocateHeapAt.java b/test/hotspot/jtreg/gc/x/TestAllocateHeapAt.java deleted file mode 100644 index 6a9768de7e6e2..0000000000000 --- a/test/hotspot/jtreg/gc/x/TestAllocateHeapAt.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -package gc.x; - -/* - * @test TestAllocateHeapAt - * @requires vm.gc.ZSinglegen & os.family == "linux" - * @requires !vm.opt.final.UseLargePages - * @summary Test ZGC with -XX:AllocateHeapAt - * @library /test/lib - * @run main/othervm gc.x.TestAllocateHeapAt . 
true - * @run main/othervm gc.x.TestAllocateHeapAt non-existing-directory false - */ - -import jdk.test.lib.process.ProcessTools; - -public class TestAllocateHeapAt { - public static void main(String[] args) throws Exception { - final String directory = args[0]; - final boolean exists = Boolean.parseBoolean(args[1]); - final String heapBackingFile = "Heap Backing File: " + directory; - final String failedToCreateFile = "Failed to create file " + directory; - - ProcessTools.executeTestJava( - "-XX:+UseZGC", - "-XX:-ZGenerational", - "-Xlog:gc*", - "-Xms32M", - "-Xmx32M", - "-XX:AllocateHeapAt=" + directory, - "-version") - .shouldContain(exists ? heapBackingFile : failedToCreateFile) - .shouldNotContain(exists ? failedToCreateFile : heapBackingFile) - .shouldHaveExitValue(exists ? 0 : 1); - } -} diff --git a/test/hotspot/jtreg/gc/x/TestAlwaysPreTouch.java b/test/hotspot/jtreg/gc/x/TestAlwaysPreTouch.java deleted file mode 100644 index b6ba6bf7a0593..0000000000000 --- a/test/hotspot/jtreg/gc/x/TestAlwaysPreTouch.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -package gc.x; - -/* - * @test TestAlwaysPreTouch - * @requires vm.gc.ZSinglegen - * @summary Test ZGC parallel pre-touch - * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Xlog:gc* -XX:-AlwaysPreTouch -Xms128M -Xmx128M gc.x.TestAlwaysPreTouch - * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Xlog:gc* -XX:+AlwaysPreTouch -XX:ParallelGCThreads=1 -Xms2M -Xmx128M gc.x.TestAlwaysPreTouch - * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Xlog:gc* -XX:+AlwaysPreTouch -XX:ParallelGCThreads=8 -Xms2M -Xmx128M gc.x.TestAlwaysPreTouch - * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Xlog:gc* -XX:+AlwaysPreTouch -XX:ParallelGCThreads=1 -Xms128M -Xmx128M gc.x.TestAlwaysPreTouch - * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Xlog:gc* -XX:+AlwaysPreTouch -XX:ParallelGCThreads=8 -Xms128M -Xmx128M gc.x.TestAlwaysPreTouch - */ - -public class TestAlwaysPreTouch { - public static void main(String[] args) throws Exception { - System.out.println("Success"); - } -} diff --git a/test/hotspot/jtreg/gc/x/TestDeprecated.java b/test/hotspot/jtreg/gc/x/TestDeprecated.java deleted file mode 100644 index 39b0318d52b99..0000000000000 --- a/test/hotspot/jtreg/gc/x/TestDeprecated.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -package gc.x; - -/* - * @test TestDeprecated - * @requires vm.gc.ZSinglegen - * @summary Test ZGenerational Deprecated - * @library /test/lib - * @run driver gc.x.TestDeprecated - */ - -import java.util.LinkedList; -import jdk.test.lib.process.ProcessTools; - -public class TestDeprecated { - static class Test { - public static void main(String[] args) throws Exception {} - } - public static void main(String[] args) throws Exception { - ProcessTools.executeLimitedTestJava("-XX:+UseZGC", - "-XX:-ZGenerational", - "-Xlog:gc+init", - Test.class.getName()) - .shouldContain("Option ZGenerational was deprecated") - .shouldContain("Using deprecated non-generational mode") - .shouldHaveExitValue(0); - } -} diff --git a/test/hotspot/jtreg/gc/x/TestGarbageCollectorMXBean.java b/test/hotspot/jtreg/gc/x/TestGarbageCollectorMXBean.java deleted file mode 100644 index 193b93ee2d051..0000000000000 --- a/test/hotspot/jtreg/gc/x/TestGarbageCollectorMXBean.java +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -package gc.x; - -/** - * @test TestGarbageCollectorMXBean - * @requires vm.gc.ZSinglegen - * @summary Test ZGC garbage collector MXBean - * @modules java.management - * @requires vm.compMode != "Xcomp" - * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Xms256M -Xmx512M -Xlog:gc gc.x.TestGarbageCollectorMXBean 256 512 - * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Xms512M -Xmx512M -Xlog:gc gc.x.TestGarbageCollectorMXBean 512 512 - */ - -import java.lang.management.ManagementFactory; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import javax.management.Notification; -import javax.management.NotificationEmitter; -import javax.management.NotificationListener; -import javax.management.openmbean.CompositeData; - -import com.sun.management.GarbageCollectionNotificationInfo; - -public class TestGarbageCollectorMXBean { - private static final long startTime = System.nanoTime(); - - private static void log(String msg) { - final String elapsedSeconds = String.format("%.3fs", (System.nanoTime() - startTime) / 1_000_000_000.0); - System.out.println("[" + elapsedSeconds + "] (" + Thread.currentThread().getName() + ") " + msg); - } - - public static void main(String[] args) throws Exception { - final long M = 1024 * 1024; - final long initialCapacity = Long.parseLong(args[0]) * M; - final long maxCapacity = Long.parseLong(args[1]) * M; - final AtomicInteger cycles = new AtomicInteger(); - final AtomicInteger pauses = new AtomicInteger(); - final AtomicInteger errors = new AtomicInteger(); - - final NotificationListener listener = (Notification notification, Object ignored) -> { - final var type = notification.getType(); - if (!type.equals(GarbageCollectionNotificationInfo.GARBAGE_COLLECTION_NOTIFICATION)) { - // Ignore - return; - } - - final var data = (CompositeData)notification.getUserData(); - final var info = GarbageCollectionNotificationInfo.from(data); - final var name = info.getGcName(); - final var id = info.getGcInfo().getId(); - final var action = info.getGcAction(); - final var cause = info.getGcCause(); - final var startTime = info.getGcInfo().getStartTime(); - final var endTime = info.getGcInfo().getEndTime(); - final var duration = info.getGcInfo().getDuration(); - final var memoryUsageBeforeGC = info.getGcInfo().getMemoryUsageBeforeGc().get("ZHeap"); - final var memoryUsageAfterGC = info.getGcInfo().getMemoryUsageAfterGc().get("ZHeap"); - - log(name + " (" + type + ")"); - log(" Id: " + id); - log(" Action: " + action); - log(" Cause: " + cause); - log(" StartTime: " + startTime); - log(" EndTime: " + endTime); - log(" Duration: " + duration); - log(" MemoryUsageBeforeGC: " + memoryUsageBeforeGC); - log(" MemoryUsageAfterGC: " + memoryUsageAfterGC); - log(""); - - if (name.equals("ZGC Cycles")) { - cycles.incrementAndGet(); - - if (!action.equals("end of GC cycle")) { - log("ERROR: Action"); - errors.incrementAndGet(); - } - - if (memoryUsageBeforeGC.getInit() != initialCapacity) { - log("ERROR: MemoryUsageBeforeGC.init"); - errors.incrementAndGet(); - } - - if (memoryUsageBeforeGC.getUsed() > initialCapacity) { - log("ERROR: MemoryUsageBeforeGC.used"); - errors.incrementAndGet(); - } - - if (memoryUsageBeforeGC.getCommitted() != initialCapacity) { - log("ERROR: MemoryUsageBeforeGC.committed"); - errors.incrementAndGet(); - } - - if (memoryUsageBeforeGC.getMax() != maxCapacity) { - log("ERROR: MemoryUsageBeforeGC.max"); - errors.incrementAndGet(); - } - } else if (name.equals("ZGC Pauses")) { - 
pauses.incrementAndGet(); - - if (!action.equals("end of GC pause")) { - log("ERROR: Action"); - errors.incrementAndGet(); - } - - if (memoryUsageBeforeGC.getInit() != 0) { - log("ERROR: MemoryUsageBeforeGC.init"); - errors.incrementAndGet(); - } - - if (memoryUsageBeforeGC.getUsed() != 0) { - log("ERROR: MemoryUsageBeforeGC.used"); - errors.incrementAndGet(); - } - - if (memoryUsageBeforeGC.getCommitted() != 0) { - log("ERROR: MemoryUsageBeforeGC.committed"); - errors.incrementAndGet(); - } - - if (memoryUsageBeforeGC.getMax() != 0) { - log("ERROR: MemoryUsageBeforeGC.max"); - errors.incrementAndGet(); - } - } else { - log("ERROR: Name"); - errors.incrementAndGet(); - } - - if (!cause.equals("System.gc()")) { - log("ERROR: Cause"); - errors.incrementAndGet(); - } - - if (startTime > endTime) { - log("ERROR: StartTime"); - errors.incrementAndGet(); - } - - if (endTime - startTime != duration) { - log("ERROR: Duration"); - errors.incrementAndGet(); - } - }; - - // Collect garbage created at startup - System.gc(); - - // Register GC event listener - for (final var collector : ManagementFactory.getGarbageCollectorMXBeans()) { - final NotificationEmitter emitter = (NotificationEmitter)collector; - emitter.addNotificationListener(listener, null, null); - } - - final int minCycles = 5; - final int minPauses = minCycles * 3; - - // Run GCs - for (int i = 0; i < minCycles; i++) { - log("Starting GC " + i); - System.gc(); - } - - // Wait at most 90 seconds - for (int i = 0; i < 90; i++) { - log("Waiting..."); - Thread.sleep(1000); - - if (cycles.get() >= minCycles) { - log("All events received!"); - break; - } - } - - final int actualCycles = cycles.get(); - final int actualPauses = pauses.get(); - final int actualErrors = errors.get(); - - log(" minCycles: " + minCycles); - log(" minPauses: " + minPauses); - log("actualCycles: " + actualCycles); - log("actualPauses: " + actualPauses); - log("actualErrors: " + actualErrors); - - // Verify number of cycle events - if (actualCycles < minCycles) { - throw new Exception("Unexpected cycles"); - } - - // Verify number of pause events - if (actualPauses < minPauses) { - throw new Exception("Unexpected pauses"); - } - - // Verify number of errors - if (actualErrors != 0) { - throw new Exception("Unexpected errors"); - } - } -} diff --git a/test/hotspot/jtreg/gc/x/TestHighUsage.java b/test/hotspot/jtreg/gc/x/TestHighUsage.java deleted file mode 100644 index 32b0af19e4b79..0000000000000 --- a/test/hotspot/jtreg/gc/x/TestHighUsage.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -package gc.x; - -/* - * @test TestHighUsage - * @requires vm.gc.ZSinglegen - * @summary Test ZGC "High Usage" rule - * @library /test/lib - * @run main/othervm gc.x.TestHighUsage - */ - -import java.util.LinkedList; -import jdk.test.lib.process.ProcessTools; - -public class TestHighUsage { - static class Test { - private static final int K = 1024; - private static final int M = K * K; - private static final long maxCapacity = Runtime.getRuntime().maxMemory(); - private static final long slowAllocationThreshold = 16 * M; - private static final long highUsageThreshold = maxCapacity / 20; // 5% - private static volatile LinkedList keepAlive; - private static volatile Object dummy; - - public static void main(String[] args) throws Exception { - System.out.println("Max capacity: " + (maxCapacity / M) + "M"); - System.out.println("High usage threshold: " + (highUsageThreshold / M) + "M"); - System.out.println("Allocating live-set"); - - // Allocate live-set - keepAlive = new LinkedList<>(); - while (Runtime.getRuntime().freeMemory() > slowAllocationThreshold) { - while (Runtime.getRuntime().freeMemory() > slowAllocationThreshold) { - keepAlive.add(new byte[128 * K]); - } - - // Compact live-set and let allocation rate settle down - System.gc(); - Thread.sleep(2000); - } - - System.out.println("Allocating garbage slowly"); - - // Allocate garbage slowly, so that the sampled allocation rate on average - // becomes zero MB/s for the last 1 second windows. Once we reach the high - // usage threshold we idle to allow for a "High Usage" GC cycle to happen. - // We need to allocate slowly to avoid an "Allocation Rate" GC cycle. - for (int i = 0; i < 300; i++) { - if (Runtime.getRuntime().freeMemory() > highUsageThreshold) { - // Allocate - dummy = new byte[128 * K]; - System.out.println("Free: " + (Runtime.getRuntime().freeMemory() / M) + "M (Allocating)"); - } else { - // Idle - System.out.println("Free: " + (Runtime.getRuntime().freeMemory() / M) + "M (Idling)"); - } - - Thread.sleep(250); - } - - System.out.println("Done"); - } - } - - public static void main(String[] args) throws Exception { - ProcessTools.executeTestJava("-XX:+UseZGC", - "-XX:-ZGenerational", - "-XX:-ZProactive", - "-Xms128M", - "-Xmx128M", - "-XX:ParallelGCThreads=1", - "-XX:ConcGCThreads=1", - "-Xlog:gc,gc+start", - Test.class.getName()) - .shouldNotContain("Allocation Stall") - .shouldContain("High Usage") - .shouldHaveExitValue(0); - } -} diff --git a/test/hotspot/jtreg/gc/x/TestMemoryMXBean.java b/test/hotspot/jtreg/gc/x/TestMemoryMXBean.java deleted file mode 100644 index fad1febe15807..0000000000000 --- a/test/hotspot/jtreg/gc/x/TestMemoryMXBean.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -package gc.x; - -/** - * @test TestMemoryMXBean - * @requires vm.gc.ZSinglegen - * @summary Test ZGC heap memory MXBean - * @modules java.management - * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Xms128M -Xmx256M -Xlog:gc* gc.x.TestMemoryMXBean 128 256 - * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Xms256M -Xmx256M -Xlog:gc* gc.x.TestMemoryMXBean 256 256 - */ - -import java.lang.management.ManagementFactory; - -public class TestMemoryMXBean { - public static void main(String[] args) throws Exception { - final long M = 1024 * 1024; - final long expectedInitialCapacity = Long.parseLong(args[0]) * M; - final long expectedMaxCapacity = Long.parseLong(args[1]) * M; - final var memoryUsage = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); - final long initialCapacity = memoryUsage.getInit(); - final long capacity = memoryUsage.getCommitted(); - final long maxCapacity = memoryUsage.getMax(); - - System.out.println("expectedInitialCapacity: " + expectedInitialCapacity); - System.out.println(" expectedMaxCapacity: " + expectedMaxCapacity); - System.out.println(" initialCapacity: " + initialCapacity); - System.out.println(" capacity: " + capacity); - System.out.println(" maxCapacity: " + maxCapacity); - - if (initialCapacity != expectedInitialCapacity) { - throw new Exception("Unexpected initial capacity"); - } - - if (maxCapacity != expectedMaxCapacity) { - throw new Exception("Unexpected max capacity"); - } - - if (capacity < initialCapacity || capacity > maxCapacity) { - throw new Exception("Unexpected capacity"); - } - } -} diff --git a/test/hotspot/jtreg/gc/x/TestMemoryManagerMXBean.java b/test/hotspot/jtreg/gc/x/TestMemoryManagerMXBean.java deleted file mode 100644 index 70ce6c23b2ee9..0000000000000 --- a/test/hotspot/jtreg/gc/x/TestMemoryManagerMXBean.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -package gc.x; - -/** - * @test TestMemoryManagerMXBean - * @requires vm.gc.ZSinglegen - * @summary Test ZGC memory manager MXBean - * @modules java.management - * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Xmx128M gc.x.TestMemoryManagerMXBean - */ - -import java.lang.management.ManagementFactory; - -public class TestMemoryManagerMXBean { - private static void checkName(String name) throws Exception { - if (name == null || name.length() == 0) { - throw new Exception("Invalid name"); - } - } - - public static void main(String[] args) throws Exception { - int zgcCyclesMemoryManagers = 0; - int zgcPausesMemoryManagers = 0; - int zgcCyclesMemoryPools = 0; - int zgcPausesMemoryPools = 0; - - for (final var memoryManager : ManagementFactory.getMemoryManagerMXBeans()) { - final var memoryManagerName = memoryManager.getName(); - checkName(memoryManagerName); - - System.out.println("MemoryManager: " + memoryManagerName); - - if (memoryManagerName.equals("ZGC Cycles")) { - zgcCyclesMemoryManagers++; - } else if (memoryManagerName.equals("ZGC Pauses")) { - zgcPausesMemoryManagers++; - } - - for (final var memoryPoolName : memoryManager.getMemoryPoolNames()) { - checkName(memoryPoolName); - - System.out.println(" MemoryPool: " + memoryPoolName); - - if (memoryPoolName.equals("ZHeap")) { - if (memoryManagerName.equals("ZGC Cycles")) { - zgcCyclesMemoryPools++; - } else if (memoryManagerName.equals("ZGC Pauses")) { - zgcPausesMemoryPools++; - } - } - } - } - - if (zgcCyclesMemoryManagers != 1) { - throw new Exception("Unexpected number of cycle MemoryManagers"); - } - - if (zgcPausesMemoryManagers != 1) { - throw new Exception("Unexpected number of pause MemoryManagers"); - } - - if (zgcCyclesMemoryPools != 1) { - throw new Exception("Unexpected number of cycle MemoryPools"); - } - - if (zgcPausesMemoryPools != 1) { - throw new Exception("Unexpected number of pause MemoryPools"); - } - } -} diff --git a/test/hotspot/jtreg/gc/x/TestNoUncommit.java b/test/hotspot/jtreg/gc/x/TestNoUncommit.java deleted file mode 100644 index be5aa950509a9..0000000000000 --- a/test/hotspot/jtreg/gc/x/TestNoUncommit.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -package gc.x; - -/* - * @test TestNoUncommit - * @requires vm.gc.ZSinglegen & !vm.graal.enabled - * @summary Test ZGC uncommit unused memory disabled - * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Xlog:gc*,gc+heap=debug,gc+stats=off -Xms512M -Xmx512M -XX:ZUncommitDelay=1 gc.x.TestNoUncommit - * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Xlog:gc*,gc+heap=debug,gc+stats=off -Xms128M -Xmx512M -XX:ZUncommitDelay=1 -XX:-ZUncommit gc.x.TestNoUncommit - */ - -public class TestNoUncommit { - private static final int allocSize = 200 * 1024 * 1024; // 200M - private static volatile Object keepAlive = null; - - private static long capacity() { - return Runtime.getRuntime().totalMemory(); - } - - public static void main(String[] args) throws Exception { - System.out.println("Allocating"); - keepAlive = new byte[allocSize]; - final var afterAlloc = capacity(); - - System.out.println("Reclaiming"); - keepAlive = null; - System.gc(); - - // Wait longer than the uncommit delay (which is 1 second) - Thread.sleep(5 * 1000); - - final var afterDelay = capacity(); - - // Verify - if (afterAlloc > afterDelay) { - throw new Exception("Should not uncommit"); - } - - System.out.println("Success"); - } -} diff --git a/test/hotspot/jtreg/gc/x/TestPageCacheFlush.java b/test/hotspot/jtreg/gc/x/TestPageCacheFlush.java deleted file mode 100644 index a48b6f77e17ef..0000000000000 --- a/test/hotspot/jtreg/gc/x/TestPageCacheFlush.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -package gc.x; - -/* - * @test TestPageCacheFlush - * @requires vm.gc.ZSinglegen - * @summary Test ZGC page cache flushing - * @library /test/lib - * @run driver gc.x.TestPageCacheFlush - */ - -import java.util.LinkedList; -import jdk.test.lib.process.ProcessTools; - -public class TestPageCacheFlush { - static class Test { - private static final int K = 1024; - private static final int M = K * K; - private static volatile LinkedList keepAlive; - - public static void fillPageCache(int size) { - System.out.println("Begin allocate (" + size + ")"); - - keepAlive = new LinkedList<>(); - - try { - for (;;) { - keepAlive.add(new byte[size]); - } - } catch (OutOfMemoryError e) { - keepAlive = null; - System.gc(); - } - - System.out.println("End allocate (" + size + ")"); - } - - public static void main(String[] args) throws Exception { - // Allocate small objects to fill the page cache with small pages - fillPageCache(10 * K); - - // Allocate large objects to provoke page cache flushing to rebuild - // cached small pages into large pages - fillPageCache(10 * M); - } - } - - public static void main(String[] args) throws Exception { - ProcessTools.executeTestJava( - "-XX:+UseZGC", - "-XX:-ZGenerational", - "-Xms128M", - "-Xmx128M", - "-Xlog:gc,gc+init,gc+heap=debug", - Test.class.getName()) - .outputTo(System.out) - .errorTo(System.out) - .shouldContain("Page Cache Flushed:") - .shouldHaveExitValue(0); - } -} diff --git a/test/hotspot/jtreg/gc/x/TestRelocateInPlace.java b/test/hotspot/jtreg/gc/x/TestRelocateInPlace.java deleted file mode 100644 index dba08b23a5d34..0000000000000 --- a/test/hotspot/jtreg/gc/x/TestRelocateInPlace.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -package gc.x; - -/* - * @test TestRelocateInPlace - * @requires vm.gc.ZSinglegen - * @summary Test ZGC in-place relocateion - * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Xlog:gc*,gc+stats=off -Xmx256M -XX:+UnlockDiagnosticVMOptions -XX:+ZStressRelocateInPlace gc.x.TestRelocateInPlace - */ - -import java.util.ArrayList; - -public class TestRelocateInPlace { - private static final int allocSize = 100 * 1024 * 1024; // 100M - private static final int smallObjectSize = 4 * 1024; // 4K - private static final int mediumObjectSize = 2 * 1024 * 1024; // 2M - - private static volatile ArrayList keepAlive; - - private static void allocate(int objectSize) { - keepAlive = new ArrayList<>(); - for (int i = 0; i < allocSize; i+= objectSize) { - keepAlive.add(new byte[objectSize]); - } - } - - private static void fragment() { - // Release every other reference to cause lots of fragmentation - for (int i = 0; i < keepAlive.size(); i += 2) { - keepAlive.set(i, null); - } - } - - private static void test(int objectSize) throws Exception { - System.out.println("Allocating"); - allocate(objectSize); - - System.out.println("Fragmenting"); - fragment(); - - System.out.println("Reclaiming"); - System.gc(); - } - - public static void main(String[] args) throws Exception { - for (int i = 0; i < 10; i++) { - System.out.println("Iteration " + i); - test(smallObjectSize); - test(mediumObjectSize); - } - } -} diff --git a/test/hotspot/jtreg/gc/x/TestSmallHeap.java b/test/hotspot/jtreg/gc/x/TestSmallHeap.java deleted file mode 100644 index a7e8042f92474..0000000000000 --- a/test/hotspot/jtreg/gc/x/TestSmallHeap.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -package gc.x; - -/* - * @test TestSmallHeap - * @requires vm.gc.ZSinglegen - * @summary Test ZGC with small heaps - * @library / /test/lib - * @run driver gc.x.TestSmallHeap 8M 16M 32M 64M 128M 256M 512M 1024M - */ - -import jdk.test.lib.process.ProcessTools; -import static gc.testlibrary.Allocation.blackHole; - -public class TestSmallHeap { - public static class Test { - public static void main(String[] args) throws Exception { - final long maxCapacity = Runtime.getRuntime().maxMemory(); - System.out.println("Max Capacity " + maxCapacity + " bytes"); - - // Allocate byte arrays of increasing length, so that - // all allocation paths (small/medium/large) are tested. 
- for (int length = 16; length <= maxCapacity / 16; length *= 2) { - System.out.println("Allocating " + length + " bytes"); - blackHole(new byte[length]); - } - - System.out.println("Success"); - } - } - - public static void main(String[] args) throws Exception { - for (var maxCapacity: args) { - ProcessTools.executeTestJava( - "-XX:+UseZGC", - "-XX:-ZGenerational", - "-Xlog:gc,gc+init,gc+reloc,gc+heap", - "-Xmx" + maxCapacity, - Test.class.getName()) - .outputTo(System.out) - .errorTo(System.out) - .shouldContain("Success") - .shouldHaveExitValue(0); - } - } -} diff --git a/test/hotspot/jtreg/gc/x/TestUncommit.java b/test/hotspot/jtreg/gc/x/TestUncommit.java deleted file mode 100644 index febd6b9958851..0000000000000 --- a/test/hotspot/jtreg/gc/x/TestUncommit.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -package gc.x; - -/* - * @test TestUncommit - * @requires vm.gc.ZSinglegen - * @summary Test ZGC uncommit unused memory - * @library /test/lib - * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Xlog:gc*,gc+heap=debug,gc+stats=off -Xms128M -Xmx512M -XX:ZUncommitDelay=10 gc.x.TestUncommit - */ - -import java.util.ArrayList; -import jdk.test.lib.Utils; - -public class TestUncommit { - private static final int delay = 10 * 1000; // milliseconds - private static final int allocSize = 200 * 1024 * 1024; // 200M - private static final int smallObjectSize = 4 * 1024; // 4K - private static final int mediumObjectSize = 2 * 1024 * 1024; // 2M - private static final int largeObjectSize = allocSize; - - private static volatile ArrayList keepAlive; - - private static final long startTime = System.nanoTime(); - - private static void log(String msg) { - final String elapsedSeconds = String.format("%.3fs", (System.nanoTime() - startTime) / 1_000_000_000.0); - System.out.println("[" + elapsedSeconds + "] (" + Thread.currentThread().getName() + ") " + msg); - } - - private static long capacity() { - return Runtime.getRuntime().totalMemory(); - } - - private static void allocate(int objectSize) { - keepAlive = new ArrayList<>(); - for (int i = 0; i < allocSize; i+= objectSize) { - keepAlive.add(new byte[objectSize]); - } - } - - private static void reclaim() { - keepAlive = null; - System.gc(); - } - - private static void test(int objectSize) throws Exception { - final var beforeAlloc = capacity(); - final var timeBeforeAlloc = System.nanoTime(); - - // Allocate memory - log("Allocating"); - allocate(objectSize); - - final var afterAlloc = capacity(); - - // Reclaim memory - log("Reclaiming"); - reclaim(); - - log("Waiting for uncommit to start"); - while (capacity() >= afterAlloc) { - Thread.sleep(1000); - } - - log("Uncommit started"); - final var timeUncommitStart = System.nanoTime(); - final var actualDelay = (timeUncommitStart - timeBeforeAlloc) / 1_000_000; - - log("Waiting for uncommit to complete"); - while (capacity() > beforeAlloc) { - Thread.sleep(1000); - } - - log("Uncommit completed"); - final var afterUncommit = capacity(); - - log(" Uncommit Delay: " + delay); - log(" Object Size: " + objectSize); - log(" Alloc Size: " + allocSize); - log(" Before Alloc: " + beforeAlloc); - log(" After Alloc: " + afterAlloc); - log(" After Uncommit: " + afterUncommit); - log(" Actual Uncommit Delay: " + actualDelay); - - // Verify - if (actualDelay < delay) { - throw new Exception("Uncommitted too fast"); - } - - if (actualDelay > delay * 2 * Utils.TIMEOUT_FACTOR) { - throw new Exception("Uncommitted too slow"); - } - - if (afterUncommit < beforeAlloc) { - throw new Exception("Uncommitted too much"); - } - - if (afterUncommit > beforeAlloc) { - throw new Exception("Uncommitted too little"); - } - - log("Success"); - } - - public static void main(String[] args) throws Exception { - for (int i = 0; i < 2; i++) { - log("Iteration " + i); - test(smallObjectSize); - test(mediumObjectSize); - test(largeObjectSize); - } - } -} diff --git a/test/hotspot/jtreg/gc/z/TestAllocateHeapAt.java b/test/hotspot/jtreg/gc/z/TestAllocateHeapAt.java index 2fb040840b4da..dbcca704fab4f 100644 --- a/test/hotspot/jtreg/gc/z/TestAllocateHeapAt.java +++ b/test/hotspot/jtreg/gc/z/TestAllocateHeapAt.java @@ -25,7 +25,7 @@ /* * @test TestAllocateHeapAt - * @requires vm.gc.ZGenerational & os.family == "linux" + * @requires vm.gc.Z & os.family == "linux" * @requires !vm.opt.final.UseLargePages * @summary Test ZGC with 
-XX:AllocateHeapAt * @library /test/lib @@ -44,7 +44,6 @@ public static void main(String[] args) throws Exception { ProcessTools.executeTestJava( "-XX:+UseZGC", - "-XX:+ZGenerational", "-Xlog:gc*", "-Xms32M", "-Xmx32M", diff --git a/test/hotspot/jtreg/gc/z/TestAllocateHeapAtWithHugeTLBFS.java b/test/hotspot/jtreg/gc/z/TestAllocateHeapAtWithHugeTLBFS.java index ac647bbd013fd..4134ce838d4f5 100644 --- a/test/hotspot/jtreg/gc/z/TestAllocateHeapAtWithHugeTLBFS.java +++ b/test/hotspot/jtreg/gc/z/TestAllocateHeapAtWithHugeTLBFS.java @@ -25,7 +25,7 @@ /* * @test TestAllocateHeapAtWithHugeTLBFS - * @requires vm.gc.ZGenerational & os.family == "linux" + * @requires vm.gc.Z & os.family == "linux" * @summary Test ZGC with -XX:AllocateHeapAt and -XX:+UseLargePages * @library /test/lib * @run driver gc.z.TestAllocateHeapAtWithHugeTLBFS true @@ -77,7 +77,6 @@ public static void main(String[] args) throws Exception { ProcessTools.executeTestJava( "-XX:+UseZGC", - "-XX:+ZGenerational", "-Xlog:gc*", "-Xms32M", "-Xmx32M", diff --git a/test/hotspot/jtreg/gc/z/TestAlwaysPreTouch.java b/test/hotspot/jtreg/gc/z/TestAlwaysPreTouch.java index 8020c82c4fd3f..db0471431d1aa 100644 --- a/test/hotspot/jtreg/gc/z/TestAlwaysPreTouch.java +++ b/test/hotspot/jtreg/gc/z/TestAlwaysPreTouch.java @@ -25,13 +25,13 @@ /* * @test TestAlwaysPreTouch - * @requires vm.gc.ZGenerational + * @requires vm.gc.Z * @summary Test ZGC parallel pre-touch - * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Xlog:gc* -XX:-AlwaysPreTouch -Xms128M -Xmx128M gc.z.TestAlwaysPreTouch - * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Xlog:gc* -XX:+AlwaysPreTouch -XX:ParallelGCThreads=1 -Xms2M -Xmx128M gc.z.TestAlwaysPreTouch - * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Xlog:gc* -XX:+AlwaysPreTouch -XX:ParallelGCThreads=8 -Xms2M -Xmx128M gc.z.TestAlwaysPreTouch - * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Xlog:gc* -XX:+AlwaysPreTouch -XX:ParallelGCThreads=1 -Xms128M -Xmx128M gc.z.TestAlwaysPreTouch - * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Xlog:gc* -XX:+AlwaysPreTouch -XX:ParallelGCThreads=8 -Xms128M -Xmx128M gc.z.TestAlwaysPreTouch + * @run main/othervm -XX:+UseZGC -Xlog:gc* -XX:-AlwaysPreTouch -Xms128M -Xmx128M gc.z.TestAlwaysPreTouch + * @run main/othervm -XX:+UseZGC -Xlog:gc* -XX:+AlwaysPreTouch -XX:ParallelGCThreads=1 -Xms2M -Xmx128M gc.z.TestAlwaysPreTouch + * @run main/othervm -XX:+UseZGC -Xlog:gc* -XX:+AlwaysPreTouch -XX:ParallelGCThreads=8 -Xms2M -Xmx128M gc.z.TestAlwaysPreTouch + * @run main/othervm -XX:+UseZGC -Xlog:gc* -XX:+AlwaysPreTouch -XX:ParallelGCThreads=1 -Xms128M -Xmx128M gc.z.TestAlwaysPreTouch + * @run main/othervm -XX:+UseZGC -Xlog:gc* -XX:+AlwaysPreTouch -XX:ParallelGCThreads=8 -Xms128M -Xmx128M gc.z.TestAlwaysPreTouch */ public class TestAlwaysPreTouch { diff --git a/test/hotspot/jtreg/gc/z/TestDefault.java b/test/hotspot/jtreg/gc/z/TestDefault.java deleted file mode 100644 index 1f1b7d49408e7..0000000000000 --- a/test/hotspot/jtreg/gc/z/TestDefault.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -package gc.z; - -/* - * @test TestDefault - * @requires vm.gc.ZGenerational - * @summary Test that ZGC Generational Mode is Default - * @library /test/lib - * @run driver gc.z.TestDefault - */ - -import java.util.LinkedList; -import jdk.test.lib.process.ProcessTools; - -public class TestDefault { - static class Test { - public static void main(String[] args) throws Exception {} - } - public static void main(String[] args) throws Exception { - ProcessTools.executeLimitedTestJava("-XX:+UseZGC", - "-Xlog:gc+init", - Test.class.getName()) - .shouldNotContain("Option ZGenerational was deprecated") - .shouldNotContain("Using deprecated non-generational mode") - .shouldContain("GC Workers for Old Generation") - .shouldContain("GC Workers for Young Generation") - .shouldHaveExitValue(0); - } -} diff --git a/test/hotspot/jtreg/gc/z/TestGarbageCollectorMXBean.java b/test/hotspot/jtreg/gc/z/TestGarbageCollectorMXBean.java index b3ecc28ff657e..d3cf46d51e853 100644 --- a/test/hotspot/jtreg/gc/z/TestGarbageCollectorMXBean.java +++ b/test/hotspot/jtreg/gc/z/TestGarbageCollectorMXBean.java @@ -25,12 +25,12 @@ /** * @test TestGarbageCollectorMXBean - * @requires vm.gc.ZGenerational + * @requires vm.gc.Z * @summary Test ZGC garbage collector MXBean * @modules java.management * @requires vm.compMode != "Xcomp" - * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Xms256M -Xmx512M -Xlog:gc gc.z.TestGarbageCollectorMXBean 256 512 - * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Xms512M -Xmx512M -Xlog:gc gc.z.TestGarbageCollectorMXBean 512 512 + * @run main/othervm -XX:+UseZGC -Xms256M -Xmx512M -Xlog:gc gc.z.TestGarbageCollectorMXBean 256 512 + * @run main/othervm -XX:+UseZGC -Xms512M -Xmx512M -Xlog:gc gc.z.TestGarbageCollectorMXBean 512 512 */ import java.lang.management.ManagementFactory; diff --git a/test/hotspot/jtreg/gc/z/TestMemoryMXBean.java b/test/hotspot/jtreg/gc/z/TestMemoryMXBean.java index 6f4505a64bf14..ec1daaa00fbd6 100644 --- a/test/hotspot/jtreg/gc/z/TestMemoryMXBean.java +++ b/test/hotspot/jtreg/gc/z/TestMemoryMXBean.java @@ -25,11 +25,11 @@ /** * @test TestMemoryMXBean - * @requires vm.gc.ZGenerational + * @requires vm.gc.Z * @summary Test ZGC heap memory MXBean * @modules java.management - * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Xms128M -Xmx256M -Xlog:gc* gc.z.TestMemoryMXBean 128 256 - * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Xms256M -Xmx256M -Xlog:gc* gc.z.TestMemoryMXBean 256 256 + * @run main/othervm -XX:+UseZGC -Xms128M -Xmx256M -Xlog:gc* gc.z.TestMemoryMXBean 128 256 + * @run main/othervm -XX:+UseZGC -Xms256M -Xmx256M -Xlog:gc* gc.z.TestMemoryMXBean 256 256 */ import java.lang.management.ManagementFactory; diff --git a/test/hotspot/jtreg/gc/z/TestMemoryManagerMXBean.java b/test/hotspot/jtreg/gc/z/TestMemoryManagerMXBean.java index 
5a0c481a42fed..6540d67e855c2 100644 --- a/test/hotspot/jtreg/gc/z/TestMemoryManagerMXBean.java +++ b/test/hotspot/jtreg/gc/z/TestMemoryManagerMXBean.java @@ -25,10 +25,10 @@ /** * @test TestMemoryManagerMXBean - * @requires vm.gc.ZGenerational + * @requires vm.gc.Z * @summary Test ZGC memory manager MXBean * @modules java.management - * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Xmx128M gc.z.TestMemoryManagerMXBean + * @run main/othervm -XX:+UseZGC -Xmx128M gc.z.TestMemoryManagerMXBean */ import java.lang.management.ManagementFactory; diff --git a/test/hotspot/jtreg/gc/z/TestNoUncommit.java b/test/hotspot/jtreg/gc/z/TestNoUncommit.java index 6115681552e73..cd5833f308fe0 100644 --- a/test/hotspot/jtreg/gc/z/TestNoUncommit.java +++ b/test/hotspot/jtreg/gc/z/TestNoUncommit.java @@ -25,10 +25,10 @@ /* * @test TestNoUncommit - * @requires vm.gc.ZGenerational & !vm.graal.enabled + * @requires vm.gc.Z & !vm.graal.enabled * @summary Test ZGC uncommit unused memory disabled - * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Xlog:gc*,gc+heap=debug,gc+stats=off -Xms512M -Xmx512M -XX:ZUncommitDelay=1 gc.z.TestNoUncommit - * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Xlog:gc*,gc+heap=debug,gc+stats=off -Xms128M -Xmx512M -XX:ZUncommitDelay=1 -XX:-ZUncommit gc.z.TestNoUncommit + * @run main/othervm -XX:+UseZGC -Xlog:gc*,gc+heap=debug,gc+stats=off -Xms512M -Xmx512M -XX:ZUncommitDelay=1 gc.z.TestNoUncommit + * @run main/othervm -XX:+UseZGC -Xlog:gc*,gc+heap=debug,gc+stats=off -Xms128M -Xmx512M -XX:ZUncommitDelay=1 -XX:-ZUncommit gc.z.TestNoUncommit */ public class TestNoUncommit { diff --git a/test/hotspot/jtreg/gc/z/TestPageCacheFlush.java b/test/hotspot/jtreg/gc/z/TestPageCacheFlush.java index 3b666ddc2c8a6..847c7b2d1171e 100644 --- a/test/hotspot/jtreg/gc/z/TestPageCacheFlush.java +++ b/test/hotspot/jtreg/gc/z/TestPageCacheFlush.java @@ -25,7 +25,7 @@ /* * @test TestPageCacheFlush - * @requires vm.gc.ZGenerational + * @requires vm.gc.Z * @summary Test ZGC page cache flushing * @library /test/lib * @run driver gc.z.TestPageCacheFlush @@ -70,7 +70,6 @@ public static void main(String[] args) throws Exception { public static void main(String[] args) throws Exception { ProcessTools.executeTestJava( "-XX:+UseZGC", - "-XX:+ZGenerational", "-Xms128M", "-Xmx128M", "-Xlog:gc,gc+init,gc+heap=debug", diff --git a/test/hotspot/jtreg/gc/z/TestRegistersPushPopAtZGCLoadBarrierStub.java b/test/hotspot/jtreg/gc/z/TestRegistersPushPopAtZGCLoadBarrierStub.java index 71aa634c761d3..9730fdaafd8e4 100644 --- a/test/hotspot/jtreg/gc/z/TestRegistersPushPopAtZGCLoadBarrierStub.java +++ b/test/hotspot/jtreg/gc/z/TestRegistersPushPopAtZGCLoadBarrierStub.java @@ -31,7 +31,7 @@ * @library /test/lib / * @modules jdk.incubator.vector * - * @requires vm.gc.ZGenerational & vm.debug + * @requires vm.gc.Z & vm.debug * @requires os.arch=="aarch64" * * @run driver gc.z.TestRegistersPushPopAtZGCLoadBarrierStub @@ -316,7 +316,6 @@ static String launchJavaTestProcess(String test_name) throws Exception { command.add("-XX:-UseOnStackReplacement"); command.add("-XX:-TieredCompilation"); command.add("-XX:+UseZGC"); - command.add("-XX:+ZGenerational"); command.add("--add-modules=jdk.incubator.vector"); command.add("-XX:CompileCommand=print," + Launcher.class.getName() + "::" + test_name); command.add(Launcher.class.getName()); diff --git a/test/hotspot/jtreg/gc/z/TestRelocateInPlace.java b/test/hotspot/jtreg/gc/z/TestRelocateInPlace.java index 5115fe3c96545..723a6504c6acf 100644 --- 
a/test/hotspot/jtreg/gc/z/TestRelocateInPlace.java +++ b/test/hotspot/jtreg/gc/z/TestRelocateInPlace.java @@ -25,9 +25,9 @@ /* * @test TestRelocateInPlace - * @requires vm.gc.ZGenerational + * @requires vm.gc.Z * @summary Test ZGC in-place relocateion - * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Xlog:gc*,gc+stats=off -Xmx256M -XX:+UnlockDiagnosticVMOptions -XX:+ZStressRelocateInPlace gc.z.TestRelocateInPlace + * @run main/othervm -XX:+UseZGC -Xlog:gc*,gc+stats=off -Xmx256M -XX:+UnlockDiagnosticVMOptions -XX:+ZStressRelocateInPlace gc.z.TestRelocateInPlace */ import java.util.ArrayList; diff --git a/test/hotspot/jtreg/gc/z/TestSmallHeap.java b/test/hotspot/jtreg/gc/z/TestSmallHeap.java index 67d9d33d2815e..9ac65d05108ca 100644 --- a/test/hotspot/jtreg/gc/z/TestSmallHeap.java +++ b/test/hotspot/jtreg/gc/z/TestSmallHeap.java @@ -25,7 +25,7 @@ /* * @test TestSmallHeap - * @requires vm.gc.ZGenerational + * @requires vm.gc.Z * @summary Test ZGC with small heaps * @library / /test/lib * @run driver gc.z.TestSmallHeap 16M 32M 64M 128M 256M 512M 1024M @@ -55,7 +55,6 @@ public static void main(String[] args) throws Exception { for (var maxCapacity: args) { ProcessTools.executeTestJava( "-XX:+UseZGC", - "-XX:+ZGenerational", "-Xlog:gc,gc+init,gc+reloc,gc+heap", "-Xmx" + maxCapacity, Test.class.getName()) diff --git a/test/hotspot/jtreg/gc/z/TestUncommit.java b/test/hotspot/jtreg/gc/z/TestUncommit.java index fea0721cce311..e02773e868f60 100644 --- a/test/hotspot/jtreg/gc/z/TestUncommit.java +++ b/test/hotspot/jtreg/gc/z/TestUncommit.java @@ -25,10 +25,10 @@ /* * @test TestUncommit - * @requires vm.gc.ZGenerational + * @requires vm.gc.Z * @summary Test ZGC uncommit unused memory * @library /test/lib - * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Xlog:gc*,gc+heap=debug,gc+stats=off -Xms128M -Xmx512M -XX:ZUncommitDelay=10 gc.z.TestUncommit + * @run main/othervm -XX:+UseZGC -Xlog:gc*,gc+heap=debug,gc+stats=off -Xms128M -Xmx512M -XX:ZUncommitDelay=10 gc.z.TestUncommit */ import java.util.ArrayList; diff --git a/test/hotspot/jtreg/gc/z/TestZForceDiscontiguousHeapReservations.java b/test/hotspot/jtreg/gc/z/TestZForceDiscontiguousHeapReservations.java index f1a14f0cf902b..fa2485073dda5 100644 --- a/test/hotspot/jtreg/gc/z/TestZForceDiscontiguousHeapReservations.java +++ b/test/hotspot/jtreg/gc/z/TestZForceDiscontiguousHeapReservations.java @@ -25,7 +25,7 @@ /** * @test TestZForceDiscontiguousHeapReservations - * @requires vm.gc.ZGenerational & vm.debug + * @requires vm.gc.Z & vm.debug * @summary Test the ZForceDiscontiguousHeapReservations development flag * @library /test/lib * @run driver gc.z.TestZForceDiscontiguousHeapReservations @@ -47,7 +47,6 @@ private static void testValue(int n) throws Exception { final int XmsInM = Math.min(16 * XmxInM / (n + 1), XmxInM); OutputAnalyzer oa = ProcessTools.executeTestJava( "-XX:+UseZGC", - "-XX:+ZGenerational", "-Xms" + XmsInM + "M", "-Xmx" + XmxInM + "M", "-Xlog:gc,gc+init", diff --git a/test/hotspot/jtreg/gc/z/TestZNMT.java b/test/hotspot/jtreg/gc/z/TestZNMT.java index 889cc77b0e4b7..b536f3eab8e96 100644 --- a/test/hotspot/jtreg/gc/z/TestZNMT.java +++ b/test/hotspot/jtreg/gc/z/TestZNMT.java @@ -26,8 +26,8 @@ /** * @test TestZNMT * @bug 8310743 - * @requires vm.gc.ZGenerational & vm.debug - * @summary Test NMT and ZGenerational heap reservation / commits interactions. + * @requires vm.gc.Z & vm.debug + * @summary Test NMT and ZGC heap reservation / commits interactions. 
* @library / /test/lib * @run driver gc.z.TestZNMT */ @@ -70,7 +70,6 @@ private static void testValue(int zForceDiscontiguousHeapReservations) throws Ex final int XmsInM = Math.min(16 * XmxInM / (zForceDiscontiguousHeapReservations + 1), XmxInM); OutputAnalyzer oa = ProcessTools.executeTestJava( "-XX:+UseZGC", - "-XX:+ZGenerational", "-Xms" + XmsInM + "M", "-Xmx" + XmxInM + "M", "-Xlog:gc,gc+init", diff --git a/test/hotspot/jtreg/runtime/CommandLine/VMDeprecatedOptions.java b/test/hotspot/jtreg/runtime/CommandLine/VMDeprecatedOptions.java index 96bc22dfb1d05..0ecfbe2c8dbc3 100644 --- a/test/hotspot/jtreg/runtime/CommandLine/VMDeprecatedOptions.java +++ b/test/hotspot/jtreg/runtime/CommandLine/VMDeprecatedOptions.java @@ -57,7 +57,6 @@ public class VMDeprecatedOptions { Arrays.asList(new String[][] { // deprecated non-alias flags: {"AllowRedefinitionToAddDeleteMethods", "true"}, - {"ZGenerational", "false"}, {"LockingMode", "1"}, // deprecated alias flags (see also aliased_jvm_flags): diff --git a/test/hotspot/jtreg/runtime/cds/appcds/TestZGCWithCDS.java b/test/hotspot/jtreg/runtime/cds/appcds/TestZGCWithCDS.java index ea51b198f5999..89fc346ffbb79 100644 --- a/test/hotspot/jtreg/runtime/cds/appcds/TestZGCWithCDS.java +++ b/test/hotspot/jtreg/runtime/cds/appcds/TestZGCWithCDS.java @@ -22,29 +22,16 @@ */ /* - * @test id=ZSinglegen + * @test * @bug 8232069 * @requires vm.cds * @requires vm.bits == 64 - * @requires vm.gc.ZSinglegen + * @requires vm.gc.Z * @requires vm.gc.Serial * @requires vm.gc == null * @library /test/lib /test/hotspot/jtreg/runtime/cds/appcds * @compile test-classes/Hello.java - * @run driver TestZGCWithCDS -XX:-ZGenerational - */ - -/* - * @test id=ZGenerational - * @bug 8232069 - * @requires vm.cds - * @requires vm.bits == 64 - * @requires vm.gc.ZGenerational - * @requires vm.gc.Serial - * @requires vm.gc == null - * @library /test/lib /test/hotspot/jtreg/runtime/cds/appcds - * @compile test-classes/Hello.java - * @run driver TestZGCWithCDS -XX:+ZGenerational + * @run driver TestZGCWithCDS */ import jdk.test.lib.Platform; @@ -55,14 +42,12 @@ public class TestZGCWithCDS { public final static String UNABLE_TO_USE_ARCHIVE = "Unable to use shared archive."; public final static String ERR_MSG = "The saved state of UseCompressedOops and UseCompressedClassPointers is different from runtime, CDS will be disabled."; public static void main(String... args) throws Exception { - String zGenerational = args[0]; String helloJar = JarBuilder.build("hello", "Hello"); System.out.println("0. Dump with ZGC"); OutputAnalyzer out = TestCommon .dump(helloJar, new String[] {"Hello"}, "-XX:+UseZGC", - zGenerational, "-Xlog:cds"); out.shouldContain("Dumping shared data to file:"); out.shouldHaveExitValue(0); @@ -71,7 +56,6 @@ public static void main(String... args) throws Exception { out = TestCommon .exec(helloJar, "-XX:+UseZGC", - zGenerational, "-Xlog:cds", "Hello"); out.shouldContain(HELLO); @@ -151,7 +135,6 @@ public static void main(String... 
args) throws Exception { out = TestCommon .exec(helloJar, "-XX:+UseZGC", - zGenerational, "-Xlog:cds", "Hello"); out.shouldContain(HELLO); diff --git a/test/hotspot/jtreg/runtime/cds/appcds/loaderConstraints/DynamicLoaderConstraintsTest.java b/test/hotspot/jtreg/runtime/cds/appcds/loaderConstraints/DynamicLoaderConstraintsTest.java index 6de08da4673cb..004cc7bb5dbbd 100644 --- a/test/hotspot/jtreg/runtime/cds/appcds/loaderConstraints/DynamicLoaderConstraintsTest.java +++ b/test/hotspot/jtreg/runtime/cds/appcds/loaderConstraints/DynamicLoaderConstraintsTest.java @@ -53,9 +53,9 @@ */ /** - * @test id=custom-cl-zgc-singlegen + * @test id=custom-cl-zgc * @requires vm.cds.custom.loaders - * @requires vm.gc.ZSinglegen + * @requires vm.gc.Z * @summary Test dumptime_table entries are removed with zgc eager class unloading * @bug 8274935 * @library /test/lib @@ -69,23 +69,6 @@ * @run main/othervm/timeout=180 -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:. DynamicLoaderConstraintsTest custom-zgc */ -/** - * @test id=custom-cl-zgc-generational - * @requires vm.cds.custom.loaders - * @requires vm.gc.ZGenerational - * @summary Test dumptime_table entries are removed with zgc eager class unloading - * @bug 8274935 - * @library /test/lib - * /test/hotspot/jtreg/runtime/cds/appcds - * /test/hotspot/jtreg/runtime/cds/appcds/test-classes - * /test/hotspot/jtreg/runtime/cds/appcds/dynamicArchive - * @modules java.base/jdk.internal.misc - * jdk.httpserver - * @build jdk.test.whitebox.WhiteBox - * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox - * @run main/othervm/timeout=180 -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:. DynamicLoaderConstraintsTest custom-zgc-generational - */ - import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import jdk.test.lib.Asserts; @@ -118,12 +101,10 @@ public class DynamicLoaderConstraintsTest extends DynamicArchiveTestBase { */ static boolean useCustomLoader; static boolean useZGC; - static boolean useZGenerational; public static void main(String[] args) throws Exception { useCustomLoader = (args.length != 0); - useZGenerational = (args.length != 0 && args[0].equals("custom-zgc-generational")); - useZGC = useZGenerational || (args.length != 0 && args[0].equals("custom-zgc")); + useZGC = (args.length != 0 && args[0].equals("custom-zgc")); runTest(DynamicLoaderConstraintsTest::doTest); } @@ -150,7 +131,7 @@ static void doTest(boolean errorInDump) throws Exception { for (int i = 1; i <= 3; i++) { System.out.println("========================================"); System.out.println("errorInDump: " + errorInDump + ", useCustomLoader: " + useCustomLoader + - ", useZGC: " + useZGC + ", ZGenerational: " + useZGenerational + ", case: " + i); + ", useZGC: " + useZGC + ", case: " + i); System.out.println("========================================"); String topArchiveName = getNewArchiveName(); String testCase = Integer.toString(i); @@ -164,10 +145,9 @@ static void doTest(boolean errorInDump) throws Exception { if (useCustomLoader) { if (useZGC) { - String zGenerational = "-XX:" + (useZGenerational ? "+" : "-") + "ZGenerational"; // Add options to force eager class unloading. 
cmdLine = TestCommon.concat(cmdLine, "-cp", loaderJar, - "-XX:+UseZGC", zGenerational, "-XX:ZCollectionInterval=0.01", + "-XX:+UseZGC", "-XX:ZCollectionInterval=0.01", loaderMainClass, appJar); setBaseArchiveOptions("-XX:+UseZGC", "-Xlog:cds"); } else { diff --git a/test/hotspot/jtreg/serviceability/dcmd/gc/HeapDumpCompressedTest.java b/test/hotspot/jtreg/serviceability/dcmd/gc/HeapDumpCompressedTest.java index 1e6d99a504866..3a960cc8c7d37 100644 --- a/test/hotspot/jtreg/serviceability/dcmd/gc/HeapDumpCompressedTest.java +++ b/test/hotspot/jtreg/serviceability/dcmd/gc/HeapDumpCompressedTest.java @@ -71,27 +71,15 @@ */ /* - * @test id=ZSinglegen - * @requires vm.gc.ZSinglegen + * @test id=Z + * @requires vm.gc.Z * @summary Test of diagnostic command GC.heap_dump with gzipped output (Z GC) * @library /test/lib * @modules java.base/jdk.internal.misc * java.compiler * java.management * jdk.internal.jvmstat/sun.jvmstat.monitor - * @run main/othervm -XX:+UseZGC -XX:-ZGenerational HeapDumpCompressedTest - */ - -/* - * @test id=ZGenerational - * @requires vm.gc.ZGenerational - * @summary Test of diagnostic command GC.heap_dump with gzipped output (Z GC) - * @library /test/lib - * @modules java.base/jdk.internal.misc - * java.compiler - * java.management - * jdk.internal.jvmstat/sun.jvmstat.monitor - * @run main/othervm -XX:+UseZGC -XX:+ZGenerational HeapDumpCompressedTest + * @run main/othervm -XX:+UseZGC HeapDumpCompressedTest */ /* diff --git a/test/hotspot/jtreg/testlibrary_tests/ir_framework/examples/GCBarrierIRExample.java b/test/hotspot/jtreg/testlibrary_tests/ir_framework/examples/GCBarrierIRExample.java index e0287fc39fe0a..11db83a1b3142 100644 --- a/test/hotspot/jtreg/testlibrary_tests/ir_framework/examples/GCBarrierIRExample.java +++ b/test/hotspot/jtreg/testlibrary_tests/ir_framework/examples/GCBarrierIRExample.java @@ -33,7 +33,7 @@ * @summary Example test that illustrates the use of the IR test framework for * verification of late-expanded GC barriers. * @library /test/lib / - * @requires vm.gc.ZGenerational + * @requires vm.gc.Z * @run driver ir_framework.examples.GCBarrierIRExample */ @@ -61,7 +61,7 @@ public static void main(String[] args) { // emission, such as ZGC. Because the collector selection flags are not // whitelisted (see IR framework's README.md file), the user (as opposed // to jtreg) needs to set these flags here. - TestFramework.runWithFlags("-XX:+UseZGC", "-XX:+ZGenerational"); + TestFramework.runWithFlags("-XX:+UseZGC"); } @Test diff --git a/test/jdk/ProblemList-generational-zgc.txt b/test/jdk/ProblemList-generational-zgc.txt deleted file mode 100644 index 9fa9874d20c64..0000000000000 --- a/test/jdk/ProblemList-generational-zgc.txt +++ /dev/null @@ -1,40 +0,0 @@ -# -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). 
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-#############################################################################
-#
-# List of quarantined tests for testing with Generational ZGC.
-#
-#############################################################################
-
-# Quiet all SA tests
-
-sun/tools/jhsdb/HeapDumpTest.java 8307393 generic-all
-sun/tools/jhsdb/BasicLauncherTest.java 8307393 generic-all
-sun/tools/jhsdb/JStackStressTest.java 8307393 generic-all
-sun/tools/jhsdb/JShellHeapDumpTest.java 8307393 generic-all
-sun/tools/jhsdb/SAGetoptTest.java 8307393 generic-all
-sun/tools/jhsdb/heapconfig/JMapHeapConfigTest.java 8307393 generic-all
-sun/tools/jhsdb/HeapDumpTestWithActiveProcess.java 8307393 generic-all
-
-com/sun/jdi/ThreadMemoryLeakTest.java 8307402 generic-all
diff --git a/test/jdk/ProblemList-zgc.txt b/test/jdk/ProblemList-zgc.txt
index 9fae070e25d1f..e81ac8137471c 100644
--- a/test/jdk/ProblemList-zgc.txt
+++ b/test/jdk/ProblemList-zgc.txt
@@ -27,5 +27,14 @@
 #
 #############################################################################

-sun/tools/jhsdb/JShellHeapDumpTest.java 8276539 generic-all
-sun/tools/jhsdb/HeapDumpTestWithActiveProcess.java 8276539 generic-all
+# Quiet all SA tests
+
+sun/tools/jhsdb/HeapDumpTest.java 8307393 generic-all
+sun/tools/jhsdb/BasicLauncherTest.java 8307393 generic-all
+sun/tools/jhsdb/JStackStressTest.java 8307393 generic-all
+sun/tools/jhsdb/JShellHeapDumpTest.java 8307393 generic-all
+sun/tools/jhsdb/SAGetoptTest.java 8307393 generic-all
+sun/tools/jhsdb/heapconfig/JMapHeapConfigTest.java 8307393 generic-all
+sun/tools/jhsdb/HeapDumpTestWithActiveProcess.java 8307393 generic-all
+
+com/sun/jdi/ThreadMemoryLeakTest.java 8307402 generic-all
diff --git a/test/jdk/TEST.ROOT b/test/jdk/TEST.ROOT
index c8db6b89a71c7..6276932afbd14 100644
--- a/test/jdk/TEST.ROOT
+++ b/test/jdk/TEST.ROOT
@@ -84,8 +84,6 @@ requires.properties= \
     vm.gc.Shenandoah \
     vm.gc.Epsilon \
     vm.gc.Z \
-    vm.gc.ZGenerational \
-    vm.gc.ZSinglegen \
     vm.graal.enabled \
     vm.compiler1.enabled \
     vm.compiler2.enabled \
diff --git a/test/jdk/java/awt/Graphics2D/LargeWindowPaintTest.java b/test/jdk/java/awt/Graphics2D/LargeWindowPaintTest.java
index 991c22938b8a3..2c61a29aacba8 100644
--- a/test/jdk/java/awt/Graphics2D/LargeWindowPaintTest.java
+++ b/test/jdk/java/awt/Graphics2D/LargeWindowPaintTest.java
@@ -33,25 +33,14 @@
  */

 /*
- * @test id=ZSinglegen
+ * @test id=Z
  * @bug 8240654
  * @summary Test painting a large window works
  * @key headful
  * @requires (os.family == "windows")
- * @requires vm.gc.ZSinglegen
- * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Dsun.java2d.uiScale=1 LargeWindowPaintTest
- * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Dsun.java2d.uiScale=1 -Dsun.java2d.d3d=false LargeWindowPaintTest
- */
-
-/*
- * @test id=ZGenerational
- * @bug 8240654
- * @summary Test painting a large window works
- * @key headful
- * @requires (os.family == "windows")
- * @requires vm.gc.ZGenerational
- * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Dsun.java2d.uiScale=1 LargeWindowPaintTest
- * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Dsun.java2d.uiScale=1 -Dsun.java2d.d3d=false LargeWindowPaintTest
+ * @requires vm.gc.Z
+ * @run main/othervm -XX:+UseZGC -Dsun.java2d.uiScale=1 LargeWindowPaintTest
+ * @run main/othervm -XX:+UseZGC -Dsun.java2d.uiScale=1 -Dsun.java2d.d3d=false LargeWindowPaintTest
  */

 import java.awt.Color;
diff --git a/test/jdk/java/awt/print/PrinterJob/AlphaPrintTest.java b/test/jdk/java/awt/print/PrinterJob/AlphaPrintTest.java
index 5214ab11ec581..9a1ff616ee93f 100644
--- a/test/jdk/java/awt/print/PrinterJob/AlphaPrintTest.java
+++ b/test/jdk/java/awt/print/PrinterJob/AlphaPrintTest.java
@@ -22,23 +22,13 @@
  */

 /*
- * @test id=ZSinglegen
+ * @test id=Z
  * @bug 8240654
  * @summary Test printing alpha colors - banded printing works with ZGC.
  * @key headful printer
  * @requires (os.family == "windows")
- * @requires vm.gc.ZSinglegen
- * @run main/manual/othervm -XX:+UseZGC -XX:-ZGenerational -Dsun.java2d.d3d=false AlphaPrintTest
- */
-
-/*
- * @test id=ZGenerational
- * @bug 8240654
- * @summary Test printing alpha colors - banded printing works with ZGC.
- * @key headful printer
- * @requires (os.family == "windows")
- * @requires vm.gc.ZGenerational
- * @run main/manual/othervm -XX:+UseZGC -XX:+ZGenerational -Dsun.java2d.d3d=false AlphaPrintTest
+ * @requires vm.gc.Z
+ * @run main/manual/othervm -XX:+UseZGC -Dsun.java2d.d3d=false AlphaPrintTest
  */

 import java.awt.Color;
diff --git a/test/jdk/java/foreign/stackwalk/TestAsyncStackWalk.java b/test/jdk/java/foreign/stackwalk/TestAsyncStackWalk.java
index c21225575f9e9..1c234a9c3c658 100644
--- a/test/jdk/java/foreign/stackwalk/TestAsyncStackWalk.java
+++ b/test/jdk/java/foreign/stackwalk/TestAsyncStackWalk.java
@@ -39,8 +39,8 @@
  */

 /*
- * @test id=ZSinglegen
- * @requires vm.gc.ZSinglegen
+ * @test id=Z
+ * @requires vm.gc.Z
  * @library /test/lib
  * @library ../
  * @build jdk.test.whitebox.WhiteBox
@@ -52,25 +52,7 @@
  *   -XX:+WhiteBoxAPI
  *   --enable-native-access=ALL-UNNAMED
  *   -Xbatch
- *   -XX:+UseZGC -XX:-ZGenerational
- *   TestAsyncStackWalk
- */
-
-/*
- * @test id=ZGenerational
- * @requires vm.gc.ZGenerational
- * @library /test/lib
- * @library ../
- * @build jdk.test.whitebox.WhiteBox
- * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
- *
- * @run main/othervm
- *   -Xbootclasspath/a:.
- *   -XX:+UnlockDiagnosticVMOptions
- *   -XX:+WhiteBoxAPI
- *   --enable-native-access=ALL-UNNAMED
- *   -Xbatch
- *   -XX:+UseZGC -XX:+ZGenerational
+ *   -XX:+UseZGC
  *   TestAsyncStackWalk
  */

diff --git a/test/jdk/java/foreign/stackwalk/TestStackWalk.java b/test/jdk/java/foreign/stackwalk/TestStackWalk.java
index 5218792658f0c..193a71affdd93 100644
--- a/test/jdk/java/foreign/stackwalk/TestStackWalk.java
+++ b/test/jdk/java/foreign/stackwalk/TestStackWalk.java
@@ -39,8 +39,8 @@
  */

 /*
- * @test id=ZSinglegen
- * @requires vm.gc.ZSinglegen
+ * @test id=Z
+ * @requires vm.gc.Z
  * @library /test/lib
  * @library ../
  * @build jdk.test.whitebox.WhiteBox
@@ -52,25 +52,7 @@
  *   -XX:+WhiteBoxAPI
  *   --enable-native-access=ALL-UNNAMED
  *   -Xbatch
- *   -XX:+UseZGC -XX:-ZGenerational
- *   TestStackWalk
- */
-
-/*
- * @test id=ZGenerational
- * @requires vm.gc.ZGenerational
- * @library /test/lib
- * @library ../
- * @build jdk.test.whitebox.WhiteBox
- * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
- *
- * @run main/othervm
- *   -Xbootclasspath/a:.
- *   -XX:+UnlockDiagnosticVMOptions
- *   -XX:+WhiteBoxAPI
- *   --enable-native-access=ALL-UNNAMED
- *   -Xbatch
- *   -XX:+UseZGC -XX:+ZGenerational
+ *   -XX:+UseZGC
  *   TestStackWalk
  */

diff --git a/test/jdk/java/io/ObjectStreamClass/ObjectStreamClassCaching.java b/test/jdk/java/io/ObjectStreamClass/ObjectStreamClassCaching.java
index 115481243f77d..4004cbcf859e9 100644
--- a/test/jdk/java/io/ObjectStreamClass/ObjectStreamClassCaching.java
+++ b/test/jdk/java/io/ObjectStreamClass/ObjectStreamClassCaching.java
@@ -49,12 +49,12 @@
  */

 /*
- * @test id=ZGenerational
- * @requires vm.gc.ZGenerational
+ * @test id=Z
+ * @requires vm.gc.Z
  * @bug 8277072 8327180
  * @library /test/lib/
  * @summary ObjectStreamClass caches keep ClassLoaders alive (ZGC)
- * @run testng/othervm -Xmx64m -XX:+UseZGC -XX:+ZGenerational ObjectStreamClassCaching
+ * @run testng/othervm -Xmx64m -XX:+UseZGC ObjectStreamClassCaching
  */

 /*
diff --git a/test/jdk/java/lang/ProcessBuilder/CloseRace.java b/test/jdk/java/lang/ProcessBuilder/CloseRace.java
index e7eab128d60cf..b0ca352b8acf6 100644
--- a/test/jdk/java/lang/ProcessBuilder/CloseRace.java
+++ b/test/jdk/java/lang/ProcessBuilder/CloseRace.java
@@ -22,24 +22,24 @@
  */

 /**
- * @test
+ * @test id=Default
  * @bug 8024521 8315721
  * @summary Closing ProcessPipeInputStream at the time the process exits is racy
  *          and leads to data corruption. Run this test manually (as
  *          an ordinary java program) with -Xmx8M to repro bug 8024521.
- * @requires !vm.opt.final.ZGenerational
+ * @requires vm.gc != "Z"
  * @comment Don't allow -Xcomp, it disturbs the timing
  * @requires (vm.compMode != "Xcomp")
  * @run main/othervm -Xmx8M -Dtest.duration=2 CloseRace
  */

 /**
- * @test
+ * @test id=Z
  * @comment Turn up heap size to lower amount of GCs
- * @requires vm.gc.Z & vm.opt.final.ZGenerational
+ * @requires vm.gc.Z
  * @comment Don't allow -Xcomp, it disturbs the timing
  * @requires (vm.compMode != "Xcomp")
- * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Xmx32M -Dtest.duration=2 CloseRace
+ * @run main/othervm -XX:+UseZGC -Xmx32M -Dtest.duration=2 CloseRace
  */

 import java.io.*;
diff --git a/test/jdk/java/lang/Thread/virtual/stress/Skynet.java b/test/jdk/java/lang/Thread/virtual/stress/Skynet.java
index 5b63fe84b1d89..562a8dbd5e1bf 100644
--- a/test/jdk/java/lang/Thread/virtual/stress/Skynet.java
+++ b/test/jdk/java/lang/Thread/virtual/stress/Skynet.java
@@ -28,22 +28,12 @@
  * @requires !vm.debug | vm.gc != "Z"
  * @run main/othervm/timeout=300 -Xmx1500m Skynet
  */
-
-/*
- * @test id=ZSinglegen
- * @requires vm.debug == true & vm.continuations
- * @requires vm.gc.ZSinglegen
- * @run main/othervm/timeout=300 -XX:+UnlockDiagnosticVMOptions
- *   -XX:+UseZGC -XX:-ZGenerational
- *   -XX:+ZVerifyOops -XX:ZCollectionInterval=0.01 -Xmx1500m Skynet
- */
-
 /*
- * @test id=ZGenerational
+ * @test id=Z
  * @requires vm.debug == true & vm.continuations
- * @requires vm.gc.ZGenerational
+ * @requires vm.gc.Z
  * @run main/othervm/timeout=300 -XX:+UnlockDiagnosticVMOptions
- *   -XX:+UseZGC -XX:+ZGenerational
+ *   -XX:+UseZGC
  *   -XX:+ZVerifyOops -XX:ZCollectionInterval=0.01 -Xmx1500m Skynet
  */

diff --git a/test/jdk/java/lang/management/MemoryMXBean/MemoryTest.java b/test/jdk/java/lang/management/MemoryMXBean/MemoryTest.java
index 68a6671e77a03..f8abba4398399 100644
--- a/test/jdk/java/lang/management/MemoryMXBean/MemoryTest.java
+++ b/test/jdk/java/lang/management/MemoryMXBean/MemoryTest.java
@@ -34,27 +34,15 @@
  */

 /*
- * @test id=ZSinglegen
+ * @test id=Z
  * @bug 4530538
  * @summary Basic unit test of MemoryMXBean.getMemoryPools() and
  *          MemoryMXBean.getMemoryManager().
- * @requires vm.gc.ZSinglegen
+ * @requires vm.gc.Z
  * @author Mandy Chung
  *
  * @modules jdk.management
- * @run main/othervm -XX:+UseZGC -XX:-ZGenerational MemoryTest 2 1
- */
-
-/*
- * @test id=ZGenerational
- * @bug 4530538
- * @summary Basic unit test of MemoryMXBean.getMemoryPools() and
- *          MemoryMXBean.getMemoryManager().
- * @requires vm.gc.ZGenerational
- * @author Mandy Chung
- *
- * @modules jdk.management
- * @run main/othervm -XX:+UseZGC -XX:+ZGenerational MemoryTest 4 2
+ * @run main/othervm -XX:+UseZGC MemoryTest 4 2
  */

 /*
diff --git a/test/jdk/jdk/dynalink/TypeConverterFactoryMemoryLeakTest.java b/test/jdk/jdk/dynalink/TypeConverterFactoryMemoryLeakTest.java
index 7e907781e9d3d..4735bb4a08fa5 100644
--- a/test/jdk/jdk/dynalink/TypeConverterFactoryMemoryLeakTest.java
+++ b/test/jdk/jdk/dynalink/TypeConverterFactoryMemoryLeakTest.java
@@ -46,19 +46,11 @@
  */

 /*
- * @test id=with_ZGC_Singlegen
- * @requires vm.gc.ZSinglegen
+ * @test id=with_ZGC
+ * @requires vm.gc.Z
  * @bug 8198540
  * @summary Test TypeConverterFactory is not leaking method handles (Z GC)
- * @run main/othervm -XX:+UseZGC -XX:-ZGenerational TypeConverterFactoryMemoryLeakTest
- */
-
-/*
- * @test id=with_ZGC_Generational
- * @requires vm.gc.ZGenerational
- * @bug 8198540
- * @summary Test TypeConverterFactory is not leaking method handles (Z GC)
- * @run main/othervm -XX:+UseZGC -XX:+ZGenerational TypeConverterFactoryMemoryLeakTest
+ * @run main/othervm -XX:+UseZGC TypeConverterFactoryMemoryLeakTest
  */

 /*
diff --git a/test/jdk/jdk/dynalink/TypeConverterFactoryRetentionTests.java b/test/jdk/jdk/dynalink/TypeConverterFactoryRetentionTests.java
index bdfd33eff4801..8ad972d450225 100644
--- a/test/jdk/jdk/dynalink/TypeConverterFactoryRetentionTests.java
+++ b/test/jdk/jdk/dynalink/TypeConverterFactoryRetentionTests.java
@@ -46,19 +46,11 @@
  */

 /*
- * @test id=with_ZGC_Singlegen
- * @requires vm.gc.ZSinglegen
+ * @test id=with_ZGC
+ * @requires vm.gc.Z
  * @bug 8198540
  * @summary Test TypeConverterFactory is not leaking class loaders (Z GC)
- * @run main/othervm -XX:+UseZGC -XX:-ZGenerational TypeConverterFactoryRetentionTests
- */
-
-/*
- * @test id=with_ZGC_Generational
- * @requires vm.gc.ZGenerational
- * @bug 8198540
- * @summary Test TypeConverterFactory is not leaking class loaders (Z GC)
- * @run main/othervm -XX:+UseZGC -XX:+ZGenerational TypeConverterFactoryRetentionTests
+ * @run main/othervm -XX:+UseZGC TypeConverterFactoryRetentionTests
  */

 /*
diff --git a/test/jdk/jdk/incubator/vector/VectorMaxConversionTests.java b/test/jdk/jdk/incubator/vector/VectorMaxConversionTests.java
index 28c5348100afa..a8f89f9d705ea 100644
--- a/test/jdk/jdk/incubator/vector/VectorMaxConversionTests.java
+++ b/test/jdk/jdk/incubator/vector/VectorMaxConversionTests.java
@@ -41,26 +41,14 @@
  */

 /*
- * @test id=ZSinglegen
+ * @test id=Z
  * @bug 8281544
  * @summary Test that ZGC and vectorapi with KNL work together.
- * @requires vm.gc.ZSinglegen
+ * @requires vm.gc.Z
  * @modules jdk.incubator.vector
  * @modules java.base/jdk.internal.vm.annotation
  * @run testng/othervm -XX:-TieredCompilation --add-opens jdk.incubator.vector/jdk.incubator.vector=ALL-UNNAMED
- *      -XX:+UnlockDiagnosticVMOptions -XX:+UseKNLSetting -XX:+UseZGC -XX:-ZGenerational -XX:+IgnoreUnrecognizedVMOptions
- *      VectorMaxConversionTests
- */
-
-/*
- * @test id=ZGenerational
- * @bug 8281544
- * @summary Test that ZGC and vectorapi with KNL work together.
- * @requires vm.gc.ZGenerational
- * @modules jdk.incubator.vector
- * @modules java.base/jdk.internal.vm.annotation
- * @run testng/othervm -XX:-TieredCompilation --add-opens jdk.incubator.vector/jdk.incubator.vector=ALL-UNNAMED
- *      -XX:+UnlockDiagnosticVMOptions -XX:+UseKNLSetting -XX:+UseZGC -XX:+ZGenerational -XX:+IgnoreUnrecognizedVMOptions
+ *      -XX:+UnlockDiagnosticVMOptions -XX:+UseKNLSetting -XX:+UseZGC -XX:+IgnoreUnrecognizedVMOptions
  *      VectorMaxConversionTests
  */

diff --git a/test/jdk/jdk/jfr/event/gc/collection/TestGarbageCollectionEventWithZMajor.java b/test/jdk/jdk/jfr/event/gc/collection/TestGarbageCollectionEventWithZMajor.java
index 81e18439b5788..3766d9d0d32b0 100644
--- a/test/jdk/jdk/jfr/event/gc/collection/TestGarbageCollectionEventWithZMajor.java
+++ b/test/jdk/jdk/jfr/event/gc/collection/TestGarbageCollectionEventWithZMajor.java
@@ -35,10 +35,10 @@

 /**
  * @test
- * @requires vm.hasJFR & vm.gc.ZGenerational
+ * @requires vm.hasJFR & vm.gc.Z
  * @key jfr
  * @library /test/lib /test/jdk
- * @run main/othervm -Xmx50m -XX:+UseZGC -XX:+ZGenerational -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -Xlog:gc* jdk.jfr.event.gc.collection.TestGarbageCollectionEventWithZMajor
+ * @run main/othervm -Xmx50m -XX:+UseZGC -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -Xlog:gc* jdk.jfr.event.gc.collection.TestGarbageCollectionEventWithZMajor
  */

 public class TestGarbageCollectionEventWithZMajor {
diff --git a/test/jdk/jdk/jfr/event/gc/collection/TestGarbageCollectionEventWithZMinor.java b/test/jdk/jdk/jfr/event/gc/collection/TestGarbageCollectionEventWithZMinor.java
index e7e94cf9ff040..c8d681594dd74 100644
--- a/test/jdk/jdk/jfr/event/gc/collection/TestGarbageCollectionEventWithZMinor.java
+++ b/test/jdk/jdk/jfr/event/gc/collection/TestGarbageCollectionEventWithZMinor.java
@@ -40,12 +40,12 @@

 /**
  * @test
  * @key jfr
- * @requires vm.hasJFR & vm.gc.ZGenerational
+ * @requires vm.hasJFR & vm.gc.Z
  * @key jfr
  * @library /test/lib /test/jdk
  * @build jdk.test.whitebox.WhiteBox
  * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
- * @run main/othervm -Xbootclasspath/a:. -XX:+UseZGC -XX:+ZGenerational -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -Xlog:gc* jdk.jfr.event.gc.collection.TestGarbageCollectionEventWithZMinor
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UseZGC -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -Xlog:gc* jdk.jfr.event.gc.collection.TestGarbageCollectionEventWithZMinor
  */
 public class TestGarbageCollectionEventWithZMinor {
diff --git a/test/jdk/jdk/jfr/event/gc/collection/TestZOldGarbageCollectionEvent.java b/test/jdk/jdk/jfr/event/gc/collection/TestZOldGarbageCollectionEvent.java
index 0f807f4f6e4ba..50177ffd16b77 100644
--- a/test/jdk/jdk/jfr/event/gc/collection/TestZOldGarbageCollectionEvent.java
+++ b/test/jdk/jdk/jfr/event/gc/collection/TestZOldGarbageCollectionEvent.java
@@ -35,10 +35,10 @@

 /**
  * @test
- * @requires vm.hasJFR & vm.gc.ZGenerational
+ * @requires vm.hasJFR & vm.gc.Z
  * @key jfr
  * @library /test/lib /test/jdk
- * @run main/othervm -Xmx50m -XX:+UseZGC -XX:+ZGenerational -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -Xlog:gc* jdk.jfr.event.gc.collection.TestZOldGarbageCollectionEvent
+ * @run main/othervm -Xmx50m -XX:+UseZGC -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -Xlog:gc* jdk.jfr.event.gc.collection.TestZOldGarbageCollectionEvent
  */

 public class TestZOldGarbageCollectionEvent {
diff --git a/test/jdk/jdk/jfr/event/gc/collection/TestZYoungGarbageCollectionEvent.java b/test/jdk/jdk/jfr/event/gc/collection/TestZYoungGarbageCollectionEvent.java
index c16bdaa5d642f..f8e4e8b344bda 100644
--- a/test/jdk/jdk/jfr/event/gc/collection/TestZYoungGarbageCollectionEvent.java
+++ b/test/jdk/jdk/jfr/event/gc/collection/TestZYoungGarbageCollectionEvent.java
@@ -35,10 +35,10 @@

 /**
  * @test
- * @requires vm.hasJFR & vm.gc.ZGenerational
+ * @requires vm.hasJFR & vm.gc.Z
  * @key jfr
  * @library /test/lib /test/jdk
- * @run main/othervm -Xmx50m -XX:+UseZGC -XX:+ZGenerational -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -Xlog:gc* jdk.jfr.event.gc.collection.TestZYoungGarbageCollectionEvent
+ * @run main/othervm -Xmx50m -XX:+UseZGC -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -Xlog:gc* jdk.jfr.event.gc.collection.TestZYoungGarbageCollectionEvent
  */

 public class TestZYoungGarbageCollectionEvent {
diff --git a/test/jdk/jdk/jfr/event/gc/detailed/TestGCPhaseConcurrent.java b/test/jdk/jdk/jfr/event/gc/detailed/TestGCPhaseConcurrent.java
index fb048ee6bd7c5..48e188346f871 100644
--- a/test/jdk/jdk/jfr/event/gc/detailed/TestGCPhaseConcurrent.java
+++ b/test/jdk/jdk/jfr/event/gc/detailed/TestGCPhaseConcurrent.java
@@ -31,19 +31,11 @@
 import jdk.test.lib.jfr.Events;

 /**
- * @test id=ZGenerational
+ * @test id=Z
  * @key jfr
  * @library /test/lib /test/jdk /test/hotspot/jtreg
- * @requires vm.hasJFR & vm.gc.ZGenerational
- * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Xmx32M jdk.jfr.event.gc.detailed.TestGCPhaseConcurrent Z
- */
-
-/**
- * @test id=ZSinglegen
- * @key jfr
- * @library /test/lib /test/jdk /test/hotspot/jtreg
- * @requires vm.hasJFR & vm.gc.ZSinglegen
- * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Xmx32M jdk.jfr.event.gc.detailed.TestGCPhaseConcurrent X
+ * @requires vm.hasJFR & vm.gc.Z
+ * @run main/othervm -XX:+UseZGC -Xmx32M jdk.jfr.event.gc.detailed.TestGCPhaseConcurrent Z
  */

 /**
diff --git a/test/jdk/jdk/jfr/event/gc/detailed/TestZAllocationStallEvent.java b/test/jdk/jdk/jfr/event/gc/detailed/TestZAllocationStallEvent.java
index 8977a574a627b..1b4ce2597847c 100644
--- a/test/jdk/jdk/jfr/event/gc/detailed/TestZAllocationStallEvent.java
+++ b/test/jdk/jdk/jfr/event/gc/detailed/TestZAllocationStallEvent.java
@@ -32,19 +32,11 @@
 import jdk.test.lib.jfr.Events;

 /**
- * @test id=ZSinglegen
- * @requires vm.hasJFR & vm.gc.ZSinglegen
+ * @test
+ * @requires vm.hasJFR & vm.gc.Z
  * @key jfr
  * @library /test/lib /test/jdk /test/hotspot/jtreg
- * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Xmx32M -Xlog:gc*:gc.log::filecount=0 jdk.jfr.event.gc.detailed.TestZAllocationStallEvent
- */
-
-/**
- * @test id=ZGenerational
- * @requires vm.hasJFR & vm.gc.ZGenerational
- * @key jfr
- * @library /test/lib /test/jdk /test/hotspot/jtreg
- * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Xmx32M -Xlog:gc*:gc.log::filecount=0 jdk.jfr.event.gc.detailed.TestZAllocationStallEvent
+ * @run main/othervm -XX:+UseZGC -Xmx32M -Xlog:gc*:gc.log::filecount=0 jdk.jfr.event.gc.detailed.TestZAllocationStallEvent
  */

 public class TestZAllocationStallEvent {
diff --git a/test/jdk/jdk/jfr/event/gc/detailed/TestZPageAllocationEvent.java b/test/jdk/jdk/jfr/event/gc/detailed/TestZPageAllocationEvent.java
index 182f7b3d509f3..d672a2654b814 100644
--- a/test/jdk/jdk/jfr/event/gc/detailed/TestZPageAllocationEvent.java
+++ b/test/jdk/jdk/jfr/event/gc/detailed/TestZPageAllocationEvent.java
@@ -32,19 +32,11 @@
 import jdk.test.lib.jfr.Events;

 /**
- * @test id=ZSinglegen
- * @requires vm.hasJFR & vm.gc.ZSinglegen
+ * @test
+ * @requires vm.hasJFR & vm.gc.Z
  * @key jfr
  * @library /test/lib /test/jdk /test/hotspot/jtreg
- * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Xmx32M jdk.jfr.event.gc.detailed.TestZPageAllocationEvent
- */
-
-/**
- * @test id=ZGenerational
- * @requires vm.hasJFR & vm.gc.ZGenerational
- * @key jfr
- * @library /test/lib /test/jdk /test/hotspot/jtreg
- * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Xmx32M jdk.jfr.event.gc.detailed.TestZPageAllocationEvent
+ * @run main/othervm -XX:+UseZGC -Xmx32M jdk.jfr.event.gc.detailed.TestZPageAllocationEvent
  */

 public class TestZPageAllocationEvent {
diff --git a/test/jdk/jdk/jfr/event/gc/detailed/TestZRelocationSetEvent.java b/test/jdk/jdk/jfr/event/gc/detailed/TestZRelocationSetEvent.java
index 8f07abeaaefd4..f14eec9754828 100644
--- a/test/jdk/jdk/jfr/event/gc/detailed/TestZRelocationSetEvent.java
+++ b/test/jdk/jdk/jfr/event/gc/detailed/TestZRelocationSetEvent.java
@@ -32,19 +32,11 @@
 import jdk.test.lib.jfr.Events;

 /**
- * @test id=ZSinglegen
- * @requires vm.hasJFR & vm.gc.ZSinglegen
+ * @test
+ * @requires vm.hasJFR & vm.gc.Z
  * @key jfr
  * @library /test/lib /test/jdk /test/hotspot/jtreg
- * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Xmx32M jdk.jfr.event.gc.detailed.TestZRelocationSetEvent
- */
-
-/**
- * @test id=ZGenerational
- * @requires vm.hasJFR & vm.gc.ZGenerational
- * @key jfr
- * @library /test/lib /test/jdk /test/hotspot/jtreg
- * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Xmx32M jdk.jfr.event.gc.detailed.TestZRelocationSetEvent
+ * @run main/othervm -XX:+UseZGC -Xmx32M jdk.jfr.event.gc.detailed.TestZRelocationSetEvent
  */

 public class TestZRelocationSetEvent {
diff --git a/test/jdk/jdk/jfr/event/gc/detailed/TestZRelocationSetGroupEvent.java b/test/jdk/jdk/jfr/event/gc/detailed/TestZRelocationSetGroupEvent.java
index b997d173ffb13..f00655cf94276 100644
--- a/test/jdk/jdk/jfr/event/gc/detailed/TestZRelocationSetGroupEvent.java
+++ b/test/jdk/jdk/jfr/event/gc/detailed/TestZRelocationSetGroupEvent.java
@@ -32,19 +32,11 @@
 import jdk.test.lib.jfr.Events;

 /**
- * @test id=ZSinglegen
- * @requires vm.hasJFR & vm.gc.ZSinglegen
+ * @test
+ * @requires vm.hasJFR & vm.gc.Z
  * @key jfr
  * @library /test/lib /test/jdk /test/hotspot/jtreg
- * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Xmx32M jdk.jfr.event.gc.detailed.TestZRelocationSetGroupEvent
- */
-
-/**
- * @test id=ZGenerational
- * @requires vm.hasJFR & vm.gc.ZGenerational
- * @key jfr
- * @library /test/lib /test/jdk /test/hotspot/jtreg
- * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Xmx32M jdk.jfr.event.gc.detailed.TestZRelocationSetGroupEvent
+ * @run main/othervm -XX:+UseZGC -Xmx32M jdk.jfr.event.gc.detailed.TestZRelocationSetGroupEvent
  */

 public class TestZRelocationSetGroupEvent {
diff --git a/test/jdk/jdk/jfr/event/gc/detailed/TestZUncommitEvent.java b/test/jdk/jdk/jfr/event/gc/detailed/TestZUncommitEvent.java
index 06fd9b5a1b868..e7a37e2de554d 100644
--- a/test/jdk/jdk/jfr/event/gc/detailed/TestZUncommitEvent.java
+++ b/test/jdk/jdk/jfr/event/gc/detailed/TestZUncommitEvent.java
@@ -34,19 +34,11 @@
 import jdk.test.lib.jfr.Events;

 /**
- * @test id=ZSinglegen
- * @requires vm.hasJFR & vm.gc.ZSinglegen
+ * @test id=Z
+ * @requires vm.hasJFR & vm.gc.Z
  * @key jfr
  * @library /test/lib /test/jdk /test/hotspot/jtreg
- * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Xms32M -Xmx128M -Xlog:gc,gc+heap -XX:+ZUncommit -XX:ZUncommitDelay=1 jdk.jfr.event.gc.detailed.TestZUncommitEvent
- */
-
-/**
- * @test id=ZGenerational
- * @requires vm.hasJFR & vm.gc.ZGenerational
- * @key jfr
- * @library /test/lib /test/jdk /test/hotspot/jtreg
- * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Xms32M -Xmx128M -Xlog:gc,gc+heap -XX:+ZUncommit -XX:ZUncommitDelay=1 jdk.jfr.event.gc.detailed.TestZUncommitEvent
+ * @run main/othervm -XX:+UseZGC -Xms32M -Xmx128M -Xlog:gc,gc+heap -XX:+ZUncommit -XX:ZUncommitDelay=1 jdk.jfr.event.gc.detailed.TestZUncommitEvent
  */

 public class TestZUncommitEvent {
diff --git a/test/jdk/jdk/jfr/event/gc/detailed/TestZUnmapEvent.java b/test/jdk/jdk/jfr/event/gc/detailed/TestZUnmapEvent.java
index 94460f8f27838..a19e89771c08c 100644
--- a/test/jdk/jdk/jfr/event/gc/detailed/TestZUnmapEvent.java
+++ b/test/jdk/jdk/jfr/event/gc/detailed/TestZUnmapEvent.java
@@ -32,19 +32,11 @@
 import jdk.test.lib.jfr.Events;

 /**
- * @test id=ZSinglegen
- * @requires vm.hasJFR & vm.gc.ZSinglegen
+ * @test id=Z
+ * @requires vm.hasJFR & vm.gc.Z
  * @key jfr
  * @library /test/lib /test/jdk /test/hotspot/jtreg
- * @run main/othervm -XX:+UseZGC -XX:-ZGenerational -Xmx32M jdk.jfr.event.gc.detailed.TestZUnmapEvent
- */
-
-/**
- * @test id=ZGenerational
- * @requires vm.hasJFR & vm.gc.ZGenerational
- * @key jfr
- * @library /test/lib /test/jdk /test/hotspot/jtreg
- * @run main/othervm -XX:+UseZGC -XX:+ZGenerational -Xmx32M jdk.jfr.event.gc.detailed.TestZUnmapEvent
+ * @run main/othervm -XX:+UseZGC -Xmx32M jdk.jfr.event.gc.detailed.TestZUnmapEvent
  */

 public class TestZUnmapEvent {
diff --git a/test/jdk/jdk/jfr/event/oldobject/TestZ.java b/test/jdk/jdk/jfr/event/oldobject/TestZ.java
index 224a12373240c..99605846382bf 100644
--- a/test/jdk/jdk/jfr/event/oldobject/TestZ.java
+++ b/test/jdk/jdk/jfr/event/oldobject/TestZ.java
@@ -33,23 +33,13 @@
 import jdk.test.lib.jfr.Events;

 /**
- * @test id=ZSinglegen
- * @requires vm.hasJFR & vm.gc.ZSinglegen
+ * @test
+ * @requires vm.hasJFR & vm.gc.Z
  * @key jfr
  * @summary Test leak profiler with ZGC
  * @library /test/lib /test/jdk
  * @modules jdk.jfr/jdk.jfr.internal.test
- * @run main/othervm -XX:TLABSize=2k -XX:+UseZGC -XX:-ZGenerational jdk.jfr.event.oldobject.TestZ
- */
-
-/**
- * @test id=ZGenerational
- * @requires vm.hasJFR & vm.gc.ZGenerational
- * @key jfr
- * @summary Test leak profiler with ZGC
- * @library /test/lib /test/jdk
- * @modules jdk.jfr/jdk.jfr.internal.test
- * @run main/othervm -XX:TLABSize=2k -XX:+UseZGC -XX:+ZGenerational jdk.jfr.event.oldobject.TestZ
+ * @run main/othervm -XX:TLABSize=2k -XX:+UseZGC jdk.jfr.event.oldobject.TestZ
  */

 public class TestZ {
diff --git a/test/jdk/sun/tools/jmap/BasicJMapTest.java b/test/jdk/sun/tools/jmap/BasicJMapTest.java
index d8a24ef05facc..991648b96c2f3 100644
--- a/test/jdk/sun/tools/jmap/BasicJMapTest.java
+++ b/test/jdk/sun/tools/jmap/BasicJMapTest.java
@@ -87,8 +87,8 @@
  */

 /*
- * @test id=ZSinglegen
- * @requires vm.gc.ZSinglegen
+ * @test id=Z
+ * @requires vm.gc.Z
  * @summary Unit test for jmap utility (Z GC)
  * @key intermittent
  * @library /test/lib
@@ -96,20 +96,7 @@
  * @build jdk.test.lib.hprof.model.*
  * @build jdk.test.lib.hprof.parser.*
  * @build jdk.test.lib.hprof.util.*
- * @run main/othervm/timeout=240 -XX:+UseZGC -XX:-ZGenerational BasicJMapTest
- */
-
-/*
- * @test id=ZGenerational
- * @requires vm.gc.ZGenerational
- * @summary Unit test for jmap utility (Z GC)
- * @key intermittent
- * @library /test/lib
- * @build jdk.test.lib.hprof.*
- * @build jdk.test.lib.hprof.model.*
- * @build jdk.test.lib.hprof.parser.*
- * @build jdk.test.lib.hprof.util.*
- * @run main/othervm/timeout=240 -XX:+UseZGC -XX:+ZGenerational BasicJMapTest
+ * @run main/othervm/timeout=240 -XX:+UseZGC BasicJMapTest
  */

 public class BasicJMapTest {
diff --git a/test/jtreg-ext/requires/VMProps.java b/test/jtreg-ext/requires/VMProps.java
index 465c641d44204..4f00846116cb6 100644
--- a/test/jtreg-ext/requires/VMProps.java
+++ b/test/jtreg-ext/requires/VMProps.java
@@ -323,17 +323,6 @@ protected void vmGC(SafeMap map) {
         for (GC gc: GC.values()) {
             map.put("vm.gc." + gc.name(), () -> "" + vmGCProperty.test(gc));
         }
-
-        // Special handling for ZGC modes
-        var vmGCZ = vmGCProperty.test(GC.Z);
-        var genZ = WB.getBooleanVMFlag("ZGenerational");
-        var genZIsDefault = WB.isDefaultVMFlag("ZGenerational");
-        // vm.gc.ZGenerational=true means:
-        // vm.gc.Z is true and ZGenerational is either explicitly true, or default
-        map.put("vm.gc.ZGenerational", () -> "" + (vmGCZ && (genZ || genZIsDefault)));
-        // vm.gc.ZSinglegen=true means:
-        // vm.gc.Z is true and ZGenerational is either explicitly false, or default
-        map.put("vm.gc.ZSinglegen", () -> "" + (vmGCZ && (!genZ || genZIsDefault)));
     }

     /**
@@ -388,7 +377,6 @@ protected void vmOptFinalFlags(SafeMap map) {
         vmOptFinalFlag(map, "UseCompressedOops");
         vmOptFinalFlag(map, "UseLargePages");
         vmOptFinalFlag(map, "UseVectorizedMismatchIntrinsic");
-        vmOptFinalFlag(map, "ZGenerational");
     }

     /**