diff --git a/doc/building.md b/doc/building.md
index 99bc509dc70d0..04db6e94d1c12 100644
--- a/doc/building.md
+++ b/doc/building.md
@@ -680,9 +680,9 @@ At least version 3.2 of GNU Bash must be used.
### Graphviz and Pandoc
-In order to build the full docs (see the `--enable-full-docs`
-configure option) [Graphviz](https://www.graphviz.org) and
-[Pandoc](https://pandoc.org) are required. Any recent versions should
+In order to build the man pages and the full docs (see the `--enable-full-docs`
+configure option), [Pandoc](https://pandoc.org) is required. The full docs
+also require [Graphviz](https://www.graphviz.org). Any recent versions should
work. For reference, and subject to change, Oracle builds use Graphviz
9.0.0 and Pandoc 2.19.2.
diff --git a/make/Images.gmk b/make/Images.gmk
index 5f987a2f71a7d..c5d0ef11b5d2e 100644
--- a/make/Images.gmk
+++ b/make/Images.gmk
@@ -29,6 +29,7 @@ include $(SPEC)
include MakeBase.gmk
include CopyFiles.gmk
+include DebugInfoUtils.gmk
include Execute.gmk
include Modules.gmk
include Utils.gmk
diff --git a/make/StaticLibs.gmk b/make/StaticLibs.gmk
index 78918c456eed9..cfca2a774113d 100644
--- a/make/StaticLibs.gmk
+++ b/make/StaticLibs.gmk
@@ -29,6 +29,7 @@ include $(SPEC)
include MakeBase.gmk
include CopyFiles.gmk
+include DebugInfoUtils.gmk
include Modules.gmk
include modules/LauncherCommon.gmk
diff --git a/make/autoconf/boot-jdk.m4 b/make/autoconf/boot-jdk.m4
index d729012ad6a6f..d39e6e75a94c1 100644
--- a/make/autoconf/boot-jdk.m4
+++ b/make/autoconf/boot-jdk.m4
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -470,7 +470,7 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
# Maximum amount of heap memory.
JVM_HEAP_LIMIT_32="768"
# Running a 64 bit JVM allows for and requires a bigger heap
- JVM_HEAP_LIMIT_64="1600"
+ JVM_HEAP_LIMIT_64="2048"
JVM_HEAP_LIMIT_GLOBAL=`expr $MEMORY_SIZE / 2`
if test "$JVM_HEAP_LIMIT_GLOBAL" -lt "$JVM_HEAP_LIMIT_32"; then
JVM_HEAP_LIMIT_32=$JVM_HEAP_LIMIT_GLOBAL
diff --git a/make/autoconf/flags-cflags.m4 b/make/autoconf/flags-cflags.m4
index 57654514eb64b..f78ccfe376214 100644
--- a/make/autoconf/flags-cflags.m4
+++ b/make/autoconf/flags-cflags.m4
@@ -302,7 +302,7 @@ AC_DEFUN([FLAGS_SETUP_QUALITY_CHECKS],
AC_DEFUN([FLAGS_SETUP_OPTIMIZATION],
[
- if test "x$TOOLCHAIN_TYPE" = xgcc; then
+ if test "x$TOOLCHAIN_TYPE" = xgcc || test "x$TOOLCHAIN_TYPE" = xclang; then
C_O_FLAG_HIGHEST_JVM="-O3"
C_O_FLAG_HIGHEST="-O3"
C_O_FLAG_HI="-O3"
@@ -311,6 +311,13 @@ AC_DEFUN([FLAGS_SETUP_OPTIMIZATION],
C_O_FLAG_DEBUG="-O0"
C_O_FLAG_DEBUG_JVM="-O0"
C_O_FLAG_NONE="-O0"
+
+ if test "x$TOOLCHAIN_TYPE" = xclang && test "x$OPENJDK_TARGET_OS" = xaix; then
+ C_O_FLAG_HIGHEST_JVM="${C_O_FLAG_HIGHEST_JVM} -finline-functions"
+ C_O_FLAG_HIGHEST="${C_O_FLAG_HIGHEST} -finline-functions"
+ C_O_FLAG_HI="${C_O_FLAG_HI} -finline-functions"
+ fi
+
# -D_FORTIFY_SOURCE=2 hardening option needs optimization (at least -O1) enabled
# set for lower O-levels -U_FORTIFY_SOURCE to overwrite previous settings
if test "x$OPENJDK_TARGET_OS" = xlinux -a "x$DEBUG_LEVEL" = "xfastdebug"; then
@@ -331,21 +338,6 @@ AC_DEFUN([FLAGS_SETUP_OPTIMIZATION],
C_O_FLAG_DEBUG_JVM="${C_O_FLAG_DEBUG_JVM} ${DISABLE_FORTIFY_CFLAGS}"
C_O_FLAG_NONE="${C_O_FLAG_NONE} ${DISABLE_FORTIFY_CFLAGS}"
fi
- elif test "x$TOOLCHAIN_TYPE" = xclang; then
- if test "x$OPENJDK_TARGET_OS" = xaix; then
- C_O_FLAG_HIGHEST_JVM="-O3 -finline-functions"
- C_O_FLAG_HIGHEST="-O3 -finline-functions"
- C_O_FLAG_HI="-O3 -finline-functions"
- else
- C_O_FLAG_HIGHEST_JVM="-O3"
- C_O_FLAG_HIGHEST="-O3"
- C_O_FLAG_HI="-O3"
- fi
- C_O_FLAG_NORM="-O2"
- C_O_FLAG_DEBUG_JVM="-O0"
- C_O_FLAG_SIZE="-Os"
- C_O_FLAG_DEBUG="-O0"
- C_O_FLAG_NONE="-O0"
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
C_O_FLAG_HIGHEST_JVM="-O2 -Oy-"
C_O_FLAG_HIGHEST="-O2"
diff --git a/make/autoconf/jdk-options.m4 b/make/autoconf/jdk-options.m4
index 61638ce5a2c7f..c09f581688ca0 100644
--- a/make/autoconf/jdk-options.m4
+++ b/make/autoconf/jdk-options.m4
@@ -121,7 +121,7 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_OPTIONS],
if test "x$DOT" != "x"; then
AC_MSG_RESULT([yes])
else
- AC_MSG_RESULT([no, cannot generate full docs])
+ AC_MSG_RESULT([no, cannot generate full docs or man pages])
FULL_DOCS_AVAILABLE=false
fi
@@ -129,7 +129,7 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_OPTIONS],
if test "x$ENABLE_PANDOC" = "xtrue"; then
AC_MSG_RESULT([yes])
else
- AC_MSG_RESULT([no, cannot generate full docs])
+ AC_MSG_RESULT([no, cannot generate full docs or man pages])
FULL_DOCS_AVAILABLE=false
fi
diff --git a/make/common/DebugInfoUtils.gmk b/make/common/DebugInfoUtils.gmk
new file mode 100644
index 0000000000000..69d6c24b5e037
--- /dev/null
+++ b/make/common/DebugInfoUtils.gmk
@@ -0,0 +1,58 @@
+#
+# Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation. Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+ifeq ($(_MAKEBASE_GMK), )
+ $(error You must include MakeBase.gmk prior to including DebugInfoUtils.gmk)
+endif
+
+################################################################################
+#
+# Common debuginfo utility functions
+#
+################################################################################
+
+################################################################################
+# Find native debuginfo files in a directory
+#
+# Param 1 - dir to find debuginfo files in
+FindDebuginfoFiles = \
+ $(wildcard $(addprefix $1/*, $(DEBUGINFO_SUFFIXES)) \
+ $(addprefix $1/*/*, $(DEBUGINFO_SUFFIXES)) \
+ $(addprefix $1/*/*/*, $(DEBUGINFO_SUFFIXES)))
+
+# Pick the correct debug info files to copy, either zipped or not.
+ifeq ($(ZIP_EXTERNAL_DEBUG_SYMBOLS), true)
+ DEBUGINFO_SUFFIXES += .diz
+else
+ DEBUGINFO_SUFFIXES := .debuginfo .pdb .map
+ # On Macosx, if debug symbols have not been zipped, find all files inside *.dSYM
+ # dirs.
+ ifeq ($(call isTargetOs, macosx), true)
+ $(call FillFindCache, \
+ $(SUPPORT_OUTPUTDIR)/modules_libs $(SUPPORT_OUTPUTDIR)/modules_cmds)
+ FindDebuginfoFiles = \
+ $(if $(wildcard $1), $(call containing, .dSYM/, $(call FindFiles, $1)))
+ endif
+endif
diff --git a/make/common/FileUtils.gmk b/make/common/FileUtils.gmk
index d546ab94a58f0..d3cc4872ebb8f 100644
--- a/make/common/FileUtils.gmk
+++ b/make/common/FileUtils.gmk
@@ -307,26 +307,3 @@ ifeq ($(DISABLE_CACHE_FIND), true)
else
FindFiles = $(CacheFindFiles)
endif
-
-# Find native debuginfo files in a directory
-#
-# Param 1 - dir to find debuginfo files in
-FindDebuginfoFiles = \
- $(wildcard $(addprefix $1/*, $(DEBUGINFO_SUFFIXES)) \
- $(addprefix $1/*/*, $(DEBUGINFO_SUFFIXES)) \
- $(addprefix $1/*/*/*, $(DEBUGINFO_SUFFIXES)))
-
-# Pick the correct debug info files to copy, either zipped or not.
-ifeq ($(ZIP_EXTERNAL_DEBUG_SYMBOLS), true)
- DEBUGINFO_SUFFIXES += .diz
-else
- DEBUGINFO_SUFFIXES := .debuginfo .pdb .map
- # On Macosx, if debug symbols have not been zipped, find all files inside *.dSYM
- # dirs.
- ifeq ($(call isTargetOs, macosx), true)
- $(call FillFindCache, \
- $(SUPPORT_OUTPUTDIR)/modules_libs $(SUPPORT_OUTPUTDIR)/modules_cmds)
- FindDebuginfoFiles = \
- $(if $(wildcard $1), $(call containing, .dSYM/, $(call FindFiles, $1)))
- endif
-endif
diff --git a/make/common/modules/LauncherCommon.gmk b/make/common/modules/LauncherCommon.gmk
index 77f39457b4c2d..98e110a0a86cf 100644
--- a/make/common/modules/LauncherCommon.gmk
+++ b/make/common/modules/LauncherCommon.gmk
@@ -192,9 +192,7 @@ ifeq ($(call isTargetOsType, unix)+$(MAKEFILE_PREFIX), true+Launcher)
MAN_FILES_MD := $(wildcard $(addsuffix /*.md, $(call FindModuleManDirs, $(MODULE))))
ifneq ($(MAN_FILES_MD), )
- ifeq ($(ENABLE_PANDOC), false)
- $(info Warning: pandoc not found. Not generating man pages)
- else
+ ifeq ($(ENABLE_PANDOC), true)
# Create dynamic man pages from markdown using pandoc. We need
# PANDOC_TROFF_MANPAGE_FILTER, a wrapper around
# PANDOC_TROFF_MANPAGE_FILTER_JAVASCRIPT. This is created by buildtools-jdk.
diff --git a/make/conf/version-numbers.conf b/make/conf/version-numbers.conf
index 055f9ca886618..383b7533dee5f 100644
--- a/make/conf/version-numbers.conf
+++ b/make/conf/version-numbers.conf
@@ -26,17 +26,17 @@
# Default version, product, and vendor information to use,
# unless overridden by configure
-DEFAULT_VERSION_FEATURE=24
+DEFAULT_VERSION_FEATURE=25
DEFAULT_VERSION_INTERIM=0
DEFAULT_VERSION_UPDATE=0
DEFAULT_VERSION_PATCH=0
DEFAULT_VERSION_EXTRA1=0
DEFAULT_VERSION_EXTRA2=0
DEFAULT_VERSION_EXTRA3=0
-DEFAULT_VERSION_DATE=2025-03-18
-DEFAULT_VERSION_CLASSFILE_MAJOR=68 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
+DEFAULT_VERSION_DATE=2025-09-16
+DEFAULT_VERSION_CLASSFILE_MAJOR=69 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
DEFAULT_VERSION_CLASSFILE_MINOR=0
DEFAULT_VERSION_DOCS_API_SINCE=11
-DEFAULT_ACCEPTABLE_BOOT_VERSIONS="23 24"
-DEFAULT_JDK_SOURCE_TARGET_VERSION=24
+DEFAULT_ACCEPTABLE_BOOT_VERSIONS="23 24 25"
+DEFAULT_JDK_SOURCE_TARGET_VERSION=25
DEFAULT_PROMOTED_VERSION_PRE=ea
diff --git a/make/hotspot/lib/JvmFeatures.gmk b/make/hotspot/lib/JvmFeatures.gmk
index b94031515f79e..09a48508effa8 100644
--- a/make/hotspot/lib/JvmFeatures.gmk
+++ b/make/hotspot/lib/JvmFeatures.gmk
@@ -174,6 +174,12 @@ ifeq ($(call check-jvm-feature, link-time-opt), true)
-fno-fat-lto-objects
JVM_LDFLAGS_FEATURES += $(CXX_O_FLAG_HIGHEST_JVM) -flto=auto \
-fuse-linker-plugin -fno-strict-aliasing
+ else ifeq ($(call isCompiler, clang), true)
+ JVM_CFLAGS_FEATURES += -flto -fno-strict-aliasing
+ ifeq ($(call isBuildOs, aix), true)
+ JVM_CFLAGS_FEATURES += -ffat-lto-objects
+ endif
+ JVM_LDFLAGS_FEATURES += $(CXX_O_FLAG_HIGHEST_JVM) -flto -fno-strict-aliasing
else ifeq ($(call isCompiler, microsoft), true)
JVM_CFLAGS_FEATURES += -GL
JVM_LDFLAGS_FEATURES += -LTCG:INCREMENTAL
diff --git a/make/ide/xcode/hotspot/CreateXcodeProject.gmk b/make/ide/xcode/hotspot/CreateXcodeProject.gmk
index db8f7f401eff4..1f92ba0716f1e 100644
--- a/make/ide/xcode/hotspot/CreateXcodeProject.gmk
+++ b/make/ide/xcode/hotspot/CreateXcodeProject.gmk
@@ -42,7 +42,7 @@ ifeq ($(call isTargetOs, macosx), true)
PROJECT_FILE_NAME := hotspot.xcodeproj
COMPILE_COMMAND_FILE := $(OUTPUTDIR)/compile_commands.json
- LINKER_FLAGS_FILE := $(MAKESUPPORT_OUTPUTDIR)/compile-commands/jvm-ldflags.txt
+ LINKER_FLAGS_FILE := $(MAKESUPPORT_OUTPUTDIR)/compile-commands/LIBRARY_hotspot_variant-server_libjvm_libjvm-ldflags.txt
$(eval $(call SetupJavaCompilation, BUILD_PROJECT_CREATOR, \
TARGET_RELEASE := $(TARGET_RELEASE_BOOTJDK), \
@@ -60,7 +60,7 @@ ifeq ($(call isTargetOs, macosx), true)
XCODE_PROJ_DEBUG_OPTION := -d
endif
- XCODE_PROJ_VARDEPS := $(WORKSPACE_ROOT) $(IDE_OUTPUTDIR) \
+ XCODE_PROJ_VARDEPS := $(TOPDIR) $(IDE_OUTPUTDIR) \
$(PROJECT_MAKER_DIR)/data $(COMPILE_COMMAND_FILE) $(LINKER_FLAGS_FILE)
XCODE_PROJ_VARDEPS_FILE := $(call DependOnVariable, XCODE_PROJ_VARDEPS, \
$(TOOLS_OUTPUTDIR)/xcodeproj.vardeps)
@@ -70,7 +70,7 @@ ifeq ($(call isTargetOs, macosx), true)
DEPS := $(BUILD_PROJECT_CREATOR) $(COMPILE_COMMAND_FILE) \
$(LINKER_FLAGS_FILE) $(XCODE_PROJ_VARDEPS_FILE), \
OUTPUT_DIR := $(TOOLS_OUTPUTDIR), \
- COMMAND := $(PROJECT_CREATOR_TOOL) $(WORKSPACE_ROOT) $(IDE_OUTPUTDIR) \
+ COMMAND := $(PROJECT_CREATOR_TOOL) $(TOPDIR) $(IDE_OUTPUTDIR) \
$(PROJECT_MAKER_DIR)/data $(COMPILE_COMMAND_FILE) \
$(LINKER_FLAGS_FILE) $(XCODE_PROJ_DEBUG_OPTION), \
))
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index 66742c1c82e80..a836d71205e95 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -5305,7 +5305,7 @@ MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
if (operand_valid_for_logical_immediate(
/*is32*/false, (uint64_t)CompressedKlassPointers::base())) {
const size_t range = CompressedKlassPointers::klass_range_end() - CompressedKlassPointers::base();
- const uint64_t range_mask = right_n_bits(ceil_log2(range));
+ const uint64_t range_mask = right_n_bits(log2i_ceil(range));
if (((uint64_t)CompressedKlassPointers::base() & range_mask) == 0) {
return (_klass_decode_mode = KlassDecodeXor);
}
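
The `KlassDecodeXor` check above relies on the base address and the narrow-klass offset occupying disjoint bit ranges, so XOR-ing the offset in equals adding it. A minimal standalone sketch of that invariant (the base and range constants are hypothetical, not HotSpot's):

```cpp
#include <cassert>
#include <cstdint>
#include <initializer_list>

// Smallest n with (1 << n) >= x; stands in for HotSpot's log2i_ceil.
static int log2i_ceil_sketch(uint64_t x) {
  int n = 0;
  while ((uint64_t{1} << n) < x) ++n;
  return n;
}

int main() {
  const uint64_t base  = uint64_t{1} << 33;   // hypothetical compressed-klass base
  const uint64_t range = 512 * 1024 * 1024;   // hypothetical klass range size
  const uint64_t range_mask = (uint64_t{1} << log2i_ceil_sketch(range)) - 1;

  if ((base & range_mask) == 0) {             // the mode-selection test above
    for (uint64_t off : {uint64_t{0}, uint64_t{4096}, range - 8}) {
      assert((base ^ off) == base + off);     // decode-by-XOR is exact
    }
  }
  return 0;
}
```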
diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
index 44b806834f987..07c5a940a5091 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
@@ -785,7 +785,7 @@ void MacroAssembler::emit_static_call_stub() {
// Jump to the entry point of the c2i stub.
int32_t offset = 0;
- movptr(t1, 0, offset, t0); // lui + lui + slli + add
+ movptr2(t1, 0, offset, t0); // lui + lui + slli + add
jr(t1, offset);
}
diff --git a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp
index 2e56c092a79b5..16835c8303931 100644
--- a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp
+++ b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -79,7 +79,7 @@ ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity)
_initialized(false) {
// Reserve address space for backing memory
- _base = (uintptr_t)os::reserve_memory(max_capacity);
+ _base = (uintptr_t)os::reserve_memory(max_capacity, false, mtJavaHeap);
if (_base == 0) {
// Failed
ZInitialize::error("Failed to reserve address space for backing memory");
diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index d159118016a05..ed832f37bdf40 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -4582,7 +4582,7 @@ static void workaround_expand_exec_shield_cs_limit() {
*/
char* hint = (char*)(os::Linux::initial_thread_stack_bottom() -
(StackOverflow::stack_guard_zone_size() + page_size));
- char* codebuf = os::attempt_reserve_memory_at(hint, page_size);
+ char* codebuf = os::attempt_reserve_memory_at(hint, page_size, false, mtThread);
if (codebuf == nullptr) {
// JDK-8197429: There may be a stack gap of one megabyte between
@@ -4590,15 +4590,13 @@ static void workaround_expand_exec_shield_cs_limit() {
// Linux kernel workaround for CVE-2017-1000364. If we failed to
// map our codebuf, try again at an address one megabyte lower.
hint -= 1 * M;
- codebuf = os::attempt_reserve_memory_at(hint, page_size);
+ codebuf = os::attempt_reserve_memory_at(hint, page_size, false, mtThread);
}
if ((codebuf == nullptr) || (!os::commit_memory(codebuf, page_size, true))) {
return; // No matter, we tried, best effort.
}
- MemTracker::record_virtual_memory_tag((address)codebuf, mtInternal);
-
log_info(os)("[CS limit NX emulation work-around, exec code at: %p]", codebuf);
// Some code to exec: the 'ret' instruction
diff --git a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp
index 7702dbd17ad69..1e3602d08f428 100644
--- a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp
+++ b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp
@@ -501,6 +501,7 @@ static inline void atomic_copy64(const volatile void *src, volatile void *dst) {
}
extern "C" {
+ // needs local assembler label '1:' to avoid trouble when using link-time optimization
int SpinPause() {
// We don't use StubRoutines::aarch64::spin_wait stub in order to
// avoid a costly call to os::current_thread_enable_wx() on MacOS.
@@ -523,14 +524,14 @@ extern "C" {
// to entry for case SpinWait::NOP
" add %[d], %[d], %[o] \n"
" br %[d] \n"
- " b SpinPause_return \n" // case SpinWait::NONE (-1)
+ " b 1f \n" // case SpinWait::NONE (-1)
" nop \n" // padding
" nop \n" // case SpinWait::NOP ( 0)
- " b SpinPause_return \n"
+ " b 1f \n"
" isb \n" // case SpinWait::ISB ( 1)
- " b SpinPause_return \n"
+ " b 1f \n"
" yield \n" // case SpinWait::YIELD ( 2)
- "SpinPause_return: \n"
+ "1: \n"
: [d]"=&r"(br_dst)
: [o]"r"(off)
: "memory");
diff --git a/src/hotspot/share/cds/filemap.cpp b/src/hotspot/share/cds/filemap.cpp
index b7b08009dcc44..594d8817322fd 100644
--- a/src/hotspot/share/cds/filemap.cpp
+++ b/src/hotspot/share/cds/filemap.cpp
@@ -2731,8 +2731,8 @@ ClassFileStream* FileMapInfo::get_stream_from_class_loader(Handle class_loader,
const char* file_name,
TRAPS) {
JavaValue result(T_OBJECT);
- TempNewSymbol class_name_sym = SymbolTable::new_symbol(file_name);
- Handle ext_class_name = java_lang_String::externalize_classname(class_name_sym, CHECK_NULL);
+ oop class_name = java_lang_String::create_oop_from_str(file_name, THREAD);
+ Handle h_class_name = Handle(THREAD, class_name);
// byte[] ClassLoader.getResourceAsByteArray(String name)
JavaCalls::call_virtual(&result,
@@ -2740,7 +2740,7 @@ ClassFileStream* FileMapInfo::get_stream_from_class_loader(Handle class_loader,
vmClasses::ClassLoader_klass(),
vmSymbols::getResourceAsByteArray_name(),
vmSymbols::getResourceAsByteArray_signature(),
- ext_class_name,
+ h_class_name,
CHECK_NULL);
assert(result.get_type() == T_OBJECT, "just checking");
oop obj = result.get_oop();
diff --git a/src/hotspot/share/cds/metaspaceShared.cpp b/src/hotspot/share/cds/metaspaceShared.cpp
index ba17ccddb5219..f21b9c9060d50 100644
--- a/src/hotspot/share/cds/metaspaceShared.cpp
+++ b/src/hotspot/share/cds/metaspaceShared.cpp
@@ -281,7 +281,7 @@ void MetaspaceShared::initialize_for_static_dump() {
SharedBaseAddress = (size_t)_requested_base_address;
size_t symbol_rs_size = LP64_ONLY(3 * G) NOT_LP64(128 * M);
- _symbol_rs = ReservedSpace(symbol_rs_size);
+ _symbol_rs = ReservedSpace(symbol_rs_size, mtClassShared);
if (!_symbol_rs.is_reserved()) {
log_error(cds)("Unable to reserve memory for symbols: " SIZE_FORMAT " bytes.", symbol_rs_size);
MetaspaceShared::unrecoverable_writing_error();
diff --git a/src/hotspot/share/cds/runTimeClassInfo.cpp b/src/hotspot/share/cds/runTimeClassInfo.cpp
index 0acd89b5bce62..e2d41cd1de261 100644
--- a/src/hotspot/share/cds/runTimeClassInfo.cpp
+++ b/src/hotspot/share/cds/runTimeClassInfo.cpp
@@ -76,10 +76,13 @@ void RunTimeClassInfo::init(DumpTimeClassInfo& info) {
}
InstanceKlass* RunTimeClassInfo::klass() const {
- if (ArchiveBuilder::is_active() && ArchiveBuilder::current()->is_in_buffer_space((address)this)) {
- return ArchiveBuilder::current()->offset_to_buffered(_klass_offset);
- } else {
+ if (MetaspaceShared::is_in_shared_metaspace(this)) {
+ // is inside an mmapped CDS archive.
return ArchiveUtils::offset_to_archived_address(_klass_offset);
+ } else {
+ // is a temporary copy of a RunTimeClassInfo that's being initialized
+ // by the ArchiveBuilder.
+ return ArchiveBuilder::current()->offset_to_buffered(_klass_offset);
}
}
diff --git a/src/hotspot/share/classfile/classFileParser.cpp b/src/hotspot/share/classfile/classFileParser.cpp
index f0586cd7bcc32..a26831cd78328 100644
--- a/src/hotspot/share/classfile/classFileParser.cpp
+++ b/src/hotspot/share/classfile/classFileParser.cpp
@@ -153,6 +153,8 @@
#define JAVA_24_VERSION 68
+#define JAVA_25_VERSION 69
+
void ClassFileParser::set_class_bad_constant_seen(short bad_constant) {
assert((bad_constant == JVM_CONSTANT_Module ||
bad_constant == JVM_CONSTANT_Package) && _major_version >= JAVA_9_VERSION,
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index daf7eb5371baf..188ce354dd552 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -86,6 +86,7 @@
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/oopStorageParState.hpp"
+#include "gc/shared/partialArrayState.hpp"
#include "gc/shared/referenceProcessor.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
@@ -1165,6 +1166,7 @@ G1CollectedHeap::G1CollectedHeap() :
_cm_thread(nullptr),
_cr(nullptr),
_task_queues(nullptr),
+ _partial_array_state_manager(nullptr),
_ref_processor_stw(nullptr),
_is_alive_closure_stw(this),
_is_subject_to_discovery_stw(this),
@@ -1198,9 +1200,13 @@ G1CollectedHeap::G1CollectedHeap() :
_task_queues->register_queue(i, q);
}
+ _partial_array_state_manager = new PartialArrayStateManager(n_queues);
+
_gc_tracer_stw->initialize();
+}
- guarantee(_task_queues != nullptr, "task_queues allocation failure.");
+PartialArrayStateManager* G1CollectedHeap::partial_array_state_manager() const {
+ return _partial_array_state_manager;
}
G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
index 0f8bf9ffd2b4f..1b840392769cd 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -82,6 +82,7 @@ class GCMemoryManager;
class G1HeapRegion;
class MemoryPool;
class nmethod;
+class PartialArrayStateManager;
class ReferenceProcessor;
class STWGCTimer;
class WorkerThreads;
@@ -807,8 +808,9 @@ class G1CollectedHeap : public CollectedHeap {
// The concurrent refiner.
G1ConcurrentRefine* _cr;
- // The parallel task queues
- G1ScannerTasksQueueSet *_task_queues;
+ // Reusable parallel task queues and partial array manager.
+ G1ScannerTasksQueueSet* _task_queues;
+ PartialArrayStateManager* _partial_array_state_manager;
// ("Weak") Reference processing support.
//
@@ -874,6 +876,8 @@ class G1CollectedHeap : public CollectedHeap {
G1ScannerTasksQueueSet* task_queues() const;
G1ScannerTasksQueue* task_queue(uint i) const;
+ PartialArrayStateManager* partial_array_state_manager() const;
+
// Create a G1CollectedHeap.
// Must call the initialize method afterwards.
// May not return if something goes wrong.
diff --git a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp
index 9e48a16018e30..4ec708ae09392 100644
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp
@@ -105,6 +105,7 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
_gc_par_phases[UpdateDerivedPointers] = new WorkerDataArray<double>("UpdateDerivedPointers", "Update Derived Pointers (ms):", max_gc_threads);
#endif
_gc_par_phases[EagerlyReclaimHumongousObjects] = new WorkerDataArray<double>("EagerlyReclaimHumongousObjects", "Eagerly Reclaim Humongous Objects (ms):", max_gc_threads);
+ _gc_par_phases[ResetPartialArrayStateManager] = new WorkerDataArray<double>("ResetPartialArrayStateManager", "Reset Partial Array State Manager (ms):", max_gc_threads);
_gc_par_phases[ProcessEvacuationFailedRegions] = new WorkerDataArray<double>("ProcessEvacuationFailedRegions", "Process Evacuation Failed Regions (ms):", max_gc_threads);
_gc_par_phases[ScanHR]->create_thread_work_items("Scanned Cards:", ScanHRScannedCards);
@@ -517,6 +518,7 @@ double G1GCPhaseTimes::print_post_evacuate_collection_set(bool evacuation_failed
debug_phase(_gc_par_phases[UpdateDerivedPointers], 1);
#endif
debug_phase(_gc_par_phases[EagerlyReclaimHumongousObjects], 1);
+ trace_phase(_gc_par_phases[ResetPartialArrayStateManager]);
if (G1CollectedHeap::heap()->should_sample_collection_set_candidates()) {
debug_phase(_gc_par_phases[SampleCollectionSetCandidates], 1);
diff --git a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp
index a54ef431abd2b..f3bc0efafb929 100644
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp
@@ -87,6 +87,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
UpdateDerivedPointers,
#endif
EagerlyReclaimHumongousObjects,
+ ResetPartialArrayStateManager,
ProcessEvacuationFailedRegions,
ResetMarkingState,
NoteStartOfMark,
diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
index ad924b2fad49f..f3b7e87bc784b 100644
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
@@ -61,8 +61,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
uint worker_id,
uint num_workers,
G1CollectionSet* collection_set,
- G1EvacFailureRegions* evac_failure_regions,
- PartialArrayStateAllocator* pas_allocator)
+ G1EvacFailureRegions* evac_failure_regions)
: _g1h(g1h),
_task_queue(g1h->task_queue(worker_id)),
_rdc_local_qset(rdcqs),
@@ -81,7 +80,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
_surviving_young_words(nullptr),
_surviving_words_length(collection_set->young_region_length() + 1),
_old_gen_is_full(false),
- _partial_array_state_allocator(pas_allocator),
+ _partial_array_state_allocator(g1h->partial_array_state_manager()),
_partial_array_stepper(num_workers, ParGCArrayScanChunk),
_string_dedup_requests(),
_max_num_optional_regions(collection_set->optional_region_length()),
@@ -254,7 +253,7 @@ void G1ParScanThreadState::do_partial_array(PartialArrayState* state) {
checked_cast<int>(step._index),
checked_cast<int>(step._index + _partial_array_stepper.chunk_size()));
// Release reference to the state, now that we're done with it.
- _partial_array_state_allocator->release(_worker_id, state);
+ _partial_array_state_allocator.release(state);
}
MAYBE_INLINE_EVACUATION
@@ -277,11 +276,10 @@ void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
assert(((array_length - step._index) % _partial_array_stepper.chunk_size()) == 0,
"invariant");
PartialArrayState* state =
- _partial_array_state_allocator->allocate(_worker_id,
- from_obj, to_obj,
- step._index,
- array_length,
- step._ncreate);
+ _partial_array_state_allocator.allocate(from_obj, to_obj,
+ step._index,
+ array_length,
+ step._ncreate);
for (uint i = 0; i < step._ncreate; ++i) {
push_on_queue(ScannerTask(state));
}
@@ -601,8 +599,7 @@ G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id)
worker_id,
_num_workers,
_collection_set,
- _evac_failure_regions,
- &_partial_array_state_allocator);
+ _evac_failure_regions);
}
return _states[worker_id];
}
@@ -732,8 +729,7 @@ G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h,
_surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, collection_set->young_region_length() + 1, mtGC)),
_num_workers(num_workers),
_flushed(false),
- _evac_failure_regions(evac_failure_regions),
- _partial_array_state_allocator(num_workers)
+ _evac_failure_regions(evac_failure_regions)
{
for (uint i = 0; i < num_workers; ++i) {
_states[i] = nullptr;
diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp
index f61f993f0280f..27aa29ee30c98 100644
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp
@@ -84,7 +84,7 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
// Indicates whether in the last generation (old) there is no more space
// available for allocation.
bool _old_gen_is_full;
- PartialArrayStateAllocator* _partial_array_state_allocator;
+ PartialArrayStateAllocator _partial_array_state_allocator;
PartialArrayTaskStepper _partial_array_stepper;
StringDedup::Requests _string_dedup_requests;
@@ -124,8 +124,7 @@ class G1ParScanThreadState : public CHeapObj {
uint worker_id,
uint num_workers,
G1CollectionSet* collection_set,
- G1EvacFailureRegions* evac_failure_regions,
- PartialArrayStateAllocator* partial_array_state_allocator);
+ G1EvacFailureRegions* evac_failure_regions);
virtual ~G1ParScanThreadState();
void set_ref_discoverer(ReferenceDiscoverer* rd) { _scanner.set_ref_discoverer(rd); }
@@ -247,7 +246,6 @@ class G1ParScanThreadStateSet : public StackObj {
uint _num_workers;
bool _flushed;
G1EvacFailureRegions* _evac_failure_regions;
- PartialArrayStateAllocator _partial_array_state_allocator;
public:
G1ParScanThreadStateSet(G1CollectedHeap* g1h,
diff --git a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp
index 1d76a44f8f887..c72dcc9661821 100644
--- a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp
+++ b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp
@@ -42,6 +42,7 @@
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1YoungGCPostEvacuateTasks.hpp"
#include "gc/shared/bufferNode.hpp"
+#include "gc/shared/partialArrayState.hpp"
#include "jfr/jfrEvents.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
@@ -944,6 +945,25 @@ class G1PostEvacuateCollectionSetCleanupTask2::ResizeTLABsTask : public G1Abstra
}
};
+class G1PostEvacuateCollectionSetCleanupTask2::ResetPartialArrayStateManagerTask
+ : public G1AbstractSubTask
+{
+public:
+ ResetPartialArrayStateManagerTask()
+ : G1AbstractSubTask(G1GCPhaseTimes::ResetPartialArrayStateManager)
+ {}
+
+ double worker_cost() const override {
+ return AlmostNoWork;
+ }
+
+ void do_work(uint worker_id) override {
+ // This must be in phase2 cleanup, after phase1 has destroyed all of the
+ // associated allocators.
+ G1CollectedHeap::heap()->partial_array_state_manager()->reset();
+ }
+};
+
G1PostEvacuateCollectionSetCleanupTask2::G1PostEvacuateCollectionSetCleanupTask2(G1ParScanThreadStateSet* per_thread_states,
G1EvacInfo* evacuation_info,
G1EvacFailureRegions* evac_failure_regions) :
@@ -955,6 +975,7 @@ G1PostEvacuateCollectionSetCleanupTask2::G1PostEvacuateCollectionSetCleanupTask2
if (G1CollectedHeap::heap()->has_humongous_reclaim_candidates()) {
add_serial_task(new EagerlyReclaimHumongousObjectsTask());
}
+ add_serial_task(new ResetPartialArrayStateManagerTask());
if (evac_failure_regions->has_regions_evac_failed()) {
add_parallel_task(new ProcessEvacuationFailedRegionsTask(evac_failure_regions));
diff --git a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.hpp b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.hpp
index 96eeaf27de19c..ad850af2eaceb 100644
--- a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.hpp
+++ b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.hpp
@@ -58,6 +58,7 @@ class G1PostEvacuateCollectionSetCleanupTask1 : public G1BatchedTask {
// - Redirty Logged Cards
// - Free Collection Set
// - Resize TLABs
+// - Reset the reusable PartialArrayStateManager
class G1PostEvacuateCollectionSetCleanupTask2 : public G1BatchedTask {
class EagerlyReclaimHumongousObjectsTask;
#if COMPILER2_OR_JVMCI
@@ -68,6 +69,7 @@ class G1PostEvacuateCollectionSetCleanupTask2 : public G1BatchedTask {
class RedirtyLoggedCardsTask;
class FreeCollectionSetTask;
class ResizeTLABsTask;
+ class ResetPartialArrayStateManagerTask;
public:
G1PostEvacuateCollectionSetCleanupTask2(G1ParScanThreadStateSet* per_thread_states,
diff --git a/src/hotspot/share/gc/parallel/objectStartArray.cpp b/src/hotspot/share/gc/parallel/objectStartArray.cpp
index ef9de7abfd771..2a0f12ec70e72 100644
--- a/src/hotspot/share/gc/parallel/objectStartArray.cpp
+++ b/src/hotspot/share/gc/parallel/objectStartArray.cpp
@@ -47,11 +47,10 @@ void ObjectStartArray::initialize(MemRegion reserved_region) {
// Do not use large-pages for the backing store. The one large page region
// will be used for the heap proper.
- ReservedSpace backing_store(bytes_to_reserve);
+ ReservedSpace backing_store(bytes_to_reserve, mtGC);
if (!backing_store.is_reserved()) {
vm_exit_during_initialization("Could not reserve space for ObjectStartArray");
}
- MemTracker::record_virtual_memory_tag(backing_store.base(), mtGC);
// We do not commit any memory initially
_virtual_space.initialize(backing_store);
diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.cpp b/src/hotspot/share/gc/parallel/psPromotionManager.cpp
index c740b1488d7b7..525285471c7d9 100644
--- a/src/hotspot/share/gc/parallel/psPromotionManager.cpp
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.cpp
@@ -51,7 +51,7 @@ PSPromotionManager::PSScannerTasksQueueSet* PSPromotionManager::_stack_array_dep
PreservedMarksSet* PSPromotionManager::_preserved_marks_set = nullptr;
PSOldGen* PSPromotionManager::_old_gen = nullptr;
MutableSpace* PSPromotionManager::_young_space = nullptr;
-PartialArrayStateAllocator* PSPromotionManager::_partial_array_state_allocator = nullptr;
+PartialArrayStateManager* PSPromotionManager::_partial_array_state_manager = nullptr;
void PSPromotionManager::initialize() {
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
@@ -61,21 +61,20 @@ void PSPromotionManager::initialize() {
const uint promotion_manager_num = ParallelGCThreads;
+ assert(_partial_array_state_manager == nullptr, "Attempt to initialize twice");
+ _partial_array_state_manager
+ = new PartialArrayStateManager(promotion_manager_num);
+
// To prevent false sharing, we pad the PSPromotionManagers
// and make sure that the first instance starts at a cache line.
assert(_manager_array == nullptr, "Attempt to initialize twice");
_manager_array = PaddedArray<PSPromotionManager, mtGC>::create_unfreeable(promotion_manager_num);
- assert(_partial_array_state_allocator == nullptr, "Attempt to initialize twice");
- _partial_array_state_allocator
- = new PartialArrayStateAllocator(ParallelGCThreads);
-
- _stack_array_depth = new PSScannerTasksQueueSet(ParallelGCThreads);
+ _stack_array_depth = new PSScannerTasksQueueSet(promotion_manager_num);
// Create and register the PSPromotionManager(s) for the worker threads.
for(uint i=0; i<promotion_manager_num; i++) {
stack_array_depth()->register_queue(i, _manager_array[i].claimed_stack_depth());
- _manager_array[i]._partial_array_state_allocator_index = i;
}
// The VMThread gets its own PSPromotionManager, which is not available
// for work stealing.
@@ -187,7 +186,8 @@ void PSPromotionManager::reset_stats() {
// Most members are initialized either by initialize() or reset().
PSPromotionManager::PSPromotionManager()
- : _partial_array_stepper(ParallelGCThreads, ParGCArrayScanChunk)
+ : _partial_array_state_allocator(_partial_array_state_manager),
+ _partial_array_stepper(ParallelGCThreads, ParGCArrayScanChunk)
{
// We set the old lab's start array.
_old_lab.set_start_array(old_gen()->start_array());
@@ -198,9 +198,6 @@ PSPromotionManager::PSPromotionManager()
_target_stack_size = GCDrainStackTargetSize;
}
- // Initialize to a bad value; fixed by initialize().
- _partial_array_state_allocator_index = UINT_MAX;
-
// let's choose 1.5x the chunk size
_min_array_size_for_chunking = (3 * ParGCArrayScanChunk / 2);
@@ -317,7 +314,7 @@ void PSPromotionManager::process_array_chunk(PartialArrayState* state) {
process_array_chunk_work(state->destination(), start, end);
}
// Release reference to state, now that we're done with it.
- _partial_array_state_allocator->release(_partial_array_state_allocator_index, state);
+ _partial_array_state_allocator.release(state);
}
void PSPromotionManager::push_objArray(oop old_obj, oop new_obj) {
@@ -331,11 +328,10 @@ void PSPromotionManager::push_objArray(oop old_obj, oop new_obj) {
if (step._ncreate > 0) {
TASKQUEUE_STATS_ONLY(++_arrays_chunked);
PartialArrayState* state =
- _partial_array_state_allocator->allocate(_partial_array_state_allocator_index,
- old_obj, new_obj,
- step._index,
- array_length,
- step._ncreate);
+ _partial_array_state_allocator.allocate(old_obj, new_obj,
+ step._index,
+ array_length,
+ step._ncreate);
for (uint i = 0; i < step._ncreate; ++i) {
push_depth(ScannerTask(state));
}
diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.hpp b/src/hotspot/share/gc/parallel/psPromotionManager.hpp
index a69d975956dc3..a6de8623281c9 100644
--- a/src/hotspot/share/gc/parallel/psPromotionManager.hpp
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.hpp
@@ -28,6 +28,7 @@
#include "gc/parallel/psPromotionLAB.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/gcTrace.hpp"
+#include "gc/shared/partialArrayState.hpp"
#include "gc/shared/partialArrayTaskStepper.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/stringdedup/stringDedup.hpp"
@@ -50,8 +51,6 @@
class MutableSpace;
class PSOldGen;
class ParCompactionManager;
-class PartialArrayState;
-class PartialArrayStateAllocator;
class PSPromotionManager {
friend class PSScavenge;
@@ -88,9 +87,9 @@ class PSPromotionManager {
uint _target_stack_size;
- static PartialArrayStateAllocator* _partial_array_state_allocator;
+ static PartialArrayStateManager* _partial_array_state_manager;
+ PartialArrayStateAllocator _partial_array_state_allocator;
PartialArrayTaskStepper _partial_array_stepper;
- uint _partial_array_state_allocator_index;
uint _min_array_size_for_chunking;
PreservedMarks* _preserved_marks;
diff --git a/src/hotspot/share/gc/serial/serialBlockOffsetTable.cpp b/src/hotspot/share/gc/serial/serialBlockOffsetTable.cpp
index 31f18652c63d6..7ac0fcc8b53c5 100644
--- a/src/hotspot/share/gc/serial/serialBlockOffsetTable.cpp
+++ b/src/hotspot/share/gc/serial/serialBlockOffsetTable.cpp
@@ -37,13 +37,11 @@ SerialBlockOffsetTable::SerialBlockOffsetTable(MemRegion reserved,
size_t init_word_size):
_reserved(reserved) {
size_t size = compute_size(reserved.word_size());
- ReservedSpace rs(size);
+ ReservedSpace rs(size, mtGC);
if (!rs.is_reserved()) {
vm_exit_during_initialization("Could not reserve enough space for heap offset array");
}
- MemTracker::record_virtual_memory_tag((address)rs.base(), mtGC);
-
if (!_vs.initialize(rs, 0)) {
vm_exit_during_initialization("Could not reserve enough space for heap offset array");
}
diff --git a/src/hotspot/share/gc/shared/partialArrayState.cpp b/src/hotspot/share/gc/shared/partialArrayState.cpp
index 48ef974ecfa53..60067c6547b86 100644
--- a/src/hotspot/share/gc/shared/partialArrayState.cpp
+++ b/src/hotspot/share/gc/shared/partialArrayState.cpp
@@ -52,27 +52,8 @@ void PartialArrayState::add_references(size_t count) {
assert(new_count >= count, "reference count overflow");
}
-class PartialArrayStateAllocator::Impl : public CHeapObj<mtGC> {
- struct FreeListEntry;
-
- Arena* _arenas;
- FreeListEntry** _free_lists;
- uint _num_workers;
-
+class PartialArrayStateAllocator::FreeListEntry {
public:
- Impl(uint num_workers);
- ~Impl();
-
- NONCOPYABLE(Impl);
-
- PartialArrayState* allocate(uint worker_id,
- oop src, oop dst,
- size_t index, size_t length,
- size_t initial_refcount);
- void release(uint worker_id, PartialArrayState* state);
-};
-
-struct PartialArrayStateAllocator::Impl::FreeListEntry {
FreeListEntry* _next;
FreeListEntry(FreeListEntry* next) : _next(next) {}
@@ -81,73 +62,80 @@ struct PartialArrayStateAllocator::Impl::FreeListEntry {
NONCOPYABLE(FreeListEntry);
};
-PartialArrayStateAllocator::Impl::Impl(uint num_workers)
- : _arenas(NEW_C_HEAP_ARRAY(Arena, num_workers, mtGC)),
- _free_lists(NEW_C_HEAP_ARRAY(FreeListEntry*, num_workers, mtGC)),
- _num_workers(num_workers)
-{
- for (uint i = 0; i < _num_workers; ++i) {
- ::new (&_arenas[i]) Arena(mtGC);
- _free_lists[i] = nullptr;
- }
-}
+PartialArrayStateAllocator::PartialArrayStateAllocator(PartialArrayStateManager* manager)
+ : _manager(manager),
+ _free_list(),
+ _arena(manager->register_allocator())
+{}
-PartialArrayStateAllocator::Impl::~Impl() {
- // We don't need to clean up the free lists. Deallocating the entries
+PartialArrayStateAllocator::~PartialArrayStateAllocator() {
+ // We don't need to clean up the free list. Deallocating the entries
// does nothing, since we're using arena allocation. Instead, leave it
- // to the arena destructor to release the memory.
- FREE_C_HEAP_ARRAY(FreeListEntry*, _free_lists);
- for (uint i = 0; i < _num_workers; ++i) {
- _arenas[i].~Arena();
- }
- FREE_C_HEAP_ARRAY(Arena*, _arenas);
+ // to the manager to release the memory.
+ // Inform the manager that an allocator is no longer in use.
+ _manager->release_allocator();
}
-PartialArrayState* PartialArrayStateAllocator::Impl::allocate(uint worker_id,
- oop src, oop dst,
- size_t index,
- size_t length,
- size_t initial_refcount) {
+PartialArrayState* PartialArrayStateAllocator::allocate(oop src, oop dst,
+ size_t index,
+ size_t length,
+ size_t initial_refcount) {
void* p;
- FreeListEntry* head = _free_lists[worker_id];
+ FreeListEntry* head = _free_list;
if (head == nullptr) {
- p = NEW_ARENA_OBJ(&_arenas[worker_id], PartialArrayState);
+ p = NEW_ARENA_OBJ(_arena, PartialArrayState);
} else {
- _free_lists[worker_id] = head->_next;
+ _free_list = head->_next;
head->~FreeListEntry();
p = head;
}
return ::new (p) PartialArrayState(src, dst, index, length, initial_refcount);
}
-void PartialArrayStateAllocator::Impl::release(uint worker_id, PartialArrayState* state) {
+void PartialArrayStateAllocator::release(PartialArrayState* state) {
size_t refcount = Atomic::sub(&state->_refcount, size_t(1), memory_order_release);
if (refcount != 0) {
assert(refcount + 1 != 0, "refcount underflow");
} else {
OrderAccess::acquire();
- state->~PartialArrayState();
- _free_lists[worker_id] = ::new (state) FreeListEntry(_free_lists[worker_id]);
+ // Don't need to call destructor; can't if not destructible.
+ static_assert(!std::is_destructible<PartialArrayState>::value, "expected");
+ _free_list = ::new (state) FreeListEntry(_free_list);
}
}
-PartialArrayStateAllocator::PartialArrayStateAllocator(uint num_workers)
- : _impl(new Impl(num_workers))
+PartialArrayStateManager::PartialArrayStateManager(uint max_allocators)
+ : _arenas(NEW_C_HEAP_ARRAY(Arena, max_allocators, mtGC)),
+ _max_allocators(max_allocators),
+ _registered_allocators(0)
+ DEBUG_ONLY(COMMA _released_allocators(0))
{}
-PartialArrayStateAllocator::~PartialArrayStateAllocator() {
- delete _impl;
+PartialArrayStateManager::~PartialArrayStateManager() {
+ reset();
+ FREE_C_HEAP_ARRAY(Arena, _arenas);
}
-PartialArrayState* PartialArrayStateAllocator::allocate(uint worker_id,
- oop src, oop dst,
- size_t index,
- size_t length,
- size_t initial_refcount) {
- return _impl->allocate(worker_id, src, dst, index, length, initial_refcount);
+Arena* PartialArrayStateManager::register_allocator() {
+ uint idx = Atomic::fetch_then_add(&_registered_allocators, 1u, memory_order_relaxed);
+ assert(idx < _max_allocators, "exceeded configured max number of allocators");
+ return ::new (&_arenas[idx]) Arena(mtGC);
}
-void PartialArrayStateAllocator::release(uint worker_id, PartialArrayState* state) {
- _impl->release(worker_id, state);
+#ifdef ASSERT
+void PartialArrayStateManager::release_allocator() {
+ uint old = Atomic::fetch_then_add(&_released_allocators, 1u, memory_order_relaxed);
+ assert(old < Atomic::load(&_registered_allocators), "too many releases");
}
+#endif // ASSERT
+void PartialArrayStateManager::reset() {
+ uint count = Atomic::load(&_registered_allocators);
+ assert(count == Atomic::load(&_released_allocators),
+ "some allocators still active");
+ for (uint i = 0; i < count; ++i) {
+ _arenas[i].~Arena();
+ }
+ Atomic::store(&_registered_allocators, 0u);
+ DEBUG_ONLY(Atomic::store(&_released_allocators, 0u);)
+}
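
For reference, the `release()` path above is the classic reference-count teardown: each decrement publishes that thread's writes (release), and the thread that drops the count to zero synchronizes with all of them (acquire) before recycling the state onto its free-list. A portable sketch with `std::atomic` standing in for HotSpot's `Atomic`/`OrderAccess` (the types and recycle hook are illustrative, not JDK code):

```cpp
#include <atomic>
#include <cstddef>

// Illustrative stand-in for PartialArrayState's counted release.
struct RefCounted {
  std::atomic<size_t> refcount{1};
};

template <typename RecycleFn>
void release(RefCounted* s, RecycleFn recycle) {
  // Release ordering: our writes to the guarded object happen-before the
  // final decrement observed by whichever thread frees it.
  size_t old_count = s->refcount.fetch_sub(1, std::memory_order_release);
  if (old_count == 1) {
    // Acquire fence: see every other releaser's writes before reuse.
    std::atomic_thread_fence(std::memory_order_acquire);
    recycle(s);  // e.g. push the memory onto a per-thread free-list
  }
}
```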
diff --git a/src/hotspot/share/gc/shared/partialArrayState.hpp b/src/hotspot/share/gc/shared/partialArrayState.hpp
index fb226e08665a0..3208c6d68077f 100644
--- a/src/hotspot/share/gc/shared/partialArrayState.hpp
+++ b/src/hotspot/share/gc/shared/partialArrayState.hpp
@@ -30,7 +30,9 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
+class Arena;
class PartialArrayStateAllocator;
+class PartialArrayStateManager;
// Instances of this class are used to represent processing progress for an
// array task in a taskqueue. When a sufficiently large array needs to be
@@ -52,8 +54,8 @@ class PartialArrayStateAllocator;
// referring to a given state that is added to a taskqueue must increase the
// reference count by one. When the processing of a task referring to a state
// is complete, the reference count must be decreased by one. When the
-// reference count reaches zero the state should be released to the allocator
-// for later reuse.
+// reference count reaches zero the state is released to the allocator for
+// later reuse.
class PartialArrayState {
oop _source;
oop _destination;
@@ -66,11 +68,13 @@ class PartialArrayState {
PartialArrayState(oop src, oop dst,
size_t index, size_t length,
size_t initial_refcount);
- ~PartialArrayState() = default;
+
+public:
+ // Deleted to require management by allocator object.
+ ~PartialArrayState() = delete;
NONCOPYABLE(PartialArrayState);
-public:
// Add count references, one per referring task being added to a taskqueue.
void add_references(size_t count);
@@ -91,39 +95,39 @@ class PartialArrayState {
// This class provides memory management for PartialArrayStates.
//
-// States are initially allocated from a set of arenas owned by the allocator.
-// This allows the entire set of allocated states to be discarded without the
-// need to keep track of or find them under some circumstances. For example,
-// if G1 concurrent marking is aborted and needs to restart because of a full
-// marking queue, the queue doesn't need to be searched for tasks referring to
-// states to allow releasing them. Instead the queue contents can just be
-// discarded, and the memory for the no longer referenced states will
-// eventually be reclaimed when the arenas are reset.
+// States are initially arena allocated from the manager, using a per-thread
+// allocator. This allows the entire set of allocated states to be discarded
+// without the need to keep track of or find them under some circumstances.
+// For example, if G1 concurrent marking is aborted and needs to restart
+// because of a full marking queue, the queue doesn't need to be searched for
+// tasks referring to states to allow releasing them. Instead the queue
+// contents can just be discarded, and the memory for the no longer referenced
+// states will eventually be reclaimed when the arena is reset.
//
-// A set of free-lists is placed in front of the arena allocators. This
-// causes the maximum number of allocated states to be based on the number of
+// The allocators each provide a free-list of states. When a state is
+// released and its reference count has reached zero, it is added to the
+// allocator's free-list, for use by future allocation requests. This causes
+// the maximum number of allocated states to be based on the number of
// in-progress arrays, rather than the total number of arrays that need to be
-// processed. The use of free-list allocators is the reason for reference
-// counting states.
+// processed.
//
-// The arena and free-list to use for an allocation operation is designated by
-// the worker_id used in the operation. This avoids locking and such on those
-// data structures, at the cost of possibly doing more total arena allocation
-// that would be needed with a single shared arena and free-list.
+// An allocator object is not thread-safe.
class PartialArrayStateAllocator : public CHeapObj<mtGC> {
- class Impl;
- Impl* _impl;
+ class FreeListEntry;
+
+ PartialArrayStateManager* _manager;
+ FreeListEntry* _free_list;
+ Arena* _arena; // Obtained from _manager.
public:
- PartialArrayStateAllocator(uint num_workers);
+ explicit PartialArrayStateAllocator(PartialArrayStateManager* manager);
~PartialArrayStateAllocator();
NONCOPYABLE(PartialArrayStateAllocator);
// Create a new state, obtaining the memory for it from the free-list or
- // arena associated with worker_id.
- PartialArrayState* allocate(uint worker_id,
- oop src, oop dst,
+ // from the associated manager.
+ PartialArrayState* allocate(oop src, oop dst,
size_t index, size_t length,
size_t initial_refcount);
@@ -131,7 +135,70 @@ class PartialArrayStateAllocator : public CHeapObj<mtGC> {
// state to the free-list associated with worker_id. The state must have
// been allocated by this allocator, but that allocation doesn't need to
// have been associated with worker_id.
- void release(uint worker_id, PartialArrayState* state);
+ void release(PartialArrayState* state);
+};
+
+// This class provides memory management for PartialArrayStates.
+//
+// States are allocated using an allocator object. Those allocators in turn
+// may request memory for a state from their associated manager. The manager
+// is responsible for obtaining and releasing memory used for states by the
+// associated allocators.
+//
+// A state may be allocated by one allocator, but end up on the free-list of a
+// different allocator. This can happen because a task referring to the state
+// may be stolen from the queue where it was initially added. This is permitted
+// because a state's memory won't be reclaimed until all of the allocators
+// associated with the manager that is ultimately providing the memory have
+// been deleted and the manager is reset.
+//
+// A manager is used in two distinct and non-overlapping phases.
+//
+// - allocating: This is the initial phase. During this phase, new allocators
+// may be created, and allocators may request memory from the manager.
+//
+// - releasing: When an allocator is destroyed the manager transitions to this
+// phase. It remains in this phase until all extant allocators associated with
+// this manager have been destroyed. During this phase, new allocators may not
+// be created, nor may extant allocators request memory from this manager.
+//
+// Once all the associated allocators have been destroyed the releasing phase
+// ends and the manager may be reset or deleted. Resetting transitions back
+// to the allocating phase.
+class PartialArrayStateManager : public CHeapObj<mtGC> {
+ friend class PartialArrayStateAllocator;
+
+ // Use an arena for each allocator, for thread-safe concurrent allocation by
+ // different allocators.
+ Arena* _arenas;
+
+ // Limit on the number of allocators this manager supports.
+ uint _max_allocators;
+
+ // The number of allocators that have been registered/released.
+ // Atomic to support concurrent registration, and concurrent release.
+ // Phasing restriction forbids registration concurrent with release.
+ volatile uint _registered_allocators;
+ DEBUG_ONLY(volatile uint _released_allocators;)
+
+ // These are all for sole use of the befriended allocator class.
+ Arena* register_allocator();
+ void release_allocator() NOT_DEBUG_RETURN;
+
+public:
+ explicit PartialArrayStateManager(uint max_allocators);
+
+ // Release the memory that has been requested by allocators associated with
+ // this manager.
+ // precondition: all associated allocators have been deleted.
+ ~PartialArrayStateManager();
+
+ NONCOPYABLE(PartialArrayStateManager);
+
+ // Recycle the memory that has been requested by allocators associated with
+ // this manager.
+ // precondition: all associated allocators have been deleted.
+ void reset();
};
#endif // SHARE_GC_SHARED_PARTIALARRAYSTATE_HPP
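
The allocating/releasing phase contract described above maps naturally onto scoped allocator objects: per-worker allocators register on construction, release on destruction, and the shared manager may only be reset once every allocator is gone. A hypothetical mock of that contract (not the HotSpot classes):

```cpp
#include <atomic>
#include <cassert>

class MockManager {
  std::atomic<unsigned> _registered{0};
  std::atomic<unsigned> _released{0};
public:
  void register_allocator() { _registered.fetch_add(1); }
  void release_allocator()  { _released.fetch_add(1); }
  void reset() {  // precondition: all allocators have been destroyed
    assert(_registered.load() == _released.load() && "allocators still active");
    _registered.store(0);
    _released.store(0);
  }
};

class MockAllocator {  // one per GC worker, scoped to a single pause
  MockManager* _m;
public:
  explicit MockAllocator(MockManager* m) : _m(m) { _m->register_allocator(); }
  ~MockAllocator() { _m->release_allocator(); }
};

int main() {
  MockManager mgr;                    // lives across GC pauses
  {
    MockAllocator a(&mgr), b(&mgr);   // allocating phase
  }                                   // releasing phase ends at scope exit
  mgr.reset();                        // arena memory can now be recycled
  return 0;
}
```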
diff --git a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp
index b906ae2ca0b96..c6bcbd18a1683 100644
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp
@@ -465,11 +465,17 @@ void ShenandoahBarrierSetC2::post_barrier(GraphKit* kit,
}
if (ReduceInitialCardMarks && obj == kit->just_allocated_object(kit->control())) {
- // We can skip marks on a freshly-allocated object in Eden.
- // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
- // That routine informs GC to take appropriate compensating steps,
- // upon a slow-path allocation, so as to make this card-mark
- // elision safe.
+ // We use card marks to track old to young references in Generational Shenandoah;
+ // see flag ShenandoahCardBarrier above.
+ // Objects are always allocated in the young generation and initialized
+ // before they are promoted. There's always a safepoint (e.g. at final mark)
+ // before an object is promoted from young to old. Promotion entails dirtying of
+ // the cards backing promoted objects, so they will be guaranteed to be scanned
+ // at the next remembered set scan of the old generation.
+ // Thus, we can safely skip card-marking of initializing stores on a
+ // freshly-allocated object. If any of the assumptions above change in
+ // the future, this code will need to be re-examined; see check in
+ // ShenandoahCardBarrier::on_slowpath_allocation_exit().
return;
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp
index 38e363664dc3a..62067bccb1ed7 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp
@@ -88,6 +88,14 @@ bool ShenandoahBarrierSet::need_keep_alive_barrier(DecoratorSet decorators, Basi
return (on_weak_ref || unknown) && keep_alive;
}
+void ShenandoahBarrierSet::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
+#if COMPILER2_OR_JVMCI
+ assert(!ReduceInitialCardMarks || !ShenandoahCardBarrier || ShenandoahGenerationalHeap::heap()->is_in_young(new_obj),
+ "Error: losing card mark on initialzing store to old gen");
+#endif // COMPILER2_OR_JVMCI
+ assert(thread->deferred_card_mark().is_empty(), "We don't use this");
+}
+
void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
// Create thread local data
ShenandoahThreadLocalData::create(thread);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp
index 8d1dc92761a59..0d38cc757f44a 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp
@@ -77,17 +77,21 @@ class ShenandoahBarrierSet: public BarrierSet {
return (decorators & IN_NATIVE) != 0;
}
- void print_on(outputStream* st) const;
+ void print_on(outputStream* st) const override;
template <class T>
inline void arraycopy_barrier(T* src, T* dst, size_t count);
inline void clone_barrier(oop src);
void clone_barrier_runtime(oop src);
- virtual void on_thread_create(Thread* thread);
- virtual void on_thread_destroy(Thread* thread);
- virtual void on_thread_attach(Thread* thread);
- virtual void on_thread_detach(Thread* thread);
+ // Support for optimizing compilers to call the barrier set on slow path allocations
+ // that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks to take any
+ // compensating actions to restore card-marks that might otherwise be incorrectly elided.
+ void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) override;
+ void on_thread_create(Thread* thread) override;
+ void on_thread_destroy(Thread* thread) override;
+ void on_thread_attach(Thread* thread) override;
+ void on_thread_detach(Thread* thread) override;
static inline oop resolve_forwarded_not_null(oop p);
static inline oop resolve_forwarded_not_null_mutator(oop p);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp
index 8a82498225a95..136ac22d840ff 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp
@@ -56,15 +56,8 @@ void ShenandoahControlThread::run_service() {
const GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
int sleep = ShenandoahControlIntervalMin;
- double last_shrink_time = os::elapsedTime();
double last_sleep_adjust_time = os::elapsedTime();
- // Shrink period avoids constantly polling regions for shrinking.
- // Having a period 10x lower than the delay would mean we hit the
- // shrinking with lag of less than 1/10-th of true delay.
- // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
- const double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
-
ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy();
ShenandoahHeuristics* const heuristics = heap->heuristics();
while (!in_graceful_shutdown() && !should_terminate()) {
@@ -76,9 +69,6 @@ void ShenandoahControlThread::run_service() {
// This control loop iteration has seen this much allocation.
const size_t allocs_seen = reset_allocs_seen();
- // Check if we have seen a new target for soft max heap size.
- const bool soft_max_changed = heap->check_soft_max_changed();
-
// Choose which GC mode to run in. The block below should select a single mode.
GCMode mode = none;
GCCause::Cause cause = GCCause::_last_gc_cause;
@@ -136,6 +126,9 @@ void ShenandoahControlThread::run_service() {
assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");
if (gc_requested) {
+ // Cannot uncommit bitmap slices during concurrent reset
+ ShenandoahNoUncommitMark forbid_region_uncommit(heap);
+
// GC is starting, bump the internal ID
update_gc_id();
@@ -238,29 +231,20 @@ void ShenandoahControlThread::run_service() {
}
}
- const double current = os::elapsedTime();
-
- if (ShenandoahUncommit && (is_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
- // Explicit GC tries to uncommit everything down to min capacity.
- // Soft max change tries to uncommit everything down to target capacity.
- // Periodic uncommit tries to uncommit suitable regions down to min capacity.
-
- double shrink_before = (is_gc_requested || soft_max_changed) ?
- current :
- current - (ShenandoahUncommitDelay / 1000.0);
-
- size_t shrink_until = soft_max_changed ?
- heap->soft_max_capacity() :
- heap->min_capacity();
-
- heap->maybe_uncommit(shrink_before, shrink_until);
- heap->phase_timings()->flush_cycle_to_global();
- last_shrink_time = current;
+ // Check if we have seen a new target for soft max heap size or if a GC was requested.
+ // Either condition will prompt the uncommit thread to attempt to uncommit regions.
+ if (ShenandoahUncommit) {
+ if (heap->check_soft_max_changed()) {
+ heap->notify_soft_max_changed();
+ } else if (is_gc_requested) {
+ heap->notify_explicit_gc_requested();
+ }
}
// Wait before performing the next action. If allocation happened during this wait,
// we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
// back off exponentially.
+ const double current = os::elapsedTime();
if (heap->has_changed()) {
sleep = ShenandoahControlIntervalMin;
} else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod){
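
The control-thread change above replaces inline uncommit work with a notification to a dedicated thread. A standalone sketch of that flag-and-notify handoff, with standard C++ standing in for `ShenandoahSharedFlag` and `MonitorLocker` (all names here are illustrative, not HotSpot API):

```cpp
#include <atomic>
#include <condition_variable>
#include <mutex>

// Minimal model of ShenandoahSharedFlag: a one-shot flag where only the
// set/unset transition edge matters.
class SharedFlag {
  std::atomic<bool> _flag{false};
public:
  bool try_set()   { bool e = false; return _flag.compare_exchange_strong(e, true); }
  bool try_unset() { bool e = true;  return _flag.compare_exchange_strong(e, false); }
};

// The control thread sets a flag and wakes the uncommit thread; on wake-up
// the uncommit thread consumes the flag with try_unset() and picks the
// uncommit target (soft max capacity vs. min capacity) from which flag fired.
struct UncommitMailbox {
  SharedFlag soft_max_changed;
  SharedFlag explicit_gc_requested;
  std::mutex lock;
  std::condition_variable cv;

  void notify_soft_max_changed() {
    if (soft_max_changed.try_set()) {  // only the setting edge needs a wake-up
      std::lock_guard<std::mutex> g(lock);
      cv.notify_all();
    }
  }
  void notify_explicit_gc_requested() {
    if (explicit_gc_requested.try_set()) {
      std::lock_guard<std::mutex> g(lock);
      cv.notify_all();
    }
  }
};
```
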
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
index 1c644a9acccd7..a48b2baa18f38 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
@@ -73,8 +73,9 @@ class ShenandoahResetBitmapTask : public WorkerTask {
WorkerTask("Shenandoah Reset Bitmap"), _generation(generation) {}
void work(uint worker_id) {
- ShenandoahHeapRegion* region = _regions.next();
ShenandoahHeap* heap = ShenandoahHeap::heap();
+ assert(!heap->is_uncommit_in_progress(), "Cannot uncommit bitmaps while resetting them.");
+ ShenandoahHeapRegion* region = _regions.next();
ShenandoahMarkingContext* const ctx = heap->marking_context();
while (region != nullptr) {
auto const affiliation = region->affiliation();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp
index ef0fbf671a0dd..33af35c6b9555 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp
@@ -67,15 +67,8 @@ void ShenandoahGenerationalControlThread::run_service() {
const GCMode default_mode = concurrent_normal;
ShenandoahGenerationType generation = GLOBAL;
- double last_shrink_time = os::elapsedTime();
uint age_period = 0;
- // Shrink period avoids constantly polling regions for shrinking.
- // Having a period 10x lower than the delay would mean we hit the
- // shrinking with lag of less than 1/10-th of true delay.
- // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
- const double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
-
ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy();
// Heuristics are notified of allocation failures here and other outcomes
@@ -191,6 +184,9 @@ void ShenandoahGenerationalControlThread::run_service() {
assert (!gc_requested || cause != GCCause::_no_gc, "GC cause should be set");
if (gc_requested) {
+ // Cannot uncommit bitmap slices during concurrent reset
+ ShenandoahNoUncommitMark forbid_region_uncommit(heap);
+
// Blow away all soft references on this cycle, if handling allocation failure,
// either implicit or explicit GC request, or we are requested to do so unconditionally.
if (generation == GLOBAL && (alloc_failure_pending || is_gc_requested || ShenandoahAlwaysClearSoftRefs)) {
@@ -303,24 +299,14 @@ void ShenandoahGenerationalControlThread::run_service() {
}
}
- const double current = os::elapsedTime();
-
- if (ShenandoahUncommit && (is_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
- // Explicit GC tries to uncommit everything down to min capacity.
- // Soft max change tries to uncommit everything down to target capacity.
- // Periodic uncommit tries to uncommit suitable regions down to min capacity.
-
- double shrink_before = (is_gc_requested || soft_max_changed) ?
- current :
- current - (ShenandoahUncommitDelay / 1000.0);
-
- size_t shrink_until = soft_max_changed ?
- heap->soft_max_capacity() :
- heap->min_capacity();
-
- heap->maybe_uncommit(shrink_before, shrink_until);
- heap->phase_timings()->flush_cycle_to_global();
- last_shrink_time = current;
+ // Check if we have seen a new target for soft max heap size or if a GC was requested.
+ // Either condition will prompt the uncommit thread to attempt to uncommit regions.
+ if (ShenandoahUncommit) {
+ if (heap->check_soft_max_changed()) {
+ heap->notify_soft_max_changed();
+ } else if (is_gc_requested) {
+ heap->notify_explicit_gc_requested();
+ }
}
// Wait for ShenandoahControlIntervalMax unless there was an allocation failure or another request was made mid-cycle.
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index 6ef66926b72fa..c1bc9dc661643 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -25,8 +25,10 @@
*/
#include "precompiled.hpp"
-#include "memory/allocation.hpp"
-#include "memory/universe.hpp"
+
+#include "cds/archiveHeapWriter.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "code/codeCache.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/fullGCForwarding.hpp"
@@ -42,17 +44,16 @@
#include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
#include "gc/shenandoah/shenandoahAllocRequest.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
-#include "gc/shenandoah/shenandoahClosures.inline.hpp"
+#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
-#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
+#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
-#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
@@ -65,13 +66,14 @@
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
+#include "gc/shenandoah/shenandoahUncommitThread.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
-#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
@@ -79,17 +81,16 @@
#include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
#include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
#include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
-#include "utilities/globalDefinitions.hpp"
#if INCLUDE_JFR
#include "gc/shenandoah/shenandoahJfrSupport.hpp"
#endif
-#include "cds/archiveHeapWriter.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "code/codeCache.hpp"
+
+#include "memory/allocation.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspaceUtils.hpp"
+#include "memory/universe.hpp"
#include "nmt/mallocTracker.hpp"
#include "nmt/memTracker.hpp"
#include "oops/compressedOops.inline.hpp"
@@ -102,6 +103,7 @@
#include "runtime/safepointMechanism.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/vmThread.hpp"
+#include "utilities/globalDefinitions.hpp"
#include "utilities/events.hpp"
#include "utilities/powerOfTwo.hpp"
@@ -459,6 +461,10 @@ jint ShenandoahHeap::initialize() {
initialize_controller();
+ if (ShenandoahUncommit) {
+ _uncommit_thread = new ShenandoahUncommitThread(this);
+ }
+
print_init_logger();
FullGCForwarding::initialize(_heap_region);
@@ -530,6 +536,7 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
_update_refs_iterator(this),
_global_generation(nullptr),
_control_thread(nullptr),
+ _uncommit_thread(nullptr),
_young_generation(nullptr),
_old_generation(nullptr),
_shenandoah_policy(policy),
@@ -800,60 +807,15 @@ bool ShenandoahHeap::is_in(const void* p) const {
}
}
-void ShenandoahHeap::maybe_uncommit(double shrink_before, size_t shrink_until) {
- assert (ShenandoahUncommit, "should be enabled");
-
- // Determine if there is work to do. This avoids taking heap lock if there is
- // no work available, avoids spamming logs with superfluous logging messages,
- // and minimises the amount of work while locks are taken.
-
- if (committed() <= shrink_until) return;
-
- bool has_work = false;
- for (size_t i = 0; i < num_regions(); i++) {
- ShenandoahHeapRegion* r = get_region(i);
- if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
- has_work = true;
- break;
- }
- }
-
- if (has_work) {
- static const char* msg = "Concurrent uncommit";
- ShenandoahConcurrentPhase gcPhase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
- EventMark em("%s", msg);
-
- op_uncommit(shrink_before, shrink_until);
+void ShenandoahHeap::notify_soft_max_changed() {
+ if (_uncommit_thread != nullptr) {
+ _uncommit_thread->notify_soft_max_changed();
}
}
-void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
- assert (ShenandoahUncommit, "should be enabled");
-
- // Application allocates from the beginning of the heap, and GC allocates at
- // the end of it. It is more efficient to uncommit from the end, so that applications
- // could enjoy the near committed regions. GC allocations are much less frequent,
- // and therefore can accept the committing costs.
-
- size_t count = 0;
- for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
- ShenandoahHeapRegion* r = get_region(i - 1);
- if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
- ShenandoahHeapLocker locker(lock());
- if (r->is_empty_committed()) {
- if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
- break;
- }
-
- r->make_uncommitted();
- count++;
- }
- }
- SpinPause(); // allow allocators to take the lock
- }
-
- if (count > 0) {
- notify_heap_changed();
+void ShenandoahHeap::notify_explicit_gc_requested() {
+ if (_uncommit_thread != nullptr) {
+ _uncommit_thread->notify_explicit_gc_requested();
}
}
@@ -1507,6 +1469,10 @@ void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
tcl->do_thread(_control_thread);
}
+ if (_uncommit_thread != nullptr) {
+ tcl->do_thread(_uncommit_thread);
+ }
+
workers()->threads_do(tcl);
if (_safepoint_workers != nullptr) {
_safepoint_workers->threads_do(tcl);
@@ -2094,6 +2060,11 @@ void ShenandoahHeap::stop() {
// Step 3. Wait until GC worker exits normally.
control_thread()->stop();
+
+ // Step 4. Shutdown uncommit thread.
+ if (_uncommit_thread != nullptr) {
+ _uncommit_thread->stop();
+ }
}
void ShenandoahHeap::stw_unload_classes(bool full_gc) {
@@ -2521,7 +2492,7 @@ bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
if (is_bitmap_slice_committed(r, true)) {
// Some other region from the group is still committed, meaning the bitmap
- // slice is should stay committed, exit right away.
+ // slice should stay committed, exit right away.
return true;
}
@@ -2535,6 +2506,27 @@ bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
return true;
}
+void ShenandoahHeap::forbid_uncommit() {
+ if (_uncommit_thread != nullptr) {
+ _uncommit_thread->forbid_uncommit();
+ }
+}
+
+void ShenandoahHeap::allow_uncommit() {
+ if (_uncommit_thread != nullptr) {
+ _uncommit_thread->allow_uncommit();
+ }
+}
+
+#ifdef ASSERT
+bool ShenandoahHeap::is_uncommit_in_progress() {
+ if (_uncommit_thread != nullptr) {
+ return _uncommit_thread->is_uncommit_in_progress();
+ }
+ return false;
+}
+#endif
+
void ShenandoahHeap::safepoint_synchronize_begin() {
StackWatermarkSet::safepoint_synchronize_begin();
SuspendibleThreadSet::synchronize();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
index a9a793f9e605d..5f957b734104d 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
@@ -70,6 +70,7 @@ class ShenandoahFullGC;
class ShenandoahMonitoringSupport;
class ShenandoahPacer;
class ShenandoahReferenceProcessor;
+class ShenandoahUncommitThread;
class ShenandoahVerifier;
class ShenandoahWorkerThreads;
class VMStructs;
@@ -252,11 +253,14 @@ class ShenandoahHeap : public CollectedHeap {
// ---------- Periodic Tasks
//
-private:
+public:
+ // Notify heuristics and region state change logger that the state of the heap has changed
void notify_heap_changed();
-public:
+ // Force counters to update
void set_forced_counters_update(bool value);
+
+ // Update counters if forced flag is set
void handle_force_counters_update();
// ---------- Workers handling
@@ -440,11 +444,6 @@ class ShenandoahHeap : public CollectedHeap {
void cancel_gc(GCCause::Cause cause);
public:
- // These will uncommit empty regions if heap::committed > shrink_until
- // and there exists at least one region which was made empty before shrink_before.
- void maybe_uncommit(double shrink_before, size_t shrink_until);
- void op_uncommit(double shrink_before, size_t shrink_until);
-
// Returns true if the soft maximum heap has been changed using management APIs.
bool check_soft_max_changed();
@@ -478,14 +477,22 @@ class ShenandoahHeap : public CollectedHeap {
void notify_gc_no_progress();
size_t get_gc_no_progress_count() const;
-//
-// Mark support
+ // The uncommit thread targets the soft max heap size; notify it when that value has changed.
+ void notify_soft_max_changed();
+
+ // An explicit GC request may have freed regions; notify the uncommit thread.
+ void notify_explicit_gc_requested();
+
private:
ShenandoahGeneration* _global_generation;
protected:
+ // The control thread presides over concurrent collection cycles
ShenandoahController* _control_thread;
+ // The uncommit thread periodically attempts to uncommit regions that have been empty for longer than ShenandoahUncommitDelay
+ ShenandoahUncommitThread* _uncommit_thread;
+
ShenandoahYoungGeneration* _young_generation;
ShenandoahOldGeneration* _old_generation;
@@ -500,7 +507,7 @@ class ShenandoahHeap : public CollectedHeap {
ShenandoahMmuTracker _mmu_tracker;
public:
- ShenandoahController* control_thread() { return _control_thread; }
+ ShenandoahController* control_thread() const { return _control_thread; }
ShenandoahGeneration* global_generation() const { return _global_generation; }
ShenandoahYoungGeneration* young_generation() const {
@@ -726,6 +733,20 @@ class ShenandoahHeap : public CollectedHeap {
bool uncommit_bitmap_slice(ShenandoahHeapRegion *r);
bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false);
+ // During concurrent reset, the control thread will zero out the mark bitmaps for committed regions.
+ // This cannot happen when the uncommit thread is simultaneously trying to uncommit regions and their bitmaps.
+ // To keep the two threads from working at the same time, these methods let the control
+ // thread forbid uncommits while a collection cycle is in progress.
+
+ // Forbid uncommits (will stop and wait if regions are being uncommitted)
+ void forbid_uncommit();
+
+ // Allow the uncommit thread to process regions
+ void allow_uncommit();
+#ifdef ASSERT
+ bool is_uncommit_in_progress();
+#endif
+
// Liveness caching support
ShenandoahLiveData* get_liveness_cache(uint worker_id);
void flush_liveness_cache(uint worker_id);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp
index 05ab60c0bb66b..4c8cb8c20570d 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp
@@ -189,7 +189,6 @@ class outputStream;
f(full_gc_reconstruct_remembered_set, " Reconstruct Remembered Set") \
f(full_gc_heapdump_post, " Post Heap Dump") \
\
- f(conc_uncommit, "Concurrent Uncommit") \
f(pacing, "Pacing") \
\
f(heap_iteration_roots, "Heap Iteration") \
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUncommitThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahUncommitThread.cpp
new file mode 100644
index 0000000000000..85bb3349d5c97
--- /dev/null
+++ b/src/hotspot/share/gc/shenandoah/shenandoahUncommitThread.cpp
@@ -0,0 +1,198 @@
+/*
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahUncommitThread.hpp"
+#include "logging/log.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "utilities/events.hpp"
+
+ShenandoahUncommitThread::ShenandoahUncommitThread(ShenandoahHeap* heap)
+ : _heap(heap),
+ _stop_lock(Mutex::safepoint - 2, "ShenandoahUncommitStop_lock", true),
+ _uncommit_lock(Mutex::safepoint - 2, "ShenandoahUncommitCancel_lock", true) {
+ set_name("Shenandoah Uncommit Thread");
+ create_and_start();
+
+ // Allow uncommits. This is managed by the control thread during a GC.
+ _uncommit_allowed.set();
+}
+
+void ShenandoahUncommitThread::run_service() {
+ assert(ShenandoahUncommit, "Thread should only run when uncommit is enabled");
+
+ // The poll interval avoids constantly polling regions for shrinking.
+ // Using an interval 10x shorter than the delay means a region becomes
+ // eligible for shrinking with a lag of less than 1/10th of the true delay.
+ // ShenandoahUncommitDelay is in millis, but shrink_period is in seconds.
+ const int64_t poll_interval = int64_t(ShenandoahUncommitDelay) / 10;
+ const double shrink_period = double(ShenandoahUncommitDelay) / 1000;
+ bool timed_out = false;
+ while (!should_terminate()) {
+ bool soft_max_changed = _soft_max_changed.try_unset();
+ bool explicit_gc_requested = _explicit_gc_requested.try_unset();
+
+ if (soft_max_changed || explicit_gc_requested || timed_out) {
+ double current = os::elapsedTime();
+ size_t shrink_until = soft_max_changed ? _heap->soft_max_capacity() : _heap->min_capacity();
+ double shrink_before = (soft_max_changed || explicit_gc_requested) ?
+ current :
+ current - shrink_period;
+
+ // Explicit GC tries to uncommit everything down to min capacity.
+ // Soft max change tries to uncommit everything down to target capacity.
+ // Periodic uncommit tries to uncommit suitable regions down to min capacity.
+ if (should_uncommit(shrink_before, shrink_until)) {
+ uncommit(shrink_before, shrink_until);
+ }
+ }
+ {
+ MonitorLocker locker(&_stop_lock, Mutex::_no_safepoint_check_flag);
+ if (!_stop_requested.is_set()) {
+ timed_out = locker.wait(poll_interval);
+ }
+ }
+ }
+}
+
+bool ShenandoahUncommitThread::should_uncommit(double shrink_before, size_t shrink_until) const {
+ // Only start uncommit if the GC is idle, is not trying to run and there is work to do.
+ return _heap->is_idle() && is_uncommit_allowed() && has_work(shrink_before, shrink_until);
+}
+
+bool ShenandoahUncommitThread::has_work(double shrink_before, size_t shrink_until) const {
+ // Determine if there is work to do. This avoids locking the heap if there is
+ // no work available, avoids spamming logs with superfluous logging messages,
+ // and minimises the amount of work while locks are held.
+
+ if (_heap->committed() <= shrink_until) {
+ return false;
+ }
+
+ for (size_t i = 0; i < _heap->num_regions(); i++) {
+ ShenandoahHeapRegion *r = _heap->get_region(i);
+ if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void ShenandoahUncommitThread::notify_soft_max_changed() {
+ assert(is_uncommit_allowed(), "Only notify if uncommit is allowed");
+ if (_soft_max_changed.try_set()) {
+ MonitorLocker locker(&_stop_lock, Mutex::_no_safepoint_check_flag);
+ locker.notify_all();
+ }
+}
+
+void ShenandoahUncommitThread::notify_explicit_gc_requested() {
+ assert(is_uncommit_allowed(), "Only notify if uncommit is allowed");
+ if (_explicit_gc_requested.try_set()) {
+ MonitorLocker locker(&_stop_lock, Mutex::_no_safepoint_check_flag);
+ locker.notify_all();
+ }
+}
+
+bool ShenandoahUncommitThread::is_uncommit_allowed() const {
+ return _uncommit_allowed.is_set();
+}
+
+void ShenandoahUncommitThread::uncommit(double shrink_before, size_t shrink_until) {
+ assert(ShenandoahUncommit, "should be enabled");
+ assert(_uncommit_in_progress.is_unset(), "Uncommit should not be in progress");
+
+ if (!is_uncommit_allowed()) {
+ return;
+ }
+
+ const char* msg = "Concurrent uncommit";
+ EventMark em("%s", msg);
+ double start = os::elapsedTime();
+ log_info(gc, start)("%s", msg);
+
+ _uncommit_in_progress.set();
+
+ // Application allocates from the beginning of the heap, and GC allocates at
+ // the end of it. It is more efficient to uncommit from the end, so that applications
+ // could enjoy the near committed regions. GC allocations are much less frequent,
+ // and therefore can accept the committing costs.
+ size_t count = 0;
+ for (size_t i = _heap->num_regions(); i > 0; i--) {
+ if (!is_uncommit_allowed()) {
+ break;
+ }
+
+ ShenandoahHeapRegion* r = _heap->get_region(i - 1);
+ if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
+ SuspendibleThreadSetJoiner sts_joiner;
+ ShenandoahHeapLocker locker(_heap->lock());
+ if (r->is_empty_committed()) {
+ if (_heap->committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
+ break;
+ }
+
+ r->make_uncommitted();
+ count++;
+ }
+ }
+ SpinPause(); // allow allocators to take the lock
+ }
+
+ {
+ MonitorLocker locker(&_uncommit_lock, Mutex::_no_safepoint_check_flag);
+ _uncommit_in_progress.unset();
+ locker.notify_all();
+ }
+
+ if (count > 0) {
+ _heap->notify_heap_changed();
+ }
+
+ double elapsed = os::elapsedTime() - start;
+ log_info(gc)("%s " PROPERFMT " (" PROPERFMT ") %.3fms",
+ msg, PROPERFMTARGS(count * ShenandoahHeapRegion::region_size_bytes()), PROPERFMTARGS(_heap->capacity()),
+ elapsed * MILLIUNITS);
+}
+
+void ShenandoahUncommitThread::stop_service() {
+ MonitorLocker locker(&_stop_lock, Mutex::_safepoint_check_flag);
+ _stop_requested.set();
+ locker.notify_all();
+}
+
+void ShenandoahUncommitThread::forbid_uncommit() {
+ MonitorLocker locker(&_uncommit_lock, Mutex::_no_safepoint_check_flag);
+ _uncommit_allowed.unset();
+ while (_uncommit_in_progress.is_set()) {
+ locker.wait();
+ }
+}
+
+void ShenandoahUncommitThread::allow_uncommit() {
+ _uncommit_allowed.set();
+}
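
For concreteness, the interval arithmetic in `run_service()` works out as follows. The 5000 ms delay below is an arbitrary example value chosen for illustration, not the flag's default:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Example only: ShenandoahUncommitDelay is a runtime flag; 5000 ms is an
  // arbitrary illustration value.
  const int64_t uncommit_delay_ms = 5000;
  const int64_t poll_interval_ms  = uncommit_delay_ms / 10;           // 500 ms
  const double  shrink_period_s   = double(uncommit_delay_ms) / 1000; // 5.0 s
  // A region becomes eligible once it has been empty for shrink_period
  // seconds; polling at a tenth of the delay bounds the extra lag to ~10%.
  std::printf("poll every %lld ms, uncommit regions empty for > %.1f s\n",
              (long long)poll_interval_ms, shrink_period_s);
  return 0;
}
```
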
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUncommitThread.hpp b/src/hotspot/share/gc/shenandoah/shenandoahUncommitThread.hpp
new file mode 100644
index 0000000000000..6c4e26e4e0fd8
--- /dev/null
+++ b/src/hotspot/share/gc/shenandoah/shenandoahUncommitThread.hpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHUNCOMMITTHREAD_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHUNCOMMITTHREAD_HPP
+
+#include "gc/shared/concurrentGCThread.hpp"
+
+class ShenandoahHeap;
+
+class ShenandoahUncommitThread : public ConcurrentGCThread {
+ ShenandoahHeap* const _heap;
+
+ // Indicates that `SoftMaxHeapSize` has changed
+ ShenandoahSharedFlag _soft_max_changed;
+
+ // Indicates that an explicit GC has been requested
+ ShenandoahSharedFlag _explicit_gc_requested;
+
+ // Indicates that the thread should stop and terminate
+ ShenandoahSharedFlag _stop_requested;
+
+ // Indicates whether it is safe to uncommit regions
+ ShenandoahSharedFlag _uncommit_allowed;
+
+ // Indicates that regions are being actively uncommitted
+ ShenandoahSharedFlag _uncommit_in_progress;
+
+ // This lock is used to coordinate stopping and terminating this thread
+ Monitor _stop_lock;
+
+ // This lock is used to coordinate allowing or forbidding regions to be uncommitted
+ Monitor _uncommit_lock;
+
+ // True if there are regions to uncommit and uncommits are allowed
+ bool should_uncommit(double shrink_before, size_t shrink_until) const;
+
+ // True if there are regions that have been empty for longer than ShenandoahUncommitDelay and the committed
+ // memory is higher than soft max capacity or minimum capacity
+ bool has_work(double shrink_before, size_t shrink_until) const;
+
+ // Perform the work of uncommitting empty regions
+ void uncommit(double shrink_before, size_t shrink_until);
+
+ // True if the control thread has allowed this thread to uncommit regions
+ bool is_uncommit_allowed() const;
+
+public:
+ explicit ShenandoahUncommitThread(ShenandoahHeap* heap);
+
+ // Periodically check for regions to uncommit
+ void run_service() override;
+
+ // Wake up this thread and try to uncommit for changed soft max size
+ void notify_soft_max_changed();
+
+ // Wake up this thread and try to uncommit for min heap size
+ void notify_explicit_gc_requested();
+
+ // Wait for uncommit operations to stop; returns immediately if the uncommit thread is idle
+ void forbid_uncommit();
+
+ // Allow uncommit operations to happen; does not block
+ void allow_uncommit();
+
+ // True if uncommit is in progress
+ bool is_uncommit_in_progress() {
+ return _uncommit_in_progress.is_set();
+ }
+protected:
+ // Interrupt and stop this thread
+ void stop_service() override;
+};
+
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHUNCOMMITTHREAD_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp b/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp
index 190822af9d6bb..fd30279d318a2 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp
@@ -242,4 +242,19 @@ class ShenandoahSuspendibleThreadSetLeaver {
}
};
+// Regions cannot be uncommitted when concurrent reset is zeroing out the bitmaps.
+// This CADR class enforces this by forbidding region uncommits while it is in scope.
+class ShenandoahNoUncommitMark : public StackObj {
+ ShenandoahHeap* const _heap;
+public:
+ explicit ShenandoahNoUncommitMark(ShenandoahHeap* heap) : _heap(heap) {
+ _heap->forbid_uncommit();
+ }
+
+ ~ShenandoahNoUncommitMark() {
+ _heap->allow_uncommit();
+ }
+};
+
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHUTILS_HPP
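
A standalone model of the handshake that `ShenandoahNoUncommitMark` wraps may help: `forbid()` blocks until any in-flight uncommit drains, and the destructor re-allows uncommits. Standard C++ stands in for the Monitor/flag machinery; all names are illustrative:

```cpp
#include <condition_variable>
#include <mutex>

class UncommitGate {
  std::mutex _m;
  std::condition_variable _cv;
  bool _allowed = true;
  bool _in_progress = false;
public:
  void forbid() {                       // control thread, before concurrent reset
    std::unique_lock<std::mutex> l(_m);
    _allowed = false;
    _cv.wait(l, [&] { return !_in_progress; }); // wait out an active uncommit
  }
  void allow() {                        // control thread, after the cycle
    std::lock_guard<std::mutex> l(_m);
    _allowed = true;
  }
  bool is_allowed() {                   // checked by the uncommit thread
    std::lock_guard<std::mutex> l(_m);
    return _allowed;
  }
};

// RAII wrapper mirroring ShenandoahNoUncommitMark: the constructor acquires
// and the destructor releases, so the forbidden window exactly matches scope.
class NoUncommitMark {
  UncommitGate& _gate;
public:
  explicit NoUncommitMark(UncommitGate& gate) : _gate(gate) { _gate.forbid(); }
  ~NoUncommitMark() { _gate.allow(); }
};
```
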
diff --git a/src/hotspot/share/memory/heap.cpp b/src/hotspot/share/memory/heap.cpp
index 658ec3e8de773..92a376defa5a5 100644
--- a/src/hotspot/share/memory/heap.cpp
+++ b/src/hotspot/share/memory/heap.cpp
@@ -227,13 +227,11 @@ bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_s
const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);
// reserve space for _segmap
- ReservedSpace seg_rs(reserved_segments_size);
+ ReservedSpace seg_rs(reserved_segments_size, mtCode);
if (!_segmap.initialize(seg_rs, committed_segments_size)) {
return false;
}
- MemTracker::record_virtual_memory_tag((address)_segmap.low_boundary(), mtCode);
-
assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit enough space for segment map");
assert(_segmap.reserved_size() >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");
assert(_segmap.reserved_size() >= _segmap.committed_size() , "just checking");
diff --git a/src/hotspot/share/memory/virtualspace.cpp b/src/hotspot/share/memory/virtualspace.cpp
index 614a3ab784bbb..33574f7f5d1ea 100644
--- a/src/hotspot/share/memory/virtualspace.cpp
+++ b/src/hotspot/share/memory/virtualspace.cpp
@@ -45,20 +45,20 @@ ReservedSpace::ReservedSpace() : _base(nullptr), _size(0), _noaccess_prefix(0),
_alignment(0), _special(false), _fd_for_heap(-1), _executable(false) {
}
-ReservedSpace::ReservedSpace(size_t size) : _fd_for_heap(-1) {
+ReservedSpace::ReservedSpace(size_t size, MemTag mem_tag) : _fd_for_heap(-1) {
// Want to use large pages where possible. If the size is
// not large page aligned the mapping will be a mix of
// large and normal pages.
size_t page_size = os::page_size_for_region_unaligned(size, 1);
size_t alignment = os::vm_allocation_granularity();
- initialize(size, alignment, page_size, nullptr, false);
+ initialize(size, alignment, page_size, nullptr, false, mem_tag);
}
ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
// When a page size is given we don't want to mix large
// and normal pages. If the size is not a multiple of the
// page size it will be aligned up to achieve this.
- size_t alignment = os::vm_allocation_granularity();;
+ size_t alignment = os::vm_allocation_granularity();
if (preferred_page_size != os::vm_page_size()) {
alignment = MAX2(preferred_page_size, alignment);
size = align_up(size, alignment);
@@ -81,19 +81,19 @@ ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment, size_t p
}
// Helper method
-static char* attempt_map_or_reserve_memory_at(char* base, size_t size, int fd, bool executable) {
+static char* attempt_map_or_reserve_memory_at(char* base, size_t size, int fd, bool executable, MemTag mem_tag) {
if (fd != -1) {
return os::attempt_map_memory_to_file_at(base, size, fd);
}
- return os::attempt_reserve_memory_at(base, size, executable);
+ return os::attempt_reserve_memory_at(base, size, executable, mem_tag);
}
// Helper method
-static char* map_or_reserve_memory(size_t size, int fd, bool executable) {
+static char* map_or_reserve_memory(size_t size, int fd, bool executable, MemTag mem_tag) {
if (fd != -1) {
return os::map_memory_to_file(size, fd);
}
- return os::reserve_memory(size, executable);
+ return os::reserve_memory(size, executable, mem_tag);
}
// Helper method
@@ -154,7 +154,7 @@ static void log_on_large_pages_failure(char* req_addr, size_t bytes) {
}
static char* reserve_memory(char* requested_address, const size_t size,
- const size_t alignment, int fd, bool exec) {
+ const size_t alignment, int fd, bool exec, MemTag mem_tag) {
char* base;
// If the memory was requested at a particular address, use
// os::attempt_reserve_memory_at() to avoid mapping over something
@@ -163,12 +163,12 @@ static char* reserve_memory(char* requested_address, const size_t size,
assert(is_aligned(requested_address, alignment),
"Requested address " PTR_FORMAT " must be aligned to " SIZE_FORMAT,
p2i(requested_address), alignment);
- base = attempt_map_or_reserve_memory_at(requested_address, size, fd, exec);
+ base = attempt_map_or_reserve_memory_at(requested_address, size, fd, exec, mem_tag);
} else {
// Optimistically assume that the OS returns an aligned base pointer.
// When reserving a large address range, most OSes seem to align to at
// least 64K.
- base = map_or_reserve_memory(size, fd, exec);
+ base = map_or_reserve_memory(size, fd, exec, mem_tag);
// Check alignment constraints. This is only needed when there is
// no requested address.
if (!is_aligned(base, alignment)) {
@@ -220,7 +220,8 @@ void ReservedSpace::reserve(size_t size,
size_t alignment,
size_t page_size,
char* requested_address,
- bool executable) {
+ bool executable,
+ MemTag mem_tag) {
assert(is_aligned(size, alignment), "Size must be aligned to the requested alignment");
// There are basically three different cases that we need to handle below:
@@ -235,7 +236,7 @@ void ReservedSpace::reserve(size_t size,
// When there is a backing file directory for this space then whether
// large pages are allocated is up to the filesystem of the backing file.
// So UseLargePages is not taken into account for this reservation.
- char* base = reserve_memory(requested_address, size, alignment, _fd_for_heap, executable);
+ char* base = reserve_memory(requested_address, size, alignment, _fd_for_heap, executable, mem_tag);
if (base != nullptr) {
initialize_members(base, size, alignment, os::vm_page_size(), true, executable);
}
@@ -266,7 +267,7 @@ void ReservedSpace::reserve(size_t size,
}
// == Case 3 ==
- char* base = reserve_memory(requested_address, size, alignment, -1, executable);
+ char* base = reserve_memory(requested_address, size, alignment, -1, executable, mem_tag);
if (base != nullptr) {
// Successful mapping.
initialize_members(base, size, alignment, page_size, false, executable);
@@ -277,7 +278,8 @@ void ReservedSpace::initialize(size_t size,
size_t alignment,
size_t page_size,
char* requested_address,
- bool executable) {
+ bool executable,
+ MemTag mem_tag) {
const size_t granularity = os::vm_allocation_granularity();
assert((size & (granularity - 1)) == 0,
"size not aligned to os::vm_allocation_granularity()");
@@ -298,7 +300,7 @@ void ReservedSpace::initialize(size_t size,
alignment = MAX2(alignment, os::vm_page_size());
// Reserve the memory.
- reserve(size, alignment, page_size, requested_address, executable);
+ reserve(size, alignment, page_size, requested_address, executable, mem_tag);
// Check that the requested address is used if given.
if (failed_to_reserve_as_requested(_base, requested_address)) {
@@ -424,7 +426,7 @@ void ReservedHeapSpace::try_reserve_heap(size_t size,
p2i(requested_address),
size);
- reserve(size, alignment, page_size, requested_address, false);
+ reserve(size, alignment, page_size, requested_address, false, mtJavaHeap);
// Check alignment constraints.
if (is_reserved() && !is_aligned(_base, _alignment)) {
@@ -610,7 +612,7 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
// Last, desperate try without any placement.
if (_base == nullptr) {
log_trace(gc, heap, coops)("Trying to allocate at address null heap of size " SIZE_FORMAT_X, size + noaccess_prefix);
- initialize(size + noaccess_prefix, alignment, page_size, nullptr, false);
+ initialize(size + noaccess_prefix, alignment, page_size, nullptr, false, mtJavaHeap);
}
}
}
@@ -653,7 +655,7 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, size_t page_
ShouldNotReachHere();
#endif // _LP64
} else {
- initialize(size, alignment, page_size, nullptr, false);
+ initialize(size, alignment, page_size, nullptr, false, mtJavaHeap);
}
assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
@@ -661,10 +663,6 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, size_t page_
assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size],
"area must be distinguishable from marks for mark-sweep");
- if (base() != nullptr) {
- MemTracker::record_virtual_memory_tag((address)base(), mtJavaHeap);
- }
-
if (_fd_for_heap != -1) {
::close(_fd_for_heap);
}
@@ -679,8 +677,7 @@ MemRegion ReservedHeapSpace::region() const {
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
size_t rs_align,
size_t rs_page_size) : ReservedSpace() {
- initialize(r_size, rs_align, rs_page_size, /*requested address*/ nullptr, /*executable*/ true);
- MemTracker::record_virtual_memory_tag((address)base(), mtCode);
+ initialize(r_size, rs_align, rs_page_size, /*requested address*/ nullptr, /*executable*/ true, mtCode);
}
// VirtualSpace
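
The pattern behind these call-site changes: instead of reserving memory and tagging it in a second `MemTracker::record_virtual_memory_tag()` step, the `MemTag` now travels with the reservation itself. A standalone sketch of that shape — the enum and functions below are stand-ins, not the HotSpot API:

```cpp
#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Stand-ins for NMT's MemTag values.
enum MemTag { mtNone, mtCode, mtJavaHeap, mtSafepoint, mtTest };

static void nmt_record(void* base, size_t size, MemTag tag) {
  std::printf("NMT: %p, %zu bytes, tag %d\n", base, size, (int)tag);
}

// New shape: the tag is a parameter of the reservation, so accounting
// happens together with the mapping and call sites cannot forget the
// separate tagging step the old code required.
static void* reserve_memory(size_t size, bool executable, MemTag tag) {
  (void)executable;
  void* base = std::malloc(size); // stands in for the real mmap/VirtualAlloc
  if (base != nullptr) {
    nmt_record(base, size, tag);
  }
  return base;
}

int main() {
  void* code = reserve_memory(4096, /*executable=*/false, mtCode);
  std::free(code);
  return 0;
}
```
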
diff --git a/src/hotspot/share/memory/virtualspace.hpp b/src/hotspot/share/memory/virtualspace.hpp
index 022bcabe7536f..6139c3a413f0e 100644
--- a/src/hotspot/share/memory/virtualspace.hpp
+++ b/src/hotspot/share/memory/virtualspace.hpp
@@ -26,6 +26,7 @@
#define SHARE_MEMORY_VIRTUALSPACE_HPP
#include "memory/memRegion.hpp"
+#include "nmt/memTag.hpp"
#include "utilities/globalDefinitions.hpp"
class outputStream;
@@ -61,16 +62,16 @@ class ReservedSpace {
size_t page_size, bool special, bool executable);
void initialize(size_t size, size_t alignment, size_t page_size,
- char* requested_address, bool executable);
+ char* requested_address, bool executable, MemTag mem_tag = mtNone);
void reserve(size_t size, size_t alignment, size_t page_size,
- char* requested_address, bool executable);
+ char* requested_address, bool executable, MemTag mem_tag);
public:
// Constructor
ReservedSpace();
// Initialize the reserved space with the given size. Depending on the size
// a suitable page size and alignment will be used.
- explicit ReservedSpace(size_t size);
+ ReservedSpace(size_t size, MemTag mem_tag);
// Initialize the reserved space with the given size. The preferred_page_size
// is used as the minimum page size/alignment. This may waste some space if
// the given size is not aligned to that value, as the reservation will be
diff --git a/src/hotspot/share/opto/loopTransform.cpp b/src/hotspot/share/opto/loopTransform.cpp
index f644e26bbe77f..6efad72491775 100644
--- a/src/hotspot/share/opto/loopTransform.cpp
+++ b/src/hotspot/share/opto/loopTransform.cpp
@@ -2463,7 +2463,7 @@ bool PhaseIdealLoop::is_scaled_iv_plus_extra_offset(Node* exp1, Node* offset3, N
//------------------------------do_range_check---------------------------------
// Eliminate range-checks and other trip-counter vs loop-invariant tests.
-void PhaseIdealLoop::do_range_check(IdealLoopTree *loop, Node_List &old_new) {
+void PhaseIdealLoop::do_range_check(IdealLoopTree* loop) {
#ifndef PRODUCT
if (PrintOpto && VerifyLoopOptimizations) {
tty->print("Range Check Elimination ");
@@ -2526,8 +2526,9 @@ void PhaseIdealLoop::do_range_check(IdealLoopTree *loop, Node_List &old_new) {
// Range check elimination optimizes out conditions whose parameters are loop invariant in the main loop. They usually
// have control above the pre loop, but there's no guarantee that they do. There's no guarantee either that the pre
// loop limit has control that's out of loop (a previous round of range check elimination could have set a limit that's
- // not loop invariant).
- Node* new_limit_ctrl = dominated_node(pre_ctrl, pre_limit_ctrl);
+ // not loop invariant). new_limit_ctrl is used for both the pre and main loops. Early control for the main limit may be
+ // below the pre loop entry and the pre limit and must be taken into account when initializing new_limit_ctrl.
+ Node* new_limit_ctrl = dominated_node(pre_ctrl, pre_limit_ctrl, compute_early_ctrl(main_limit, main_limit_ctrl));
// Ensure the original loop limit is available from the
// pre-loop Opaque1 node.
@@ -2778,8 +2779,10 @@ void PhaseIdealLoop::do_range_check(IdealLoopTree *loop, Node_List &old_new) {
// new pre_limit can push Bool/Cmp/Opaque nodes down (when one of the eliminated condition has parameters that are not
// loop invariant in the pre loop.
set_ctrl(pre_opaq, new_limit_ctrl);
- set_ctrl(pre_end->cmp_node(), new_limit_ctrl);
- set_ctrl(pre_end->in(1), new_limit_ctrl);
+ // Can't use new_limit_ctrl for the Bool/Cmp nodes because it can be out of the loop while
+ // they are loop variant. Conservatively set their control to the latest possible one.
+ set_ctrl(pre_end->cmp_node(), pre_end->in(0));
+ set_ctrl(pre_end->in(1), pre_end->in(0));
_igvn.replace_input_of(pre_opaq, 1, pre_limit);
@@ -2819,11 +2822,12 @@ void PhaseIdealLoop::do_range_check(IdealLoopTree *loop, Node_List &old_new) {
// The OpaqueNode is unshared by design
assert(opqzm->outcnt() == 1, "cannot hack shared node");
_igvn.replace_input_of(opqzm, 1, main_limit);
- // new main_limit can push Bool/Cmp nodes down (when one of the eliminated condition has parameters that are not loop
- // invariant in the pre loop.
+ // new main_limit can push opaque node for zero trip guard down (when one of the eliminated conditions
+ // has parameters that are not loop invariant in the pre loop).
set_ctrl(opqzm, new_limit_ctrl);
- set_ctrl(iffm->in(1)->in(1), new_limit_ctrl);
- set_ctrl(iffm->in(1), new_limit_ctrl);
+ // Bool/Cmp nodes for the zero trip guard should have been assigned control between the main and pre loop
+ // (because the zero trip guard depends on the induction variable value out of the pre loop), so they shouldn't need to be adjusted.
+ assert(is_dominator(new_limit_ctrl, get_ctrl(iffm->in(1)->in(1))), "control of cmp should be below control of updated input");
C->print_method(PHASE_AFTER_RANGE_CHECK_ELIMINATION, 4, cl);
}
@@ -3402,7 +3406,7 @@ bool IdealLoopTree::iteration_split_impl(PhaseIdealLoop *phase, Node_List &old_n
// with full checks, but the main-loop with no checks. Remove said checks
// from the main body.
if (should_rce) {
- phase->do_range_check(this, old_new);
+ phase->do_range_check(this);
}
// Double loop body for unrolling. Adjust the minimum-trip test (will do
diff --git a/src/hotspot/share/opto/loopnode.hpp b/src/hotspot/share/opto/loopnode.hpp
index 698e48aadb4ef..07a5e28b23e1a 100644
--- a/src/hotspot/share/opto/loopnode.hpp
+++ b/src/hotspot/share/opto/loopnode.hpp
@@ -1416,7 +1416,7 @@ class PhaseIdealLoop : public PhaseTransform {
}
// Eliminate range-checks and other trip-counter vs loop-invariant tests.
- void do_range_check(IdealLoopTree *loop, Node_List &old_new);
+ void do_range_check(IdealLoopTree* loop);
// Clone loop with an invariant test (that does not exit) and
// insert a clone of the test that selects which version to
diff --git a/src/hotspot/share/opto/vectornode.cpp b/src/hotspot/share/opto/vectornode.cpp
index dedac80d10230..03e1e7ef247f9 100644
--- a/src/hotspot/share/opto/vectornode.cpp
+++ b/src/hotspot/share/opto/vectornode.cpp
@@ -1466,6 +1466,9 @@ bool VectorCastNode::implemented(int opc, uint vlen, BasicType src_type, BasicTy
if (is_java_primitive(dst_type) &&
is_java_primitive(src_type) &&
(vlen > 1) && is_power_of_2(vlen) &&
+ // In rare cases, the input to the VectorCast could be a Replicate node. We need to make sure
+ // creating such a vector is supported as well, so also check the src_type:
+ VectorNode::vector_size_supported_auto_vectorization(src_type, vlen) &&
VectorNode::vector_size_supported_auto_vectorization(dst_type, vlen)) {
int vopc = VectorCastNode::opcode(opc, src_type);
return vopc > 0 && Matcher::match_rule_supported_auto_vectorization(vopc, vlen, dst_type);
diff --git a/src/hotspot/share/prims/jni.cpp b/src/hotspot/share/prims/jni.cpp
index 7bea3441fa5bf..40d3b5062580a 100644
--- a/src/hotspot/share/prims/jni.cpp
+++ b/src/hotspot/share/prims/jni.cpp
@@ -2402,11 +2402,10 @@ static char* get_bad_address() {
static char* bad_address = nullptr;
if (bad_address == nullptr) {
size_t size = os::vm_allocation_granularity();
- bad_address = os::reserve_memory(size);
+ bad_address = os::reserve_memory(size, false, mtInternal);
if (bad_address != nullptr) {
os::protect_memory(bad_address, size, os::MEM_PROT_READ,
/*is_committed*/false);
- MemTracker::record_virtual_memory_tag((void*)bad_address, mtInternal);
}
}
return bad_address;
diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp
index af5f3cf79cabc..d25ee27c0e116 100644
--- a/src/hotspot/share/prims/whitebox.cpp
+++ b/src/hotspot/share/prims/whitebox.cpp
@@ -711,19 +711,11 @@ WB_ENTRY(void, WB_NMTFree(JNIEnv* env, jobject o, jlong mem))
WB_END
WB_ENTRY(jlong, WB_NMTReserveMemory(JNIEnv* env, jobject o, jlong size))
- jlong addr = 0;
-
- addr = (jlong)(uintptr_t)os::reserve_memory(size);
- MemTracker::record_virtual_memory_tag((address)addr, mtTest);
-
- return addr;
+ return (jlong)(uintptr_t)os::reserve_memory(size, false, mtTest);
WB_END
WB_ENTRY(jlong, WB_NMTAttemptReserveMemoryAt(JNIEnv* env, jobject o, jlong addr, jlong size))
- addr = (jlong)(uintptr_t)os::attempt_reserve_memory_at((char*)(uintptr_t)addr, (size_t)size);
- MemTracker::record_virtual_memory_tag((address)addr, mtTest);
-
- return addr;
+ return (jlong)(uintptr_t)os::attempt_reserve_memory_at((char*)(uintptr_t)addr, (size_t)size, false, mtTest);
WB_END
WB_ENTRY(void, WB_NMTCommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size))
diff --git a/src/hotspot/share/runtime/safepointMechanism.cpp b/src/hotspot/share/runtime/safepointMechanism.cpp
index a6aadf5ebc400..c7e3f55eba355 100644
--- a/src/hotspot/share/runtime/safepointMechanism.cpp
+++ b/src/hotspot/share/runtime/safepointMechanism.cpp
@@ -58,9 +58,8 @@ void SafepointMechanism::default_initialize() {
// Polling page
const size_t page_size = os::vm_page_size();
const size_t allocation_size = 2 * page_size;
- char* polling_page = os::reserve_memory(allocation_size);
- os::commit_memory_or_exit(polling_page, allocation_size, false, "Unable to commit Safepoint polling page");
- MemTracker::record_virtual_memory_tag((address)polling_page, mtSafepoint);
+ char* polling_page = os::reserve_memory(allocation_size, !ExecMem, mtSafepoint);
+ os::commit_memory_or_exit(polling_page, allocation_size, !ExecMem, "Unable to commit Safepoint polling page");
char* bad_page = polling_page;
char* good_page = polling_page + page_size;
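
The two pages reserved above implement the classic good/bad polling-page scheme. A standalone POSIX sketch of that layout, illustrative only and not the HotSpot implementation:

```cpp
#include <sys/mman.h>
#include <unistd.h>
#include <cassert>
#include <cstddef>

int main() {
  const size_t page = (size_t)sysconf(_SC_PAGESIZE);
  char* base = (char*)mmap(nullptr, 2 * page, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(base != (char*)MAP_FAILED);
  char* bad_page  = base;        // armed: a poll load here faults, diverting
                                 // the thread onto the safepoint path
  char* good_page = base + page; // disarmed: a poll load here is harmless
  mprotect(bad_page,  page, PROT_NONE);
  mprotect(good_page, page, PROT_READ);
  volatile char probe = *good_page; // a disarmed poll succeeds...
  (void)probe;                      // ...polling bad_page instead would SIGSEGV
  munmap(base, 2 * page);
  return 0;
}
```
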
diff --git a/src/hotspot/share/utilities/debug.cpp b/src/hotspot/share/utilities/debug.cpp
index 7286f70412ae5..07e93ca5710e7 100644
--- a/src/hotspot/share/utilities/debug.cpp
+++ b/src/hotspot/share/utilities/debug.cpp
@@ -715,10 +715,9 @@ struct TestMultipleStaticAssertFormsInClassScope {
// Support for showing register content on asserts/guarantees.
#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
void initialize_assert_poison() {
- char* page = os::reserve_memory(os::vm_page_size());
+ char* page = os::reserve_memory(os::vm_page_size(), !ExecMem, mtInternal);
if (page) {
- MemTracker::record_virtual_memory_tag(page, mtInternal);
- if (os::commit_memory(page, os::vm_page_size(), false) &&
+ if (os::commit_memory(page, os::vm_page_size(), !ExecMem) &&
os::protect_memory(page, os::vm_page_size(), os::MEM_PROT_NONE)) {
g_assert_poison = page;
g_assert_poison_read_only = page;
diff --git a/src/java.base/share/classes/java/io/ObjectInputStream.java b/src/java.base/share/classes/java/io/ObjectInputStream.java
index 31d0aecf83110..a47d2d5a325a9 100644
--- a/src/java.base/share/classes/java/io/ObjectInputStream.java
+++ b/src/java.base/share/classes/java/io/ObjectInputStream.java
@@ -42,7 +42,6 @@
import jdk.internal.event.DeserializationEvent;
import jdk.internal.misc.Unsafe;
import jdk.internal.util.ByteArray;
-import sun.reflect.misc.ReflectUtil;
/**
* An ObjectInputStream deserializes primitive data and objects previously
@@ -1828,12 +1827,6 @@ private ObjectStreamClass readClassDesc(boolean unshared)
};
}
- private boolean isCustomSubclass() {
- // Return true if this class is a custom subclass of ObjectInputStream
- return getClass().getClassLoader()
- != ObjectInputStream.class.getClassLoader();
- }
-
/**
* Reads in and returns class descriptor for a dynamic proxy class. Sets
* passHandle to proxy class descriptor's assigned handle. If proxy class
@@ -1879,12 +1872,6 @@ private ObjectStreamClass readProxyDesc(boolean unshared)
} else if (!Proxy.isProxyClass(cl)) {
throw new InvalidClassException("Not a proxy");
} else {
- // ReflectUtil.checkProxyPackageAccess makes a test
- // equivalent to isCustomSubclass so there's no need
- // to condition this call to isCustomSubclass == true here.
- ReflectUtil.checkProxyPackageAccess(
- getClass().getClassLoader(),
- cl.getInterfaces());
// Filter the interfaces
for (Class<?> clazz : cl.getInterfaces()) {
filterCheck(clazz, -1);
@@ -1954,12 +1941,9 @@ private ObjectStreamClass readNonProxyDesc(boolean unshared)
Class<?> cl = null;
ClassNotFoundException resolveEx = null;
bin.setBlockDataMode(true);
- final boolean checksRequired = isCustomSubclass();
try {
if ((cl = resolveClass(readDesc)) == null) {
resolveEx = new ClassNotFoundException("null class");
- } else if (checksRequired) {
- ReflectUtil.checkPackageAccess(cl);
}
} catch (ClassNotFoundException ex) {
resolveEx = ex;
diff --git a/src/java.base/share/classes/java/io/ObjectOutputStream.java b/src/java.base/share/classes/java/io/ObjectOutputStream.java
index 5225c673705d5..71bda09bd8c8f 100644
--- a/src/java.base/share/classes/java/io/ObjectOutputStream.java
+++ b/src/java.base/share/classes/java/io/ObjectOutputStream.java
@@ -35,7 +35,6 @@
import jdk.internal.util.ByteArray;
import jdk.internal.access.JavaLangAccess;
import jdk.internal.access.SharedSecrets;
-import sun.reflect.misc.ReflectUtil;
import static jdk.internal.util.ModifiedUtf.putChar;
import static jdk.internal.util.ModifiedUtf.utfLen;
@@ -1170,12 +1169,6 @@ private void writeClassDesc(ObjectStreamClass desc, boolean unshared)
}
}
- private boolean isCustomSubclass() {
- // Return true if this class is a custom subclass of ObjectOutputStream
- return getClass().getClassLoader()
- != ObjectOutputStream.class.getClassLoader();
- }
-
/**
* Writes class descriptor representing a dynamic proxy class to stream.
*/
@@ -1193,9 +1186,6 @@ private void writeProxyDesc(ObjectStreamClass desc, boolean unshared)
}
bout.setBlockDataMode(true);
- if (isCustomSubclass()) {
- ReflectUtil.checkPackageAccess(cl);
- }
annotateProxyClass(cl);
bout.setBlockDataMode(false);
bout.writeByte(TC_ENDBLOCKDATA);
@@ -1222,9 +1212,6 @@ private void writeNonProxyDesc(ObjectStreamClass desc, boolean unshared)
Class<?> cl = desc.forClass();
bout.setBlockDataMode(true);
- if (cl != null && isCustomSubclass()) {
- ReflectUtil.checkPackageAccess(cl);
- }
annotateClass(cl);
bout.setBlockDataMode(false);
bout.writeByte(TC_ENDBLOCKDATA);
diff --git a/src/java.base/share/classes/java/lang/Class.java b/src/java.base/share/classes/java/lang/Class.java
index 23b8ac3fb9093..7929dd1a09f11 100644
--- a/src/java.base/share/classes/java/lang/Class.java
+++ b/src/java.base/share/classes/java/lang/Class.java
@@ -60,7 +60,6 @@
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
-import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
@@ -91,7 +90,6 @@
import sun.reflect.generics.repository.ConstructorRepository;
import sun.reflect.generics.scope.ClassScope;
import sun.reflect.annotation.*;
-import sun.reflect.misc.ReflectUtil;
/**
* Instances of the class {@code Class} represent classes and
diff --git a/src/java.base/share/classes/java/lang/classfile/ClassFile.java b/src/java.base/share/classes/java/lang/classfile/ClassFile.java
index db293f415888a..d87d27676bcf8 100644
--- a/src/java.base/share/classes/java/lang/classfile/ClassFile.java
+++ b/src/java.base/share/classes/java/lang/classfile/ClassFile.java
@@ -641,6 +641,12 @@ default List<VerifyError> verify(Path path) throws IOException {
/** The class major version of JAVA_24. */
int JAVA_24_VERSION = 68;
+ /**
+ * The class major version of JAVA_25.
+ * @since 25
+ */
+ int JAVA_25_VERSION = 69;
+
/**
* A minor version number indicating a class uses preview features
* of a Java SE version since 12, for major versions {@value
@@ -652,7 +658,7 @@ default List verify(Path path) throws IOException {
* {@return the latest major Java version}
*/
static int latestMajorVersion() {
- return JAVA_24_VERSION;
+ return JAVA_25_VERSION;
}
/**
diff --git a/src/java.base/share/classes/java/lang/reflect/ClassFileFormatVersion.java b/src/java.base/share/classes/java/lang/reflect/ClassFileFormatVersion.java
index 72abe96a55991..b2fa39e166170 100644
--- a/src/java.base/share/classes/java/lang/reflect/ClassFileFormatVersion.java
+++ b/src/java.base/share/classes/java/lang/reflect/ClassFileFormatVersion.java
@@ -318,6 +318,18 @@ public enum ClassFileFormatVersion {
* The Java Virtual Machine Specification, Java SE 24 Edition
*/
RELEASE_24(68),
+
+ /**
+ * The version introduced by the Java Platform, Standard Edition
+ * 25.
+ *
+ * @since 25
+ *
+ * @see <a href="https://docs.oracle.com/javase/specs/jvms/se25/html/index.html">
+ * The Java Virtual Machine Specification, Java SE 25 Edition</a>
+ */
+ RELEASE_25(69),
; // Reduce code churn when appending new constants
// Note to maintainers: when adding constants for newer releases,
@@ -333,7 +345,7 @@ private ClassFileFormatVersion(int major) {
* {@return the latest class file format version}
*/
public static ClassFileFormatVersion latest() {
- return RELEASE_24;
+ return RELEASE_25;
}
/**
diff --git a/src/java.base/share/classes/java/lang/reflect/Proxy.java b/src/java.base/share/classes/java/lang/reflect/Proxy.java
index 77f3d3e1e7120..83a1520ce082f 100644
--- a/src/java.base/share/classes/java/lang/reflect/Proxy.java
+++ b/src/java.base/share/classes/java/lang/reflect/Proxy.java
@@ -52,7 +52,6 @@
import jdk.internal.misc.VM;
import jdk.internal.loader.ClassLoaderValue;
import jdk.internal.vm.annotation.Stable;
-import sun.reflect.misc.ReflectUtil;
import static java.lang.invoke.MethodType.methodType;
import static java.lang.module.ModuleDescriptor.Modifier.SYNTHETIC;
@@ -984,7 +983,7 @@ public static InvocationHandler getInvocationHandler(Object proxy)
return ih;
}
- private static final String PROXY_PACKAGE_PREFIX = ReflectUtil.PROXY_PACKAGE;
+ private static final String PROXY_PACKAGE_PREFIX = "com.sun.proxy";
/**
* A cache of Method -> MethodHandle for default methods.
diff --git a/src/java.base/share/classes/jdk/internal/org/objectweb/asm/ClassReader.java b/src/java.base/share/classes/jdk/internal/org/objectweb/asm/ClassReader.java
index f8cc49fe25e9d..91995e44ce6cc 100644
--- a/src/java.base/share/classes/jdk/internal/org/objectweb/asm/ClassReader.java
+++ b/src/java.base/share/classes/jdk/internal/org/objectweb/asm/ClassReader.java
@@ -227,7 +227,7 @@ public ClassReader(
this.b = classFileBuffer;
// Check the class' major_version. This field is after the magic and minor_version fields, which
// use 4 and 2 bytes respectively.
- if (checkClassVersion && readShort(classFileOffset + 6) > Opcodes.V24) {
+ if (checkClassVersion && readShort(classFileOffset + 6) > Opcodes.V25) {
throw new IllegalArgumentException(
"Unsupported class file major version " + readShort(classFileOffset + 6));
}
diff --git a/src/java.base/share/classes/jdk/internal/org/objectweb/asm/Opcodes.java b/src/java.base/share/classes/jdk/internal/org/objectweb/asm/Opcodes.java
index b51c8978061fd..12ac4e0a41710 100644
--- a/src/java.base/share/classes/jdk/internal/org/objectweb/asm/Opcodes.java
+++ b/src/java.base/share/classes/jdk/internal/org/objectweb/asm/Opcodes.java
@@ -314,6 +314,7 @@ public interface Opcodes {
int V22 = 0 << 16 | 66;
int V23 = 0 << 16 | 67;
int V24 = 0 << 16 | 68;
+ int V25 = 0 << 16 | 69;
/**
* Version flag indicating that the class is using 'preview' features.
diff --git a/src/java.base/share/classes/sun/reflect/generics/reflectiveObjects/TypeVariableImpl.java b/src/java.base/share/classes/sun/reflect/generics/reflectiveObjects/TypeVariableImpl.java
index 7f2829eda2004..75750d38f2fab 100644
--- a/src/java.base/share/classes/sun/reflect/generics/reflectiveObjects/TypeVariableImpl.java
+++ b/src/java.base/share/classes/sun/reflect/generics/reflectiveObjects/TypeVariableImpl.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,6 @@
import java.lang.reflect.AnnotatedType;
import java.lang.reflect.Constructor;
import java.lang.reflect.GenericDeclaration;
-import java.lang.reflect.Member;
import java.lang.reflect.Method;
import java.lang.reflect.Type;
import java.lang.reflect.TypeVariable;
@@ -41,7 +40,6 @@
import sun.reflect.annotation.AnnotationType;
import sun.reflect.generics.factory.GenericsFactory;
import sun.reflect.generics.tree.FieldTypeSignature;
-import sun.reflect.misc.ReflectUtil;
/**
* Implementation of {@code java.lang.reflect.TypeVariable} interface
@@ -135,13 +133,9 @@ public Type[] getBounds() {
* @since 1.5
*/
public D getGenericDeclaration() {
- if (genericDeclaration instanceof Class> c)
- ReflectUtil.checkPackageAccess(c);
- else if ((genericDeclaration instanceof Method) ||
- (genericDeclaration instanceof Constructor))
- ReflectUtil.conservativeCheckMemberAccess((Member)genericDeclaration);
- else
- throw new AssertionError("Unexpected kind of GenericDeclaration");
+ assert genericDeclaration instanceof Class> ||
+ genericDeclaration instanceof Method ||
+ genericDeclaration instanceof Constructor : "Unexpected kind of GenericDeclaration";
return genericDeclaration;
}
diff --git a/src/java.base/share/classes/sun/reflect/misc/ConstructorUtil.java b/src/java.base/share/classes/sun/reflect/misc/ConstructorUtil.java
deleted file mode 100644
index e0e4233c8a5da..0000000000000
--- a/src/java.base/share/classes/sun/reflect/misc/ConstructorUtil.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package sun.reflect.misc;
-
-import java.lang.reflect.Constructor;
-
-public final class ConstructorUtil {
-
- private ConstructorUtil() {
- }
-
- public static Constructor> getConstructor(Class> cls, Class>[] params)
- throws NoSuchMethodException {
- ReflectUtil.checkPackageAccess(cls);
- return cls.getConstructor(params);
- }
-
- public static Constructor>[] getConstructors(Class> cls) {
- ReflectUtil.checkPackageAccess(cls);
- return cls.getConstructors();
- }
-}
diff --git a/src/java.base/share/classes/sun/reflect/misc/FieldUtil.java b/src/java.base/share/classes/sun/reflect/misc/FieldUtil.java
deleted file mode 100644
index 705597c5ecba8..0000000000000
--- a/src/java.base/share/classes/sun/reflect/misc/FieldUtil.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package sun.reflect.misc;
-
-import java.lang.reflect.Field;
-
-/*
- * Create a trampoline class.
- */
-public final class FieldUtil {
-
- private FieldUtil() {
- }
-
- public static Field getField(Class> cls, String name)
- throws NoSuchFieldException {
- ReflectUtil.checkPackageAccess(cls);
- return cls.getField(name);
- }
-
- public static Field[] getFields(Class> cls) {
- ReflectUtil.checkPackageAccess(cls);
- return cls.getFields();
- }
-}
diff --git a/src/java.base/share/classes/sun/reflect/misc/MethodUtil.java b/src/java.base/share/classes/sun/reflect/misc/MethodUtil.java
index fb03b68db1c66..8d950c9a81888 100644
--- a/src/java.base/share/classes/sun/reflect/misc/MethodUtil.java
+++ b/src/java.base/share/classes/sun/reflect/misc/MethodUtil.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -79,15 +79,9 @@ private MethodUtil() {
public static Method getMethod(Class> cls, String name, Class>[] args)
throws NoSuchMethodException {
- ReflectUtil.checkPackageAccess(cls);
return cls.getMethod(name, args);
}
- public static Method[] getMethods(Class> cls) {
- ReflectUtil.checkPackageAccess(cls);
- return cls.getMethods();
- }
-
/*
* Bounce through the trampoline.
*/
@@ -140,7 +134,6 @@ protected synchronized Class> loadClass(String name, boolean resolve)
throws ClassNotFoundException
{
// First, check if the class has already been loaded
- ReflectUtil.checkPackageAccess(name);
Class> c = findLoadedClass(name);
if (c == null) {
try {
diff --git a/src/java.base/share/classes/sun/reflect/misc/ReflectUtil.java b/src/java.base/share/classes/sun/reflect/misc/ReflectUtil.java
index 86eadc2b2eea1..812e77a0a82d0 100644
--- a/src/java.base/share/classes/sun/reflect/misc/ReflectUtil.java
+++ b/src/java.base/share/classes/sun/reflect/misc/ReflectUtil.java
@@ -25,10 +25,6 @@
package sun.reflect.misc;
-import java.lang.reflect.Member;
-import java.lang.reflect.Method;
-import java.lang.reflect.Modifier;
-import java.lang.reflect.Proxy;
import jdk.internal.reflect.Reflection;
public final class ReflectUtil {
@@ -67,112 +63,4 @@ public static void ensureMemberAccess(Class> currentClass,
target == null ? null : target.getClass(),
modifiers);
}
-
- /**
- * Does nothing.
- */
- public static void conservativeCheckMemberAccess(Member m) {
- }
-
- /**
- * Does nothing.
- */
- public static void checkPackageAccess(Class> clazz) {
- }
-
- /**
- * Does nothing
- */
- public static void checkPackageAccess(String name) {
- }
-
- /**
- * Returns true.
- */
- public static boolean isPackageAccessible(Class> clazz) {
- return true;
- }
-
- /**
- * Returns false.
- */
- public static boolean needsPackageAccessCheck(ClassLoader from, ClassLoader to) {
- return false;
- }
-
- /**
- * Does nothing
- */
- public static void checkProxyPackageAccess(Class> clazz) {
- }
-
- /**
- * Does nothing.
- */
- public static void checkProxyPackageAccess(ClassLoader ccl,
- Class>... interfaces) {
- }
-
- // Note that bytecode instrumentation tools may exclude 'sun.*'
- // classes but not generated proxy classes and so keep it in com.sun.*
- public static final String PROXY_PACKAGE = "com.sun.proxy";
-
- /**
- * Test if the given class is a proxy class that implements
- * non-public interface. Such proxy class may be in a non-restricted
- * package that bypasses checkPackageAccess.
- */
- public static boolean isNonPublicProxyClass(Class> cls) {
- if (!Proxy.isProxyClass(cls)) {
- return false;
- }
- return !Modifier.isPublic(cls.getModifiers());
- }
-
- /**
- * Check if the given method is a method declared in the proxy interface
- * implemented by the given proxy instance.
- *
- * @param proxy a proxy instance
- * @param method an interface method dispatched to a InvocationHandler
- *
- * @throws IllegalArgumentException if the given proxy or method is invalid.
- */
- public static void checkProxyMethod(Object proxy, Method method) {
- // check if it is a valid proxy instance
- if (proxy == null || !Proxy.isProxyClass(proxy.getClass())) {
- throw new IllegalArgumentException("Not a Proxy instance");
- }
- if (Modifier.isStatic(method.getModifiers())) {
- throw new IllegalArgumentException("Can't handle static method");
- }
-
- Class> c = method.getDeclaringClass();
- if (c == Object.class) {
- String name = method.getName();
- if (name.equals("hashCode") || name.equals("equals") || name.equals("toString")) {
- return;
- }
- }
-
- if (isSuperInterface(proxy.getClass(), c)) {
- return;
- }
-
- // disallow any method not declared in one of the proxy interfaces
- throw new IllegalArgumentException("Can't handle: " + method);
- }
-
- private static boolean isSuperInterface(Class> c, Class> intf) {
- for (Class> i : c.getInterfaces()) {
- if (i == intf) {
- return true;
- }
- if (isSuperInterface(i, intf)) {
- return true;
- }
- }
- return false;
- }
-
}
diff --git a/src/java.compiler/share/classes/javax/lang/model/SourceVersion.java b/src/java.compiler/share/classes/javax/lang/model/SourceVersion.java
index 59461b54a2bba..9260ce532f160 100644
--- a/src/java.compiler/share/classes/javax/lang/model/SourceVersion.java
+++ b/src/java.compiler/share/classes/javax/lang/model/SourceVersion.java
@@ -444,6 +444,18 @@ public enum SourceVersion {
* The Java Language Specification, Java SE 24 Edition
*/
RELEASE_24,
+
+ /**
+ * The version introduced by the Java Platform, Standard Edition
+ * 25.
+ *
+ * @since 25
+ *
+ * @see
+ * The Java Language Specification, Java SE 25 Edition
+ */
+ RELEASE_25,
; // Reduce code churn when appending new constants
// Note that when adding constants for newer releases, the
@@ -453,7 +465,7 @@ public enum SourceVersion {
* {@return the latest source version that can be modeled}
*/
public static SourceVersion latest() {
- return RELEASE_24;
+ return RELEASE_25;
}
private static final SourceVersion latestSupported = getLatestSupported();
@@ -468,7 +480,7 @@ public static SourceVersion latest() {
private static SourceVersion getLatestSupported() {
int intVersion = Runtime.version().feature();
return (intVersion >= 11) ?
- valueOf("RELEASE_" + Math.min(24, intVersion)):
+ valueOf("RELEASE_" + Math.min(25, intVersion)):
RELEASE_10;
}
diff --git a/src/java.compiler/share/classes/javax/lang/model/util/AbstractAnnotationValueVisitor14.java b/src/java.compiler/share/classes/javax/lang/model/util/AbstractAnnotationValueVisitor14.java
index 8940edd685f5a..4d36781c58488 100644
--- a/src/java.compiler/share/classes/javax/lang/model/util/AbstractAnnotationValueVisitor14.java
+++ b/src/java.compiler/share/classes/javax/lang/model/util/AbstractAnnotationValueVisitor14.java
@@ -44,7 +44,7 @@
* @see AbstractAnnotationValueVisitor9
* @since 14
*/
-@SupportedSourceVersion(RELEASE_24)
+@SupportedSourceVersion(RELEASE_25)
public abstract class AbstractAnnotationValueVisitor14 extends AbstractAnnotationValueVisitor9 {
/**
diff --git a/src/java.compiler/share/classes/javax/lang/model/util/AbstractAnnotationValueVisitorPreview.java b/src/java.compiler/share/classes/javax/lang/model/util/AbstractAnnotationValueVisitorPreview.java
index 724ebe593790c..4afcc7fbf1db3 100644
--- a/src/java.compiler/share/classes/javax/lang/model/util/AbstractAnnotationValueVisitorPreview.java
+++ b/src/java.compiler/share/classes/javax/lang/model/util/AbstractAnnotationValueVisitorPreview.java
@@ -50,7 +50,7 @@
* @see AbstractAnnotationValueVisitor14
* @since 23
*/
-@SupportedSourceVersion(RELEASE_24)
+@SupportedSourceVersion(RELEASE_25)
@PreviewFeature(feature=PreviewFeature.Feature.LANGUAGE_MODEL, reflective=true)
public abstract class AbstractAnnotationValueVisitorPreview extends AbstractAnnotationValueVisitor14 {
diff --git a/src/java.compiler/share/classes/javax/lang/model/util/AbstractElementVisitor14.java b/src/java.compiler/share/classes/javax/lang/model/util/AbstractElementVisitor14.java
index fb4ea7681ccae..3a57ef18c5bb2 100644
--- a/src/java.compiler/share/classes/javax/lang/model/util/AbstractElementVisitor14.java
+++ b/src/java.compiler/share/classes/javax/lang/model/util/AbstractElementVisitor14.java
@@ -50,7 +50,7 @@
* @see AbstractElementVisitor9
* @since 16
*/
-@SupportedSourceVersion(RELEASE_24)
+@SupportedSourceVersion(RELEASE_25)
public abstract class AbstractElementVisitor14 extends AbstractElementVisitor9 {
/**
* Constructor for concrete subclasses to call.
diff --git a/src/java.compiler/share/classes/javax/lang/model/util/AbstractElementVisitorPreview.java b/src/java.compiler/share/classes/javax/lang/model/util/AbstractElementVisitorPreview.java
index 7297e7c0c7b26..b9642b203fe10 100644
--- a/src/java.compiler/share/classes/javax/lang/model/util/AbstractElementVisitorPreview.java
+++ b/src/java.compiler/share/classes/javax/lang/model/util/AbstractElementVisitorPreview.java
@@ -53,7 +53,7 @@
* @see AbstractElementVisitor14
* @since 23
*/
-@SupportedSourceVersion(RELEASE_24)
+@SupportedSourceVersion(RELEASE_25)
@PreviewFeature(feature=PreviewFeature.Feature.LANGUAGE_MODEL, reflective=true)
public abstract class AbstractElementVisitorPreview extends AbstractElementVisitor14 {
/**
diff --git a/src/java.compiler/share/classes/javax/lang/model/util/AbstractTypeVisitor14.java b/src/java.compiler/share/classes/javax/lang/model/util/AbstractTypeVisitor14.java
index ff5fb44e1f1f2..eb96f4abb997a 100644
--- a/src/java.compiler/share/classes/javax/lang/model/util/AbstractTypeVisitor14.java
+++ b/src/java.compiler/share/classes/javax/lang/model/util/AbstractTypeVisitor14.java
@@ -47,7 +47,7 @@
* @see AbstractTypeVisitor9
* @since 14
*/
-@SupportedSourceVersion(RELEASE_24)
+@SupportedSourceVersion(RELEASE_25)
public abstract class AbstractTypeVisitor14 extends AbstractTypeVisitor9 {
/**
* Constructor for concrete subclasses to call.
diff --git a/src/java.compiler/share/classes/javax/lang/model/util/AbstractTypeVisitorPreview.java b/src/java.compiler/share/classes/javax/lang/model/util/AbstractTypeVisitorPreview.java
index 823bad0748acd..74b007356d483 100644
--- a/src/java.compiler/share/classes/javax/lang/model/util/AbstractTypeVisitorPreview.java
+++ b/src/java.compiler/share/classes/javax/lang/model/util/AbstractTypeVisitorPreview.java
@@ -53,7 +53,7 @@
* @see AbstractTypeVisitor14
* @since 23
*/
-@SupportedSourceVersion(RELEASE_24)
+@SupportedSourceVersion(RELEASE_25)
@PreviewFeature(feature=PreviewFeature.Feature.LANGUAGE_MODEL, reflective=true)
public abstract class AbstractTypeVisitorPreview extends AbstractTypeVisitor14 {
/**
diff --git a/src/java.compiler/share/classes/javax/lang/model/util/ElementKindVisitor14.java b/src/java.compiler/share/classes/javax/lang/model/util/ElementKindVisitor14.java
index d52b62e8e9f5d..d7941d6e15392 100644
--- a/src/java.compiler/share/classes/javax/lang/model/util/ElementKindVisitor14.java
+++ b/src/java.compiler/share/classes/javax/lang/model/util/ElementKindVisitor14.java
@@ -61,7 +61,7 @@
* @see ElementKindVisitor9
* @since 16
*/
-@SupportedSourceVersion(RELEASE_24)
+@SupportedSourceVersion(RELEASE_25)
public class ElementKindVisitor14 extends ElementKindVisitor9 {
/**
* Constructor for concrete subclasses; uses {@code null} for the
diff --git a/src/java.compiler/share/classes/javax/lang/model/util/ElementKindVisitorPreview.java b/src/java.compiler/share/classes/javax/lang/model/util/ElementKindVisitorPreview.java
index 71d5f15fc85b9..868e89c23eb02 100644
--- a/src/java.compiler/share/classes/javax/lang/model/util/ElementKindVisitorPreview.java
+++ b/src/java.compiler/share/classes/javax/lang/model/util/ElementKindVisitorPreview.java
@@ -67,7 +67,7 @@
* @see ElementKindVisitor14
* @since 23
*/
-@SupportedSourceVersion(RELEASE_24)
+@SupportedSourceVersion(RELEASE_25)
@PreviewFeature(feature=PreviewFeature.Feature.LANGUAGE_MODEL, reflective=true)
public class ElementKindVisitorPreview extends ElementKindVisitor14 {
/**
diff --git a/src/java.compiler/share/classes/javax/lang/model/util/ElementScanner14.java b/src/java.compiler/share/classes/javax/lang/model/util/ElementScanner14.java
index 2f6fb0e03a0d7..e0c05ab228eff 100644
--- a/src/java.compiler/share/classes/javax/lang/model/util/ElementScanner14.java
+++ b/src/java.compiler/share/classes/javax/lang/model/util/ElementScanner14.java
@@ -77,7 +77,7 @@
* @see ElementScanner9
* @since 16
*/
-@SupportedSourceVersion(RELEASE_24)
+@SupportedSourceVersion(RELEASE_25)
public class ElementScanner14 extends ElementScanner9 {
/**
* Constructor for concrete subclasses; uses {@code null} for the
diff --git a/src/java.compiler/share/classes/javax/lang/model/util/ElementScannerPreview.java b/src/java.compiler/share/classes/javax/lang/model/util/ElementScannerPreview.java
index 85c30afcf2bc2..6d80aa8c66182 100644
--- a/src/java.compiler/share/classes/javax/lang/model/util/ElementScannerPreview.java
+++ b/src/java.compiler/share/classes/javax/lang/model/util/ElementScannerPreview.java
@@ -81,7 +81,7 @@
* @see ElementScanner14
* @since 23
*/
-@SupportedSourceVersion(RELEASE_24)
+@SupportedSourceVersion(RELEASE_25)
@PreviewFeature(feature=PreviewFeature.Feature.LANGUAGE_MODEL, reflective=true)
public class ElementScannerPreview extends ElementScanner14 {
/**
diff --git a/src/java.compiler/share/classes/javax/lang/model/util/SimpleAnnotationValueVisitor14.java b/src/java.compiler/share/classes/javax/lang/model/util/SimpleAnnotationValueVisitor14.java
index e0d36308454c1..4682e2a7ee84e 100644
--- a/src/java.compiler/share/classes/javax/lang/model/util/SimpleAnnotationValueVisitor14.java
+++ b/src/java.compiler/share/classes/javax/lang/model/util/SimpleAnnotationValueVisitor14.java
@@ -52,7 +52,7 @@
* @see SimpleAnnotationValueVisitor9
* @since 14
*/
-@SupportedSourceVersion(RELEASE_24)
+@SupportedSourceVersion(RELEASE_25)
public class SimpleAnnotationValueVisitor14 extends SimpleAnnotationValueVisitor9 {
/**
* Constructor for concrete subclasses; uses {@code null} for the
diff --git a/src/java.compiler/share/classes/javax/lang/model/util/SimpleAnnotationValueVisitorPreview.java b/src/java.compiler/share/classes/javax/lang/model/util/SimpleAnnotationValueVisitorPreview.java
index c0444c91060a2..a477f33017c95 100644
--- a/src/java.compiler/share/classes/javax/lang/model/util/SimpleAnnotationValueVisitorPreview.java
+++ b/src/java.compiler/share/classes/javax/lang/model/util/SimpleAnnotationValueVisitorPreview.java
@@ -58,7 +58,7 @@
* @see SimpleAnnotationValueVisitor14
* @since 23
*/
-@SupportedSourceVersion(RELEASE_24)
+@SupportedSourceVersion(RELEASE_25)
@PreviewFeature(feature=PreviewFeature.Feature.LANGUAGE_MODEL, reflective=true)
public class SimpleAnnotationValueVisitorPreview extends SimpleAnnotationValueVisitor14 {
/**
diff --git a/src/java.compiler/share/classes/javax/lang/model/util/SimpleElementVisitor14.java b/src/java.compiler/share/classes/javax/lang/model/util/SimpleElementVisitor14.java
index 8c47818ab3042..db97e59152f94 100644
--- a/src/java.compiler/share/classes/javax/lang/model/util/SimpleElementVisitor14.java
+++ b/src/java.compiler/share/classes/javax/lang/model/util/SimpleElementVisitor14.java
@@ -58,7 +58,7 @@
* @see SimpleElementVisitor9
* @since 16
*/
-@SupportedSourceVersion(RELEASE_24)
+@SupportedSourceVersion(RELEASE_25)
public class SimpleElementVisitor14 extends SimpleElementVisitor9 {
/**
* Constructor for concrete subclasses; uses {@code null} for the
diff --git a/src/java.compiler/share/classes/javax/lang/model/util/SimpleElementVisitorPreview.java b/src/java.compiler/share/classes/javax/lang/model/util/SimpleElementVisitorPreview.java
index 79da686ee0869..0d9914f3852ef 100644
--- a/src/java.compiler/share/classes/javax/lang/model/util/SimpleElementVisitorPreview.java
+++ b/src/java.compiler/share/classes/javax/lang/model/util/SimpleElementVisitorPreview.java
@@ -61,7 +61,7 @@
* @see SimpleElementVisitor14
* @since 23
*/
-@SupportedSourceVersion(RELEASE_24)
+@SupportedSourceVersion(RELEASE_25)
@PreviewFeature(feature=PreviewFeature.Feature.LANGUAGE_MODEL, reflective=true)
public class SimpleElementVisitorPreview extends SimpleElementVisitor14 {
/**
diff --git a/src/java.compiler/share/classes/javax/lang/model/util/SimpleTypeVisitor14.java b/src/java.compiler/share/classes/javax/lang/model/util/SimpleTypeVisitor14.java
index 7ee6c25526d0f..3f962137987dc 100644
--- a/src/java.compiler/share/classes/javax/lang/model/util/SimpleTypeVisitor14.java
+++ b/src/java.compiler/share/classes/javax/lang/model/util/SimpleTypeVisitor14.java
@@ -56,7 +56,7 @@
* @see SimpleTypeVisitor9
* @since 14
*/
-@SupportedSourceVersion(RELEASE_24)
+@SupportedSourceVersion(RELEASE_25)
public class SimpleTypeVisitor14 extends SimpleTypeVisitor9 {
/**
* Constructor for concrete subclasses; uses {@code null} for the
diff --git a/src/java.compiler/share/classes/javax/lang/model/util/SimpleTypeVisitorPreview.java b/src/java.compiler/share/classes/javax/lang/model/util/SimpleTypeVisitorPreview.java
index 1efbe7108370c..13a0ad41d7ed8 100644
--- a/src/java.compiler/share/classes/javax/lang/model/util/SimpleTypeVisitorPreview.java
+++ b/src/java.compiler/share/classes/javax/lang/model/util/SimpleTypeVisitorPreview.java
@@ -62,7 +62,7 @@
* @see SimpleTypeVisitor14
* @since 23
*/
-@SupportedSourceVersion(RELEASE_24)
+@SupportedSourceVersion(RELEASE_25)
@PreviewFeature(feature=PreviewFeature.Feature.LANGUAGE_MODEL, reflective=true)
public class SimpleTypeVisitorPreview extends SimpleTypeVisitor14 {
/**
diff --git a/src/java.compiler/share/classes/javax/lang/model/util/TypeKindVisitor14.java b/src/java.compiler/share/classes/javax/lang/model/util/TypeKindVisitor14.java
index 9c86f855564d6..57d43e77500b5 100644
--- a/src/java.compiler/share/classes/javax/lang/model/util/TypeKindVisitor14.java
+++ b/src/java.compiler/share/classes/javax/lang/model/util/TypeKindVisitor14.java
@@ -61,7 +61,7 @@
* @see TypeKindVisitor9
* @since 14
*/
-@SupportedSourceVersion(RELEASE_24)
+@SupportedSourceVersion(RELEASE_25)
public class TypeKindVisitor14 extends TypeKindVisitor9 {
/**
* Constructor for concrete subclasses to call; uses {@code null}
diff --git a/src/java.compiler/share/classes/javax/lang/model/util/TypeKindVisitorPreview.java b/src/java.compiler/share/classes/javax/lang/model/util/TypeKindVisitorPreview.java
index 70e0498f5658b..3ae19353a0bbb 100644
--- a/src/java.compiler/share/classes/javax/lang/model/util/TypeKindVisitorPreview.java
+++ b/src/java.compiler/share/classes/javax/lang/model/util/TypeKindVisitorPreview.java
@@ -66,7 +66,7 @@
* @see TypeKindVisitor14
* @since 23
*/
-@SupportedSourceVersion(RELEASE_24)
+@SupportedSourceVersion(RELEASE_25)
@PreviewFeature(feature=PreviewFeature.Feature.LANGUAGE_MODEL, reflective=true)
public class TypeKindVisitorPreview extends TypeKindVisitor14 {
/**
diff --git a/src/java.desktop/macosx/classes/com/apple/laf/AquaCaret.java b/src/java.desktop/macosx/classes/com/apple/laf/AquaCaret.java
index 9b0981aa8b29a..3c1b147d1eab4 100644
--- a/src/java.desktop/macosx/classes/com/apple/laf/AquaCaret.java
+++ b/src/java.desktop/macosx/classes/com/apple/laf/AquaCaret.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -189,20 +189,25 @@ protected synchronized void damage(final Rectangle r) {
// intersection of the caret rectangle and the component less the border, if any.
final Rectangle caretRect = new Rectangle(x, y, width, height);
final Border border = getComponent().getBorder();
- if (border != null) {
- final Rectangle alloc = getComponent().getBounds();
- alloc.x = alloc.y = 0;
+ final Rectangle alloc = getComponent().getBounds();
+ alloc.x = alloc.y = 0;
+ if (border != null && border.isBorderOpaque()) {
final Insets borderInsets = border.getBorderInsets(getComponent());
alloc.x += borderInsets.left;
alloc.y += borderInsets.top;
alloc.width -= borderInsets.left + borderInsets.right;
alloc.height -= borderInsets.top + borderInsets.bottom;
Rectangle2D.intersect(caretRect, alloc, caretRect);
+ x = caretRect.x;
+ y = caretRect.y;
+ width = Math.max(caretRect.width, 1);
+ height = Math.max(caretRect.height, 1);
+ } else {
+ x = alloc.x;
+ y = alloc.y;
+ width = alloc.width;
+ height = alloc.height;
}
- x = caretRect.x;
- y = caretRect.y;
- width = Math.max(caretRect.width, 1);
- height = Math.max(caretRect.height, 1);
repaint();
}
diff --git a/src/java.desktop/share/classes/java/awt/font/TextLine.java b/src/java.desktop/share/classes/java/awt/font/TextLine.java
index 1e4b9c784a69c..681fcd900837a 100644
--- a/src/java.desktop/share/classes/java/awt/font/TextLine.java
+++ b/src/java.desktop/share/classes/java/awt/font/TextLine.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1021,7 +1021,7 @@ public static TextLineComponent[] createComponentsOnRun(int runStart,
factory.createExtended(font, cm, decorator, startPos, startPos + lmCount);
++numComponents;
- if (numComponents >= components.length) {
+ if (numComponents > components.length) {
components = expandArray(components);
}
@@ -1076,7 +1076,7 @@ public static TextLineComponent[] getComponents(StyledParagraph styledParagraph,
pos = chunkLimit;
++numComponents;
- if (numComponents >= tempComponents.length) {
+ if (numComponents > tempComponents.length) {
tempComponents = expandArray(tempComponents);
}
diff --git a/src/java.management/share/classes/com/sun/jmx/mbeanserver/MBeanInstantiator.java b/src/java.management/share/classes/com/sun/jmx/mbeanserver/MBeanInstantiator.java
index 53eeb721a13c2..b1fcf7999c620 100644
--- a/src/java.management/share/classes/com/sun/jmx/mbeanserver/MBeanInstantiator.java
+++ b/src/java.management/share/classes/com/sun/jmx/mbeanserver/MBeanInstantiator.java
@@ -46,7 +46,6 @@
import javax.management.RuntimeErrorException;
import javax.management.RuntimeMBeanException;
import javax.management.RuntimeOperationsException;
-import sun.reflect.misc.ConstructorUtil;
/**
* Implements the MBeanInstantiator interface. Provides methods for
@@ -681,7 +680,7 @@ static Class>[] loadSignatureClasses(String signature[],
private Constructor> findConstructor(Class> c, Class>[] params) {
try {
- return ConstructorUtil.getConstructor(c, params);
+ return c.getConstructor(params);
} catch (Exception e) {
return null;
}
diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Source.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Source.java
index 572a7b126750c..f1c25d032d05c 100644
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Source.java
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Source.java
@@ -148,6 +148,11 @@ public enum Source {
* 24, tbd
*/
JDK24("24"),
+
+ /**
+ * 25, tbd
+ */
+ JDK25("25"),
; // Reduce code churn when appending new constants
private static final Context.Key