diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
index a7a2006aeb1..f304eaa4fd2 100644
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
@@ -185,7 +185,7 @@ address NativeCall::destination() const {
 //
 // Used in the runtime linkage of calls; see class CompiledIC.
 void NativeCall::set_destination_mt_safe(address dest) {
-  assert((Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
+  assert((CodeCache_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
          CompiledICLocker::is_safe(addr_at(0)),
          "concurrent code patching");
 
diff --git a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp
index f60f3f147ae..97784b42837 100644
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp
@@ -92,10 +92,10 @@ address NativeCall::destination() const {
 // Used in the runtime linkage of calls; see class CompiledIC.
 //
 // Add parameter assert_lock to switch off assertion
-// during code generation, where no patching lock is needed.
+// during code generation, where no lock is needed.
 void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
   assert(!assert_lock ||
-         (Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
+         (CodeCache_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
          CompiledICLocker::is_safe(addr_at(0)),
          "concurrent code patching");
 
diff --git a/src/hotspot/cpu/riscv/nativeInst_riscv.cpp b/src/hotspot/cpu/riscv/nativeInst_riscv.cpp
index 9cfc7abb691..51de6ca7f45 100644
--- a/src/hotspot/cpu/riscv/nativeInst_riscv.cpp
+++ b/src/hotspot/cpu/riscv/nativeInst_riscv.cpp
@@ -138,10 +138,10 @@ address NativeCall::destination() const {
 // Used in the runtime linkage of calls; see class CompiledIC.
 //
 // Add parameter assert_lock to switch off assertion
-// during code generation, where no patching lock is needed.
+// during code generation, where no lock is needed.
 void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
   assert(!assert_lock ||
-         (Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
+         (CodeCache_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
          CompiledICLocker::is_safe(addr_at(0)),
          "concurrent code patching");
 
diff --git a/src/hotspot/cpu/s390/nativeInst_s390.cpp b/src/hotspot/cpu/s390/nativeInst_s390.cpp
index 95178e9ae74..fc43641cb31 100644
--- a/src/hotspot/cpu/s390/nativeInst_s390.cpp
+++ b/src/hotspot/cpu/s390/nativeInst_s390.cpp
@@ -658,8 +658,8 @@ void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
 
 void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
   assert(((intptr_t)instr_addr & (BytesPerWord-1)) == 0, "requirement for mt safe patching");
-  // Bytes_after_jump cannot change, because we own the Patching_lock.
-  assert(Patching_lock->owned_by_self(), "must hold lock to patch instruction");
+  // Bytes_after_jump cannot change, because we own the CodeCache_lock.
+  assert(CodeCache_lock->owned_by_self(), "must hold lock to patch instruction");
   intptr_t bytes_after_jump = (*(intptr_t*)instr_addr) & 0x000000000000ffffL;  // 2 bytes after jump.
   intptr_t load_const_bytes = (*(intptr_t*)code_buffer) & 0xffffffffffff0000L;
   *(intptr_t*)instr_addr = load_const_bytes | bytes_after_jump;
diff --git a/src/hotspot/cpu/x86/nativeInst_x86.cpp b/src/hotspot/cpu/x86/nativeInst_x86.cpp
index 1df8c78c99d..ddedd01d915 100644
--- a/src/hotspot/cpu/x86/nativeInst_x86.cpp
+++ b/src/hotspot/cpu/x86/nativeInst_x86.cpp
@@ -218,7 +218,7 @@ void NativeCall::insert(address code_pos, address entry) {
 // (spinlock). Then patches the last byte, and then atomically replaces
 // the jmp's with the first 4 byte of the new instruction.
 void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
-  assert(Patching_lock->is_locked() ||
+  assert(CodeCache_lock->is_locked() ||
          SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
   assert (instr_addr != nullptr, "illegal address for code patching");
 
@@ -281,7 +281,7 @@ void NativeCall::set_destination_mt_safe(address dest) {
   debug_only(verify());
   // Make sure patching code is locked. No two threads can patch at the same
   // time but one may be executing this code.
-  assert(Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint() ||
+  assert(CodeCache_lock->is_locked() || SafepointSynchronize::is_at_safepoint() ||
          CompiledICLocker::is_safe(instruction_address()), "concurrent code patching");
   // Both C1 and C2 should now be generating code which aligns the patched address
   // to be within a single cache line.
diff --git a/src/hotspot/share/c1/c1_Runtime1.cpp b/src/hotspot/share/c1/c1_Runtime1.cpp
index 41a6b151b28..4d28deb7cd3 100644
--- a/src/hotspot/share/c1/c1_Runtime1.cpp
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp
@@ -880,7 +880,7 @@ static Klass* resolve_field_return_klass(const methodHandle& caller, int bci, TR
 // movl reg, [reg1 + <const>] (for field offsets)
 // jmp continue
 //
-// patch_stub: jmp Runtim1::patch_code (through a runtime stub)
+// patch_stub: jmp Runtime1::patch_code (through a runtime stub)
 //             jmp patch_site
 //
 // If the class is being initialized the patch body is rewritten and
@@ -1100,7 +1100,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
   // Now copy code back
 
   {
-    MutexLocker ml_patch (current, Patching_lock, Mutex::_no_safepoint_check_flag);
+    MutexLocker ml_code (current, CodeCache_lock, Mutex::_no_safepoint_check_flag);
     //
     // Deoptimization may have happened while we waited for the lock.
     // In that case we don't bother to do any patching we just return
@@ -1265,12 +1265,8 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
         }
       }
     }
-  }
-
-  // If we are patching in a non-perm oop, make sure the nmethod
-  // is on the right list.
-  {
-    MutexLocker ml_code (current, CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    // If we are patching in a non-perm oop, make sure the nmethod
+    // is on the right list.
     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
     guarantee(nm != nullptr, "only nmethods can contain non-perm oops");
 
diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp
index 500ddbc2fe4..3808ee26988 100644
--- a/src/hotspot/share/code/nmethod.cpp
+++ b/src/hotspot/share/code/nmethod.cpp
@@ -1389,7 +1389,7 @@ bool nmethod::make_not_entrant() {
   } // leave critical region under CompiledMethod_lock
 
 #if INCLUDE_JVMCI
-  // Invalidate can't occur while holding the Patching lock
+  // Invalidate can't occur while holding the CompiledMethod_lock
   JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
   if (nmethod_data != nullptr) {
     nmethod_data->invalidate_nmethod_mirror(this);
diff --git a/src/hotspot/share/runtime/mutexLocker.cpp b/src/hotspot/share/runtime/mutexLocker.cpp
index 41bc71fc241..467fdb5a29a 100644
--- a/src/hotspot/share/runtime/mutexLocker.cpp
+++ b/src/hotspot/share/runtime/mutexLocker.cpp
@@ -36,7 +36,6 @@
 
 // Mutexes used in the VM (see comment in mutexLocker.hpp):
 
-Mutex*   Patching_lock                = nullptr;
 Mutex*   CompiledMethod_lock          = nullptr;
 Monitor* SystemDictionary_lock        = nullptr;
 Mutex*   InvokeMethodTypeTable_lock   = nullptr;
@@ -233,7 +232,6 @@
   MUTEX_DEFN(Metaspace_lock                  , PaddedMutex  , nosafepoint-3);
   MUTEX_DEFN(MetaspaceCritical_lock          , PaddedMonitor, nosafepoint-1);
 
-  MUTEX_DEFN(Patching_lock                   , PaddedMutex  , nosafepoint);      // used for safepointing and code patching.
   MUTEX_DEFN(MonitorDeflation_lock           , PaddedMonitor, nosafepoint);      // used for monitor deflation thread operations
   MUTEX_DEFN(Service_lock                    , PaddedMonitor, service);          // used for service thread operations
 
diff --git a/src/hotspot/share/runtime/mutexLocker.hpp b/src/hotspot/share/runtime/mutexLocker.hpp
index e039312ea31..cf280139dcc 100644
--- a/src/hotspot/share/runtime/mutexLocker.hpp
+++ b/src/hotspot/share/runtime/mutexLocker.hpp
@@ -31,7 +31,6 @@
 
 // Mutexes used in the VM.
 
-extern Mutex*   Patching_lock;                 // a lock used to guard code patching of compiled code
 extern Mutex*   CompiledMethod_lock;           // a lock used to guard a compiled method and OSR queues
 extern Monitor* SystemDictionary_lock;         // a lock on the system dictionary
 extern Mutex*   InvokeMethodTypeTable_lock;
diff --git a/test/hotspot/jtreg/compiler/c1/TestConcurrentPatching.java b/test/hotspot/jtreg/compiler/c1/TestConcurrentPatching.java
new file mode 100644
index 00000000000..6a7179d0ce4
--- /dev/null
+++ b/test/hotspot/jtreg/compiler/c1/TestConcurrentPatching.java
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.lang.reflect.Method;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.util.ArrayList;
+
+/**
+ * @test
+ * @bug 8340313
+ * @summary Test that concurrent patching of oop immediates is thread-safe in C1.
+ * @run main/othervm/timeout=480 -Xcomp -XX:CompileCommand=compileonly,TestConcurrentPatching::* -XX:TieredStopAtLevel=1 TestConcurrentPatching
+ */
+
+class MyClass { }
+
+class Holder {
+    public static final MyClass OBJ1 = null;
+    public static final MyClass OBJ2 = null;
+    public static final MyClass OBJ3 = null;
+    public static final MyClass OBJ4 = null;
+    public static final MyClass OBJ5 = null;
+    public static final MyClass OBJ6 = null;
+    public static final MyClass OBJ7 = null;
+    public static final MyClass OBJ8 = null;
+    public static final MyClass OBJ9 = null;
+    public static final MyClass OBJ10 = null;
+    public static final MyClass OBJ11 = null;
+    public static final MyClass OBJ12 = null;
+    public static final MyClass OBJ13 = null;
+    public static final MyClass OBJ14 = null;
+    public static final MyClass OBJ15 = null;
+    public static final MyClass OBJ16 = null;
+    public static final MyClass OBJ17 = null;
+    public static final MyClass OBJ18 = null;
+    public static final MyClass OBJ19 = null;
+    public static final MyClass OBJ20 = null;
+}
+
+public class TestConcurrentPatching {
+    // Increase to 100_000 for a good chance of reproducing the issue with a single run
+    static final int ITERATIONS = 1000;
+
+    static Object field;
+
+    // 'Holder' class is unloaded on first execution and therefore field
+    // accesses require patching when the method is C1 compiled (with -Xcomp).
+    public static void test() {
+        field = Holder.OBJ1;
+        field = Holder.OBJ2;
+        field = Holder.OBJ3;
+        field = Holder.OBJ4;
+        field = Holder.OBJ5;
+        field = Holder.OBJ6;
+        field = Holder.OBJ7;
+        field = Holder.OBJ8;
+        field = Holder.OBJ9;
+        field = Holder.OBJ10;
+        field = Holder.OBJ11;
+        field = Holder.OBJ12;
+        field = Holder.OBJ13;
+        field = Holder.OBJ14;
+        field = Holder.OBJ15;
+        field = Holder.OBJ16;
+        field = Holder.OBJ17;
+        field = Holder.OBJ18;
+        field = Holder.OBJ19;
+        field = Holder.OBJ20;
+    }
+
+    // Appendix of invokedynamic call sites is unloaded on first execution and
+    // therefore requires patching when the method is C1 compiled (with -Xcomp).
+    public static void testIndy() throws Throwable {
+        field = (Runnable) () -> { };
+        field = (Runnable) () -> { };
+        field = (Runnable) () -> { };
+        field = (Runnable) () -> { };
+        field = (Runnable) () -> { };
+        field = (Runnable) () -> { };
+        field = (Runnable) () -> { };
+        field = (Runnable) () -> { };
+        field = (Runnable) () -> { };
+        field = (Runnable) () -> { };
+    }
+
+    // Run 'test' by multiple threads to trigger concurrent patching of field accesses
+    static void runWithThreads(Method method) {
+        ArrayList<Thread> threads = new ArrayList<>();
+        for (int threadIdx = 0; threadIdx < 10; threadIdx++) {
+            threads.add(new Thread(() -> {
+                try {
+                    method.invoke(null);
+                } catch (Exception e) {
+                    throw new IllegalStateException(e);
+                }
+            }));
+        }
+        threads.forEach(Thread::start);
+        threads.forEach(t -> {
+            try {
+                t.join();
+            } catch (Throwable e) {
+                throw new IllegalStateException(e);
+            }
+        });
+    }
+
+    public static void main(String[] args) throws Exception {
+        Class thisClass = TestConcurrentPatching.class;
+        ClassLoader defaultLoader = thisClass.getClassLoader();
+        URL classesDir = thisClass.getProtectionDomain().getCodeSource().getLocation();
+
+        // Load the test class multiple times with a separate class loader to make sure
+        // that the 'Holder' class is unloaded for each compilation of method 'test'
+        // and that the appendix of the invokedynamic call site is unloaded for each
+        // compilation of method 'testIndy'.
+        for (int i = 0; i < ITERATIONS; ++i) {
+            URLClassLoader myLoader = URLClassLoader.newInstance(new URL[] {classesDir}, defaultLoader.getParent());
+            Class testClass = Class.forName(thisClass.getCanonicalName(), true, myLoader);
+            runWithThreads(testClass.getDeclaredMethod("test"));
+            runWithThreads(testClass.getDeclaredMethod("testIndy"));
+        }
+    }
+}
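
The test's main() depends on a class-loader isolation idiom: because each iteration defines the test class in a brand-new URLClassLoader whose parent skips the application class path, every C1 compilation of 'test' and 'testIndy' starts from unresolved references, which is what forces the patching paths under -Xcomp. The following stand-alone sketch (hypothetical class name IsolatedReloadDemo, not part of the patch) shows that idiom in isolation, assuming the class file is reachable through the code-source location reported by getProtectionDomain():

import java.lang.reflect.Method;
import java.net.URL;
import java.net.URLClassLoader;

// Stand-alone illustration of the reload-in-a-fresh-class-loader idiom used by
// TestConcurrentPatching's main() method. Class and method names are made up.
public class IsolatedReloadDemo {

    public static void work() {
        // Each invocation runs in a distinct copy of this class.
        System.out.println("executed in " + IsolatedReloadDemo.class.getClassLoader());
    }

    public static void main(String[] args) throws Exception {
        // Directory (or jar) that holds this class's .class file.
        URL classesDir = IsolatedReloadDemo.class.getProtectionDomain()
                                                 .getCodeSource().getLocation();
        // Skip the application class loader so delegation cannot find the class;
        // each URLClassLoader below must define its own copy from classesDir.
        ClassLoader parent = IsolatedReloadDemo.class.getClassLoader().getParent();

        for (int i = 0; i < 3; i++) {
            URLClassLoader loader = URLClassLoader.newInstance(new URL[] {classesDir}, parent);
            Class<?> freshCopy = Class.forName("IsolatedReloadDemo", true, loader);
            // The fresh copy's symbolic references are unresolved until this call
            // touches them, mirroring how the jtreg test keeps 'Holder' unloaded.
            Method m = freshCopy.getDeclaredMethod("work");
            m.invoke(null);
        }
    }
}

Printing the class loader makes the isolation visible: each line names a different URLClassLoader instance, confirming that the class graph is resolved from scratch on every iteration.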